Example usage for org.antlr.v4.runtime Lexer DEFAULT_TOKEN_CHANNEL

List of usage examples for org.antlr.v4.runtime Lexer DEFAULT_TOKEN_CHANNEL

Introduction

In this page you can find the example usage for org.antlr.v4.runtime Lexer DEFAULT_TOKEN_CHANNEL.

Prototype

int DEFAULT_TOKEN_CHANNEL

To view the source code for org.antlr.v4.runtime Lexer DEFAULT_TOKEN_CHANNEL, click the Source link.

Click Source Link

Usage

From source file:net.sourceforge.pmd.cpd.token.AntlrToken.java

License: BSD License

/**
 * Reports whether the wrapped ANTLR token is on the lexer's default channel
 * (i.e. not a hidden/off-channel token such as whitespace or comments,
 * depending on the grammar's channel assignments).
 *
 * @return {@code true} if the token's channel is {@link Lexer#DEFAULT_TOKEN_CHANNEL}
 */
public boolean isDefault() {
    final int channel = token.getChannel();
    return channel == Lexer.DEFAULT_TOKEN_CHANNEL;
}

From source file:org.tvl.goworks.editor.go.completion.GoCompletionProvider.java

License: Open Source License

/**
 * Finds the token at the caret position to use as the completion context.
 *
 * <p>Takes the document read lock, obtains the current document snapshot,
 * synchronously requests the lexer token data for that snapshot, then scans
 * the tokens overlapping the one-character window around {@code offset}.
 * Default-channel tokens are preferred; off-channel tokens are considered
 * only as a fallback.
 *
 * @param document the document being completed in; must not be null
 * @param offset   the caret offset within the document
 * @return the context token, or {@code null} if token data is unavailable
 */
@Override
public Token getContext(Document document, int offset) {
    Parameters.notNull("document", document);

    // Hold the document read lock for the whole lookup so the snapshot and
    // token data stay consistent with the document contents.
    if (document instanceof AbstractDocument) {
        ((AbstractDocument) document).readLock();
    }

    try {
        ParserTaskManager taskManager = Lookup.getDefault().lookup(ParserTaskManager.class);
        DocumentSnapshot snapshot = VersionedDocumentUtilities.getVersionedDocument(document)
                .getCurrentSnapshot();
        // SYNCHRONOUS: block until lexer token data for this snapshot is computed.
        Future<ParserData<Tagger<TokenTag<Token>>>> futureTokensData = taskManager.getData(snapshot,
                GoParserDataDefinitions.LEXER_TOKENS, EnumSet.of(ParserDataOptions.SYNCHRONOUS));
        if (futureTokensData == null) {
            return null;
        }

        Tagger<TokenTag<Token>> tagger;
        try {
            tagger = futureTokensData.get().getData();
            if (tagger == null) {
                return null;
            }
        } catch (InterruptedException | ExecutionException ex) {
            // NOTE(review): InterruptedException is logged and swallowed without
            // re-interrupting the thread (Thread.currentThread().interrupt()).
            LOGGER.log(Level.WARNING, "An exception occurred while getting tokens.", ex);
            return null;
        }

        // get the token(s) at the cursor position, with affinity both directions
        OffsetRegion region = OffsetRegion.fromBounds(Math.max(0, offset - 1),
                Math.min(snapshot.length(), offset + 1));
        Iterable<TaggedPositionRegion<TokenTag<Token>>> tags = tagger.getTags(
                new NormalizedSnapshotPositionRegionCollection(new SnapshotPositionRegion(snapshot, region)));

        // First pass: prefer a default-channel token whose span covers the offset.
        Token token = null;
        for (TaggedPositionRegion<TokenTag<Token>> taggedRegion : tags) {
            if (taggedRegion.getTag().getToken().getChannel() != Lexer.DEFAULT_TOKEN_CHANNEL) {
                continue;
            }

            token = taggedRegion.getTag().getToken();
            if (token.getStartIndex() <= offset && token.getStopIndex() >= offset) {
                break;
            }
        }

        if (token == null) {
            // try again without skipping off-channel tokens
            // NOTE(review): if no token covers the offset, both loops leave
            // `token` set to the last examined token rather than null —
            // presumably a deliberate best-effort result; confirm with callers.
            for (TaggedPositionRegion<TokenTag<Token>> taggedRegion : tags) {
                token = taggedRegion.getTag().getToken();
                if (token.getStartIndex() <= offset && token.getStopIndex() >= offset) {
                    break;
                }
            }
        }

        return token;
    } finally {
        if (document instanceof AbstractDocument) {
            ((AbstractDocument) document).readUnlock();
        }
    }
}

From source file:org.tvl.goworks.editor.go.completion.GoCompletionProvider.java

License: Open Source License

/**
 * Decides whether code completion applies for the given context token.
 *
 * <p>Comments never allow completion; character and string literals allow it
 * only when {@code allowInStrings} is set; whitespace and newline tokens
 * always allow it; any other token allows completion only when it sits on the
 * lexer's default channel.
 *
 * @param token          the context token, possibly {@code null}
 * @param offset         the caret offset (currently unused by this check)
 * @param allowInStrings whether completion is permitted inside string/char literals
 * @return {@code true} if completion should be offered at this token
 */
static boolean isGoContext(Token token, int offset, boolean allowInStrings) {
    if (token == null) {
        return false;
    }

    final int tokenType = token.getType();
    if (tokenType == GoLexer.COMMENT) {
        return false;
    }

    if (tokenType == GoLexer.CharLiteral || tokenType == GoLexer.StringLiteral) {
        return allowInStrings;
    }

    if (tokenType == GoLexer.WS || tokenType == GoLexer.NEWLINE) {
        return true;
    }

    return token.getChannel() == Lexer.DEFAULT_TOKEN_CHANNEL;
}

From source file:org.tvl.goworks.editor.go.highlighter.MarkOccurrencesHighlighter.java

License: Open Source License

/**
 * Finds the token at the given snapshot position for mark-occurrences
 * highlighting.
 *
 * <p>Requests the lexer token data for the position's snapshot (without
 * triggering an update — NO_UPDATE — but waiting for any in-progress
 * computation — SYNCHRONOUS), then scans the tokens overlapping the
 * one-character window around the offset. Default-channel tokens are
 * preferred, with a tie-break that favors the end of a word-like token over
 * the beginning of a non-word token; off-channel tokens are considered only
 * as a fallback.
 *
 * @param position the snapshot position of interest
 * @return the context token, or {@code null} if token data is unavailable
 */
@CheckForNull
public static Token getContext(SnapshotPosition position) {
    ParserTaskManager taskManager = Lookup.getDefault().lookup(ParserTaskManager.class);
    DocumentSnapshot snapshot = position.getSnapshot();
    int offset = position.getOffset();
    Future<ParserData<Tagger<TokenTag<Token>>>> futureTokensData = taskManager.getData(snapshot,
            GoParserDataDefinitions.LEXER_TOKENS,
            EnumSet.of(ParserDataOptions.NO_UPDATE, ParserDataOptions.SYNCHRONOUS));
    if (futureTokensData == null) {
        return null;
    }

    ParserData<Tagger<TokenTag<Token>>> tokensData;
    try {
        tokensData = futureTokensData.get();
        if (tokensData == null) {
            return null;
        }
    } catch (InterruptedException | ExecutionException ex) {
        // NOTE(review): InterruptedException is logged and swallowed without
        // re-interrupting the thread (Thread.currentThread().interrupt()).
        LOGGER.log(Level.WARNING, "An exception occurred while getting token data.", ex);
        return null;
    }

    Tagger<TokenTag<Token>> tagger = tokensData.getData();
    if (tagger == null) {
        return null;
    }

    // get the token(s) at the cursor position, with affinity both directions
    OffsetRegion region = OffsetRegion.fromBounds(Math.max(0, offset - 1),
            Math.min(snapshot.length(), offset + 1));
    Iterable<TaggedPositionRegion<TokenTag<Token>>> tags = tagger.getTags(
            new NormalizedSnapshotPositionRegionCollection(new SnapshotPositionRegion(snapshot, region)));

    // First pass: consider only default-channel tokens. The `stopIndex + 1`
    // comparison treats a caret immediately after a token as touching it.
    Token token = null;
    for (TaggedPositionRegion<TokenTag<Token>> taggedRegion : tags) {
        if (taggedRegion.getTag().getToken().getChannel() != Lexer.DEFAULT_TOKEN_CHANNEL) {
            continue;
        }

        Token previousToken = token;
        Token nextToken = taggedRegion.getTag().getToken();
        if (nextToken.getStartIndex() <= offset && nextToken.getStopIndex() + 1 >= offset) {
            if (previousToken != null && previousToken.getStopIndex() + 1 == offset) {
                // prefer the end of a word token to the beginning of a non-word token
                if (CompletionParserATNSimulator.WORDLIKE_TOKEN_TYPES.contains(previousToken.getType())) {
                    break;
                }
            }

            token = nextToken;
        }
    }

    if (token == null) {
        // try again without skipping off-channel tokens
        // NOTE(review): this fallback uses `stopIndex >= offset` (no +1),
        // unlike the first pass, and leaves `token` set to the last examined
        // token when nothing matches — confirm both asymmetries are intended.
        for (TaggedPositionRegion<TokenTag<Token>> taggedRegion : tags) {
            token = taggedRegion.getTag().getToken();
            if (token.getStartIndex() <= offset && token.getStopIndex() >= offset) {
                break;
            }
        }
    }

    return token;
}