Example usage for org.antlr.v4.runtime Token getChannel

List of usage examples for org.antlr.v4.runtime Token getChannel

Introduction

On this page you can find example usages of org.antlr.v4.runtime Token getChannel.

Prototype

int getChannel();

Document

Return the channel this token was emitted on. Each token can arrive at the parser on a different channel, but a parser only "tunes" to a single channel and ignores everything not on the default channel.
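As a quick illustration before the examples below, here is a minimal sketch of the typical pattern, assuming an ANTLR-generated lexer (MyLexer is a placeholder name, not part of the runtime) whose grammar routes whitespace and comments to a hidden channel: tokens are pulled one by one and getChannel() is compared against Token.DEFAULT_CHANNEL so hidden tokens are skipped.

import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.Lexer;
import org.antlr.v4.runtime.Token;

public class GetChannelSketch {
    public static void main(String[] args) {
        // MyLexer stands in for any generated lexer that sends
        // whitespace/comments to a hidden channel.
        Lexer lexer = new MyLexer(CharStreams.fromString("a /* hidden */ b"));

        for (Token t = lexer.nextToken(); t.getType() != Token.EOF; t = lexer.nextToken()) {
            // Keep only tokens on the default channel; hidden-channel
            // tokens (e.g. the comment above) are ignored.
            if (t.getChannel() == Token.DEFAULT_CHANNEL) {
                System.out.println(t.getText() + " on channel " + t.getChannel());
            }
        }
    }
}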

Usage

From source file:ai.grakn.graql.internal.parser.ChannelTokenSource.java

License:Open Source License

@Override
public Token nextToken() {
    Token token;

    do {
        token = source().nextToken();
    } while (token.getChannel() != channel());

    return token;
}

From source file:com.espertech.esper.epl.parse.ParseHelper.java

License:Open Source License

private static Token getTokenBefore(int i, CommonTokenStream tokens) {
    int position = i - 1;
    while (position >= 0) {
        Token t = tokens.get(position);
        if (t.getChannel() != 99 && t.getType() != EsperEPL2GrammarLexer.WS) {
            return t;
        }
        position--;
    }
    return null;
}

From source file:com.espertech.esper.epl.parse.ParseHelper.java

License:Open Source License

private static int findEndTokenScript(int startIndex, CommonTokenStream tokens, int tokenTypeSearch,
        Set<Integer> afterScriptTokens, boolean requireAfterScriptToken) {
    int found = -1;
    for (int i = startIndex; i < tokens.size(); i++) {
        if (tokens.get(i).getType() == tokenTypeSearch) {
            if (!requireAfterScriptToken) {
                return i;
            }
            // The next non-comment token must be among the afterScriptTokens, i.e. SELECT/INSERT/ON/DELETE/UPDATE
            // Find next non-comment token.
            for (int j = i + 1; j < tokens.size(); j++) {
                Token next = tokens.get(j);
                if (next.getChannel() == 0) {
                    if (afterScriptTokens.contains(next.getType())) {
                        found = i;
                    }
                    break;
                }
            }
        }
        if (found != -1) {
            break;
        }
    }
    return found;
}

From source file:com.github.jknack.handlebars.internal.TemplateBuilder.java

License:Apache License

@Override
public Template visitSpaces(final SpacesContext ctx) {
    Token space = ctx.SPACE().getSymbol();
    String text = space.getText();
    line.append(text);
    if (space.getChannel() == Token.HIDDEN_CHANNEL) {
        return null;
    }
    return new Text(text).filename(source.filename()).position(ctx.start.getLine(),
            ctx.start.getCharPositionInLine());
}

From source file:com.github.jknack.handlebars.internal.TemplateBuilder.java

License:Apache License

@Override
public BaseTemplate visitNewline(final NewlineContext ctx) {
    Token newline = ctx.NL().getSymbol();
    if (newline.getChannel() == Token.HIDDEN_CHANNEL) {
        return null;
    }
    line.setLength(0);
    return new Text(newline.getText()).filename(source.filename()).position(newline.getLine(),
            newline.getCharPositionInLine());
}

From source file:de.bioviz.parser.BioParser.java

License:Open Source License

/**
 * Parses the annotations in a file.
 * @param input an ANTLRInputStream
 * @param channel the channel to parse
 * @return A List of Strings containing the annotations.
 */
private static List<String> parseChannel(final ANTLRInputStream input, final int channel) {
    BioLexerGrammar lexer = new BioLexerGrammar(input);

    lexer.reset();
    CommonTokenStream cts = new CommonTokenStream(lexer);
    List<String> channelTokens = new ArrayList<>();

    // this one gets everything that is in the stream.
    cts.getText();
    // now we can use size() to run over the tokens
    for (int i = 0; i < cts.size(); i++) {
        Token token = cts.get(i);
        // and check here if the token is on the right channel
        if (token.getChannel() == channel) {
            logger.trace("Parsing Comment: " + token.getText());
            channelTokens.add(token.getText());
        }
    }

    return channelTokens;
}

From source file:io.prestosql.sql.parser.StatementSplitter.java

License:Apache License

public static boolean isEmptyStatement(String sql) {
    TokenSource tokens = getLexer(sql, ImmutableSet.of());
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            return true;
        }
        if (token.getChannel() != Token.HIDDEN_CHANNEL) {
            return false;
        }
    }
}

From source file:net.certiv.json.parser.JsonErrorListener.java

License:Open Source License

@Override
public void syntaxError(Recognizer<?, ?> recognizer, Object offendingSymbol, int line, int charPositionInLine,
        String msg, RecognitionException e) {

    Parser parser = (Parser) recognizer;
    String name = parser.getSourceName();
    TokenStream tokens = parser.getInputStream();

    Token offSymbol = (Token) offendingSymbol;
    int thisError = offSymbol.getTokenIndex();
    if (offSymbol.getType() == -1 && thisError == tokens.size() - 1) {
        Log.debug(this, name + ": Incorrect error: " + msg);
        return;
    }
    String offSymName = JsonLexer.VOCABULARY.getSymbolicName(offSymbol.getType());
    if (thisError > lastError + 10) {
        lastError = thisError - 10;
    }
    for (int idx = lastError + 1; idx <= thisError; idx++) {
        Token token = tokens.get(idx);
        if (token.getChannel() != Token.HIDDEN_CHANNEL)
            Log.error(this, name + ":" + token.toString());
    }
    lastError = thisError;

    List<String> stack = parser.getRuleInvocationStack();
    Collections.reverse(stack);

    Log.error(this, name + " rule stack: " + stack);
    Log.error(this, name + " line " + line + ":" + charPositionInLine + " at " + offSymName + ": " + msg);
}

From source file:net.sourceforge.pmd.cpd.SwiftTokenizer.java

License:BSD License

@Override
public void tokenize(SourceCode sourceCode, Tokens tokenEntries) {
    StringBuilder buffer = sourceCode.getCodeBuffer();

    try {
        ANTLRInputStream ais = new ANTLRInputStream(buffer.toString());
        SwiftLexer lexer = new SwiftLexer(ais);

        lexer.removeErrorListeners();
        lexer.addErrorListener(new ErrorHandler());
        Token token = lexer.nextToken();

        while (token.getType() != Token.EOF) {
            if (token.getChannel() != Lexer.HIDDEN) {
                TokenEntry tokenEntry = new TokenEntry(token.getText(), sourceCode.getFileName(),
                        token.getLine());

                tokenEntries.add(tokenEntry);
            }
            token = lexer.nextToken();
        }
    } catch (ANTLRSyntaxError err) {
        // Wrap exceptions of the Swift tokenizer in a TokenMgrError, so
        // they are correctly handled
        // when CPD is executed with the '--skipLexicalErrors' command line
        // option
        throw new TokenMgrError("Lexical error in file " + sourceCode.getFileName() + " at line "
                + err.getLine() + ", column " + err.getColumn() + ".  Encountered: " + err.getMessage(),
                TokenMgrError.LEXICAL_ERROR);
    } finally {
        tokenEntries.add(TokenEntry.getEOF());
    }
}

From source file:org.apache.sysml.parser.pydml.PydmlLexer.java

License:Apache License

@Override
public Token nextToken() {
    if (_input.LA(1) == EOF && !this.indents.isEmpty()) {
        if (debugIndentRules)
            System.out.println("EOF reached and expecting some DEDENTS, so emitting them");

        tokens.poll();
        this.emit(commonToken(PydmlParser.NEWLINE, "\n"));

        // Now emit as much DEDENT tokens as needed.
        while (!indents.isEmpty()) {
            if (debugIndentRules)
                System.out.println("Emitting (inserted) DEDENTS");

            this.emit(createDedent());
            indents.pop();
        }
        // Put the EOF back on the token stream.
        this.emit(commonToken(PydmlParser.EOF, "<EOF>"));
    }
    Token next = super.nextToken();
    if (next.getChannel() == Token.DEFAULT_CHANNEL) {
        // Keep track of the last token on the default channel.
        this.lastToken = next;
    }
    Token retVal = tokens.isEmpty() ? next : tokens.poll();

    if (debugIndentRules)
        System.out.println("Returning nextToken: [" + retVal + "]<<" + tokens.isEmpty());

    return retVal;
}