Example usage for org.antlr.v4.runtime Token EOF

List of usage examples for org.antlr.v4.runtime Token EOF

Introduction

On this page you can find example usages of org.antlr.v4.runtime Token.EOF, the constant that marks the end of a token stream.

Prototype

int EOF

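Token.EOF is the sentinel token type a lexer emits once the end of the input has been reached, so it is almost always compared against Token.getType(). A minimal sketch of that pattern, assuming a generated ANTLR 4 lexer named ExprLexer (the name is illustrative):

import org.antlr.v4.runtime.ANTLRInputStream;
import org.antlr.v4.runtime.Lexer;
import org.antlr.v4.runtime.Token;

public class EofDemo {
    public static void main(String[] args) {
        // ExprLexer stands in for any lexer generated by ANTLR 4.
        Lexer lexer = new ExprLexer(new ANTLRInputStream("a + b"));
        // Keep pulling tokens until the lexer emits the EOF sentinel.
        for (Token t = lexer.nextToken(); t.getType() != Token.EOF; t = lexer.nextToken()) {
            System.out.println(t.getType() + " '" + t.getText() + "'");
        }
    }
}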

Usage

From source file: com.sample.JavaErrorStrategy.java

License: BSD License

/**
 * Conjure up a missing token during error recovery.
 *
 * The recognizer attempts to recover from single missing symbols. But,
 * actions might refer to that missing symbol. For example, x=ID {f($x);}.
 * The action clearly assumes that there has been an identifier matched
 * previously and that $x points at that token. If that token is missing,
 * but the next token in the stream is what we want, we assume that this
 * token is missing and we keep going. Because we have to return some token
 * to replace the missing token, we have to conjure one up. This method
 * gives the user control over the tokens returned for missing tokens.
 * Mostly, you will want to create something special for identifier tokens.
 * For literals such as '{' and ',', the default action in the parser or
 * tree parser works. It simply creates a CommonToken of the appropriate
 * type. The text will be the token. If you change what tokens must be
 * created by the lexer, override this method to create the appropriate
 * tokens.
 */
@NotNull
protected Token getMissingSymbol(@NotNull Parser recognizer) {
    Token currentSymbol = recognizer.getCurrentToken();
    IntervalSet expecting = getExpectedTokens(recognizer);
    int expectedTokenType = expecting.getMinElement(); // get any element
    String tokenText;
    if (expectedTokenType == Token.EOF)
        tokenText = "<missing EOF>";
    else
        tokenText = "<missing " + recognizer.getTokenNames()[expectedTokenType] + ">";
    Token current = currentSymbol;
    Token lookback = recognizer.getInputStream().LT(-1);
    if (current.getType() == Token.EOF && lookback != null) {
        current = lookback;
    }
    return recognizer.getTokenFactory()
            .create(new Pair<TokenSource, CharStream>(current.getTokenSource(),
                    current.getTokenSource().getInputStream()), expectedTokenType, tokenText,
                    Token.DEFAULT_CHANNEL, -1, -1, current.getLine(), current.getCharPositionInLine());
}
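For context, a custom strategy like the one above only takes effect once it is installed on the parser. A hedged sketch of that wiring (JavaParser and its compilationUnit rule are illustrative names; JavaErrorStrategy is assumed to extend DefaultErrorStrategy, and tokens is a CommonTokenStream):

// Install the custom error strategy so the parser conjures missing tokens
// via getMissingSymbol() instead of using the default recovery behaviour.
JavaParser parser = new JavaParser(tokens);
parser.setErrorHandler(new JavaErrorStrategy());
ParseTree tree = parser.compilationUnit();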

From source file: com.sample.JavaErrorStrategy.java

License: BSD License

/**
 * How should a token be displayed in an error message? The default is to
 * display just the text, but during development you might want to have a
 * lot of information spit out. Override in that case to use t.toString()
 * (which, for CommonToken, dumps everything about the token). This is
 * better than forcing you to override a method in your token objects
 * because you don't have to go modify your lexer so that it creates a new
 * Java type.
 */
protected String getTokenErrorDisplay(Token t) {
    if (t == null)
        return "<no token>";
    String s = getSymbolText(t);
    if (s == null) {
        if (getSymbolType(t) == Token.EOF) {
            s = "<EOF>";
        } else {
            s = "<" + getSymbolType(t) + ">";
        }
    }
    return escapeWSAndQuote(s);
}

From source file: com.sample.JavaErrorStrategy.java

License: BSD License

/** Consume tokens until one matches the given token set. */
protected void consumeUntil(@NotNull Parser recognizer, @NotNull IntervalSet set) {
    // System.err.println("consumeUntil("+set.toString(recognizer.getTokenNames())+")");
    int ttype = recognizer.getInputStream().LA(1);
    while (ttype != Token.EOF && !set.contains(ttype)) {
        // System.out.println("consume during recover LA(1)="+getTokenNames()[input.LA(1)]);
        // recognizer.getInputStream().consume();
        recognizer.consume();
        ttype = recognizer.getInputStream().LA(1);
    }
}

From source file: com.spotify.heroic.grammar.CoreQueryParser.java

License: Apache License

private QueryListener parse(Function<HeroicQueryParser, ParserRuleContext> op, String input) {
    final HeroicQueryLexer lexer = new HeroicQueryLexer(new ANTLRInputStream(input));

    final CommonTokenStream tokens = new CommonTokenStream(lexer);
    final HeroicQueryParser parser = new HeroicQueryParser(tokens);

    parser.removeErrorListeners();
    parser.setErrorHandler(new BailErrorStrategy());

    final ParserRuleContext context;

    try {
        context = op.apply(parser);
    } catch (final ParseCancellationException e) {
        if (!(e.getCause() instanceof RecognitionException)) {
            throw e;
        }

        throw toParseException((RecognitionException) e.getCause());
    }

    final QueryListener listener = new QueryListener();

    ParseTreeWalker.DEFAULT.walk(listener, context);

    final Token last = lexer.getToken();

    if (last.getType() != Token.EOF) {
        throw new ParseException(String.format("garbage at end of string: '%s'", last.getText()), null,
                last.getLine(), last.getCharPositionInLine());
    }

    return listener;
}

From source file: com.sri.ai.praise.sgsolver.demo.editor.HOGMCodeArea.java

License: Open Source License

private static StyleSpans<Collection<String>> computeHighlighting(String text) {
    StyleSpansBuilder<Collection<String>> spansBuilder = new StyleSpansBuilder<>();
    int lastTokenEnd = 0;
    ANTLRInputStream input = new ANTLRInputStream(text);
    HOGMLexer lexer = new HOGMLexer(input);
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    tokens.fill();
    for (int i = 0; i < tokens.size(); i++) {
        Token t = tokens.get(i);
        if (t.getType() == Token.EOF) {
            break;
        }
        String styleClass;
        if (t.getType() == HOGMLexer.COMMENT || t.getType() == HOGMLexer.LINE_COMMENT) {
            styleClass = "hogmCodeComment";
        } else if (HOGMTerminalSymbols.isTerminalSymbol(t.getText())) {
            styleClass = "hogmCodeKeyword";
        } else {
            styleClass = "hogmCodeOther";
        }
        int spacing = t.getStartIndex() - lastTokenEnd;
        if (spacing > 0) {
            spansBuilder.add(Collections.emptyList(), spacing);
        }
        int stylesize = (t.getStopIndex() - t.getStartIndex()) + 1;
        spansBuilder.add(Collections.singleton(styleClass), stylesize);
        lastTokenEnd = t.getStopIndex() + 1;
    }

    return spansBuilder.create();
}

From source file: com.yahoo.yqlplus.language.internal.ast.ErrorNode.java

public ErrorNode(TokenStream input, Token start, Token stop, RecognitionException e) {
    //System.out.println("start: "+start+", stop: "+stop);
    if (stop == null || (stop.getTokenIndex() < start.getTokenIndex() && stop.getType() != Token.EOF)) {
        // Sometimes resync does not consume a token (when LT(1) is
        // in the follow set), so stop ends up one token to the left of start; adjust.
        // Also handle case where start is the first token and no token
        // is consumed during recovery; LT(-1) will return null.
        stop = start;
    }
    this.input = input;
    this.start = start;
    this.stop = stop;
    this.trappedException = e;
}

From source file: com.yahoo.yqlplus.language.internal.ast.ErrorNode.java

public String getText() {
    String badText;
    if (start != null) {
        int i = start.getTokenIndex();
        int j = stop.getTokenIndex();
        if (stop.getType() == Token.EOF) {
            j = input.size();
        }
        badText = ((TokenStream) input).getText(start, stop);
    } else {
        // People should subclass if they alter the tree type, so this
        // fallback is guaranteed to be correct.
        badText = "<unknown>";
    }
    return badText;
}

From source file: io.mindmaps.graql.internal.parser.QueryParser.java

License: Open Source License

/**
 * @param inputStream a stream representing a list of patterns
 * @return a stream of patterns
 */
public Stream<Pattern> parsePatterns(InputStream inputStream) {
    GraqlLexer lexer = new GraqlLexer(new UnbufferedCharStream(inputStream));
    lexer.setTokenFactory(new CommonTokenFactory(true));
    UnbufferedTokenStream tokens = new UnbufferedTokenStream(lexer);

    // Create an iterable that will keep parsing until EOF
    Iterable<Pattern> iterable = () -> new Iterator<Pattern>() {

        private Pattern pattern = null;

        private Optional<Pattern> getNext() {

            if (pattern == null) {
                if (tokens.get(tokens.index()).getType() == Token.EOF) {
                    return Optional.empty();
                }

                pattern = parseQueryFragment(GraqlParser::patternSep, QueryVisitor::visitPatternSep, tokens);
            }
            return Optional.of(pattern);
        }

        @Override
        public boolean hasNext() {
            return getNext().isPresent();
        }

        @Override
        public Pattern next() {
            Optional<Pattern> result = getNext();
            pattern = null;
            return result.orElseThrow(NoSuchElementException::new);
        }
    };

    return StreamSupport.stream(iterable.spliterator(), false);
}

From source file: io.mindmaps.graql.QueryParser.java

License: Open Source License

public Stream<Pattern> parsePatternsStream(InputStream inputStream) {
    GraqlLexer lexer = new GraqlLexer(new UnbufferedCharStream(inputStream));
    lexer.setTokenFactory(new CommonTokenFactory(true));
    UnbufferedTokenStream tokens = new UnbufferedTokenStream(lexer);

    // Create an iterable that will keep parsing until EOF
    Iterable<Pattern> iterable = () -> new Iterator<Pattern>() {

        private Pattern pattern = null;

        private Optional<Pattern> getNext() {

            if (pattern == null) {
                if (tokens.get(tokens.index()).getType() == Token.EOF) {
                    return Optional.empty();
                }

                pattern = parseQueryFragment(GraqlParser::patternSep, QueryVisitor::visitPatternSep, tokens);
            }
            return Optional.of(pattern);
        }

        @Override
        public boolean hasNext() {
            return getNext().isPresent();
        }

        @Override
        public Pattern next() {
            Optional<Pattern> result = getNext();
            pattern = null;
            return result.orElseThrow(NoSuchElementException::new);
        }
    };

    return StreamSupport.stream(iterable.spliterator(), false);
}

From source file: io.prestosql.sql.parser.StatementSplitter.java

License: Apache License

public StatementSplitter(String sql, Set<String> delimiters) {
    TokenSource tokens = getLexer(sql, delimiters);
    ImmutableList.Builder<Statement> list = ImmutableList.builder();
    StringBuilder sb = new StringBuilder();
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            break;
        }
        if (token.getType() == SqlBaseParser.DELIMITER) {
            String statement = sb.toString().trim();
            if (!statement.isEmpty()) {
                list.add(new Statement(statement, token.getText()));
            }
            sb = new StringBuilder();
        } else {
            sb.append(token.getText());
        }
    }
    this.completeStatements = list.build();
    this.partialStatement = sb.toString().trim();
}
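A short usage sketch for the splitter above (hedged: getCompleteStatements() and getPartialStatement() are assumed accessors for the completeStatements and partialStatement fields built by the constructor, and ImmutableSet comes from Guava, which the class already uses):

// Split a small script on ";" and inspect the pieces.
StatementSplitter splitter = new StatementSplitter("SELECT 1; SELECT 2; SELECT", ImmutableSet.of(";"));
for (StatementSplitter.Statement statement : splitter.getCompleteStatements()) {
    System.out.println(statement); // one complete statement per entry
}
System.out.println("partial: " + splitter.getPartialStatement()); // trailing incomplete text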