Example usage for org.antlr.v4.runtime UnbufferedTokenStream UnbufferedTokenStream

List of usage examples for org.antlr.v4.runtime UnbufferedTokenStream UnbufferedTokenStream

Introduction

On this page you can find example usages of org.antlr.v4.runtime UnbufferedTokenStream UnbufferedTokenStream.

Prototype

public UnbufferedTokenStream(TokenSource tokenSource) 

Source Link

Usage

From source file:ai.grakn.graql.internal.parser.QueryParser.java

License:Open Source License

/**
 * Parses a string containing several queries into a lazy stream of query objects,
 * merging each match query that is immediately followed by an insert query into a
 * single match-insert query.
 *
 * @param queryString a string representing several queries
 * @return a list of queries
 */
public <T extends Query<?>> Stream<T> parseList(String queryString) {
    GraqlLexer lexer = getLexer(queryString);

    GraqlErrorListener errorListener = new GraqlErrorListener(queryString);
    lexer.removeErrorListeners();
    lexer.addErrorListener(errorListener);

    // Unbuffered stream: tokens are consumed on demand rather than read up-front.
    UnbufferedTokenStream tokenStream = new UnbufferedTokenStream(lexer);

    // Merge any match...insert queries together
    // TODO: Find a way to NOT do this horrid thing
    AbstractIterator<T> iterator = new AbstractIterator<T>() {
        // One-query buffer: holds the previously parsed query so a following
        // insert query can be merged into it before it is emitted.
        @Nullable
        T previous = null;

        @Override
        protected T computeNext() {
            // End of input: flush the buffered query if any, otherwise signal end of data.
            if (tokenStream.LA(1) == GraqlLexer.EOF) {
                if (previous != null) {
                    return swapPrevious(null);
                } else {
                    endOfData();
                    return null;
                }
            }

            // Consume exactly one query's worth of tokens and parse it.
            TokenSource oneQuery = consumeOneQuery(tokenStream);
            T current = parseQueryFragment(GraqlParser::query, (q, t) -> (T) q.visitQuery(t), oneQuery,
                    errorListener);

            if (previous == null) {
                // First query seen: buffer it and look at the next one. The recursive
                // call sees previous != null (or EOF), so recursion is one level deep.
                previous = current;
                return computeNext();
            } else if (previous instanceof MatchQuery && current instanceof InsertQuery) {
                // match followed by insert: emit the two merged into one query;
                // the buffer is cleared so the next call starts fresh.
                return (T) joinMatchInsert((MatchQuery) swapPrevious(null), (InsertQuery) current);
            } else {
                // Emit the buffered query and buffer the current one in its place.
                return swapPrevious(current);
            }
        }

        // Replaces the buffered query with newPrevious and returns the old value.
        private T swapPrevious(T newPrevious) {
            T oldPrevious = previous;
            previous = newPrevious;
            return oldPrevious;
        }

        // Joins a match query and a following insert query into one match-insert.
        private InsertQuery joinMatchInsert(MatchQuery match, InsertQuery insert) {
            return match.insert(insert.admin().getVars());
        }
    };

    // NOTE(review): this Iterable always returns the same iterator, so it can only be
    // iterated once -- fine here, since it backs the single stream below.
    Iterable<T> iterable = () -> iterator;
    return StreamSupport.stream(iterable.spliterator(), false);
}

From source file:ai.grakn.graql.internal.parser.QueryParserImpl.java

License:Open Source License

/**
 * @param reader a reader representing several queries
 * @return a list of queries/*from w  w w  .j  a  v a2s  . c om*/
 */
@Override
public <T extends Query<?>> Stream<T> parseList(Reader reader) {
    UnbufferedCharStream charStream = new UnbufferedCharStream(reader);
    GraqlErrorListener errorListener = GraqlErrorListener.withoutQueryString();
    GraqlLexer lexer = createLexer(charStream, errorListener);

    /*
    We tell the lexer to copy the text into each generated token.
    Normally when calling `Token#getText`, it will look into the underlying `TokenStream` and call
    `TokenStream#size` to check it is in-bounds. However, `UnbufferedTokenStream#size` is not supported
    (because then it would have to read the entire input). To avoid this issue, we set this flag which will
    copy over the text into each `Token`, s.t. that `Token#getText` will just look up the copied text field.
    */
    lexer.setTokenFactory(new CommonTokenFactory(true));

    // Use an unbuffered token stream so we can handle extremely large input strings
    UnbufferedTokenStream tokenStream = new UnbufferedTokenStream(ChannelTokenSource.of(lexer));

    GraqlParser parser = createParser(tokenStream, errorListener);

    /*
    The "bail" error strategy prevents us reading all the way to the end of the input, e.g.
            
    ```
    match $x isa person; insert $x has name "Bob"; match $x isa movie; get;
                                                   ^
    ```
            
    In this example, when ANTLR reaches the indicated `match`, it considers two possibilities:
            
    1. this is the end of the query
    2. the user has made a mistake. Maybe they accidentally pasted the `match` here.
            
    Because of case 2, ANTLR will parse beyond the `match` in order to produce a more helpful error message.
    This causes memory issues for very large queries, so we use the simpler "bail" strategy that will
    immediately stop when it hits `match`.
    */
    parser.setErrorHandler(new BailErrorStrategy());

    // This is a lazy iterator that will only consume a single query at a time, without parsing any further.
    // This means it can pass arbitrarily long streams of queries in constant memory!
    Iterable<T> queryIterator = () -> new AbstractIterator<T>() {
        @Nullable
        @Override
        protected T computeNext() {
            int latestToken = tokenStream.LA(1);
            if (latestToken == Token.EOF) {
                endOfData();
                return null;
            } else {
                // This will parse and consume a single query, even if it doesn't reach an EOF
                // When we next run it, it will start where it left off in the stream
                return (T) QUERY.parse(parser, errorListener);
            }
        }
    };

    return StreamSupport.stream(queryIterator.spliterator(), false);
}

From source file:com.linkedin.pinot.pql.parsers.Pql2Compiler.java

License:Apache License

/**
 * Compiles a PQL2 expression into a {@code BrokerRequest}.
 *
 * @param expression the PQL2 query text
 * @return the populated broker request
 * @throws Pql2CompilationException if lexing, parsing or AST conversion fails
 */
@Override
public BrokerRequest compileToBrokerRequest(String expression) throws Pql2CompilationException {
    try {
        CharStream charStream = new ANTLRInputStream(expression);
        PQL2Lexer lexer = new PQL2Lexer(charStream);
        // Copy text into each token: an unbuffered stream cannot serve it afterwards.
        lexer.setTokenFactory(new CommonTokenFactory(true));
        TokenStream tokenStream = new UnbufferedTokenStream<CommonToken>(lexer);
        PQL2Parser parser = new PQL2Parser(tokenStream);
        // Fail fast on the first syntax error instead of attempting recovery.
        parser.setErrorHandler(new BailErrorStrategy());

        // Parse
        ParseTree parseTree = parser.root();

        // Walk the parse tree to build the AST.
        ParseTreeWalker walker = new ParseTreeWalker();
        Pql2AstListener listener = new Pql2AstListener();
        walker.walk(listener, parseTree);

        AstNode rootNode = listener.getRootNode();

        // Translate the AST into the broker request in place.
        BrokerRequest brokerRequest = new BrokerRequest();
        rootNode.updateBrokerRequest(brokerRequest);
        return brokerRequest;
    } catch (Pql2CompilationException e) {
        throw e;
    } catch (Exception e) {
        // NOTE(review): this drops the original exception as the cause, losing its
        // stack trace -- if Pql2CompilationException has (or can gain) a
        // (String, Throwable) constructor, pass `e` along as well.
        throw new Pql2CompilationException(e.getMessage());
    }
}

From source file:illarion.easynpc.gui.syntax.AbstractAntlrTokenMaker.java

License:Open Source License

/**
 * Lexes the given text segment with ANTLR and appends each resulting token,
 * converted to an RSyntaxTextArea token, to this maker's token list.
 */
@Override
public Token getTokenList(@Nonnull Segment text, int initialTokenType, int startOffset) {
    try (Reader segmentReader = new CharArrayReader(text.array, text.offset, text.count)) {
        lexer.setInputStream(new ANTLRInputStream(segmentReader));
        TokenStream tokenStream = new UnbufferedTokenStream(lexer);

        resetTokenList();

        // Convert ANTLR tokens into RSyntaxTextArea tokens until the lexer reports EOF.
        for (org.antlr.v4.runtime.Token antlrToken = tokenStream.LT(1);
                antlrToken.getType() != org.antlr.v4.runtime.Token.EOF;
                antlrToken = tokenStream.LT(1)) {
            tokenStream.consume();

            int tokenStart = antlrToken.getCharPositionInLine() + text.offset;
            int tokenEnd = tokenStart + antlrToken.getText().length() - 1;
            int tokenOffset = startOffset + antlrToken.getCharPositionInLine();

            addToken(text.array, tokenStart, tokenEnd, convertTokenType(antlrToken.getType()), tokenOffset);
        }

        // Terminate the line with a null token; the grammar has no multi-line token types.
        addNullToken();
    } catch (IOException e) {
        e.printStackTrace();
    }
    return firstToken;
}

From source file:io.mindmaps.graql.internal.parser.QueryParser.java

License:Open Source License

/**
 * @param inputStream a stream representing a list of patterns
 * @return a stream of patterns/*from   ww  w. j  av a2 s .c  om*/
 */
public Stream<Pattern> parsePatterns(InputStream inputStream) {
    GraqlLexer lexer = new GraqlLexer(new UnbufferedCharStream(inputStream));
    lexer.setTokenFactory(new CommonTokenFactory(true));
    UnbufferedTokenStream tokens = new UnbufferedTokenStream(lexer);

    // Create an iterable that will keep parsing until EOF
    Iterable<Pattern> iterable = () -> new Iterator<Pattern>() {

        private Pattern pattern = null;

        private Optional<Pattern> getNext() {

            if (pattern == null) {
                if (tokens.get(tokens.index()).getType() == Token.EOF) {
                    return Optional.empty();
                }

                pattern = parseQueryFragment(GraqlParser::patternSep, QueryVisitor::visitPatternSep, tokens);
            }
            return Optional.of(pattern);
        }

        @Override
        public boolean hasNext() {
            return getNext().isPresent();
        }

        @Override
        public Pattern next() {
            Optional<Pattern> result = getNext();
            pattern = null;
            return result.orElseThrow(NoSuchElementException::new);
        }
    };

    return StreamSupport.stream(iterable.spliterator(), false);
}

From source file:io.mindmaps.graql.QueryParser.java

License:Open Source License

/**
 * Lazily parses a stream of patterns from an input stream, one pattern at a time,
 * so large inputs are handled without reading everything up-front.
 *
 * @param inputStream a stream representing a list of patterns
 * @return a stream of parsed patterns
 */
public Stream<Pattern> parsePatternsStream(InputStream inputStream) {
    GraqlLexer lexer = new GraqlLexer(new UnbufferedCharStream(inputStream));
    // Copy text into each token: an unbuffered token stream cannot serve it later.
    lexer.setTokenFactory(new CommonTokenFactory(true));
    UnbufferedTokenStream tokens = new UnbufferedTokenStream(lexer);

    // Create an iterable that will keep parsing until EOF
    Iterable<Pattern> iterable = () -> new Iterator<Pattern>() {

        // One-element lookahead buffer so hasNext() can parse without losing the result.
        private Pattern pattern = null;

        // Fills the buffer with the next pattern if it is empty;
        // returns an empty Optional at end of input.
        private Optional<Pattern> getNext() {

            if (pattern == null) {
                if (tokens.get(tokens.index()).getType() == Token.EOF) {
                    return Optional.empty();
                }

                pattern = parseQueryFragment(GraqlParser::patternSep, QueryVisitor::visitPatternSep, tokens);
            }
            return Optional.of(pattern);
        }

        @Override
        public boolean hasNext() {
            return getNext().isPresent();
        }

        @Override
        public Pattern next() {
            Optional<Pattern> result = getNext();
            // Clear the buffer so the following call parses a fresh pattern.
            pattern = null;
            return result.orElseThrow(NoSuchElementException::new);
        }
    };

    return StreamSupport.stream(iterable.spliterator(), false);
}

From source file:org.chocosolver.parser.flatzinc.Flatzinc.java

License:Open Source License

/**
 * Parses a FlatZinc model from the given input stream directly into {@code target}.
 */
public void parse(Model target, Datas data, InputStream is) {
    CharStream source = new UnbufferedCharStream(is);
    Flatzinc4Lexer lexer = new Flatzinc4Lexer(source);
    // Copy text into each token, since an unbuffered stream cannot serve it later.
    lexer.setTokenFactory(new CommonTokenFactory(true));
    TokenStream tokenStream = new UnbufferedTokenStream<CommonToken>(lexer);
    Flatzinc4Parser parser = new Flatzinc4Parser(tokenStream);
    // SLL prediction mode trades full-context analysis for speed.
    parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
    // No parse tree is retained; parsing populates the model via the rule arguments.
    parser.setBuildParseTree(false);
    parser.setTrimParseTree(false);
    parser.flatzinc_model(target, data, all, free);
}

From source file:org.jgrapht.io.DOTImporter.java

License:LGPL

/**
 * {@inheritDoc}/*from  w w w .  java2  s  .c o  m*/
 */
@Override
public void importGraph(Graph<V, E> g, Reader in) throws ImportException {
    try {
        /**
         * Create lexer with unbuffered input stream and use a token factory which copies
         * characters from the input stream into the text of the tokens.
         */
        DOTLexer lexer = new DOTLexer(new UnbufferedCharStream(in));
        lexer.setTokenFactory(new CommonTokenFactory(true));
        lexer.removeErrorListeners();
        ThrowingErrorListener errorListener = new ThrowingErrorListener();
        lexer.addErrorListener(errorListener);

        /**
         * Create parser with unbuffered token stream.
         */
        DOTParser parser = new DOTParser(new UnbufferedTokenStream<>(lexer));
        parser.removeErrorListeners();
        parser.addErrorListener(errorListener);

        /**
         * Disable parse tree building and attach listener.
         */
        parser.setBuildParseTree(false);
        parser.addParseListener(new CreateGraphDOTListener(g));

        /**
         * Parse
         */
        parser.graph();
    } catch (ParseCancellationException | IllegalArgumentException e) {
        throw new ImportException("Failed to import DOT graph: " + e.getMessage(), e);
    }
}

From source file:org.obeonetwork.m2doc.generator.M2DocValidator.java

License:Open Source License

/**
 * Parses while matching an AQL type literal.
 *
 * @param queryEnvironment
 *            the {@link IReadOnlyQueryEnvironment}
 * @param type
 *            the type to parse
 * @return the corresponding {@link AstResult}
 */
private static AstResult parseWhileAqlTypeLiteral(IReadOnlyQueryEnvironment queryEnvironment, String type) {
    final IQueryBuilderEngine.AstResult result;

    if (type == null || type.isEmpty()) {
        // No usable input: build an AstResult holding a single error type literal.
        ErrorTypeLiteral errorTypeLiteral = (ErrorTypeLiteral) EcoreUtil
                .create(AstPackage.eINSTANCE.getErrorTypeLiteral());
        List<org.eclipse.acceleo.query.ast.Error> errors = new ArrayList<org.eclipse.acceleo.query.ast.Error>(
                1);
        errors.add(errorTypeLiteral);
        final Map<Object, Integer> positions = new HashMap<Object, Integer>();
        if (type != null) {
            // Empty (but non-null) input still gets a position for the error node.
            positions.put(errorTypeLiteral, Integer.valueOf(0));
        }
        final BasicDiagnostic diagnostic = new BasicDiagnostic();
        diagnostic.add(new BasicDiagnostic(Diagnostic.ERROR, AstBuilderListener.PLUGIN_ID, 0,
                "null or empty type.", new Object[] { errorTypeLiteral }));
        result = new AstResult(errorTypeLiteral, positions, positions, errors, diagnostic);
    } else {
        // Wire an AST-building listener into the lexer/parser, then match a type literal.
        AstBuilderListener builder = new AstBuilderListener((IQueryEnvironment) queryEnvironment);
        CharStream input = new UnbufferedCharStream(new StringReader(type), type.length());
        QueryLexer lexer = new QueryLexer(input);
        // Copy text into each token: an unbuffered stream cannot serve it later.
        lexer.setTokenFactory(new CommonTokenFactory(true));
        lexer.removeErrorListeners();
        lexer.addErrorListener(builder.getErrorListener());
        TokenStream tokens = new UnbufferedTokenStream<CommonToken>(lexer);
        QueryParser parser = new QueryParser(tokens);
        parser.addParseListener(builder);
        parser.removeErrorListeners();
        parser.addErrorListener(builder.getErrorListener());
        parser.typeLiteral();
        result = builder.getAstResult();
    }

    return result;
}

From source file:org.obeonetwork.m2doc.parser.BodyParser.java

License:Open Source License

/**
 * Parses while matching an AQL expression.
 *
 * @param expression
 *            the expression to parse
 * @return the corresponding {@link AstResult}
 */
private AstResult parseWhileAqlExpression(String expression) {
    final IQueryBuilderEngine.AstResult result;

    if (expression != null && expression.length() > 0) {
        // Wire an AST-building listener into the lexer/parser, then match an expression.
        AstBuilderListener astBuilder = new AstBuilderListener(queryEnvironment);
        CharStream input = new UnbufferedCharStream(new StringReader(expression), expression.length());
        QueryLexer lexer = new QueryLexer(input);
        // Copy text into each token: an unbuffered stream cannot serve it later.
        lexer.setTokenFactory(new CommonTokenFactory(true));
        lexer.removeErrorListeners();
        lexer.addErrorListener(astBuilder.getErrorListener());
        TokenStream tokens = new UnbufferedTokenStream<CommonToken>(lexer);
        QueryParser parser = new QueryParser(tokens);
        parser.addParseListener(astBuilder);
        parser.removeErrorListeners();
        parser.addErrorListener(astBuilder.getErrorListener());
        parser.expression();
        result = astBuilder.getAstResult();
    } else {
        // Null or empty input: build an AstResult containing a single error expression.
        ErrorExpression errorExpression = (ErrorExpression) EcoreUtil
                .create(AstPackage.eINSTANCE.getErrorExpression());
        List<org.eclipse.acceleo.query.ast.Error> errors = new ArrayList<>(1);
        errors.add(errorExpression);
        final Map<Object, Integer> positions = new HashMap<>();
        if (expression != null) {
            // Empty (but non-null) input still gets a position for the error node.
            positions.put(errorExpression, Integer.valueOf(0));
        }
        final BasicDiagnostic diagnostic = new BasicDiagnostic();
        diagnostic.add(new BasicDiagnostic(Diagnostic.ERROR, AstBuilderListener.PLUGIN_ID, 0,
                "null or empty string.", new Object[] { errorExpression }));
        result = new AstResult(errorExpression, positions, positions, errors, diagnostic);
    }

    return result;
}