Example usage for the org.antlr.v4.runtime CommonTokenStream constructor CommonTokenStream(TokenSource)

Introduction

On this page you can find example usage of the org.antlr.v4.runtime CommonTokenStream constructor CommonTokenStream(TokenSource).

Prototype

public CommonTokenStream(TokenSource tokenSource) 

Source Link

Document

Constructs a new CommonTokenStream using the specified token source and the default token channel (Token#DEFAULT_CHANNEL).
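
A minimal sketch of the typical construction pattern follows. MyGrammarLexer, MyGrammarParser, and the startRule entry rule are placeholders for classes generated from your own grammar; CharStreams, CommonTokenStream, and ParserRuleContext come from the ANTLR 4 runtime.

// Lex the input, buffer the tokens, and hand them to a parser.
CharStream input = CharStreams.fromString("some input text");
MyGrammarLexer lexer = new MyGrammarLexer(input);         // placeholder generated lexer
CommonTokenStream tokens = new CommonTokenStream(lexer);  // yields tokens on Token.DEFAULT_CHANNEL to the parser
MyGrammarParser parser = new MyGrammarParser(tokens);     // placeholder generated parser
ParserRuleContext tree = parser.startRule();              // placeholder entry rule

Tokens routed to other channels (for example hidden whitespace or comments) are skipped by the parser but remain in the buffer and can still be inspected via tokens.getTokens() after parsing.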

Usage

From source file: io.github.hjuergens.time.GenerateParseTreeAST.java

License: Apache License

@Test(dataProvider = "streams")
public void doit(CharStream input) {
    TokenSource lexer = new DatesLexer(input);
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    DatesParser parser = new DatesParser(tokens);
    ParserRuleContext tree = parser.date(); // parse

    ParseTreeWalker walker = new ParseTreeWalker(); // create standard walker
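    // 'extractor' is assumed to be a ParseTreeListener field declared elsewhere in this test class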
    walker.walk(extractor, tree); // initiate walk of tree with listener
}

From source file: io.github.hjuergens.time.GenerateParseTreeAST.java

License: Apache License

/**
 * <a href="https://stackoverflow.com/questions/10614659/no-viable-alternative-at-input"></a>
 */
@Test(expectedExceptions = java.lang.NoSuchFieldError.class)
public void dotTree() {

    // the expression
    String src = "(ICOM LIKE '%bridge%' or ICOM LIKE '%Munich%')";

    // create a lexer & parser
    //DatesLexer lexer = new DatesLexer(new ANTLRStringStream(src));
    //DatesParser parser = new DatesParser(new CommonTokenStream(lexer));

    DatesLexer lexer = new DatesLexer(new ANTLRInputStream(src));
    DatesParser parser = new DatesParser(new CommonTokenStream(lexer));

    // invoke the entry point of the parser (the parse() method) and get the AST
    Tree tree = parser.date();
    String grammarFileName = lexer.getGrammarFileName();
    log.info("grammar file name=" + grammarFileName);

    // generate a DOT representation using the ANTLR tool's DOTGenerator (org.antlr.v4.tool)
    Grammar grammar = Grammar.load(grammarFileName);
    DOTGenerator gen = new DOTGenerator(grammar);

    ATNState startState = new ATNState() {
        @Override
        public int getStateType() {
            return ATNState.BASIC;
        }
    };
    String st = gen.getDOT(startState);
    log.info(st);
}

From source file: io.kodokojo.commons.docker.model.StringToImageNameConverter.java

License: Open Source License

@Override
public ImageName apply(String input) {
    if (StringUtils.isBlank(input)) {
        throw new IllegalArgumentException("input must be defined.");
    }
    input = input.trim();
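    // ensure the input ends with a newline (the imageName rule presumably expects it)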
    if (!input.endsWith("\n")) {
        input += "\n";
    }
    try {
        ANTLRInputStream antlrInputStream = new ANTLRInputStream(input);
        DockerImageNameLexer lexer = new DockerImageNameLexer(antlrInputStream);
        lexer.removeErrorListeners();
        CommonTokenStream tokens = new CommonTokenStream(lexer);
        DockerImageNameParser parser = new DockerImageNameParser(tokens);
        DockerImageAntlrListener listener = new DockerImageAntlrListener();
        parser.addParseListener(listener);
        parser.removeErrorListeners();
        parser.imageName();

        return listener.getImageName();
    } catch (RecognitionException e) {
        LOGGER.debug("Unable to parse following image name '{}' : {}", input, e);
        return null;
    } catch (RuntimeException e) {

        LOGGER.debug("Unable to parse following image name '{}' : {}", input, e);
        return null;
    }
}

From source file: io.mindmaps.graql.internal.parser.QueryParser.java

License: Open Source License

/**
 * Parse any part of a Graql query
 * @param parseRule a method on GraqlParser that yields the parse rule you want to use (e.g. GraqlParser::variable)
 * @param visit a method on QueryVisitor that visits the parse rule you specified (e.g. QueryVisitor::visitVariable)
 * @param queryString the string to parse
 * @param <T> The type the query is expected to parse to
 * @param <S> The type of the parse rule being used
 * @return the parsed result
 */
private <T, S extends ParseTree> T parseQueryFragment(Function<GraqlParser, S> parseRule,
        BiFunction<QueryVisitor, S, T> visit, String queryString) {
    GraqlLexer lexer = getLexer(queryString);

    GraqlErrorListener errorListener = new GraqlErrorListener(queryString);
    lexer.removeErrorListeners();
    lexer.addErrorListener(errorListener);

    CommonTokenStream tokens = new CommonTokenStream(lexer);

    return parseQueryFragment(parseRule, visit, errorListener, tokens);
}

From source file: io.mxnet.caffetranslator.Converter.java

License: Apache License

public boolean parseTrainingPrototxt() {

    CharStream cs = null;
    try {
        FileInputStream fis = new FileInputStream(new File(trainPrototxt));
        cs = CharStreams.fromStream(fis, StandardCharsets.UTF_8);
    } catch (IOException e) {
        System.err.println("Unable to read prototxt: " + trainPrototxt);
        return false;
    }

    CaffePrototxtLexer lexer = new CaffePrototxtLexer(cs);

    CommonTokenStream tokens = new CommonTokenStream(lexer);
    CaffePrototxtParser parser = new CaffePrototxtParser(tokens);

    CreateModelListener modelCreator = new CreateModelListener(parser, mlModel);
    parser.addParseListener(modelCreator);
    parser.prototxt();

    return true;
}

From source file: io.mxnet.caffetranslator.misc.CollectStats.java

License: Apache License

public static void main(String[] args) {
    String filePath = "path";

    CharStream cs = null;
    try {
        FileInputStream fis = new FileInputStream(new File(filePath));
        cs = CharStreams.fromStream(fis, StandardCharsets.UTF_8);
    } catch (Exception e) {
        e.printStackTrace();
    }

    CaffePrototxtLexer lexer = new CaffePrototxtLexer(cs);
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    CaffePrototxtParser parser = new CaffePrototxtParser(tokens);

    StatsListener statsListener = new StatsListener();
    parser.addParseListener(statsListener);
    parser.prototxt();

    Map<String, Set<String>> attrMap = statsListener.getAttrMap();

    for (Map.Entry<String, Set<String>> pair : attrMap.entrySet()) {
        System.out.println(pair.getKey() + ":");
        for (String value : pair.getValue()) {
            System.out.println("    " + value);
        }
    }
}

From source file: io.mxnet.caffetranslator.Solver.java

License: Apache License

public boolean parsePrototxt() {
    CharStream cs = null;
    try {
        FileInputStream fis = new FileInputStream(new File(solverPath));
        cs = CharStreams.fromStream(fis, StandardCharsets.UTF_8);
    } catch (IOException e) {
        System.err.println("Unable to read prototxt " + solverPath);
        return false;
    }

    CaffePrototxtLexer lexer = new CaffePrototxtLexer(cs);
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    CaffePrototxtParser parser = new CaffePrototxtParser(tokens);

    SolverListener solverListener = new SolverListener();
    parser.addParseListener(solverListener);
    parser.solver();

    properties = solverListener.getProperties();

    setFields(properties);

    parseDone = true;
    return true;
}

From source file: io.prestosql.sql.parser.SqlParser.java

License: Apache License

private Node invokeParser(String name, String sql, Function<SqlBaseParser, ParserRuleContext> parseFunction,
        ParsingOptions parsingOptions) {
    try {
        SqlBaseLexer lexer = new SqlBaseLexer(new CaseInsensitiveStream(CharStreams.fromString(sql)));
        CommonTokenStream tokenStream = new CommonTokenStream(lexer);
        SqlBaseParser parser = new SqlBaseParser(tokenStream);

        // Override the default error strategy to not attempt inserting or deleting a token.
        // Otherwise, it messes up error reporting
        parser.setErrorHandler(new DefaultErrorStrategy() {
            @Override
            public Token recoverInline(Parser recognizer) throws RecognitionException {
                if (nextTokensContext == null) {
                    throw new InputMismatchException(recognizer);
                } else {
                    throw new InputMismatchException(recognizer, nextTokensState, nextTokensContext);
                }
            }
        });

        parser.addParseListener(new PostProcessor(Arrays.asList(parser.getRuleNames())));

        lexer.removeErrorListeners();
        lexer.addErrorListener(LEXER_ERROR_LISTENER);

        parser.removeErrorListeners();

        if (enhancedErrorHandlerEnabled) {
            parser.addErrorListener(PARSER_ERROR_HANDLER);
        } else {
            parser.addErrorListener(LEXER_ERROR_LISTENER);
        }

        ParserRuleContext tree;
        try {
            // first, try parsing with potentially faster SLL mode
            parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
            tree = parseFunction.apply(parser);
        } catch (ParseCancellationException ex) {
            // if we fail, parse with LL mode
            tokenStream.reset(); // rewind input stream
            parser.reset();

            parser.getInterpreter().setPredictionMode(PredictionMode.LL);
            tree = parseFunction.apply(parser);
        }

        return new AstBuilder(parsingOptions).visit(tree);
    } catch (StackOverflowError e) {
        throw new ParsingException(name + " is too large (stack overflow while parsing)");
    }
}

From source file: io.proleap.cobol.asg.runner.impl.CobolParserRunnerImpl.java

License: Open Source License

protected void parsePreprocessInput(final String preProcessedInput, final String compilationUnitName,
        final Program program, final CobolParserParams params) throws IOException {
    // run the lexer
    final CobolLexer lexer = new CobolLexer(CharStreams.fromString(preProcessedInput));

    if (!params.getIgnoreSyntaxErrors()) {
        // register an error listener so that lexing stops on errors
        lexer.removeErrorListeners();
        lexer.addErrorListener(new ThrowingErrorListener());
    }

    // get a list of matched tokens
    final CommonTokenStream tokens = new CommonTokenStream(lexer);

    // pass the tokens to the parser
    final CobolParser parser = new CobolParser(tokens);

    if (!params.getIgnoreSyntaxErrors()) {
        // register an error listener so that parsing stops on errors
        parser.removeErrorListeners();
        parser.addErrorListener(new ThrowingErrorListener());
    }

    // specify our entry point
    final StartRuleContext ctx = parser.startRule();

    // analyze contained compilation units
    final List<String> lines = splitLines(preProcessedInput);
    final ParserVisitor visitor = new CobolCompilationUnitVisitorImpl(compilationUnitName, lines, tokens,
            program);

    visitor.visit(ctx);
}

From source file: io.proleap.cobol.parser.runner.impl.CobolParserRunnerImpl.java

License: Open Source License

protected void parseFile(final File inputFile, final Program program, final CobolSourceFormat format,
        final CobolDialect dialect) throws IOException {
    if (isRelevant(inputFile)) {
        final File libDirectory = inputFile.getParentFile();

        // preprocess input stream
        final String preProcessedInput = CobolGrammarContext.getInstance().getCobolPreprocessor()
                .process(inputFile, libDirectory, format, dialect);

        LOG.info("Parsing file {}.", inputFile.getName());

        // run the lexer
        final Cobol85Lexer lexer = new Cobol85Lexer(new ANTLRInputStream(preProcessedInput));

        // get a list of matched tokens
        final CommonTokenStream tokens = new CommonTokenStream(lexer);

        // pass the tokens to the parser
        final Cobol85Parser parser = new Cobol85Parser(tokens);

        // specify our entry point
        final StartRuleContext ctx = parser.startRule();

        // determine the copy book name
        final String compilationUnitName = getCompilationUnitName(inputFile);

        // analyze contained copy books
        final ParserVisitor visitor = new CobolCompilationUnitVisitorImpl(program, compilationUnitName);

        LOG.info("Collecting units in file {}.", inputFile.getName());
        visitor.visit(ctx);
    } else {
        LOG.info("Ignoring file {}", inputFile.getAbsoluteFile());
    }
}