Example usage for org.antlr.v4.runtime CommonTokenStream fill

List of usage examples for org.antlr.v4.runtime CommonTokenStream fill

Introduction

On this page you can find example usage of org.antlr.v4.runtime.CommonTokenStream.fill().

Prototype

public void fill() 

Source Link

Document

Get all tokens from lexer until EOF
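
A minimal sketch of the typical pattern, assuming a hypothetical ANTLR-generated lexer named HelloLexer: build a CommonTokenStream over the lexer, call fill() to buffer every token through EOF, then read the buffered list with getTokens().

import org.antlr.v4.runtime.ANTLRInputStream;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.Token;

public class FillExample {
    public static void main(String[] args) {
        // HelloLexer is a placeholder for any ANTLR-generated lexer.
        HelloLexer lexer = new HelloLexer(new ANTLRInputStream("hello world"));
        CommonTokenStream tokens = new CommonTokenStream(lexer);

        // Eagerly fetch all tokens from the lexer until EOF; before this call
        // the stream buffers tokens lazily and getTokens() may be incomplete.
        tokens.fill();

        for (Token t : tokens.getTokens()) {
            System.out.println(t);
        }
    }
}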

Usage

From source file:SparqlMain.java

License:Apache License

/**
 *
 * @param args command-line arguments; args[0] is the SPARQL input file
 */
public static void main(String args[]) throws Exception {

    System.out.println("Work on file " + args[0]);

    int lineWidth = 80;
    if (args.length >= 2) {
        lineWidth = Integer.parseInt(args[1]);
    }

    SparqlLexer lex = null;
    try {
        lex = new SparqlLexer(new ANTLRFileStream(args[0]));
    } catch (IOException ex) {
        Logger.getLogger(SparqlMain.class.getName()).log(Level.SEVERE, null, ex);
        return; // no lexer available, cannot continue
    }
    CommonTokenStream tokens = new CommonTokenStream(lex);

    System.out.println("Tokens: -------------------------------");

    // Buffer all tokens from the lexer through EOF so getTokens() returns the complete list.
    tokens.fill();
    System.out.println("Number of tokens " + tokens.getTokens().size());

    List tokenList = tokens.getTokens();

    System.out.println("TokenList: -------------------------------");
    Iterator it = tokenList.iterator();
    while (it.hasNext()) {
        Token t = (Token) it.next();
        System.out.println(t.toString());
    }
    System.out.flush();

    System.out.println("Input from token list: -------------------------------");

    it = tokenList.iterator();
    while (it.hasNext()) {
        Token t = (Token) it.next();
        if (t.getType() != SparqlParser.EOF) {
            if (t.getType() == SparqlParser.WS || t.getType() == SparqlParser.COMMENT) {
                String s = t.getText();
                s = s.replace("\r\n", "\n");
                if (!System.lineSeparator().equals("\n")) {
                    s = s.replace("\n", System.lineSeparator());
                }
                System.out.print(s);
            } else {
                System.out.print(t.getText());
            }
        }
    }
    System.out.flush();

    SparqlParser parser = new SparqlParser(tokens);
    parser.setBuildParseTree(true);

    System.out.println("Start parsing: -------------------------------");
    System.out.flush();

    ParserRuleContext t = parser.query();

    System.out.flush();
    System.out.println("Parse tree: -------------------------------");
    System.out.println(t.toStringTree(parser));

    // visualize parse tree in dialog box 
    t.inspect(parser);

    if (parser.getNumberOfSyntaxErrors() <= 0) {

        //ParseTreeWalker walker = new ParseTreeWalker();

        String groupFile = "ident.stg";
        if (args.length > 1) {
            groupFile = args[1];
        }
        System.out.println("Read StringTemplate Group File: " + groupFile + "-------------------------------");

        STGroup g = new STGroupFile(groupFile);
        IdentVisitor visitor = new IdentVisitor();
        visitor.setSTGroup(g);
        ST query = visitor.visit(t);

        System.out.println("Emit reformatted query: -------------------------------");

        System.out.println(query.render(lineWidth));

        System.out.println("Emit original query: -------------------------------");

        String q = query.render(lineWidth);

        /* get common token stream */
        File tmpFile = File.createTempFile("query_", ".rq");
        FileOutputStream fo = new FileOutputStream(tmpFile);
        OutputStreamWriter ow = new OutputStreamWriter(fo, "UTF8");
        ow.write(q);
        ow.close();
        /* transformation pipline
         * step 1: Unicode pre-processing
         * step 2: Lexical analysis
         */
        lex = new SparqlLexer(new ANTLRFileStream(tmpFile.getCanonicalPath(), "UTF8"));
        tokens = new CommonTokenStream(lex);
        // fill() is needed here as well; otherwise getTokens() returns an empty list
        // because the stream buffers tokens lazily.
        tokens.fill();

        List formattedTokenList = tokens.getTokens();

        it = tokenList.iterator();
        Iterator fit = formattedTokenList.iterator();

        boolean lineSeparatorHasToBeModified = !System.lineSeparator().equals("\n");

        while (it.hasNext()) {
            Token originalToken = (Token) it.next();
            if (originalToken.getType() != SparqlParser.EOF) {
                if (originalToken.getType() == SparqlParser.WS
                        || originalToken.getType() == SparqlParser.COMMENT) {
                    String s = originalToken.getText();
                    s = s.replace("\r\n", "\n");
                    if (lineSeparatorHasToBeModified) {
                        s = s.replace("\n", System.lineSeparator());
                    }
                    System.out.print(s);
                } else {
                    System.out.print(originalToken.getText());
                }
            }
        }
        System.out.flush();

    }
    System.out.println("-------------------------------");
    System.out.println("Number of errors encountered: " + parser.getNumberOfSyntaxErrors());
}

From source file:com.espertech.esper.core.deploy.EPLModuleUtil.java

License:Open Source License

public static ParseNode getModule(EPLModuleParseItem item, String resourceName)
        throws ParseException, IOException {
    CharStream input = new NoCaseSensitiveStream(new StringReader(item.getExpression()));

    EsperEPL2GrammarLexer lex = ParseHelper.newLexer(input);
    CommonTokenStream tokenStream = new CommonTokenStream(lex);
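    // Buffer the entire token stream up front so the leading tokens can be scanned below.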
    tokenStream.fill();

    List tokens = tokenStream.getTokens();
    int beginIndex = 0;
    boolean isMeta = false;
    boolean isModule = false;
    boolean isUses = false;
    boolean isExpression = false;

    while (beginIndex < tokens.size()) {
        Token t = (Token) tokens.get(beginIndex);
        if (t.getType() == EsperEPL2GrammarParser.EOF) {
            break;
        }
        if ((t.getType() == EsperEPL2GrammarParser.WS) || (t.getType() == EsperEPL2GrammarParser.SL_COMMENT)
                || (t.getType() == EsperEPL2GrammarParser.ML_COMMENT)) {
            beginIndex++;
            continue;
        }
        String tokenText = t.getText().trim().toLowerCase();
        if (tokenText.equals("module")) {
            isModule = true;
            isMeta = true;
        } else if (tokenText.equals("uses")) {
            isUses = true;
            isMeta = true;
        } else if (tokenText.equals("import")) {
            isMeta = true;
        } else {
            isExpression = true;
            break;
        }
        beginIndex++;
        beginIndex++; // skip space
        break;
    }

    if (isExpression) {
        return new ParseNodeExpression(item);
    }
    if (!isMeta) {
        return new ParseNodeComment(item);
    }

    // check meta tag (module, uses, import)
    StringWriter buffer = new StringWriter();
    for (int i = beginIndex; i < tokens.size(); i++) {
        Token t = (Token) tokens.get(i);
        if (t.getType() == EsperEPL2GrammarParser.EOF) {
            break;
        }
        if ((t.getType() != EsperEPL2GrammarParser.IDENT) && (t.getType() != EsperEPL2GrammarParser.DOT)
                && (t.getType() != EsperEPL2GrammarParser.STAR) && (!t.getText().matches("[a-zA-Z]*"))) {
            throw getMessage(isModule, isUses, resourceName, t.getType());
        }
        buffer.append(t.getText().trim());
    }

    String result = buffer.toString().trim();
    if (result.length() == 0) {
        throw getMessage(isModule, isUses, resourceName, -1);
    }

    if (isModule) {
        return new ParseNodeModule(item, result);
    } else if (isUses) {
        return new ParseNodeUses(item, result);
    }
    return new ParseNodeImport(item, result);
}

From source file:com.espertech.esper.core.deploy.EPLModuleUtil.java

License:Open Source License

public static List<EPLModuleParseItem> parse(String module) throws ParseException {

    CharStream input;
    try {
        input = new NoCaseSensitiveStream(new StringReader(module));
    } catch (IOException ex) {
        log.error("Exception reading module expression: " + ex.getMessage(), ex);
        return null;
    }

    EsperEPL2GrammarLexer lex = ParseHelper.newLexer(input);
    CommonTokenStream tokens = new CommonTokenStream(lex);
    try {
        tokens.fill();
    } catch (RuntimeException ex) {
        String message = "Unexpected exception recognizing module text";
        if (ex instanceof LexerNoViableAltException) {
            if (ParseHelper.hasControlCharacters(module)) {
                message = "Unrecognized control characters found in text, failed to parse text";
            } else {
                message += ", recognition failed for " + ex.toString();
            }
        } else if (ex instanceof RecognitionException) {
            RecognitionException recog = (RecognitionException) ex;
            message += ", recognition failed for " + recog.toString();
        } else if (ex.getMessage() != null) {
            message += ": " + ex.getMessage();
        }
        message += " [" + module + "]";
        log.error(message, ex);
        throw new ParseException(message);
    }

    List<EPLModuleParseItem> statements = new ArrayList<EPLModuleParseItem>();
    StringWriter current = new StringWriter();
    Integer lineNum = null;
    int charPosStart = 0;
    int charPos = 0;
    List<Token> tokenList = tokens.getTokens();
    Set<Integer> skippedSemicolonIndexes = getSkippedSemicolons(tokenList);
    int index = -1;
    for (Object token : tokenList) // Call getTokens first before invoking tokens.size! ANTLR problem
    {
        index++;
        Token t = (Token) token;
        boolean semi = t.getType() == EsperEPL2GrammarLexer.SEMI && !skippedSemicolonIndexes.contains(index);
        if (semi) {
            if (current.toString().trim().length() > 0) {
                statements.add(new EPLModuleParseItem(current.toString().trim(), lineNum == null ? 0 : lineNum,
                        charPosStart, charPos));
                lineNum = null;
            }
            current = new StringWriter();
        } else {
            if ((lineNum == null) && (t.getType() != EsperEPL2GrammarParser.WS)) {
                lineNum = t.getLine();
                charPosStart = charPos;
            }
            if (t.getType() != EsperEPL2GrammarLexer.EOF) {
                current.append(t.getText());
                charPos += t.getText().length();
            }
        }
    }

    if (current.toString().trim().length() > 0) {
        statements.add(new EPLModuleParseItem(current.toString().trim(), lineNum == null ? 0 : lineNum, 0, 0));
    }
    return statements;
}

From source file:com.espertech.esper.epl.db.DatabasePollingViewableFactory.java

License:Open Source License

/**
 * Lexes the sample SQL and inserts a "where 1=0" where-clause.
 * @param querySQL to inspect using lexer
 * @return sample SQL with where-clause inserted
 * @throws ExprValidationException to indicate a lexer problem
 */
protected static String lexSampleSQL(String querySQL) throws ExprValidationException {
    querySQL = querySQL.replaceAll("\\s\\s+|\\n|\\r", " ");
    StringReader reader = new StringReader(querySQL);
    CharStream input;
    try {
        input = new NoCaseSensitiveStream(reader);
    } catch (IOException ex) {
        throw new ExprValidationException("IOException lexing query SQL '" + querySQL + '\'', ex);
    }

    int whereIndex = -1;
    int groupbyIndex = -1;
    int havingIndex = -1;
    int orderByIndex = -1;
    List<Integer> unionIndexes = new ArrayList<Integer>();

    EsperEPL2GrammarLexer lex = ParseHelper.newLexer(input);
    CommonTokenStream tokens = new CommonTokenStream(lex);
    tokens.fill();
    List tokenList = tokens.getTokens();

    for (int i = 0; i < tokenList.size(); i++) {
        Token token = (Token) tokenList.get(i);
        if ((token == null) || token.getText() == null) {
            break;
        }
        String text = token.getText().toLowerCase().trim();
        if (text.equals("where")) {
            whereIndex = token.getCharPositionInLine() + 1;
        }
        if (text.equals("group")) {
            groupbyIndex = token.getCharPositionInLine() + 1;
        }
        if (text.equals("having")) {
            havingIndex = token.getCharPositionInLine() + 1;
        }
        if (text.equals("order")) {
            orderByIndex = token.getCharPositionInLine() + 1;
        }
        if (text.equals("union")) {
            unionIndexes.add(token.getCharPositionInLine() + 1);
        }
    }

    // If we have a union, break string into subselects and process each
    if (unionIndexes.size() != 0) {
        StringWriter changedSQL = new StringWriter();
        int lastIndex = 0;
        for (int i = 0; i < unionIndexes.size(); i++) {
            int index = unionIndexes.get(i);
            String fragment;
            if (i > 0) {
                fragment = querySQL.substring(lastIndex + 5, index - 1);
            } else {
                fragment = querySQL.substring(lastIndex, index - 1);
            }
            String lexedFragment = lexSampleSQL(fragment);

            if (i > 0) {
                changedSQL.append("union ");
            }
            changedSQL.append(lexedFragment);
            lastIndex = index - 1;
        }

        // last part after last union
        String fragment = querySQL.substring(lastIndex + 5, querySQL.length());
        String lexedFragment = lexSampleSQL(fragment);
        changedSQL.append("union ");
        changedSQL.append(lexedFragment);

        return changedSQL.toString();
    }

    // Found a where clause, simplest cases
    if (whereIndex != -1) {
        StringWriter changedSQL = new StringWriter();
        String prefix = querySQL.substring(0, whereIndex + 5);
        String suffix = querySQL.substring(whereIndex + 5, querySQL.length());
        changedSQL.write(prefix);
        changedSQL.write("1=0 and ");
        changedSQL.write(suffix);
        return changedSQL.toString();
    }

    // No where clause, find group-by
    int insertIndex;
    if (groupbyIndex != -1) {
        insertIndex = groupbyIndex;
    } else if (havingIndex != -1) {
        insertIndex = havingIndex;
    } else if (orderByIndex != -1) {
        insertIndex = orderByIndex;
    } else {
        StringWriter changedSQL = new StringWriter();
        changedSQL.write(querySQL);
        changedSQL.write(" where 1=0 ");
        return changedSQL.toString();
    }

    try {
        StringWriter changedSQL = new StringWriter();
        String prefix = querySQL.substring(0, insertIndex - 1);
        changedSQL.write(prefix);
        changedSQL.write("where 1=0 ");
        String suffix = querySQL.substring(insertIndex - 1, querySQL.length());
        changedSQL.write(suffix);
        return changedSQL.toString();
    } catch (Exception ex) {
        String text = "Error constructing sample SQL to retrieve metadata for JDBC-drivers that don't support metadata, consider using the "
                + SAMPLE_WHERECLAUSE_PLACEHOLDER + " placeholder or providing a sample SQL";
        log.error(text, ex);
        throw new ExprValidationException(text, ex);
    }
}

From source file:com.espertech.esper.epl.parse.ParseHelper.java

License:Open Source License

/**
 * Parse expression using the rule the ParseRuleSelector instance supplies.
 *
 * @param expression           - text to parse
 * @param parseRuleSelector    - parse rule to select
 * @param addPleaseCheck       - true to include depth paraphrase
 * @param eplStatementErrorMsg - text for error
 * @return AST - syntax tree
 * @throws EPException when the AST could not be parsed
 */
public static ParseResult parse(String expression, String eplStatementErrorMsg, boolean addPleaseCheck,
        ParseRuleSelector parseRuleSelector, boolean rewriteScript) throws EPException {
    if (log.isDebugEnabled()) {
        log.debug(".parse Parsing expr=" + expression);
    }

    CharStream input;
    try {
        input = new NoCaseSensitiveStream(new StringReader(expression));
    } catch (IOException ex) {
        throw new EPException("IOException parsing expression '" + expression + '\'', ex);
    }

    EsperEPL2GrammarLexer lex = newLexer(input);

    CommonTokenStream tokens = new CommonTokenStream(lex);
    EsperEPL2GrammarParser parser = ParseHelper.newParser(tokens);

    Tree tree;
    try {
        tree = parseRuleSelector.invokeParseRule(parser);
    } catch (RecognitionException ex) {
        tokens.fill();
        if (rewriteScript && isContainsScriptExpression(tokens)) {
            return handleScriptRewrite(tokens, eplStatementErrorMsg, addPleaseCheck, parseRuleSelector);
        }
        log.debug("Error parsing statement [" + expression + "]", ex);
        throw ExceptionConvertor.convertStatement(ex, eplStatementErrorMsg, addPleaseCheck, parser);
    } catch (RuntimeException e) {
        try {
            tokens.fill();
        } catch (RuntimeException ex) {
            log.debug("Token-fill produced exception: " + e.getMessage(), e);
        }
        if (log.isDebugEnabled()) {
            log.debug("Error parsing statement [" + eplStatementErrorMsg + "]", e);
        }
        if (e.getCause() instanceof RecognitionException) {
            if (rewriteScript && isContainsScriptExpression(tokens)) {
                return handleScriptRewrite(tokens, eplStatementErrorMsg, addPleaseCheck, parseRuleSelector);
            }
            throw ExceptionConvertor.convertStatement((RecognitionException) e.getCause(), eplStatementErrorMsg,
                    addPleaseCheck, parser);
        } else {
            throw e;
        }
    }

    // if we are re-writing scripts and contain a script, then rewrite
    if (rewriteScript && isContainsScriptExpression(tokens)) {
        return handleScriptRewrite(tokens, eplStatementErrorMsg, addPleaseCheck, parseRuleSelector);
    }

    if (log.isDebugEnabled()) {
        log.debug(".parse Dumping AST...");
        ASTUtil.dumpAST(tree);
    }

    String expressionWithoutAnnotation = expression;
    if (tree instanceof EsperEPL2GrammarParser.StartEPLExpressionRuleContext) {
        EsperEPL2GrammarParser.StartEPLExpressionRuleContext epl = (EsperEPL2GrammarParser.StartEPLExpressionRuleContext) tree;
        expressionWithoutAnnotation = getNoAnnotation(expression, epl.annotationEnum(), tokens);
    } else if (tree instanceof EsperEPL2GrammarParser.StartPatternExpressionRuleContext) {
        EsperEPL2GrammarParser.StartPatternExpressionRuleContext pattern = (EsperEPL2GrammarParser.StartPatternExpressionRuleContext) tree;
        expressionWithoutAnnotation = getNoAnnotation(expression, pattern.annotationEnum(), tokens);
    }

    return new ParseResult(tree, expressionWithoutAnnotation, tokens, Collections.<String>emptyList());
}

From source file:com.espertech.esper.event.property.PropertyParser.java

License:Open Source License

/**
 * Parses a given property name returning an AST.
 * @param propertyName to parse
 * @return AST syntax tree
 */
public static EsperEPL2GrammarParser.StartEventPropertyRuleContext parse(String propertyName) {
    CharStream input;
    try {
        input = new NoCaseSensitiveStream(new StringReader(propertyName));
    } catch (IOException ex) {
        throw new PropertyAccessException("IOException parsing property name '" + propertyName + '\'', ex);
    }

    EsperEPL2GrammarLexer lex = ParseHelper.newLexer(input);
    CommonTokenStream tokens = new CommonTokenStream(lex);
    try {
        tokens.fill();
    } catch (RuntimeException e) {
        if (ParseHelper.hasControlCharacters(propertyName)) {
            throw new PropertyAccessException("Unrecognized control characters found in text");
        }
        throw new PropertyAccessException("Failed to parse text: " + e.getMessage());
    }

    EsperEPL2GrammarParser g = ParseHelper.newParser(tokens);
    EsperEPL2GrammarParser.StartEventPropertyRuleContext r;

    try {
        r = g.startEventPropertyRule();
    } catch (RecognitionException e) {
        return handleRecognitionEx(e, tokens, propertyName, g);
    } catch (RuntimeException e) {
        if (log.isDebugEnabled()) {
            log.debug("Error parsing property expression [" + propertyName + "]", e);
        }
        if (e.getCause() instanceof RecognitionException) {
            return handleRecognitionEx((RecognitionException) e.getCause(), tokens, propertyName, g);
        } else {
            throw e;
        }
    }

    return r;
}

From source file:com.fizzed.rocker.compiler.TemplateParser.java

License:Apache License

private TemplateModel parse(ANTLRInputStream input, String packageName, String templateName, long modifiedAt)
        throws ParserException {
    // construct path for more helpful error messages
    String templatePath = packageName.replace(".", File.separator) + "/" + templateName;

    // get our lexer
    log.trace("Lexer for input stream");
    RockerLexer lexer = new RockerLexer(input);
    lexer.removeErrorListeners();
    lexer.addErrorListener(new DescriptiveErrorListener());

    //
    // lexer
    //
    CommonTokenStream tokens = null;
    try {
        // get a list of matched tokens
        log.trace("Tokenizing lexer");
        tokens = new CommonTokenStream(lexer);
    } catch (ParserRuntimeException e) {
        throw unwrapParserRuntimeException(templatePath, e);
    }

    if (log.isTraceEnabled()) {
        // just for debugging lexer
        tokens.fill();
        for (Token token : tokens.getTokens()) {
            log.trace("{}", token);
        }
    }

    //
    // parser & new model
    //
    try {
        // pass the tokens to the parser
        log.trace("Parsing tokens");
        RockerParser parser = new RockerParser(tokens);
        parser.removeErrorListeners();
        parser.addErrorListener(new DescriptiveErrorListener());

        TemplateModel model = new TemplateModel(packageName, templateName, modifiedAt,
                configuration.getOptions().copy());

        // walk it and attach our listener
        TemplateParserListener listener = new TemplateParserListener(input, model, templatePath);
        ParseTreeWalker walker = new ParseTreeWalker();
        log.trace("Walking parse tree");
        walker.walk(listener, parser.template());

        if (model.getOptions().getCombineAdjacentPlain()) {
            combineAdjacentPlain(model);
        }

        // discard whitespace either globally or template-set or also fallsback
        // to the default per content-type
        if (model.getOptions().getDiscardLogicWhitespaceForContentType(model.getContentType())) {
            discardLogicWhitespace(model);
        }

        return model;
    } catch (ParserRuntimeException e) {
        throw unwrapParserRuntimeException(templatePath, e);
    }
}

From source file:com.github.jknack.css.CSS.java

License:Apache License

public StyleSheet parse(final String content) {
    CssLexer lexer = new CssLexer(new ANTLRInputStream(content));
    //    lexer.removeErrorListeners();
    if (!debug) {
        lexer.addErrorListener(new ErrorReporter());
    }
    CommonTokenStream tokens = new CommonTokenStream(lexer);

    tokens.fill();
    CssParser parser = new CssParser(tokens);
    //    parser.setErrorHandler(new BailErrorStrategy());
    //    parser.removeParseListeners();
    if (!debug) {
        parser.addErrorListener(new ErrorReporter());
    }
    if (debug) {
        for (Token tok : tokens.getTokens()) {
            CommonToken ct = (CommonToken) tok;
            String[] tokenNames = lexer.getTokenNames();
            int type = ct.getType();
            System.out.println((type > 0 ? tokenNames[type] : "EOF") + "(" + ct.getText() + ")");
        }
    }
    StyleSheetContext tree = parser.styleSheet();
    if (debug) {
        System.out.println(tree.toStringTree(parser));
    }
    CSSBuilder builder = new CSSBuilder();
    return (StyleSheet) builder.visit(tree);
}

From source file:com.sri.ai.praise.sgsolver.demo.editor.HOGMCodeArea.java

License:Open Source License

private static StyleSpans<Collection<String>> computeHighlighting(String text) {
    StyleSpansBuilder<Collection<String>> spansBuilder = new StyleSpansBuilder<>();
    int lastTokenEnd = 0;
    ANTLRInputStream input = new ANTLRInputStream(text);
    HOGMLexer lexer = new HOGMLexer(input);
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    tokens.fill();
    for (int i = 0; i < tokens.size(); i++) {
        Token t = tokens.get(i);
        if (t.getType() == Token.EOF) {
            break;
        }
        String styleClass;
        if (t.getType() == HOGMLexer.COMMENT || t.getType() == HOGMLexer.LINE_COMMENT) {
            styleClass = "hogmCodeComment";
        } else if (HOGMTerminalSymbols.isTerminalSymbol(t.getText())) {
            styleClass = "hogmCodeKeyword";
        } else {
            styleClass = "hogmCodeOther";
        }
        int spacing = t.getStartIndex() - lastTokenEnd;
        if (spacing > 0) {
            spansBuilder.add(Collections.emptyList(), spacing);
        }
        int stylesize = (t.getStopIndex() - t.getStartIndex()) + 1;
        spansBuilder.add(Collections.singleton(styleClass), stylesize);
        lastTokenEnd = t.getStopIndex() + 1;
    }

    return spansBuilder.create();
}

From source file:eagle.query.parser.EagleQueryParser.java

License:Apache License

public ORExpression parse() throws EagleQueryParseException {
    try {
        EagleFilterLexer lexer = new EagleFilterLexer(new ANTLRInputStream(_query));
        CommonTokenStream tokens = new CommonTokenStream(lexer);
        tokens.fill();
        EagleFilterParser p = new EagleFilterParser(tokens);
        p.setErrorHandler(new EagleANTLRErrorStrategy());
        p.setBuildParseTree(true);
        EagleQueryFilterListenerImpl listener = new EagleQueryFilterListenerImpl();
        p.addParseListener(listener);
        EagleFilterParser.FilterContext fc = p.filter();
        if (fc.exception != null) {
            LOG.error("Can not successfully parse the query:" + _query, fc.exception);
            throw fc.exception;
        }
        return listener.result();
    } catch (Exception ex) {
        LOG.error("Can not successfully parse the query:", ex);
        throw new EagleQueryParseException("can not successfully parse the query:" + _query);
    }
}