List of usage examples for org.antlr.v4.runtime.CommonTokenStream.fill()
public void fill()
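Before the examples, a minimal sketch of the pattern they all share: fill() forces the CommonTokenStream to pull every token from its lexer, buffering the complete list (including the EOF token) so that getTokens() can be iterated. MyLexer below is a placeholder for any ANTLR-4-generated lexer, not a class from the examples.

import org.antlr.v4.runtime.ANTLRInputStream;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.Token;

// Minimal sketch; MyLexer stands in for any generated lexer.
public static void dumpTokens(String input) {
    MyLexer lexer = new MyLexer(new ANTLRInputStream(input));
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    tokens.fill(); // eagerly lex the whole input, buffering every token through EOF
    for (Token token : tokens.getTokens()) {
        System.out.println(token);
    }
}

Without fill(), a CommonTokenStream lexes lazily, fetching tokens only as a parser (or LT/LA lookahead) demands them; fill() is the usual choice when you want to inspect, print, or post-process the full token list yourself, as every example below does.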
From source file: edu.iastate.cs.boa.ui.errors.FetchCompilerError.java
License: Apache License

protected CommonTokenStream lex(final String input, final int[] ids, final String[] strings,
        final String[] errors) throws IOException {
    lexer = new BoaLexer(new ANTLRInputStream(new StringReader(input)));
    lexer.removeErrorListeners();
    lexer.addErrorListener(new BaseErrorListener() {
        @Override
        public void syntaxError(final Recognizer<?, ?> recognizer, final Object offendingSymbol,
                final int line, final int charPositionInLine, final String msg,
                final RecognitionException e) {
            error("lexer", (BoaLexer) recognizer, offendingSymbol, line, charPositionInLine, 1, msg, e);
        }
    });

    final CommonTokenStream tokens = new CommonTokenStream(lexer);
    tokens.fill();
    return tokens;
}
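Because fill() lexes the entire input before lex() returns, the error listener registered above fires for every lexical error up front instead of lazily during a later parse.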
From source file: es.ucm.fdi.ac.parser.AntlrTokenizer.java
License: Open Source License

public void tokenize(String source, String sourceFile, PrintWriter out) {
    Writer debugWriter = null;
    try {
        Lexer lexer = (Lexer) language.lexerConstructor.newInstance(new ANTLRInputStream(source));
        final CommonTokenStream tokens = new CommonTokenStream(lexer);
        tokens.fill();

        if (log.isDebugEnabled()) {
            try {
                debugWriter = new BufferedWriter(
                        new FileWriter(Files.createTempFile("tokens-" + NDC.get() + "-", ".txt").toFile()));
            } catch (IOException ioe) {
                log.warn("Could not create debugWriter", ioe);
            }
        }

        for (final Token tok : tokens.getTokens()) {
            out.print(tokenToString(tok));
            if (log.isDebugEnabled()) {
                log.debug(tok);
                if (debugWriter != null) {
                    debugWriter.write(tokenToString(tok));
                }
            }
        }

        if (parse) {
            Parser parser = (Parser) language.parserConstructor.newInstance(tokens);
            parser.setErrorHandler(new BailErrorStrategy());
            ParserRuleContext parserRuleContext = (ParserRuleContext) language.parserMethod.invoke(parser);
            if (log.isDebugEnabled()) {
                log.debug(Trees.toStringTree(parserRuleContext, parser));
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        throw new IllegalArgumentException("Bad token in source, or failed to parse", e);
    } finally {
        out.flush();
        if (log.isDebugEnabled() && debugWriter != null) {
            try {
                debugWriter.close();
            } catch (IOException ioe) {
                log.warn("Could not close debugWriter", ioe);
            }
        }
    }
}
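Here fill() materializes the complete token list so the getTokens() loop can print every token; the same buffered stream is then handed to the reflectively constructed parser, which consumes from the buffer fill() populated.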
From source file: groovy.ui.text.SmartDocumentFilter.java
License: Apache License

private void parseDocument() throws BadLocationException {
    GroovyLangLexer lexer;
    try {
        lexer = createLexer(styledDocument.getText(0, styledDocument.getLength()));
    } catch (IOException e) {
        e.printStackTrace();
        return;
    }

    CommonTokenStream tokenStream = new CommonTokenStream(lexer);
    try {
        tokenStream.fill();
    } catch (LexerNoViableAltException | GroovySyntaxError e) {
        // ignore
        return;
    } catch (Exception e) {
        e.printStackTrace();
        return;
    }

    List<Token> tokenList = tokenStream.getTokens();
    List<Token> tokenListToRender = findTokensToRender(tokenList);

    for (Token token : tokenListToRender) {
        int tokenType = token.getType();

        // if (token instanceof CommonToken) {
        //     System.out.println(((CommonToken) token).toString(lexer));
        // }

        if (EOF == tokenType) {
            continue;
        }

        int tokenStartIndex = token.getStartIndex();
        int tokenStopIndex = token.getStopIndex();
        int tokenLength = tokenStopIndex - tokenStartIndex + 1;

        styledDocument.setCharacterAttributes(tokenStartIndex, tokenLength,
                findStyleByTokenType(tokenType), true);

        if (GStringBegin == tokenType || GStringPart == tokenType) {
            styledDocument.setCharacterAttributes(tokenStartIndex + tokenLength - 1, 1, defaultStyle, true);
        }
    }

    this.latestTokenList = tokenList;
}
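Eager lexing via fill() lets the highlighter catch lexer failures before touching the document and then style a complete token list; note that fill() also buffers the EOF token, which the loop skips explicitly.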
From source file: javasharp.SyntaxTreeXmlFilter.java
License: Open Source License

private JavaParser.CompilationUnitContext readSourceFile(File javaSourceFile)
        throws FileNotFoundException, IOException {
    Reader fileReader = new FileReader(javaSourceFile);
    ANTLRInputStream is = new ANTLRInputStream(fileReader);
    lexer.setInputStream(is);
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    tokens.fill();
    parser.setTokenStream(tokens);
    JavaParser.CompilationUnitContext ctx = parser.compilationUnit();
    return ctx;
}
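The fill() call is not strictly required here, since parser.compilationUnit() would pull tokens on demand, but it completes all lexing before parsing starts.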
From source file: net.certiv.json.test.base.AbstractBase.java
License: Open Source License

public String lexSource(String source, boolean output, boolean style) {
    CommonTokenStream tokens = produceTokens(source);
    tokens.fill();
    StringBuilder sb = new StringBuilder();
    for (Token token : tokens.getTokens()) {
        ((JsonToken) token).toStringStyle(style);
        sb.append(token.toString());
        if (output)
            System.out.print(token.toString());
    }
    return sb.toString();
}
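fill() guarantees getTokens() covers the whole source; the (JsonToken) cast suggests produceTokens() wires up a lexer that emits a custom token type, presumably via a custom TokenFactory.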
From source file: org.elasticsearch.xpack.sql.parser.SqlParser.java
License: Open Source License

private <T> T invokeParser(String sql, List<SqlTypedParamValue> params,
        Function<SqlBaseParser, ParserRuleContext> parseFunction,
        BiFunction<AstBuilder, ParserRuleContext, T> visitor) {
    SqlBaseLexer lexer = new SqlBaseLexer(new CaseInsensitiveStream(sql));
    lexer.removeErrorListeners();
    lexer.addErrorListener(ERROR_LISTENER);

    Map<Token, SqlTypedParamValue> paramTokens = new HashMap<>();
    TokenSource tokenSource = new ParametrizedTokenSource(lexer, paramTokens, params);

    CommonTokenStream tokenStream = new CommonTokenStream(tokenSource);
    SqlBaseParser parser = new SqlBaseParser(tokenStream);

    parser.addParseListener(new PostProcessor(Arrays.asList(parser.getRuleNames())));
    parser.removeErrorListeners();
    parser.addErrorListener(ERROR_LISTENER);
    parser.getInterpreter().setPredictionMode(PredictionMode.SLL);

    if (DEBUG) {
        debug(parser);
        tokenStream.fill();
        for (Token t : tokenStream.getTokens()) {
            String symbolicName = SqlBaseLexer.VOCABULARY.getSymbolicName(t.getType());
            String literalName = SqlBaseLexer.VOCABULARY.getLiteralName(t.getType());
            log.info(format(Locale.ROOT, " %-15s '%s'",
                    symbolicName == null ? literalName : symbolicName, t.getText()));
        }
    }

    ParserRuleContext tree = parseFunction.apply(parser);

    if (DEBUG) {
        log.info("Parse tree {} " + tree.toStringTree());
    }

    return visitor.apply(new AstBuilder(paramTokens), tree);
}
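fill() appears only in the DEBUG branch, purely to dump the token stream before parsing; this is safe because the parser afterwards reads from the same buffer that fill() populated.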
From source file: org.napile.asm.io.text.in.type.TypeNodeUtil.java
License: Apache License

public static TypeNode fromString(@NotNull String string) {
    TypeNodeLexer lexer = new TypeNodeLexer(null);
    TypeNodeParser parser = new TypeNodeParser(null);
    parser.setBuildParseTree(true);

    lexer.setInputStream(new ANTLRInputStream(string));

    CommonTokenStream tokenStream = new CommonTokenStream(lexer);
    tokenStream.fill();

    parser.setTokenStream(tokenStream);

    TypeNodeParser.TypeNodeContext typeNode = parser.typeNode();
    TypeNodeWorker worker = new TypeNodeWorker(tokenStream);
    typeNode.enterRule(worker);
    return worker.toType();
}
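Lexer and parser are constructed with null streams and wired up afterwards via setInputStream and setTokenStream; fill() buffers the whole stream, and the same tokenStream is passed to TypeNodeWorker, presumably so the listener can consult raw token text.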
From source file: org.semanticwb.rdf.sparql.SparqlMain.java
License: Apache License

/**
 * @param args
 */
public static void main(String args[]) throws Exception {
    // System.out.println("Work on file " + args[0]);

    int lineWidth = 80;
    if (args.length >= 2) {
        lineWidth = Integer.parseInt(args[1]);
    }

    SparqlLexer lex = null;
    try {
        String q = "PREFIX map: <http://datosabiertos.gob.mx/ontology/mapas.owl#>\n"
                + "PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\n"
                + "\n"
                + "SELECT \n"
                + " (CONCAT(?descripcionRecursoGeografico,\" (\",?tituloCapa,\")\") as ?titulo)\n"
                + " (CONCAT(\n"
                + " \"<h2>\",?titulo,\"</h2>\",\n"
                + " \"Estado:\",?estado,\"<br/>\",\n"
                + " \"Colonia:\",?colonia,\"<br/>\",\n"
                + " \"Calle:\",?calle,\"<br/>\",\n"
                + " \"CP:\",?cp,\"<br/>\",\n"
                + " \"(\",?latitud,\", \",?longitud,\")\"\n"
                + " ) as ?descripcion) \n"
                + " ?latitud \n"
                + " ?longitud \n"
                + "WHERE {\n"
                + " ?uri rdf:type map:RecursoGeografico .\n"
                + " ?uri map:capa ?capa .\n"
                + " ?capa map:titulo ?tituloCapa .\n"
                + " ?uri map:descripcionRecursoGeografico ?descripcionRecursoGeografico .\n"
                + " ?uri map:estado ?estado .\n"
                + " ?uri map:colonia ?colonia .\n"
                + " ?uri map:calle ?calle .\n"
                + " ?uri map:cp ?cp .\n"
                + " ?uri map:latitud ?latitud .\n"
                + " ?uri map:longitud ?longitud .\n"
                + " filter( (?latitud>\"19.2\"^^xsd:double) && (?latitud<\"19.3\"^^xsd:double) && (?longitud<\"-99.1\"^^xsd:double) && (?longitud>\"-99.2\"^^xsd:double) ) .\n"
                + "}\n"
                + "LIMIT 100";

        //lex = new SparqlLexer(new ANTLRFileStream(args[0]));
        lex = new SparqlLexer(new ANTLRInputStream(
                "select (count(*) as ?c) ?s ?p ?o where {?s a ?o; hola:asd <http://sdf.ser:sadasd>. ?s ?p2 ?o2}"));
        //lex = new SparqlLexer(new ANTLRInputStream(q));
    } catch (Exception ex) {
        Logger.getLogger(SparqlMain.class.getName()).log(Level.SEVERE, null, ex);
    }

    CommonTokenStream tokens = new CommonTokenStream(lex);

    System.out.println("Tokens: -------------------------------");
    tokens.fill();
    System.out.println("Number of tokens " + tokens.getTokens().size());

    List tokenList = tokens.getTokens();

    System.out.println("TokenList: -------------------------------");
    Iterator it = tokenList.iterator();
    while (it.hasNext()) {
        Token t = (Token) it.next();
        System.out.println(t.toString());
    }
    System.out.flush();

    System.out.println("Input from token list: -------------------------------");
    it = tokenList.iterator();
    while (it.hasNext()) {
        Token t = (Token) it.next();
        if (t.getType() != SparqlParser.EOF) {
            if (t.getType() == SparqlParser.WS || t.getType() == SparqlParser.COMMENT) {
                String s = t.getText();
                s = s.replace("\r\n", "\n");
                System.out.print(s);
            } else {
                System.out.print(t.getText());
            }
        }
    }
    System.out.flush();

    SparqlParser parser = new SparqlParser(tokens);
    parser.setBuildParseTree(true);

    System.out.println("Start parsing: -------------------------------");
    System.out.flush();

    ParserRuleContext t = parser.query();
    System.out.flush();

    System.out.println("Parse tree: -------------------------------");
    System.out.println(t.toStringTree(parser));

    int x = t.getRuleIndex();
    String rnames[] = parser.getRuleNames();
    getTreeText(t, rnames);
    //if(true)return;

    // visualize parse tree in dialog box
    t.inspect(parser);

    if (parser.getNumberOfSyntaxErrors() <= 0) {
        //ParseTreeWalker walker = new ParseTreeWalker();

        String groupFile = "/programming/proys/SWB4/swb/SWBPlatform/src/org/semanticwb/rdf/sparql/ident.stg";
        if (args.length > 1) {
            groupFile = args[1];
        }
        System.out.println("Read StringTemplate Group File: " + groupFile + "-------------------------------");
        STGroup g = new STGroupFile(groupFile);

        // IdentVisitor visitor = new IdentVisitor();
        // visitor.setSTGroup(g);
        // ST query = visitor.visit(t);
        //
        // System.out.println("Emit reformatted query: -------------------------------");
        // System.out.println(query.render(lineWidth));
        //
        // System.out.println("Emit original query: -------------------------------");
        // String q = query.render(lineWidth);
        //
        // /* get common token stream */
        // File tmpFile = File.createTempFile("query_", ".rq");
        // FileOutputStream fo = new FileOutputStream(tmpFile);
        // OutputStreamWriter ow = new OutputStreamWriter(fo, "UTF8");
        // ow.write(q);
        // ow.close();
        //
        // /* transformation pipline
        //  * step 1: Unicode pre-processing
        //  * step 2: Lexical analysis
        //  */
        // lex = new SparqlLexer(new ANTLRFileStream(tmpFile.getCanonicalPath(), "UTF8"));

        tokens = new CommonTokenStream(lex);
        List formattedTokenList = tokens.getTokens();

        it = tokenList.iterator();
        Iterator fit = formattedTokenList.iterator();

        while (it.hasNext()) {
            Token originalToken = (Token) it.next();
            if (originalToken.getType() != SparqlParser.EOF) {
                if (originalToken.getType() == SparqlParser.WS
                        || originalToken.getType() == SparqlParser.COMMENT) {
                    String s = originalToken.getText();
                    s = s.replace("\r\n", "\n");
                    System.out.print(s);
                } else {
                    System.out.print(originalToken.getText());
                }
            }
        }
        System.out.flush();
    }

    System.out.println("-------------------------------");
    System.out.println("Number of errors encountered: " + parser.getNumberOfSyntaxErrors());
}
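The single fill() call right after the stream is created buffers the complete token list, which then feeds the token count, both dump loops, and the parse itself; getTokens() returns every buffered token regardless of channel, which is what lets the loops echo WS and COMMENT tokens to reconstruct the input verbatim.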
From source file: us.ihmc.idl.generator.IDLGenerator.java
License: Apache License

public static void printTokenStream(CommonTokenStream tokens) {
    tokens.fill();

    for (int index = 0; index < tokens.size(); index++) {
        printToken(tokens, index, tokens.get(index));
        // printToken(tokens, index, tokens.LA(index));
        // System.out.println(tokens.LA(index));
        // printToken(tokens, index, tokens.LT(index));
        // printToken(tokens, index, tokens.LB(index));
    }
}
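fill() has to come first here: size() and get(index) reflect only tokens already buffered, so the fill() is what makes the whole stream visible to the index loop.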