Usage examples for the org.antlr.v4.runtime CommonTokenStream constructor
public CommonTokenStream(TokenSource tokenSource)
From source file:nl.han.ica.ap.nlp.App.java
License:Open Source License
/**
 * Parses the input into a UML diagram XML file.
 * @param input The input to be parsed.
 */
private void parseInput(ANTLRInputStream input) {
    NlpLexer lexer = new NlpLexer(input);
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    NlpParser parser = new NlpParser(tokens);
    // parser.setErrorHandler(new ErrorHandler());
    controller = new TreeController();
    parser.addErrorListener(controller);
    ParseTree tree = parser.tekst(); // begin parsing at init rule
    controller.walkTree(tree, parser, export);
}
From source file:nl.han.ica.ap.nlp.util.CSVtoG4.java
License:Open Source License
public boolean export() {
    try {
        importfile.read();
    } catch (FileNotFoundException e) {
        System.out.println("File not found");
        e.printStackTrace();
    }
    String csvfile = importfile.getContent();

    ANTLRInputStream input = new ANTLRInputStream(csvfile);
    CSVtoG4Lexer lexer = new CSVtoG4Lexer(input);
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    CSVtoG4Parser parser = new CSVtoG4Parser(tokens);

    ParseTreeWalker walker = new ParseTreeWalker();
    ParseTree tree = parser.csv();
    CSVRowListener listener = new CSVRowListener(parser);
    walker.walk(listener, tree);

    ArrayList<String> conjugations = listener.getConjugations();
    String werkwoorden = "lexer grammar NlpWerkwoorden;\n\nWERKWOORD : (";
    for (int i = 0; i < conjugations.size(); i++) {
        werkwoorden += "'" + conjugations.get(i) + "'|";
    }
    werkwoorden = werkwoorden.substring(0, werkwoorden.length() - 1);
    werkwoorden += ");";

    exportfile.setContent(werkwoorden);
    return exportfile.write();
}
From source file:nl.han.ica.ap.purify.App.java
License:Open Source License
public static void main(String[] args) {
    if (args.length < 1) {
        System.err.println(COMMAND_LINE_PARAM_MISSING);
        return;
    }

    graph = new CallGraph();

    ClassNodeListener classNodelistener = new ClassNodeListener(graph);
    EdgeListener edgelistener = new EdgeListener(graph);
    ParseTreeWalker walker = new ParseTreeWalker();

    List<SourceFile> sourceFiles = new ArrayList<SourceFile>();

    List<IDetector> detectors = new ArrayList<IDetector>();
    detectors.add(new DuplicatedCodeDetector());
    detectors.add(new MagicNumberDetector());
    detectors.add(new RemoveParameterDetector());
    detectors.add(new UnusedMethodDetector());

    List<ISolver> solvers = new ArrayList<ISolver>();
    solvers.add(new MagicNumberSolver());
    solvers.add(new RemoveParameterSolver());
    solvers.add(new UnusedMethodSolver());

    for (int i = 0; i < args.length; i++) {
        ANTLRInputStream input = null;
        InputStream is = null;

        if (args[i] != null) {
            try {
                is = new FileInputStream(args[i]);
            } catch (FileNotFoundException e) {
                e.printStackTrace();
                continue;
            }
        }

        try {
            input = new ANTLRInputStream(is);
        } catch (IOException e) {
            e.printStackTrace();
            continue;
        }

        JavaLexer lexer = new JavaLexer(input);
        CommonTokenStream tokens = new CommonTokenStream(lexer);
        JavaParser parser = new JavaParser(tokens);
        ParseTree tree = parser.compilationUnit();

        SourceFile file = new SourceFile(args[i], tokens, tree);
        sourceFiles.add(file);
    }

    for (SourceFile file : sourceFiles) {
        classNodelistener.setCurrentSourceFile(file);
        walker.walk(classNodelistener, file.getParseTree());
    }

    for (SourceFile file : sourceFiles) {
        edgelistener.setSourceFile(file);
        walker.walk(edgelistener, file.getParseTree());
    }

    for (SourceFile file : sourceFiles) {
        for (IDetector detector : detectors) {
            detector.analyze(file);
        }
    }

    for (IDetector detector : detectors) {
        detector.detect();
    }

    for (SourceFile file : sourceFiles) {
        for (ISolver solver : solvers) {
            solver.solve(file);
        }
    }

    for (SourceFile file : sourceFiles) {
        System.out.println("--------- FILE: " + file.getPath() + " ---------");
        for (int i = file.getIssuesSize() - 1; i >= 0; i--) {
            System.out.println(file.getIssue(i));
        }
    }

    for (SourceFile file : sourceFiles) {
        System.out.println("====== " + file.getPath() + "====== ");
        System.out.println(file.getRewriter().getText());
    }
}
From source file:nl.han.ica.ap.purify.test.tools.ParserTools.java
License:Open Source License
/**
 * Parse a file and get the parse tree.
 *
 * @param filename File to parse.
 * @return Parse tree
 */
public static SourceFile getParseTreeSourceFile(String filename) {
    ANTLRInputStream input = null;
    InputStream is = null;

    is = ParserTools.class.getResourceAsStream(filename);

    try {
        input = new ANTLRInputStream(is);
    } catch (IOException e) {
        e.printStackTrace();
    }

    JavaLexer lexer = new JavaLexer(input);
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    JavaParser parser = new JavaParser(tokens);

    SourceFile file = new SourceFile(filename, tokens, parser.compilationUnit());
    return file;
}
From source file:nl.knaw.huc.di.tag.tagml.importer.TAGMLImporter.java
License:Apache License
private TAGDocument importTAGML(CharStream antlrInputStream) throws TAGMLSyntaxError {
    TAGMLLexer lexer = new TAGMLLexer(antlrInputStream);
    ErrorListener errorListener = new ErrorListener();
    lexer.addErrorListener(errorListener);

    CommonTokenStream tokens = new CommonTokenStream(lexer);
    TAGMLParser parser = new TAGMLParser(tokens);
    parser.addErrorListener(errorListener);

    TAGDocument document = usingListener(parser, errorListener);
    // DocumentWrapper documentWrapper = usingVisitor(parser, errorListener);

    int numberOfSyntaxErrors = parser.getNumberOfSyntaxErrors();
    // LOG.info("parsed with {} parser syntax errors", numberOfSyntaxErrors);

    String errorMsg = "";
    if (errorListener.hasErrors()) {
        // logDocumentGraph(document,"");
        String errors = String.join("\n", errorListener.getErrors());
        errorMsg = "Parsing errors:\n" + errors;
        throw new TAGMLSyntaxError(errorMsg);
    }
    update(document.getDTO());
    return document;
}
From source file:nl.knaw.huygens.alexandria.query.AlexandriaQueryParser.java
License:Open Source License
List<WhereToken> tokenize(String whereString) {
    Log.info("whereString=<{}>", whereString);
    if (StringUtils.isEmpty(whereString)) {
        // parseErrors.add("empty or missing where");
        return Lists.newArrayList();
    }

    QueryErrorListener errorListener = new QueryErrorListener();
    CharStream stream = new ANTLRInputStream(whereString);
    AQLLexer lex = new AQLLexer(stream);
    lex.removeErrorListeners();

    CommonTokenStream tokenStream = new CommonTokenStream(lex);
    AQLParser parser = new AQLParser(tokenStream);
    parser.removeErrorListeners();
    parser.addErrorListener(errorListener);
    parser.setBuildParseTree(true);

    ParseTree tree = parser.root();
    Log.info("tree={}", tree.toStringTree(parser));

    if (errorListener.heardErrors()) {
        parseErrors.addAll(errorListener.getParseErrors().stream()
                .map(AlexandriaQueryParser::clarifyParseError)
                .collect(toList()));
        return Lists.newArrayList();
    }

    QueryVisitor visitor = new QueryVisitor();
    visitor.visit(tree);
    parseErrors.addAll(errorListener.getParseErrors());
    return visitor.getWhereTokens();
}
From source file:nl.knaw.huygens.alexandria.query.TAGQLQueryHandler.java
License:Apache License
public TAGQLResult execute(String statement) {
    CharStream stream = CharStreams.fromString(statement);
    ErrorListener errorListener = new ErrorListener();

    TAGQLLexer lexer = new TAGQLLexer(stream);
    lexer.addErrorListener(errorListener);

    CommonTokenStream tokens = new CommonTokenStream(lexer);
    TAGQLParser tagqlParser = new TAGQLParser(tokens);
    tagqlParser.addErrorListener(errorListener);

    ParseTree parseTree = tagqlParser.query();
    ParseTreeWalker parseTreeWalker = new ParseTreeWalker();
    TAGQLQueryListener listener = new TAGQLQueryListener(document);
    parseTreeWalker.walk(listener, parseTree);

    List<TAGQLStatement> statements = listener.getStatements();
    TAGQLResult result = new TAGQLResult(statement);
    statements.stream()
            .map(this::execute)
            .forEach(result::addResult);
    result.getErrors().addAll(errorListener.getErrors());
    return result;
}
From source file:nl.knaw.huygens.alexandria.texmecs.importer.TexMECSImporter.java
License:Apache License
private TAGDocument importTexMECS(CharStream antlrInputStream) {
    TexMECSLexer lexer = new TexMECSLexer(antlrInputStream);
    ErrorListener errorListener = new ErrorListener();
    lexer.addErrorListener(errorListener);

    CommonTokenStream tokens = new CommonTokenStream(lexer);
    TexMECSParser parser = new TexMECSParser(tokens);
    parser.addErrorListener(errorListener);
    parser.setBuildParseTree(true);

    ParseTree parseTree = parser.document();
    int numberOfSyntaxErrors = parser.getNumberOfSyntaxErrors();
    LOG.debug("parsed with {} syntax errors", numberOfSyntaxErrors);

    ParseTreeWalker parseTreeWalker = new ParseTreeWalker();
    TexMECSListener listener = new TexMECSListener(store);
    parseTreeWalker.walk(listener, parseTree);
    TAGDocument document = listener.getDocument();
    handleMarkupDominance(document);

    String errorMsg = "";
    if (listener.hasErrors()) {
        String errors = String.join("\n", listener.getErrors());
        errorMsg = "Parsing errors:\n" + errors;
    }
    if (numberOfSyntaxErrors > 0) {
        String errors = String.join("\n", errorListener.getErrors());
        errorMsg += "\n\nTokenizing errors:\n" + errors;
    }
    if (!errorMsg.isEmpty()) {
        throw new TexMECSSyntaxError(errorMsg);
    }
    return document;
}
From source file:nl.knaw.huygens.alexandria.texmecs.importer.TexMECSImporterInMemory.java
License:Apache License
private Document importTexMECS(CharStream antlrInputStream) {
    TexMECSLexer lexer = new TexMECSLexer(antlrInputStream);
    ErrorListener errorListener = new ErrorListener();
    lexer.addErrorListener(errorListener);

    CommonTokenStream tokens = new CommonTokenStream(lexer);
    TexMECSParser parser = new TexMECSParser(tokens);
    parser.addErrorListener(errorListener);
    parser.setBuildParseTree(true);

    ParseTree parseTree = parser.document();
    int numberOfSyntaxErrors = parser.getNumberOfSyntaxErrors();
    LOG.debug("parsed with {} syntax errors", numberOfSyntaxErrors);

    ParseTreeWalker parseTreeWalker = new ParseTreeWalker();
    TexMECSListenerInMemory listener = new TexMECSListenerInMemory();
    parseTreeWalker.walk(listener, parseTree);
    Document document = listener.getDocument();
    handleMarkupDominance(document.value());

    String errorMsg = "";
    if (listener.hasErrors()) {
        String errors = String.join("\n", listener.getErrors());
        errorMsg = "Parsing errors:\n" + errors;
    }
    if (numberOfSyntaxErrors > 0) {
        String errors = String.join("\n", errorListener.getErrors());
        errorMsg += "\n\nTokenizing errors:\n" + errors;
    }
    if (!errorMsg.isEmpty()) {
        throw new TexMECSSyntaxError(errorMsg);
    }
    return document;
}
From source file:nl.lxtreme.libtdl.grammar.TdlFactory.java
License:Apache License
/**
 * Directly creates a parse tree from the given input using the given configuration.
 *
 * @param config
 *            the configuration to use, cannot be <code>null</code>;
 * @param input
 *            the input to parse, cannot be <code>null</code>.
 * @return a {@link ParseTree}, never <code>null</code>.
 */
public static ParseTree createParseTree(TdlConfig config, String input) {
    Lexer lexer = createLexer(config);
    lexer.setInputStream(new ANTLRInputStream(input));

    Parser parser = createParser(config);
    parser.setInputStream(new CommonTokenStream(lexer));

    TdlDialect dialect = config.getDialect();
    switch (dialect) {
    case BASIC:
        return ((BasicTdlParser) parser).prog();
    case ADVANCED:
        return ((AdvTdlParser) parser).prog();
    default:
        throw new RuntimeException("Invalid/unknown dialect: " + dialect);
    }
}