List of usage examples for org.antlr.v4.runtime Token getText
String getText();
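Before the collected examples, here is a minimal sketch of how getText() is typically called in a lexing loop. The grammar and the generated MyLexer class are hypothetical placeholders; only the runtime calls (ANTLRInputStream, nextToken(), getType(), getText(), Token.EOF) are the ANTLR 4 API used throughout the examples below.

import org.antlr.v4.runtime.ANTLRInputStream;
import org.antlr.v4.runtime.Token;

public class TokenTextDemo {
    public static void main(String[] args) {
        // MyLexer is a hypothetical ANTLR-generated lexer; substitute any generated lexer class.
        MyLexer lexer = new MyLexer(new ANTLRInputStream("a + b"));
        for (Token token = lexer.nextToken(); token.getType() != Token.EOF; token = lexer.nextToken()) {
            // getText() returns the exact input text matched by this token
            System.out.println(token.getType() + " -> \"" + token.getText() + "\"");
        }
    }
}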
From source file: org.netbeans.gnu.buildsystem.automake.lexer.AMLexer.java
License: Open Source License
@Override
public org.netbeans.api.lexer.Token<AMTokenId> nextToken() {
    Token token = lexer.nextToken();
    logger.fine((token == null) ? "token: null" : token.getText() + ":" + token.getType());
    if (token == null) {
        throw new IllegalStateException("Token unexpectedly null.");
    }
    if (token.getType() == automakeLexer.EOF) {
        return null;
    }
    return info.tokenFactory().createToken(AMLanguageHierarchy.getToken(token.getType()));
}
From source file: org.opencypher.tools.g4processors.BNFListener.java
License: Apache License
private String findHiddenTextAfter(ParserRuleContext ctx) {
    Token endCtx = ctx.getStop();
    int i = endCtx.getTokenIndex();
    List<Token> normalTextChannel = tokens.getHiddenTokensToRight(i, BNFLexer.HIDDEN);
    if (normalTextChannel != null) {
        // the quasi-comment (description) may be the end of a rule or the start of the next;
        // separation is on a blank line
        int nextLine = endCtx.getLine() + 1;
        List<String> content = new ArrayList<>();
        for (Token lineToken : normalTextChannel) {
            if (lineToken.getLine() == nextLine) {
                content.add(lineToken.getText().replaceFirst("// ?", ""));
                nextLine++;
            } else {
                break;
            }
        }
        return content.stream().collect(Collectors.joining("\n"));
    }
    return "";
}
From source file: org.openehr.adl.parser.tree.AdlTreeParserUtils.java
License: Open Source License
@Nullable
static String collectText(Token token) {
    if (token == null) return null;
    if (token.getType() == adlLexer.STRING) {
        return unescapeString(token.getText());
    }
    return token.getText();
}
From source file: org.osate.ba.parser.AadlAntlrErrorReporter.java
License: Open Source License
protected boolean isKeywordError(Object offendingSymbol, RecognitionException ex) {
    Token symb = (Token) offendingSymbol;
    if (symb.getType() <= AadlBaLexer.KEYWORD_MAX_ID) // Select only keywords.
    {
        String symbol = '\'' + symb.getText() + '\'';
        return Aadl2Utils.contains(symbol, Arrays.asList(AadlBaLexer.tokenNames));
    } else {
        return false;
    }
}
From source file: org.osate.ba.parser.AadlBaParserVisitor.java
License: Open Source License
/**
 * Sets obj's location reference based on full token information.
 *
 * @param obj the AObject to be set
 * @param token the token
 */
protected void setLocationReference(AObject obj, Token token) {
    int offset = ((CommonToken) token).getStartIndex();
    int length = token.getText().length();
    int column = token.getCharPositionInLine() + 1; // Zero index based.
    int line = token.getLine();
    AadlBaLocationReference location = new AadlBaLocationReference(_annexOffset, _filename, line, offset,
            length, column, behaviorElementId);
    obj.setLocationReference(location);
}
From source file: org.osate.ba.texteditor.XtextAadlBaHighlighter.java
License: Open Source License
@Override
public void addToHighlighting(int annexOffset, Token token, String id) {
    int offset = ((CommonToken) token).getStartIndex();
    int length = token.getText().length();
    int column = token.getCharPositionInLine();
    _elementToHighlight.add(new AadlBaLocationReference(annexOffset, offset, length, column, id));
}
From source file: org.pshdl.model.parser.SourceInfo.java
License: Open Source License
public SourceInfo(BufferedTokenStream tokens, ParserRuleContext context) {
    this.context = context;
    this.startLine = context.start.getLine();
    this.totalOffset = context.start.getStartIndex();
    this.startPosInLine = context.start.getCharPositionInLine();
    if (context.stop != null) {
        this.endLine = context.stop.getLine();
        this.endPosInLine = context.stop.getCharPositionInLine();
    } else {
        this.endLine = startLine;
        this.endPosInLine = startPosInLine;
    }
    if (tokens != null) {
        this.length = tokens.getText(context.getSourceInterval()).length();
        final List<Token> hidden = tokens.getHiddenTokensToLeft(context.start.getTokenIndex(),
                PSHDLLangLexer.COMMENTS);
        if (hidden != null) {
            for (final Token token : hidden) {
                comments.add(token.getText());
            }
        }
    } else {
        this.length = -1;
    }
}
From source file: org.qcert.sql.PrestoEncoder.java
License: Apache License
/**
 * Apply necessary fixups at the lexical level (needed to get the query to even be parsed by presto-parser).
 * 1. Convert occurrences of 'NN [days|months|years]' to 'interval NN [day|month|year]' (needed by many TPC-DS queries).
 * 2. Remove parenthesized numeric field after an interval unit field (needed to run TPC-H query 1).
 * 3. Remove parenthesized name list in 'create view NAME (...) as' and relocate the names into the body of the statement (needed to run TPC-H query 15).
 * 4. On a 'create table' (schema), remove occurrences of NOT NULL, which presto does not handle.
 * Fixup 3 is only partially lexical; the lexical phase remembers the names and a visitor updates the body after parsing.
 * @param query the original query
 * @param foundNames an initially empty list to which names found in fixup 3 may be added for later processing
 * @return the altered query
 */
private static String applyLexicalFixups(String query, List<String> foundNames) {
    if (VERBOSE_LEXICAL) {
        System.out.println("Before:");
        System.out.println(query);
    }
    CharStream stream = new CaseInsensitiveStream(new ANTLRInputStream(query));
    SqlBaseLexer lexer = new SqlBaseLexer(stream);
    StringBuilder buffer = new StringBuilder();
    Token savedInteger = null;
    List<Token> savedWS = new ArrayList<>();
    FixupState state = FixupState.OPEN;
    for (Token token : lexer.getAllTokens()) {
        /* The 'state' is used for fixups 2 and 3 */
        switch (state) {
        case ELIDE1:
            state = FixupState.ELIDE2;
            continue;
        case ELIDE2:
            state = FixupState.OPEN;
            continue;
        case ELIDELIST:
            if (token.getType() == SqlBaseLexer.AS) {
                buffer.append(token.getText());
                state = FixupState.OPEN;
            } else if (token.getType() != SqlBaseLexer.WS && !token.getText().equals(",")
                    && !token.getText().equals(")")) {
                foundNames.add(token.getText());
            }
            continue;
        case CREATE:
            buffer.append(token.getText());
            if (token.getType() == SqlBaseLexer.VIEW)
                state = FixupState.VIEW;
            else if (token.getType() == SqlBaseLexer.TABLE)
                state = FixupState.TABLE;
            else if (token.getType() != SqlBaseLexer.WS)
                state = FixupState.OPEN;
            continue;
        case VIEW:
            if (token.getText().equals("(")) {
                state = FixupState.ELIDELIST;
            } else {
                buffer.append(token.getText());
                if (token.getType() == SqlBaseLexer.AS)
                    state = FixupState.OPEN;
            }
            continue;
        case TABLE:
            if (token.getType() != SqlBaseLexer.NOT && token.getType() != SqlBaseLexer.NULL)
                buffer.append(token.getText());
            continue;
        case INTERVAL:
            buffer.append(token.getText());
            if (getUnit(token.getText()) != null)
                state = FixupState.UNIT;
            continue;
        case UNIT:
            if (token.getText().equals("(")) {
                state = FixupState.ELIDE1;
            } else {
                buffer.append(token.getText());
                if (token.getType() != SqlBaseLexer.WS)
                    state = FixupState.OPEN;
            }
            continue;
        case OPEN:
            if (token.getType() == SqlBaseLexer.INTERVAL) {
                state = FixupState.INTERVAL;
                buffer.append(token.getText());
                continue;
            } else if (token.getType() == SqlBaseLexer.CREATE) {
                state = FixupState.CREATE;
                buffer.append(token.getText());
                continue;
            }
            // If 'open' and there is no transition to another state, break the switch and try fixup 1.
            // This should be the only break in the switch.
            break;
        }
        /* The 'savedInteger' is used for fixup 1 */
        if (token.getType() == SqlBaseLexer.INTEGER_VALUE)
            savedInteger = token;
        else if (savedInteger != null) {
            String unit = getUnit(token.getText());
            if (unit != null) {
                buffer.append("interval '").append(savedInteger.getText()).append("' ").append(unit);
                savedInteger = null;
                savedWS.clear();
            } else if (token.getType() == SqlBaseLexer.WS)
                savedWS.add(token);
            else {
                buffer.append(savedInteger.getText());
                for (Token ws : savedWS)
                    buffer.append(ws.getText());
                buffer.append(token.getText());
                savedInteger = null;
                savedWS.clear();
            }
        } else
            buffer.append(token.getText());
    }
    if (savedInteger != null)
        buffer.append(savedInteger.getText());
    query = buffer.toString();
    if (VERBOSE_LEXICAL) {
        System.out.println("After:");
        System.out.println(query);
    }
    return query;
}
From source file: org.reaktivity.nukleus.maven.plugin.internal.AbstractMojo.java
License: Apache License
private AstSpecificationNode parseSpecification(String resourceName, URL resource) throws IOException {
    try (InputStream input = resource.openStream()) {
        ANTLRInputStream ais = new ANTLRInputStream(input);
        NukleusLexer lexer = new NukleusLexer(ais);
        CommonTokenStream tokens = new CommonTokenStream(lexer);
        NukleusParser parser = new NukleusParser(tokens);
        parser.setErrorHandler(new BailErrorStrategy());
        SpecificationContext ctx = parser.specification();
        return new AstParser().visitSpecification(ctx);
    } catch (ParseCancellationException ex) {
        Throwable cause = ex.getCause();
        if (cause instanceof RecognitionException) {
            RecognitionException re = (RecognitionException) cause;
            Token token = re.getOffendingToken();
            if (token != null) {
                String message = String.format("Parse failed in %s at %d:%d on \"%s\"", resourceName,
                        token.getLine(), token.getCharPositionInLine(), token.getText());
                getLog().error(message);
            }
        }
        throw ex;
    }
}
From source file: org.semanticwb.rdf.sparql.SparqlMain.java
License: Open Source License
public static String getNodeText(Tree t, String[] ruleNames) {
    if (ruleNames != null) {
        if (t instanceof RuleNode) {
            int ruleIndex = ((RuleNode) t).getRuleContext().getRuleIndex();
            String ruleName = ruleNames[ruleIndex];
            return "RuleNode:" + ruleName;
        }
        if (t instanceof ErrorNode) {
            return "ErrorNode:" + t.toString();
        }
        if (t instanceof TerminalNode) {
            Token symbol = ((TerminalNode) t).getSymbol();
            if (symbol != null) {
                String s = symbol.getText();
                return "TerminalNode:" + s;
            }
        }
    }
    return null;
}