List of usage examples for org.antlr.v4.runtime.Token.getStopIndex()
int getStopIndex();
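The examples below all rely on two recurring idioms: getStopIndex() returns the inclusive index of a token's last character in the input stream, so getStopIndex() - getStartIndex() + 1 is the token's length and getStopIndex() + 1 is the exclusive end offset. Here is a minimal, self-contained sketch of that arithmetic, assuming only the ANTLR v4 runtime on the classpath; the token type 1 and the hard-coded indices are illustrative and not taken from any example below.

import org.antlr.v4.runtime.CommonToken;

public final class TokenSpanDemo {
	public static void main(final String[] args) {
		// CommonToken is the runtime's default Token implementation. A lexer normally
		// fills in the indices; they are set by hand here only to show the arithmetic.
		final CommonToken token = new CommonToken(1, "hello"); // 1 = arbitrary token type
		token.setStartIndex(10);
		token.setStopIndex(14); // inclusive index of the last character

		final int length = token.getStopIndex() - token.getStartIndex() + 1; // 5
		final int endOffset = token.getStopIndex() + 1;                      // 15 (exclusive)

		System.out.println("length=" + length + ", endOffset=" + endOffset);
	}
}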
From source file: org.eclipse.titan.designer.AST.LargeLocation.java
License: Open Source License
/**
 * Constructor for ANTLR v4 tokens
 * @param aFile the parsed file
 * @param aStartToken the 1st token, its line and start position will be used for the location
 *        NOTE: start position is the column index of the token's 1st character.
 *        Column index starts with 0.
 * @param aEndToken the last token, its end position will be used for the location.
 *        NOTE: end position is the column index after the token's last character.
 */
public LargeLocation(IFile aFile, Token aStartToken, Token aEndToken) {
	super(aFile, aStartToken.getLine(), aStartToken.getStartIndex(), -1);
	endLine = -1;
	if (aEndToken != null) {
		setEndOffset(aEndToken.getStopIndex() + 1);
		endLine = aEndToken.getLine();
	}
}
From source file: org.eclipse.titan.designer.AST.Location.java
License: Open Source License
/**
 * Constructor for ANTLR v4 tokens
 * @param aFile the parsed file
 * @param aStartToken the 1st token, its line and start position will be used for the location
 *        NOTE: start position is the column index of the token's 1st character.
 *        Column index starts with 0.
 * @param aEndToken the last token, its end position will be used for the location.
 *        NOTE: end position is the column index after the token's last character.
 */
public Location(final IResource aFile, final Token aStartToken, final Token aEndToken) {
	setLocation(aFile, aStartToken.getLine(), aStartToken.getStartIndex(), aEndToken.getStopIndex() + 1);
}
From source file: org.eclipse.titan.designer.AST.Location.java
License: Open Source License
/**
 * Sets the end offset with an ANTLR v4 end token
 * @param aEndToken the new end token
 */
public final void setEndOffset(final Token aEndToken) {
	this.setEndOffset(aEndToken.getStopIndex() + 1); // after the last character of aEndToken
}
From source file: org.eclipse.titan.designer.parsers.asn1parser.TokenWithIndexAndSubTokens.java
License: Open Source License
public TokenWithIndexAndSubTokens(Token tok) {
	super(tok);
	tokenList = new ArrayList<Token>();
	super.setStartIndex(tok.getStartIndex());
	super.setStopIndex(tok.getStopIndex());
}
From source file: org.eclipse.titan.designer.parsers.ttcn3parser.ConditionalTransition.java
License: Open Source License
@Override
public int fetch(int n) {
	if (fetchedEOF) {
		return 0;
	}
	int i = 0;
	do {
		Token t;
		if (tokenStreamStack.isEmpty()) {
			t = getTokenSource().nextToken();
		} else {
			t = tokenStreamStack.peek().getTokenSource().nextToken();
		}
		if (t == null) {
			return 0;
		}
		int tokenType = t.getType();
		if (tokenType == Ttcn3Lexer.PREPROCESSOR_DIRECTIVE) {
			lastPPDirectiveLocation = new Location(actualFile, t.getLine(), t.getStartIndex(), t.getStopIndex() + 1);
			// 1. the first # shall be discarded
			// 2. "\\\n" strings are removed, so multiline tokens, which are split by backslash, are extracted to one line
			final String text = t.getText().substring(1).replace("\\\n", "");
			Reader reader = new StringReader(text);
			CharStream charStream = new UnbufferedCharStream(reader);
			PreprocessorDirectiveLexer lexer = new PreprocessorDirectiveLexer(charStream);
			lexer.setTokenFactory(new PPDirectiveTokenFactory(true, t));
			lexerListener = new PPListener();
			lexer.removeErrorListeners();
			lexer.addErrorListener(lexerListener);
			lexer.setLine(t.getLine());
			lexer.setCharPositionInLine(t.getCharPositionInLine());
			// 1. Previously it was UnbufferedTokenStream(lexer), but it was changed to BufferedTokenStream,
			//    because UnbufferedTokenStream seems to be unusable. It is an ANTLR 4 bug.
			//    Read this: https://groups.google.com/forum/#!topic/antlr-discussion/gsAu-6d3pKU
			//    pr_PatternChunk[StringBuilder builder, boolean[] uni]:
			//      $builder.append($v.text); <-- exception is thrown here:
			//      java.lang.UnsupportedOperationException: interval 85..85 not in token buffer window: 86..341
			// 2. Changed from BufferedTokenStream to CommonTokenStream, otherwise tokens with
			//    "-> channel(HIDDEN)" are not filtered out in lexer.
			final CommonTokenStream tokenStream = new CommonTokenStream(lexer);
			PreprocessorDirectiveParser localParser = new PreprocessorDirectiveParser(tokenStream);
			localParser.setBuildParseTree(false);
			parserListener = new PPListener(localParser);
			localParser.removeErrorListeners();
			localParser.addErrorListener(parserListener);
			localParser.setIsActiveCode(condStateStack.isPassing());
			localParser.setMacros(macros);
			localParser.setLine(t.getLine());
			PreprocessorDirective ppDirective = null;
			ppDirective = localParser.pr_Directive().ppDirective;
			errorsStored.addAll(localParser.getErrorStorage());
			warnings.addAll(localParser.getWarnings());
			unsupportedConstructs.addAll(localParser.getUnsupportedConstructs());
			if (ppDirective != null) {
				ppDirective.line = t.getLine();
				if (ppDirective.isConditional()) {
					boolean preIsPassing = condStateStack.isPassing();
					condStateStack.processDirective(ppDirective);
					boolean postIsPassing = condStateStack.isPassing();
					if (preIsPassing != postIsPassing && tokenStreamStack.isEmpty() && getTokenSource() instanceof Ttcn3Lexer) {
						// included files are ignored because of ambiguity
						Location ppLocation = lastPPDirectiveLocation;
						if (ppLocation != null) {
							if (preIsPassing) {
								// switched to inactive: begin a new inactive location
								Location loc = new Location(actualFile, ppLocation.getLine(), ppLocation.getEndOffset(), ppLocation.getEndOffset());
								inactiveCodeLocations.add(loc);
							} else {
								// switched to active: end the current inactive location
								int iclSize = inactiveCodeLocations.size();
								if (iclSize > 0) {
									Location lastLocation = inactiveCodeLocations.get(iclSize - 1);
									lastLocation.setEndOffset(ppLocation.getOffset());
								}
							}
						}
					}
				} else {
					// other directive types
					if (condStateStack.isPassing()) {
						// do something with the directive
						switch (ppDirective.type) {
						case INCLUDE: {
							if (tokenStreamStack.size() > RECURSION_LIMIT) {
								// dumb but safe defense against infinite recursion, default value from gcc
								TITANMarker marker = new TITANMarker("Maximum #include recursion depth reached",
										ppDirective.line, -1, -1, IMarker.SEVERITY_ERROR, IMarker.PRIORITY_NORMAL);
								unsupportedConstructs.add(marker);
							} else {
								//TODO: Makes the Eclipse slow down
								processIncludeDirective(ppDirective);
							}
						} break;
						case ERROR: {
							String errorMessage = ppDirective.str == null ? "" : ppDirective.str;
							TITANMarker marker = new TITANMarker(errorMessage, ppDirective.line, -1, -1,
									IMarker.SEVERITY_ERROR, IMarker.PRIORITY_NORMAL);
							unsupportedConstructs.add(marker);
						} break;
						case WARNING: {
							String warningMessage = ppDirective.str == null ? "" : ppDirective.str;
							TITANMarker marker = new TITANMarker(warningMessage, ppDirective.line, -1, -1,
									IMarker.SEVERITY_WARNING, IMarker.PRIORITY_NORMAL);
							warnings.add(marker);
						} break;
						case LINECONTROL:
						case LINEMARKER:
						case PRAGMA:
						case NULL: {
							String reportPreference = Platform.getPreferencesService().getString(
									ProductConstants.PRODUCT_ID_DESIGNER,
									PreferenceConstants.REPORT_IGNORED_PREPROCESSOR_DIRECTIVES,
									GeneralConstants.WARNING, null);
							if (!GeneralConstants.IGNORE.equals(reportPreference)) {
								boolean isError = GeneralConstants.ERROR.equals(reportPreference);
								TITANMarker marker = new TITANMarker(
										MessageFormat.format("Preprocessor directive {0} is ignored", ppDirective.type.getName()),
										ppDirective.line, -1, -1,
										isError ? IMarker.SEVERITY_ERROR : IMarker.SEVERITY_WARNING,
										IMarker.PRIORITY_NORMAL);
								if (isError) {
									unsupportedConstructs.add(marker);
								} else {
									warnings.add(marker);
								}
							}
						} break;
						default:
							// ignore
						}
					}
				}
			}
		} else if (tokenType == Token.EOF) {
			if (!tokenStreamStack.isEmpty()) {
				// the included file ended, drop lexer from the stack and ignore EOF token
				TokenStreamData tsd = tokenStreamStack.pop();
				if (parser != null) {
					if (tokenStreamStack.isEmpty()) {
						parser.setActualFile(actualFile);
						parser.setLexer(actualLexer);
					} else {
						parser.setActualFile(tokenStreamStack.peek().file);
						parser.setLexer(tokenStreamStack.peek().lexer);
					}
				}
				if (tsd.reader != null) {
					try {
						tsd.reader.close();
					} catch (IOException e) {
					}
				}
			} else {
				fetchedEOF = true;
				condStateStack.eofCheck();
				tokens.add(t);
				((CommonToken) t).setTokenIndex(tokens.size() - 1);
				--n;
				++i;
				if (n == 0) {
					return i;
				}
			}
		} else {
			if (condStateStack.isPassing()) {
				tokens.add(t);
				((CommonToken) t).setTokenIndex(tokens.size() - 1);
				--n;
				++i;
				if (n == 0) {
					return i;
				}
			}
		}
	} while (true);
}
From source file: org.eclipse.titan.runtime.core.cfgparser.CfgLocation.java
License: Open Source License
/**
 * Constructor for ANTLR v4 tokens
 * @param aFile the parsed file
 * @param aStartToken the 1st token, its line and start position will be used for the location
 *        NOTE: start position is the column index of the token's 1st character.
 *        Column index starts with 0.
 * @param aEndToken the last token, its end position will be used for the location.
 *        NOTE: end position is the column index after the token's last character.
 */
public CfgLocation(final File aFile, final Token aStartToken, final Token aEndToken) {
	setLocation(aFile, aStartToken.getLine(), aStartToken.getStartIndex(), aEndToken.getStopIndex() + 1);
}
From source file: org.geotoolkit.cql.JCQLTextPane.java
License: Open Source License
private void syntaxHighLight(ParseTree tree, StyledDocument doc, AtomicInteger position) {
	if (tree instanceof ParserRuleContext) {
		final ParserRuleContext prc = (ParserRuleContext) tree;
		if (prc.exception != null) {
			// error nodes
			final Token tokenStart = prc.getStart();
			Token tokenEnd = prc.getStop();
			if (tokenEnd == null) tokenEnd = tokenStart;
			final int offset = tokenStart.getStartIndex();
			final int length = tokenEnd.getStopIndex() - tokenStart.getStartIndex() + 1;
			doc.setCharacterAttributes(offset, length, styleError, true);
			return;
		}
		// special case for functions
		if (prc instanceof CQLParser.ExpressionTermContext) {
			final CQLParser.ExpressionTermContext ctx = (CQLParser.ExpressionTermContext) prc;
			if (ctx.NAME() != null && ctx.LPAREN() != null) {
				final int nbChild = tree.getChildCount();
				for (int i = 0; i < nbChild; i++) {
					final ParseTree pt = tree.getChild(i);
					if (pt instanceof TerminalNode && ((TerminalNode) pt).getSymbol().getType() == CQLLexer.NAME) {
						final TerminalNode tn = (TerminalNode) pt;
						// if index < 0 = missing token
						final Token token = tn.getSymbol();
						final int offset = token.getStartIndex();
						final int length = token.getStopIndex() - token.getStartIndex() + 1;
						position.addAndGet(length);
						doc.setCharacterAttributes(offset, length, styleFunction, true);
					} else {
						syntaxHighLight(pt, doc, position);
					}
				}
				return;
			}
		}
	}
	if (tree instanceof TerminalNode) {
		final TerminalNode tn = (TerminalNode) tree;
		// if index < 0 = missing token
		final Token token = tn.getSymbol();
		final int offset = token.getStartIndex();
		final int length = token.getStopIndex() - token.getStartIndex() + 1;
		position.addAndGet(length);
		switch (token.getType()) {
		case CQLLexer.COMMA: case CQLLexer.UNARY: case CQLLexer.MULT:
			doc.setCharacterAttributes(offset, length, styleDefault, true);
			break;
		// Expressions -------------------------------------------------
		case CQLLexer.TEXT: case CQLLexer.INT: case CQLLexer.FLOAT: case CQLLexer.DATE:
		case CQLLexer.DURATION_P: case CQLLexer.DURATION_T: case CQLLexer.POINT:
		case CQLLexer.LINESTRING: case CQLLexer.POLYGON: case CQLLexer.MPOINT:
		case CQLLexer.MLINESTRING: case CQLLexer.MPOLYGON:
			doc.setCharacterAttributes(offset, length, styleLiteral, true);
			break;
		case CQLLexer.PROPERTY_NAME:
			doc.setCharacterAttributes(offset, length, stylePropertyName, true);
			break;
		case CQLLexer.NAME:
			if (tree.getChildCount() == 0) {
				// property name
				doc.setCharacterAttributes(offset, length, stylePropertyName, true);
			} else {
				// function name
				doc.setCharacterAttributes(offset, length, styleFunction, true);
			}
			break;
		case CQLLexer.RPAREN: case CQLLexer.LPAREN:
			doc.setCharacterAttributes(offset, length, styleParenthese, true);
			break;
		case CQLLexer.COMPARE: case CQLLexer.LIKE: case CQLLexer.IS: case CQLLexer.BETWEEN: case CQLLexer.IN:
			doc.setCharacterAttributes(offset, length, styleOperator, true);
			break;
		case CQLLexer.AND: case CQLLexer.OR: case CQLLexer.NOT:
			doc.setCharacterAttributes(offset, length, styleBinary, true);
			break;
		case CQLLexer.BBOX: case CQLLexer.BEYOND: case CQLLexer.CONTAINS: case CQLLexer.CROSSES:
		case CQLLexer.DISJOINT: case CQLLexer.DWITHIN: case CQLLexer.EQUALS: case CQLLexer.INTERSECTS:
		case CQLLexer.OVERLAPS: case CQLLexer.TOUCHES: case CQLLexer.WITHIN:
			doc.setCharacterAttributes(offset, length, styleBinary, true);
			break;
		default:
			doc.setCharacterAttributes(offset, length, styleError, true);
			break;
		}
	}
	final int nbChild = tree.getChildCount();
	for (int i = 0; i < nbChild; i++) {
		syntaxHighLight(tree.getChild(i), doc, position);
	}
}
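Both CQL highlighters treat getStopIndex() as inclusive: the example above turns it into an (offset, length) pair for setCharacterAttributes, while the next one turns it into an exclusive end for setStyle. A small, hypothetical helper showing the same conversion when slicing the original input text; the class and method names are illustrative and not part of the Geotoolkit code.

import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.tree.TerminalNode;

// Hypothetical utility: slices the original input for a terminal node, the same way
// the highlighter above derives its (offset, length) pairs for styling.
final class NodeText {
	static String sliceSource(final String source, final TerminalNode node) {
		final Token token = node.getSymbol();
		// A missing (error-recovery) token reports negative indices; return nothing for it.
		if (token.getStartIndex() < 0 || token.getStopIndex() < 0) {
			return "";
		}
		// getStopIndex() is inclusive, while String.substring() takes an exclusive end.
		return source.substring(token.getStartIndex(), token.getStopIndex() + 1);
	}
}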
From source file: org.geotoolkit.gui.javafx.filter.FXCQLEditor.java
License: Open Source License
private void syntaxHighLight(ParseTree tree) {
	if (tree instanceof ParserRuleContext) {
		final ParserRuleContext prc = (ParserRuleContext) tree;
		if (prc.exception != null) {
			// error nodes
			final Token tokenStart = prc.getStart();
			Token tokenEnd = prc.getStop();
			if (tokenEnd == null) tokenEnd = tokenStart;
			final int offset = tokenStart.getStartIndex();
			final int end = tokenEnd.getStopIndex() + 1;
			if (end > offset) {
				codeArea.setStyle(offset, end, STYLE_ERROR);
			}
			return;
		}
		// special case for functions
		if (prc instanceof CQLParser.ExpressionTermContext) {
			final CQLParser.ExpressionTermContext ctx = (CQLParser.ExpressionTermContext) prc;
			if (ctx.NAME() != null && ctx.LPAREN() != null) {
				final int nbChild = tree.getChildCount();
				for (int i = 0; i < nbChild; i++) {
					final ParseTree pt = tree.getChild(i);
					if (pt instanceof TerminalNode && ((TerminalNode) pt).getSymbol().getType() == CQLLexer.NAME) {
						final TerminalNode tn = (TerminalNode) pt;
						// if index < 0 = missing token
						final Token token = tn.getSymbol();
						final int offset = token.getStartIndex();
						final int end = token.getStopIndex() + 1;
						if (end > offset) {
							codeArea.setStyle(offset, end, STYLE_FUNCTION);
						}
					} else {
						syntaxHighLight(pt);
					}
				}
				return;
			}
		}
	}
	if (tree instanceof TerminalNode) {
		final TerminalNode tn = (TerminalNode) tree;
		// if index < 0 = missing token
		final Token token = tn.getSymbol();
		final int offset = token.getStartIndex();
		final int end = token.getStopIndex() + 1;
		switch (token.getType()) {
		case CQLLexer.COMMA: case CQLLexer.UNARY: case CQLLexer.MULT:
			codeArea.setStyle(offset, end, STYLE_DEFAULT);
			break;
		// Expressions -------------------------------------------------
		case CQLLexer.TEXT: case CQLLexer.INT: case CQLLexer.FLOAT: case CQLLexer.DATE:
		case CQLLexer.DURATION_P: case CQLLexer.DURATION_T: case CQLLexer.POINT:
		case CQLLexer.LINESTRING: case CQLLexer.POLYGON: case CQLLexer.MPOINT:
		case CQLLexer.MLINESTRING: case CQLLexer.MPOLYGON:
			codeArea.setStyle(offset, end, STYLE_LITERAL);
			break;
		case CQLLexer.PROPERTY_NAME:
			codeArea.setStyle(offset, end, STYLE_PROPERTY);
			break;
		case CQLLexer.NAME:
			if (tree.getChildCount() == 0) {
				// property name
				codeArea.setStyle(offset, end, STYLE_PROPERTY);
			} else {
				// function name
				codeArea.setStyle(offset, end, STYLE_FUNCTION);
			}
			break;
		case CQLLexer.RPAREN: case CQLLexer.LPAREN:
			codeArea.setStyle(offset, end, STYLE_PARENTHESE);
			break;
		case CQLLexer.COMPARE: case CQLLexer.LIKE: case CQLLexer.IS: case CQLLexer.BETWEEN: case CQLLexer.IN:
			codeArea.setStyle(offset, end, STYLE_OPERATOR);
			break;
		case CQLLexer.AND: case CQLLexer.OR: case CQLLexer.NOT:
			codeArea.setStyle(offset, end, STYLE_BINARY);
			break;
		case CQLLexer.BBOX: case CQLLexer.BEYOND: case CQLLexer.CONTAINS: case CQLLexer.CROSSES:
		case CQLLexer.DISJOINT: case CQLLexer.DWITHIN: case CQLLexer.EQUALS: case CQLLexer.INTERSECTS:
		case CQLLexer.OVERLAPS: case CQLLexer.TOUCHES: case CQLLexer.WITHIN:
			codeArea.setStyle(offset, end, STYLE_BINARY);
			break;
		default:
			codeArea.setStyle(offset, end, STYLE_ERROR);
			break;
		}
	}
	final int nbChild = tree.getChildCount();
	for (int i = 0; i < nbChild; i++) {
		syntaxHighLight(tree.getChild(i));
	}
}
From source file: org.jruby.truffle.core.format.parser.PrintfTreeBuilder.java
License: Open Source License
private ByteList tokenAsBytes(Token token, int trim) {
	return new ByteList(source, token.getStartIndex() + trim,
			token.getStopIndex() - token.getStartIndex() + 1 - 2 * trim);
}
From source file: org.jruby.truffle.core.format.printf.PrintfTreeBuilder.java
License: Open Source License
private byte[] tokenAsBytes(Token token, int trim) {
	final int from = token.getStartIndex() + trim;
	final int to = from + token.getStopIndex() - token.getStartIndex() + 1 - 2 * trim;
	return Arrays.copyOfRange(source, from, to);
}
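In both printf builders the trim parameter drops trim characters from each end of the token (for example surrounding delimiters), so the extracted slice has length getStopIndex() - getStartIndex() + 1 - 2 * trim. A hypothetical String-based equivalent of the same arithmetic; tokenAsString and TokenSlices are illustrative names, not part of the JRuby/Truffle code.

import org.antlr.v4.runtime.Token;

// Illustrative only: the same trim arithmetic as tokenAsBytes above, applied to a String.
// Passing trim = 1 strips one character from each end, e.g. surrounding quotes.
final class TokenSlices {
	static String tokenAsString(final String source, final Token token, final int trim) {
		final int from = token.getStartIndex() + trim;
		// getStopIndex() is inclusive; +1 makes it exclusive, then drop trim characters from the end.
		final int to = token.getStopIndex() + 1 - trim;
		return source.substring(from, to);
	}
}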