List of usage examples for org.antlr.v4.runtime Token INVALID_TYPE
int INVALID_TYPE
To view the source code for org.antlr.v4.runtime.Token.INVALID_TYPE, click the Source Link.
From source file:com.facebook.presto.sql.parser.DelimiterLexer.java
License:Apache License
/**
 * Returns the next token from the input stream, checking the configured
 * statement delimiters before falling back to normal lexer matching.
 *
 * <p>This method is a copy of {@code org.antlr.v4.runtime.Lexer#nextToken},
 * with one addition: before asking the ATN interpreter to match a token, it
 * first tries to match each configured delimiter string and, on success,
 * emits a {@code SqlBaseParser.DELIMITER} token.</p>
 *
 * @return the next token, or the cached EOF token once end of input is hit
 * @throws IllegalStateException if no input stream has been set
 */
@Override public Token nextToken() {
    if (_input == null) {
        throw new IllegalStateException("nextToken requires a non-null input stream.");
    }
    // Mark start location in char stream so unbuffered streams are
    // guaranteed at least have text of current token
    int tokenStartMarker = _input.mark();
    try {
        outer: while (true) {
            if (_hitEOF) {
                emitEOF();
                return _token;
            }
            // Reset per-token state before attempting a match.
            _token = null;
            _channel = Token.DEFAULT_CHANNEL;
            _tokenStartCharIndex = _input.index();
            _tokenStartCharPositionInLine = getInterpreter().getCharPositionInLine();
            _tokenStartLine = getInterpreter().getLine();
            _text = null;
            do {
                _type = Token.INVALID_TYPE;
                int ttype = -1;
                // This entire method is copied from org.antlr.v4.runtime.Lexer, with the following bit
                // added to match the delimiters before we attempt to match the token
                boolean found = false;
                for (String terminator : delimiters) {
                    if (match(terminator)) {
                        ttype = SqlBaseParser.DELIMITER;
                        found = true;
                        break;
                    }
                }
                if (!found) {
                    try {
                        ttype = getInterpreter().match(_input, _mode);
                    } catch (LexerNoViableAltException e) {
                        notifyListeners(e); // report error
                        recover(e);
                        ttype = SKIP;
                    }
                }
                if (_input.LA(1) == IntStream.EOF) {
                    _hitEOF = true;
                }
                // Keep the first type assigned during this match attempt.
                if (_type == Token.INVALID_TYPE) {
                    _type = ttype;
                }
                if (_type == SKIP) {
                    continue outer;
                }
            } while (_type == MORE);
            if (_token == null) {
                emit();
            }
            return _token;
        }
    } finally {
        // make sure we release marker after match or
        // unbuffered char stream will keep buffering
        _input.release(tokenStartMarker);
    }
}
From source file:com.yahoo.yqlplus.language.internal.ast.ErrorNode.java
/**
 * Reports this node's token type. An error node never corresponds to a
 * recognized token, so it always answers with the ANTLR sentinel value
 * {@code Token.INVALID_TYPE}.
 *
 * @return {@code Token.INVALID_TYPE}, always
 */
public int getType() {
    return Token.INVALID_TYPE;
}
From source file:net.certiv.json.test.base.AbstractBase.java
License:Open Source License
protected void checkSymbols(Grammar g, String rulesStr, String allValidTokensStr) throws Exception { String[] typeToTokenName = g.getTokenNames(); Set<String> tokens = new HashSet<String>(); for (int i = 0; i < typeToTokenName.length; i++) { String t = typeToTokenName[i]; if (t != null) { if (t.startsWith(Grammar.AUTO_GENERATED_TOKEN_NAME_PREFIX)) { tokens.add(g.getTokenDisplayName(i)); } else { tokens.add(t);// ww w . j a v a 2 s . com } } } // make sure expected tokens are there StringTokenizer st = new StringTokenizer(allValidTokensStr, ", "); while (st.hasMoreTokens()) { String tokenName = st.nextToken(); assertTrue("token " + tokenName + " expected, but was undefined", g.getTokenType(tokenName) != Token.INVALID_TYPE); tokens.remove(tokenName); } // make sure there are not any others (other than <EOF> etc...) for (String tokenName : tokens) { assertTrue("unexpected token name " + tokenName, g.getTokenType(tokenName) < Token.MIN_USER_TOKEN_TYPE); } // make sure all expected rules are there st = new StringTokenizer(rulesStr, ", "); int n = 0; while (st.hasMoreTokens()) { String ruleName = st.nextToken(); assertNotNull("rule " + ruleName + " expected", g.getRule(ruleName)); n++; } Assert.assertEquals("number of rules mismatch; expecting " + n + "; found " + g.rules.size(), n, g.rules.size()); }