Example usage for org.antlr.v4.runtime Token getTokenIndex

List of usage examples for org.antlr.v4.runtime Token getTokenIndex

Introduction

On this page you can find example usage of org.antlr.v4.runtime Token.getTokenIndex().

Prototype

int getTokenIndex();

Document

An index from 0..n-1 of the token object in the input stream.
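Before the examples below, here is a minimal sketch of how getTokenIndex() is typically read from a buffered token stream. It is illustrative only: MyLexer stands in for any generated ANTLR lexer and is not part of the quoted sources.

import org.antlr.v4.runtime.*;

/**
 * Minimal sketch: print each token's index in the stream.
 * MyLexer is a placeholder for any generated ANTLR 4 lexer.
 */
static void printTokenIndexes(String source) {
    CharStream input = CharStreams.fromString(source);
    MyLexer lexer = new MyLexer(input);
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    tokens.fill(); // buffer all tokens so getTokens() returns the complete stream
    for (Token t : tokens.getTokens()) {
        // getTokenIndex() is the 0-based position of this token in the stream;
        // it is the index accepted by BufferedTokenStream.getHiddenTokensToLeft/Right.
        System.out.println(t.getTokenIndex() + ": '" + t.getText() + "'");
    }
}

Several of the examples below pass exactly this index to getHiddenTokensToLeft() or getHiddenTokensToRight() to recover comments and whitespace around a token.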

Usage

From source file:org.eclipse.titan.common.parsers.ParserLogger.java

License:Open Source License

/**
 * Escaped rule text including hidden tokens
 * For logging purposes
 * @param aRule rule
 * @param aTokens token list to get the tokens (all, hidden and not hidden also) from
 * @return escaped rule text
 */
private static String getEscapedRuleText(final ParserRuleContext aRule, final List<Token> aTokens) {
    final Token startToken = aRule.start;
    if (startToken == null) {
        println("ERROR: ParseLogger.getEscapedRuleText() startToken == null");
        return "";
    }
    final int startIndex = startToken.getTokenIndex();
    final Token stopToken = aRule.stop;
    if (stopToken == null) {
        println("ERROR: ParseLogger.getEscapedRuleText() stopToken == null");
        return "";
    }
    final int stopIndex = stopToken.getTokenIndex();
    final StringBuilder sb = new StringBuilder();
    for (int i = startIndex; i <= stopIndex; i++) {
        try {
            sb.append(getEscapedTokenText(aTokens.get(i)));
        } catch (IndexOutOfBoundsException e) {
            sb.append("_");
        }
    }
    return sb.toString();
}

From source file:org.eclipse.titan.common.parsers.ParserLogger.java

License:Open Source License

/**
 * Rule text including hidden tokens
 * For logging purposes
 * @param aRule rule
 * @param aTokens token list to get the tokens (all, hidden and not hidden also) from
 * @return rule text including hidden tokens. First and last tokens are non-hidden.
 */
public static String getRuleText(final ParserRuleContext aRule, final List<Token> aTokens) {
    final Token startToken = aRule.start;
    if (startToken == null) {
        println("ERROR: ParseLogger.getEscapedRuleText() startToken == null");
        return "";
    }
    final int startIndex = startToken.getTokenIndex();
    final Token stopToken = aRule.stop;
    if (stopToken == null) {
        println("ERROR: ParseLogger.getEscapedRuleText() stopToken == null");
        return "";
    }
    final int stopIndex = stopToken.getTokenIndex();
    final StringBuilder sb = new StringBuilder();
    for (int i = startIndex; i <= stopIndex; i++) {
        sb.append(aTokens.get(i).getText());
    }
    return sb.toString();
}

From source file:org.eclipse.titan.common.parsers.ParserLogger.java

License:Open Source License

/**
 * Token info in string format for logging purpose
 * @param aToken token
 * @param aTokenNameResolver resolver to get token name
 * @return &lt;token name&gt;: '&lt;token text&gt;', @&lt;token index&gt;, &lt;line&gt;:&lt;column&gt;[, channel=&lt;channel&gt;]
 *         <br>where
 *         <br>&lt;token index&gt; starts from 0,
 *         <br>&lt;line&gt; starts from 1,
 *         <br>&lt;column&gt; starts from 0,
 *         <br>channel info is provided if &lt;channel&gt; > 0 (hidden channel)
 */
private static String getTokenInfo(final Token aToken, final TokenNameResolver aTokenNameResolver) {
    final StringBuilder sb = new StringBuilder();
    final int tokenType = aToken.getType();
    final String tokenName = getTokenName(tokenType, aTokenNameResolver);
    sb.append(tokenName);
    sb.append(": ");

    sb.append("'");
    sb.append(getEscapedTokenText(aToken));
    sb.append("'");

    sb.append(", @" + aToken.getTokenIndex());
    sb.append(", " + aToken.getLine() + ":" + aToken.getCharPositionInLine());
    if (aToken.getChannel() > 0) {
        sb.append(", channel=");
        sb.append(aToken.getChannel());
    }
    return sb.toString();
}

From source file:org.ledyba.sora.parser.FortranTokenStream.java

License:Open Source License

/**
 * Create a subset list of the non-whitespace tokens in the current line.
 */
private ArrayList<Token> createPackedList() {
    int i = 0;
    Token tk = null;

    ArrayList<Token> pList = new ArrayList<>(this.lineLength + 1);

    for (i = 0; i < currLine.size(); i++) {
        tk = getTokenFromCurrLine(i);
        try {
            if (tk.getChannel() != Token.HIDDEN_CHANNEL) {
                pList.add(tk);
            }
        } catch (Exception e) {
            e.printStackTrace();
            System.exit(1);
        }
    }

    // need to make sure the line was terminated with a T_EOS.  this may
    // not happen if we're working on a file that ended w/o a newline
    Token last = pList.get(pList.size() - 1);
    if (last.getType() != FortranLexer.T_EOS) {
        Pair<TokenSource, CharStream> src = new Pair<>(last.getTokenSource(), last.getInputStream());
        FortranToken eos = new FortranToken(src, FortranLexer.T_EOS, Token.DEFAULT_CHANNEL,
                last.getTokenIndex(), last.getTokenIndex() + 1);
        eos.setText("\n");
        pList.add(eos);
    }

    return pList;
}

From source file:org.opencypher.tools.g4processors.BNFListener.java

License:Apache License

private String findHiddenTextBefore(ParserRuleContext ctx, boolean forHeader) {
    Token startCtx = ctx.getStart();
    int i = startCtx.getTokenIndex();
    List<Token> normalTextChannel = tokens.getHiddenTokensToLeft(i, BNFLexer.HIDDEN);
    if (normalTextChannel != null) {
        // find where the blank lines are
        // when called for a rule, is the quasi-comment part of the content of the previous rule or
        // the description of this one. Immaterial for grammar header

        List<Token> lineTokens = normalTextChannel.stream().collect(Collectors.toList());

        int precedingBlankLines = startCtx.getLine() - lineTokens.get(lineTokens.size() - 1).getLine() - 1;
        if (precedingBlankLines > 0) {
            if (forHeader) {
                // this will preserve the linefeeds
                return lineTokens.stream().map(tk -> tk.getText().replaceFirst("// ?", ""))
                        .collect(Collectors.joining("\n"));
            } // it wasn't a description (just a stray comment ?)
        } else {
            if (forHeader) {
                // no blank line, so this is a description of the first rule
                return "";
            }
            // description - go back and find any gap showing a last blank line
            int lastGoodLine = startCtx.getLine() - 1;
            int currentIndex = lineTokens.size() - 1;
            while (currentIndex >= 0 && lineTokens.get(currentIndex).getLine() == lastGoodLine) {
                currentIndex--;
                lastGoodLine--;
            }
            List<String> content = new ArrayList<>();
            for (int j = currentIndex + 1; j < lineTokens.size(); j++) {
                content.add(lineTokens.get(j).getText().replaceFirst("// ?", ""));
            }
            return content.stream().collect(Collectors.joining("\n"));
        }
    }
    return "";
}

From source file:org.opencypher.tools.g4processors.BNFListener.java

License:Apache License

private String findHiddenTextAfter(ParserRuleContext ctx) {
    Token endCtx = ctx.getStop();
    int i = endCtx.getTokenIndex();
    List<Token> normalTextChannel = tokens.getHiddenTokensToRight(i, BNFLexer.HIDDEN);
    if (normalTextChannel != null) {
        // the quasi-comment (description) may be the end of a rule or start of the next. separation is on
        // a blank line
        int nextLine = endCtx.getLine() + 1;
        List<String> content = new ArrayList<>();
        for (Token lineToken : normalTextChannel) {
            if (lineToken.getLine() == nextLine) {
                content.add(lineToken.getText().replaceFirst("// ?", ""));
                nextLine++;
            } else {
                break;
            }
        }
        return content.stream().collect(Collectors.joining("\n"));
    }
    return "";
}

From source file:org.opencypher.tools.g4processors.G4Listener.java

License:Apache License

private FreeTextItem findHiddenText(ParserRuleContext ctx) {
    // to suppress lexing, !! normal english text is a special comment //!! -> hidden
    // not sure i need to do that
    Token endAlt = ctx.getStop();
    int i = endAlt.getTokenIndex();
    List<Token> normalTextChannel = tokens.getHiddenTokensToRight(i, Gee4Lexer.HIDDEN);
    if (normalTextChannel != null) {
        // there should be only one line now
        String content = normalTextChannel.stream().map(tk -> tk.getText().replaceFirst("//!!\\s*", ""))
                .collect(Collectors.joining());
        return new FreeTextItem(content);
    }
    return null;
}

From source file:org.sourcepit.ltk.parser.ParseTreeBuilder.java

License:Apache License

private Rule handleRuleNode(Terminal origin, Rule parent, RuleNode ruleNode) {
    final RuleContext ruleContext = ruleNode.getRuleContext();
    final List<ParseNode> children = new ArrayList<>();
    final Rule rule = new Rule(parent, children, ruleContext.getClass(), origin);
    for (int i = 0; i < ruleNode.getChildCount(); i++) {
        final ParseTree child = ruleNode.getChild(i);
        if (child instanceof RuleNode) {
            children.add(handleRuleNode(null, rule, (RuleNode) child));
        } else {
            final TerminalNode terminalNode = (TerminalNode) child;
            final ParseResult parseResult = parseResultStack.peek();
            final BufferedTokenStream tokenStream = parseResult.getTokenStream();
            final org.antlr.v4.runtime.Token token = terminalNode.getSymbol();
            final int tokenIndex = token.getTokenIndex();

            // final TerminalContext tc = new TerminalContext();
            // tc.terminalNode = terminalNode;
            // tc.tokenStream = tokenStream;

            // tokenIndexToTerminalNodeMap.put(Integer.valueOf(tokenIndex), tc);

            final List<org.antlr.v4.runtime.Token> hiddenTokensToLeft = tokenStream
                    .getHiddenTokensToLeft(tokenIndex);
            if (hiddenTokensToLeft != null && !hiddenTokensToLeft.isEmpty()) {
                int prevTokenIdx = hiddenTokensToLeft.get(0).getTokenIndex() - 1;
                int startIdx;
                if (prevTokenIdx < 0) {
                    startIdx = 0;
                } else {
                    // final TerminalContext preTc = tokenIndexToTerminalNodeMap.get(Integer.valueOf(prevTokenIdx));
                    // final TerminalNode preTerminalNode = preTc.terminalNode;
                    // startIdx = parserDelegeate.getLen((RuleNode) preTerminalNode.getParent(),
                    // preTerminalNode.getSymbol(),
                    // hiddenTokensToLeft, preTc.tokenStream);
                    startIdx = tokenIndexToHiddenWsLength.get(Integer.valueOf(prevTokenIdx));
                }
                for (int j = startIdx; j < hiddenTokensToLeft.size(); j++) {
                    final TerminalNodeImpl dummy = new TerminalNodeImpl(hiddenTokensToLeft.get(j));
                    dummy.parent = ruleNode;
                    children.add(handleTerminalNode(null, rule, dummy));
                }
            }

            if (terminalNode.getSymbol().getType() != Lexer.EOF) {
                children.add(handleTerminalNode(null, rule, terminalNode));
            }

            final List<org.antlr.v4.runtime.Token> hiddenTokensToRight = tokenStream
                    .getHiddenTokensToRight(tokenIndex);
            if (hiddenTokensToRight != null) {
                int len = parserDelegeate.getLen(ruleNode, token, hiddenTokensToRight, tokenStream);
                tokenIndexToHiddenWsLength.put(Integer.valueOf(tokenIndex), Integer.valueOf(len));
                for (int j = 0; j < len; j++) {
                    final TerminalNodeImpl dummy = new TerminalNodeImpl(hiddenTokensToRight.get(j));
                    dummy.parent = ruleNode;
                    children.add(handleTerminalNode(null, rule, dummy));
                }
            }
        }
    }
    return rule;
}

From source file:org.wso2.ballerinalang.compiler.parser.BLangWSPreservingParserListener.java

License:Open Source License

private void pushWS(Stack<Whitespace> whitespaceStack, Token previousNonWS, String wsString) {
    boolean isStatic = isAllUpper(BallerinaLexer.VOCABULARY.getSymbolicName(previousNonWS.getType()));
    Whitespace wsToken = new Whitespace(previousNonWS.getTokenIndex(), wsString, previousNonWS.getText(),
            isStatic);
    whitespaceStack.push(wsToken);
}

From source file:us.ihmc.idl.generator.IDLGenerator.java

License:Apache License

private static void printToken(CommonTokenStream tokens, int index, Token token) {
    if (token.getType() != IDLParser.WS) {
        String out = "";
        out += " Index: " + token.getTokenIndex();
        out += " Start: " + token.getStartIndex();
        out += " Stop: " + token.getStopIndex();
        out += " Channel: " + token.getChannel();
        out += " Type: " + token.getType();
        //         out += " Hidden: ";
        //         List<Token> hiddenTokensToLeft = tokens.getHiddenTokensToLeft(index);
        //         for (int i = 0; hiddenTokensToLeft != null && i < hiddenTokensToLeft.size(); i++)
        //         {
        //            if (hiddenTokensToLeft.get(i).getType() != IDLParser.WS)
        //            {
        //               out += "\n\t" + i + ":";
        //               out += "\n\tChannel: " + hiddenTokensToLeft.get(i).getChannel() + "  Type: " + hiddenTokensToLeft.get(i).getType();
        //               out += hiddenTokensToLeft.get(i).getText().replaceAll("\\s", "");
        //            }
        //         }
        out += " " + token.getText().replaceAll("\\s", "");
        System.out.println(out);
    }
}