Example usage for org.antlr.v4.runtime CommonTokenStream get

List of usage examples for org.antlr.v4.runtime CommonTokenStream get

Introduction

On this page you can find example usages for org.antlr.v4.runtime CommonTokenStream.get.

Prototype

@Override
public Token get(int i)
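
A minimal sketch of calling get(int) on a freshly built stream: build a CommonTokenStream from a lexer, buffer the tokens, then index into them. HelloLexer stands in for a hypothetical ANTLR-generated lexer; substitute the lexer generated from your own grammar.

import org.antlr.v4.runtime.ANTLRInputStream;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.Token;

public class TokenDump {
    public static void main(String[] args) {
        // HelloLexer is assumed to be generated by ANTLR; it is not part of the runtime.
        HelloLexer lexer = new HelloLexer(new ANTLRInputStream("hello world"));
        CommonTokenStream tokens = new CommonTokenStream(lexer);
        tokens.fill(); // buffer every token so size() and get(i) cover the whole input
        for (int i = 0; i < tokens.size(); i++) {
            Token t = tokens.get(i);
            System.out.println(i + ": type=" + t.getType() + " text='" + t.getText() + "'");
        }
    }
}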

Source Link

Usage

From source file:controle.analiseTexto.AnalisePeriodo.java

public static ArrayList<String> etiquetarDescricaoFuncionalidadeIntellisense(CommonTokenStream tokens) {
    String verbos = "#dever#poder#";
    etiquetasSentenca = "";
    ultimaPalavraSentenca = tokens.getTokens().get(tokens.getTokens().size() - 2).getText();

    for (int i = 0; i < tokens.getTokens().size() - 1; i++) {
        // Detect and assign the tags to their respective tokens
        String palavraEtiquetada = tokens.getTokens().get(i).getText().toLowerCase();
        String lemma = "";
        try {
            lemma = tagger.getLemma(palavraEtiquetada);

        } catch (Exception e) {
            // System.out.println("erro em: " + palavraEtiquetada);
            e.printStackTrace();
        }
        switch (tagger.getHashEtiquetas().get(palavraEtiquetada).charAt(0)) {
        case 'V':
            String verbo = tokens.getTokens().get(i).getText().toLowerCase();
            verbo = Etiquetador.lemmas.get(verbo.toLowerCase());
            if (verbo.toLowerCase().equals("permitir")) {
                ((CommonToken) tokens.getTokens().get(i)).setType(IntellisenseGrammarParser.PERMITIR);
                etiquetasSentenca = etiquetasSentenca + "PERMITIR";
                //System.out.print(palavraEtiquetada + ":" + "PERMITIR ");
            } else {
                if (verbos.contains(verbo.toLowerCase())) {
                    ((CommonToken) tokens.getTokens().get(i)).setType(IntellisenseGrammarParser.DEVE);
                    etiquetasSentenca = etiquetasSentenca + "DEVE";
                    //System.out.print(palavraEtiquetada + ":" + "DEVE ");
                } else {
                    ((CommonToken) tokens.getTokens().get(i)).setType(IntellisenseGrammarParser.VERB);
                    etiquetasSentenca = etiquetasSentenca + "VERB";
                    //System.out.print(palavraEtiquetada + ":" + "VERB ");
                }
            }
            break;
        case 'N':
            if (tokens.getTokens().get(i).getText().toLowerCase().equals("sistema") && i <= 2) {
                ((CommonToken) tokens.getTokens().get(i)).setType(IntellisenseGrammarParser.SISTEMA);
                etiquetasSentenca = etiquetasSentenca + "SISTEMA";
                //System.out.print(palavraEtiquetada + ":" + "SISTEMA ");
            } else {
                ((CommonToken) tokens.getTokens().get(i)).setType(IntellisenseGrammarParser.SUBS);
                etiquetasSentenca = etiquetasSentenca + "SUBS";
                //System.out.print(palavraEtiquetada + ":" + "SUBS ");
            }
            break;
        case 'D':
            ((CommonToken) tokens.getTokens().get(i)).setType(IntellisenseGrammarParser.ART);
            //System.out.print(palavraEtiquetada + ":" + "ART ");
            break;
        case 'P':
            ((CommonToken) tokens.getTokens().get(i)).setType(IntellisenseGrammarParser.PRON);
            //System.out.print(palavraEtiquetada + ":" + "PRON ");
            break;
        case 'S':
            ((CommonToken) tokens.getTokens().get(i)).setType(IntellisenseGrammarParser.PREP);
            //System.out.print(palavraEtiquetada + ":" + "PREP ");
            break;
        case 'A':
            ((CommonToken) tokens.getTokens().get(i)).setType(IntellisenseGrammarParser.ADJ);
            //System.out.print(palavraEtiquetada + ":" + "ADJ ");
            break;
        case 'Z':
            ((CommonToken) tokens.getTokens().get(i)).setType(IntellisenseGrammarParser.NUM);
            //System.out.print(palavraEtiquetada + ":" + "NUM ");
            break;
        case 'C':
            ((CommonToken) tokens.getTokens().get(i)).setType(IntellisenseGrammarParser.CONJ);
            //System.out.print(palavraEtiquetada + ":" + "CONJ ");
            break;
        case 'F':
            try {
                if (tagger.getHashEtiquetas().get(palavraEtiquetada).charAt(1) == 'c') {
                    ((CommonToken) tokens.getTokens().get(i)).setType(IntellisenseGrammarParser.SEPARADOR);
                    //System.out.print(palavraEtiquetada + ":" + "SEPARADOR ");
                } else if (tagger.getHashEtiquetas().get(palavraEtiquetada).charAt(1) == 'd') {
                } else {
                    if (tokens.getTokens().get(i).getText().toLowerCase().equals(".")) {
                        ((CommonToken) tokens.getTokens().get(i)).setType(IntellisenseGrammarParser.PONTO);
                        //System.out.print(palavraEtiquetada + ":" + "PONTO ");
                    } else {
                        ((CommonToken) tokens.getTokens().get(i)).setType(IntellisenseGrammarParser.SIMBOLOS);
                        //System.out.print(palavraEtiquetada + ":" + "SIMBOLOS ");
                    }
                }
            } catch (Exception e) {
                ((CommonToken) tokens.getTokens().get(i)).setType(IntellisenseGrammarParser.PALAVRAESTRANGEIRA);
                //System.out.print(palavraEtiquetada + ":" + "PALAVRAESTRANGEIRA ");
            }
            break;
        case 'R':
            ((CommonToken) tokens.getTokens().get(i)).setType(IntellisenseGrammarParser.ADV);
            //System.out.print(palavraEtiquetada + ":" + "ADV ");
            break;
        }
    }
    return (ArrayList<String>) tagger.getEtiquetas();
}
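
The tagging above re-types each buffered token in place by casting to CommonToken and calling setType. The same idea can be expressed against the WritableToken interface, which is where setType is declared in the ANTLR 4 runtime; a short sketch (retag is just an illustrative helper name):

import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.WritableToken;

// Re-type the buffered token at index i without assuming the concrete CommonToken class.
static void retag(CommonTokenStream tokens, int i, int newType) {
    Token t = tokens.get(i);
    if (t instanceof WritableToken) {
        ((WritableToken) t).setType(newType);
    }
}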

From source file:controle.analiseTexto.AnalisePeriodo.java

public static ArrayList<String> etiquetarUC(CommonTokenStream tokens) {
    int iniciouSetras = -1;
    int inicio = -1;
    int fim = -1;
    int aspas = 0;
    boolean frasePara = false;
    boolean fraseEnquanto = false;

    String palavraAnterior = "";
    ultimaPalavraSentenca = tokens.getTokens().get(tokens.getTokens().size() - 2).getText();

    for (int i = 0; i < tokens.getTokens().size() - 1; i++) {
        // Detect and assign the tags to their respective tokens
        String palavraEtiquetada = tokens.getTokens().get(i).getText().toLowerCase();
        String lemma = "";
        try {
            lemma = tagger.getLemma(palavraEtiquetada);
        } catch (Exception e) {
            //System.out.println("erro em: " + palavraEtiquetada);
            e.printStackTrace();
        }
        if (Character.isDigit(palavraEtiquetada.charAt(0))) {
            ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.NUMERO);
        } else {
            iniciouSetras++;
            if (i >= inicio && i <= fim) {
                ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.PALAVRASRESERVADAS);
                palavraAnterior = tokens.getTokens().get(i).getText().toLowerCase();
                continue;
            }
            if (palavraEtiquetada.equals("\"")) {
                ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.ASPAS);
                aspas = aspas == 0 ? 1 : 0;
                continue;
            }
            if (aspas == 1) {
                ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.PALAVRA);
                continue;
            }

            switch (tagger.getHashEtiquetas().get(palavraEtiquetada).charAt(0)) {
            case 'V':
                ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.VERB);
                if (Constante.SUBSTANTIVADOR.contains("#" + palavraAnterior + "#")) {
                    ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.SUBS);
                } else {
                    int numeroDePalavrasResevardas = ehPalavraReservada(tokens.getTokens(), i + 1);
                    if (numeroDePalavrasResevardas > 0) {
                        inicio = i + 1;
                        fim = i + numeroDePalavrasResevardas;
                    } else {
                        inicio = -1;
                        fim = -1;
                    }
                }
                break;
            case 'N':
                ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.SUBS);
                if (frasePara && (Etiquetador.hashVerboParticipio.get(palavraEtiquetada) != null
                        || Etiquetador.hashVerboGerundio.get(palavraEtiquetada) != null)) {
                    ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.VERB);
                }
                break;
            case 'D':
                ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.ART);
                //                        if (iniciouSetras == 1 && palavraEtiquetada.toLowerCase().equals("cada")) {
                //                            ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.CADA);
                //                        }
                break;
            case 'P':
                ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.PRON);
                if (iniciouSetras == 0 && palavraEtiquetada.toLowerCase().equals("se")) {
                    ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.SE);
                }
                break;
            case 'S':
                ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.PREP);
                if (iniciouSetras == 0 && palavraEtiquetada.toLowerCase().equals("para")) {
                    ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.PARA);
                    frasePara = true;
                }
                break;
            case 'A':
                ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.ADJ);
                if (Constante.COMPARADOR.contains(tokens.getTokens().get(i).getText().toLowerCase())) {
                    //                            ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.COMPARADOR);
                }
                break;
            case 'Z':
                ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.NUM);
                break;
            case 'C':
                if (Constante.CONJUNCAO.contains(tokens.getTokens().get(i).getText().toLowerCase())) {
                    ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.CONJUNCAO);
                } else if (tokens.getTokens().get(i).getText().toLowerCase().equals("se")) {
                    ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.SE);
                } else {
                    if (Constante.COMPARADOR.contains(tokens.getTokens().get(i - 1).getText().toLowerCase())) {
                        //                                ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.QUE);
                    } else {
                        ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.CONJ);
                    }
                }
                break;
            case 'F':
                try {
                    if (tokens.getTokens().get(i).getText().toLowerCase().equals(".")) {
                        ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.PONTO);
                        iniciouSetras--;
                    } else {
                        if (tokens.getTokens().get(i).getText().toLowerCase().equals(",")) {
                            ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.VIRGULA);
                        } else {
                            if (tagger.getEtiquetas().get(i).charAt(1) == 'c'
                                    || tagger.getEtiquetas().get(i).charAt(1) == 'p') {
                                ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.TERMINAL);
                            } else if (tagger.getEtiquetas().get(i).charAt(1) == 'd') {
                            } else {
                                ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.SIMBOLOS);
                            }
                        }
                    }
                } catch (Exception e) {
                    ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.PALAVRAESTRANGEIRA);
                }
                break;
            case 'R':
                if (tokens.getTokens().get(i).getText().toLowerCase().equals("então")) {
                    ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.ENTAO);
                } else {
                    if (tokens.getTokens().get(i).getText().toLowerCase().equals("senão")) {
                        ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.SENAO);
                    } else if (iniciouSetras == 0 && palavraEtiquetada.toLowerCase().equals("enquanto")) {
                        ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.ENQUANTO);
                        fraseEnquanto = true;
                    } else if (Constante.SUBSTANTIVADOR.contains("#" + palavraAnterior + "#")) {
                        ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.SUBS);
                    } else {
                        ((CommonToken) tokens.getTokens().get(i)).setType(UCGrammarParser.ADV);
                    }
                }
                break;
            } //switch
        } //if charAt
        palavraAnterior = tokens.getTokens().get(i).getText().toLowerCase();
    }

    return (ArrayList<String>) tagger.getEtiquetas();
}

From source file:controle.dicionario.Dicionario.java

private ArrayList<String> etiquetar(CommonTokenStream tokens) {
    ArrayList<String> temp;
    ArrayList<String> sTokens = new ArrayList<>();
    ArrayList<String> simbolos = new ArrayList<>();
    temp = analisePeriodo.etiquetar(tokens);

    for (int i = 0; i < tokens.getTokens().size() - 1; i++) {
        int typeToken = ((CommonToken) tokens.getTokens().get(i)).getType();
        String sToken = ((CommonToken) tokens.getTokens().get(i)).getText();

        try {
            ClasseGramatical classeGramatical = classeGramaticalHash.get(typeToken);
            if (classeGramatical == null) {
                classeGramatical = classeGramaticalDAO.buscaPorId(11);
            }
            palavrasHash.get(sToken.toLowerCase()).setIdClasseGramatical(classeGramatical);
        } catch (Exception e) {
        }
    }
    return temp;
}

From source file:de.bioviz.parser.BioParser.java

License:Open Source License

/**
 * Parses the annotations in a file.
 * @param input an ANTLRInputStream
 * @param channel the channel to parse
 * @return A List of Strings containing the annotations.
 */
private static List<String> parseChannel(final ANTLRInputStream input, final int channel) {
    BioLexerGrammar lexer = new BioLexerGrammar(input);

    lexer.reset();
    CommonTokenStream cts = new CommonTokenStream(lexer);
    List<String> channelTokens = new ArrayList<>();

    // getText() forces the stream to load every token from the lexer into its buffer.
    cts.getText();
    // now size() and get(i) can be used to walk over all buffered tokens
    for (int i = 0; i < cts.size(); i++) {
        Token token = cts.get(i);
        // and check here if the token is on the right channel
        if (token.getChannel() == channel) {
            logger.trace("Parsing Comment: " + token.getText());
            channelTokens.add(token.getText());
        }
    }

    return channelTokens;
}
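
A note on the buffering step above: cts.getText() is called only for its side effect of loading every token from the lexer. BufferedTokenStream.fill() does that loading explicitly, so an equivalent sketch of the same loop is:

CommonTokenStream cts = new CommonTokenStream(lexer);
cts.fill(); // explicitly buffer every token the lexer can produce
for (int i = 0; i < cts.size(); i++) {
    Token token = cts.get(i);
    if (token.getChannel() == channel) {
        channelTokens.add(token.getText());
    }
}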

From source file:edu.odu.cs.cs350.yellow1.mutationgeneration.CFile.java

License:Open Source License

private boolean applyTokenTransformation(String original, String by, String operation) throws Exception {
    ANTLRInputStream input = new ANTLRInputStream(fileContents.toString());
    CLexer lexer = new CLexer(input);
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    CParser parser = new CParser(tokens);
    ParseTree tree = parser.compilationUnit();
    ParseTreeWalker walker = new ParseTreeWalker(); // create standard walker

    for (Integer i = 0; i < tokens.size(); i++) {
        System.out.println("Tokens: " + tokens.get(i).getText().toString());
        if (tokens.get(i).getText().toString().equals(original)) {
            String s1 = ((Integer) tokens.get(i).getStartIndex()).toString();
            String s2 = ((Integer) tokens.get(i).getStopIndex()).toString();
            String uniqId = operation;
            uniqId = uniqId.concat(s1).concat(s2);

            String newName = fileContents.toString();
            if (mutationsAppliedContains(uniqId) < 0) {

                newName = newName.substring(0, tokens.get(i).getStartIndex()) + by
                        + newName.substring(tokens.get(i).getStopIndex() + by.length());
                writeMutation(this.fileName + "." + uniqId, newName);
                this.addToApplied(uniqId);
            }
        }
    }
    return true;
}

From source file:edu.odu.cs.cs350.yellow1.mutationgeneration.JavaFile.java

License:Open Source License

/**
 * Change tokens and add the mutant cases to the mutant case vector.
 *
 * @param original
 *            the original token
 *
 * @param by
 *            the replacement token
 *
 * @param operation
 *            the operation name
 *
 *            It is acceptable for int y = +10; to be converted to int y = /10;,
 *            because that makes the mutant project fail to compile, and a
 *            mutation whose compilation fails is discarded.
 */
private boolean applyTokenTransformation(String original, String by, String operation) throws Exception {
    ANTLRInputStream input = new ANTLRInputStream(fileContents.toString());
    JavaLexer lexer = new JavaLexer(input);
    CommonTokenStream tokens = new CommonTokenStream(lexer);

    JavaParser parser = new JavaParser(tokens);

    ParseTree tree = parser.compilationUnit();

    TokenRewriteStream trs = new TokenRewriteStream();

    ParseTreeWalker walker = new ParseTreeWalker(); // create standard walker

    for (Integer i = 0; i < tokens.size(); i++) {
        if (tokens.get(i).getText().toString().equals(original)) {
            int startIndex = tokens.get(i).getStartIndex();
            int stopIndex = tokens.get(i).getStopIndex() + 1;
            mvc.add(startIndex, stopIndex, by);

        }
    }
    return true;
}
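
The TokenRewriteStream created above is the ANTLR 3 rewriting API and is never used; the ANTLR 4 counterpart is TokenStreamRewriter, which can express the same token replacement without substring arithmetic. A minimal sketch of that alternative (rewriteTokens is an illustrative name, not part of the project):

import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.TokenStreamRewriter;

// Replace every token whose text equals `original` with `by` and return the rewritten source.
static String rewriteTokens(CommonTokenStream tokens, String original, String by) {
    TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
    tokens.fill();
    for (int i = 0; i < tokens.size(); i++) {
        if (tokens.get(i).getText().equals(original)) {
            rewriter.replace(i, i, by); // from-index and to-index are token indexes
        }
    }
    return rewriter.getText();
}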

From source file:edu.odu.cs.cs350.yellow1.mutationgeneration.JavaFile.java

License:Open Source License

/**
 * Replaces the condition after [blockstart] with [set].
 *
 * @param blockstart
 *            accepts if or while
 *
 * @param set
 *            accepts true or false
 */
private void applyConditionTransformation(String blockstart, String set) {
    int lparen = 0;
    int rparen = 0;
    int startIndex = -1;
    int stopIndex = -1;
    boolean ifLock = false;

    ANTLRInputStream input = new ANTLRInputStream(fileContents.toString());
    JavaLexer lexer = new JavaLexer(input);
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    JavaParser parser = new JavaParser(tokens);
    ParseTree tree = parser.compilationUnit();
    ParseTreeWalker walker = new ParseTreeWalker(); // create standard walker
    for (Integer i = 0; i < tokens.size(); i++) {

        if (tokens.get(i).getText().toString().equals(blockstart)) {
            ifLock = true;
        }

        if (ifLock == true && tokens.get(i).getText().toString().equals("(")) {
            lparen++;

            if (startIndex == -1)
                startIndex = tokens.get(i).getStartIndex();
        }

        if (ifLock == true && tokens.get(i).getText().toString().equals(")")) {
            rparen++;
        }

        if (lparen != 0 && rparen != 0 && lparen == rparen && ifLock == true) {
            ifLock = false;
            lparen = 0;
            rparen = 0;

            stopIndex = tokens.get(i).getStopIndex() + 1;

            String by = "(" + set + ")";

            this.mvc.add(startIndex, stopIndex, by);
            startIndex = -1;
            stopIndex = -1;
        }

    }

}

From source file:edu.odu.cs.cs350.yellow1.mutationgeneration.JavaFile.java

License:Open Source License

private void constantOperations(String required) {
    ANTLRInputStream input = new ANTLRInputStream(fileContents.toString());
    JavaLexer lexer = new JavaLexer(input);
    CommonTokenStream tokens = new CommonTokenStream(lexer);

    JavaParser parser = new JavaParser(tokens);

    ParseTree tree = parser.compilationUnit();

    TokenRewriteStream trs = new TokenRewriteStream();

    ParseTreeWalker walker = new ParseTreeWalker(); // create standard walker

    for (Integer i = 0; i < tokens.size(); i++) {
        if (NumberUtils.isNumber(tokens.get(i).getText().toString())) {
            int startIndex = tokens.get(i).getStartIndex();
            int stopIndex = tokens.get(i).getStopIndex() + 1;
            String by = fileContents.substring(startIndex, stopIndex) + required;
            mvc.add(startIndex, stopIndex, by);
        }
    }
}

From source file:kalang.ide.completion.KalangCompletionHandler.java

private List<CompletionProposal> getCompleteType(KaParser.KaParserResult result, int caret) {
    CompilationUnit cunit = result.getCompilationUnit();
    CommonTokenStream ts = cunit.getTokenStream();
    TokenNavigator tokenNav = new TokenNavigator(ts.getTokens().toArray(new Token[0]));
    tokenNav.move(caret - 1);
    int currentTokenId = tokenNav.getCurrentToken().getTokenIndex();
    if (currentTokenId < 1) {
        return null;
    }
    //TODO skip comment channels
    Token curToken = ts.get(currentTokenId);
    log("cur token:" + curToken.getText());
    Token prevToken = ts.get(currentTokenId - 1);
    log("prev token:" + prevToken.getText());
    int exprStopCaret;
    int anchorCaret;
    if (curToken.getText().equals(".")) {
        exprStopCaret = prevToken.getStopIndex();
        anchorCaret = curToken.getStopIndex() + 1;
    } else if (prevToken.getText().equals(".")) {
        if (currentTokenId < 2) {
            return null;
        }
        Token prevPrevToken = ts.get(currentTokenId - 2);
        exprStopCaret = prevPrevToken.getStopIndex();
        anchorCaret = prevToken.getStopIndex() + 1;
    } else {
        return null;
    }
    AstNode astNode = AstNodeHelper.getAstNodeByCaretOffset(result, exprStopCaret);
    log("expr ast:" + astNode);
    if (astNode == null) {
        return null;
    }
    Type type;
    boolean inStatic;
    if (astNode instanceof ExprNode) {
        type = ((ExprNode) astNode).getType();
        inStatic = false;
    } else if (astNode instanceof ClassReference) {
        type = Types.getClassType(((ClassReference) astNode).getReferencedClassNode());
        inStatic = true;
    } else {
        return null;
    }
    CompletionRequest request = new CompletionRequest();
    request.anchorOffset = anchorCaret;
    request.compiler = result.getCompiler();
    String source = result.getSnapshot().getText().toString();
    request.prefix = source.substring(anchorCaret, caret);
    log("prefix:" + request.prefix);
    return TypeCompletion.complete(request, type, inStatic);
}

From source file:org.eclipse.titan.common.parsers.IntervalDetector.java

License:Open Source License

/**
 * Pops the current interval off the stack, making its parent the current interval. The ending offset of the popped interval is set here.
 * <p>
 * If the current interval is the root interval, then it is not popped off the stack. This situation can only happen in case of a syntactically
 * invalid file.
 * <p>
 * The last non-hidden token will be the end of the interval.
 *
 * @param aTokenStream token stream to get the list of tokens for searching hidden tokens
 */
public final void popInterval(final CommonTokenStream aTokenStream) {
    final int nonHiddenIndex = getNonHiddenTokensBefore(aTokenStream.index() - 1, aTokenStream.getTokens());
    final Token t = aTokenStream.get(nonHiddenIndex);
    popInterval(t);
}
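
The getNonHiddenTokensBefore call above is a private helper of the same class that is not reproduced on this page. A hypothetical sketch of such a helper, walking backwards until a token on the default channel is found (an assumption, not the Titan implementation):

import java.util.List;
import org.antlr.v4.runtime.Token;

// Hypothetical: step backwards from startIndex over hidden-channel tokens and
// return the index of the nearest token on the default channel.
private static int getNonHiddenTokensBefore(final int startIndex, final List<Token> tokens) {
    int i = Math.min(Math.max(startIndex, 0), tokens.size() - 1);
    while (i > 0 && tokens.get(i).getChannel() != Token.DEFAULT_CHANNEL) {
        i--;
    }
    return i;
}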