List of usage examples for org.antlr.v4.runtime CommonTokenStream getTokens
public List<Token> getTokens()
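Before the full examples, a minimal usage sketch of the common pattern they share. This is an illustration only: MyLexer is a placeholder for any generated ANTLR 4 lexer class, and the input string is arbitrary. The key point is that getTokens() only returns tokens that have already been buffered, so fill() (or a complete parse) should run first.

import org.antlr.v4.runtime.ANTLRInputStream;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.Token;

public static void main(String[] args) {
    // MyLexer is a hypothetical generated ANTLR 4 lexer
    CommonTokenStream tokens = new CommonTokenStream(new MyLexer(new ANTLRInputStream("some input")));
    tokens.fill();                        // buffer the entire input first
    for (Token t : tokens.getTokens()) {  // complete token list, including the EOF token
        System.out.println(t);
    }
}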
From source file:SparqlMain.java
License:Apache License
/**
 * @param args
 */
public static void main(String args[]) throws Exception {
    System.out.println("Work on file " + args[0]);

    int lineWidth = 80;
    if (args.length >= 2) {
        lineWidth = Integer.parseInt(args[1]);
    }

    SparqlLexer lex = null;
    try {
        lex = new SparqlLexer(new ANTLRFileStream(args[0]));
    } catch (IOException ex) {
        Logger.getLogger(SparqlMain.class.getName()).log(Level.SEVERE, null, ex);
    }

    CommonTokenStream tokens = new CommonTokenStream(lex);

    System.out.println("Tokens: -------------------------------");
    tokens.fill();
    System.out.println("Number of tokens " + tokens.getTokens().size());

    List tokenList = tokens.getTokens();

    System.out.println("TokenList: -------------------------------");
    Iterator it = tokenList.iterator();
    while (it.hasNext()) {
        Token t = (Token) it.next();
        System.out.println(t.toString());
    }
    System.out.flush();

    System.out.println("Input from token list: -------------------------------");
    it = tokenList.iterator();
    while (it.hasNext()) {
        Token t = (Token) it.next();
        if (t.getType() != SparqlParser.EOF) {
            if (t.getType() == SparqlParser.WS || t.getType() == SparqlParser.COMMENT) {
                String s = t.getText();
                s = s.replace("\r\n", "\n");
                if (!System.lineSeparator().equals("\n")) {
                    s = s.replace("\n", System.lineSeparator());
                }
                System.out.print(s);
            } else {
                System.out.print(t.getText());
            }
        }
    }
    System.out.flush();

    SparqlParser parser = new SparqlParser(tokens);
    parser.setBuildParseTree(true);

    System.out.println("Start parsing: -------------------------------");
    System.out.flush();
    ParserRuleContext t = parser.query();
    System.out.flush();

    System.out.println("Parse tree: -------------------------------");
    System.out.println(t.toStringTree(parser));

    // visualize parse tree in dialog box
    t.inspect(parser);

    if (parser.getNumberOfSyntaxErrors() <= 0) {
        //ParseTreeWalker walker = new ParseTreeWalker();

        String groupFile = "ident.stg";
        if (args.length > 1) {
            groupFile = args[1];
        }
        System.out.println("Read StringTemplate Group File: " + groupFile + "-------------------------------");

        STGroup g = new STGroupFile(groupFile);
        IdentVisitor visitor = new IdentVisitor();
        visitor.setSTGroup(g);
        ST query = visitor.visit(t);

        System.out.println("Emit reformatted query: -------------------------------");
        System.out.println(query.render(lineWidth));

        System.out.println("Emit original query: -------------------------------");
        String q = query.render(lineWidth);

        /* get common token stream */
        File tmpFile = File.createTempFile("query_", ".rq");
        FileOutputStream fo = new FileOutputStream(tmpFile);
        OutputStreamWriter ow = new OutputStreamWriter(fo, "UTF8");
        ow.write(q);
        ow.close();

        /* transformation pipeline
         * step 1: Unicode pre-processing
         * step 2: Lexical analysis
         */
        lex = new SparqlLexer(new ANTLRFileStream(tmpFile.getCanonicalPath(), "UTF8"));
        tokens = new CommonTokenStream(lex);

        List formattedTokenList = tokens.getTokens();
        it = tokenList.iterator();
        Iterator fit = formattedTokenList.iterator();

        boolean lineSeparatorHasToBeModified = !System.lineSeparator().equals("\n");

        while (it.hasNext()) {
            Token originalToken = (Token) it.next();
            if (originalToken.getType() != SparqlParser.EOF) {
                if (originalToken.getType() == SparqlParser.WS || originalToken.getType() == SparqlParser.COMMENT) {
                    String s = originalToken.getText();
                    s = s.replace("\r\n", "\n");
                    if (lineSeparatorHasToBeModified) {
                        s = s.replace("\n", System.lineSeparator());
                    }
                    System.out.print(s);
                } else {
                    System.out.print(originalToken.getText());
                }
            }
        }
        System.out.flush();
    }

    System.out.println("-------------------------------");
    System.out.println("Number of errors encountered: " + parser.getNumberOfSyntaxErrors());
}
From source file:com.espertech.esper.core.deploy.EPLModuleUtil.java
License:Open Source License
public static ParseNode getModule(EPLModuleParseItem item, String resourceName) throws ParseException, IOException {
    CharStream input = new NoCaseSensitiveStream(new StringReader(item.getExpression()));

    EsperEPL2GrammarLexer lex = ParseHelper.newLexer(input);
    CommonTokenStream tokenStream = new CommonTokenStream(lex);
    tokenStream.fill();

    List tokens = tokenStream.getTokens();
    int beginIndex = 0;
    boolean isMeta = false;
    boolean isModule = false;
    boolean isUses = false;
    boolean isExpression = false;

    while (beginIndex < tokens.size()) {
        Token t = (Token) tokens.get(beginIndex);
        if (t.getType() == EsperEPL2GrammarParser.EOF) {
            break;
        }
        if ((t.getType() == EsperEPL2GrammarParser.WS)
                || (t.getType() == EsperEPL2GrammarParser.SL_COMMENT)
                || (t.getType() == EsperEPL2GrammarParser.ML_COMMENT)) {
            beginIndex++;
            continue;
        }
        String tokenText = t.getText().trim().toLowerCase();
        if (tokenText.equals("module")) {
            isModule = true;
            isMeta = true;
        } else if (tokenText.equals("uses")) {
            isUses = true;
            isMeta = true;
        } else if (tokenText.equals("import")) {
            isMeta = true;
        } else {
            isExpression = true;
            break;
        }
        beginIndex++;
        beginIndex++; // skip space
        break;
    }

    if (isExpression) {
        return new ParseNodeExpression(item);
    }
    if (!isMeta) {
        return new ParseNodeComment(item);
    }

    // check meta tag (module, uses, import)
    StringWriter buffer = new StringWriter();
    for (int i = beginIndex; i < tokens.size(); i++) {
        Token t = (Token) tokens.get(i);
        if (t.getType() == EsperEPL2GrammarParser.EOF) {
            break;
        }
        if ((t.getType() != EsperEPL2GrammarParser.IDENT)
                && (t.getType() != EsperEPL2GrammarParser.DOT)
                && (t.getType() != EsperEPL2GrammarParser.STAR)
                && (!t.getText().matches("[a-zA-Z]*"))) {
            throw getMessage(isModule, isUses, resourceName, t.getType());
        }
        buffer.append(t.getText().trim());
    }

    String result = buffer.toString().trim();
    if (result.length() == 0) {
        throw getMessage(isModule, isUses, resourceName, -1);
    }

    if (isModule) {
        return new ParseNodeModule(item, result);
    } else if (isUses) {
        return new ParseNodeUses(item, result);
    }
    return new ParseNodeImport(item, result);
}
From source file:com.espertech.esper.core.deploy.EPLModuleUtil.java
License:Open Source License
public static List<EPLModuleParseItem> parse(String module) throws ParseException {
    CharStream input;
    try {
        input = new NoCaseSensitiveStream(new StringReader(module));
    } catch (IOException ex) {
        log.error("Exception reading module expression: " + ex.getMessage(), ex);
        return null;
    }

    EsperEPL2GrammarLexer lex = ParseHelper.newLexer(input);
    CommonTokenStream tokens = new CommonTokenStream(lex);
    try {
        tokens.fill();
    } catch (RuntimeException ex) {
        String message = "Unexpected exception recognizing module text";
        if (ex instanceof LexerNoViableAltException) {
            if (ParseHelper.hasControlCharacters(module)) {
                message = "Unrecognized control characters found in text, failed to parse text";
            } else {
                message += ", recognition failed for " + ex.toString();
            }
        } else if (ex instanceof RecognitionException) {
            RecognitionException recog = (RecognitionException) ex;
            message += ", recognition failed for " + recog.toString();
        } else if (ex.getMessage() != null) {
            message += ": " + ex.getMessage();
        }
        message += " [" + module + "]";
        log.error(message, ex);
        throw new ParseException(message);
    }

    List<EPLModuleParseItem> statements = new ArrayList<EPLModuleParseItem>();
    StringWriter current = new StringWriter();
    Integer lineNum = null;
    int charPosStart = 0;
    int charPos = 0;
    List<Token> tokenList = tokens.getTokens();
    Set<Integer> skippedSemicolonIndexes = getSkippedSemicolons(tokenList);

    int index = -1;
    for (Object token : tokenList) { // Call getTokens first before invoking tokens.size! ANTLR problem
        index++;
        Token t = (Token) token;
        boolean semi = t.getType() == EsperEPL2GrammarLexer.SEMI && !skippedSemicolonIndexes.contains(index);
        if (semi) {
            if (current.toString().trim().length() > 0) {
                statements.add(new EPLModuleParseItem(current.toString().trim(),
                        lineNum == null ? 0 : lineNum, charPosStart, charPos));
                lineNum = null;
            }
            current = new StringWriter();
        } else {
            if ((lineNum == null) && (t.getType() != EsperEPL2GrammarParser.WS)) {
                lineNum = t.getLine();
                charPosStart = charPos;
            }
            if (t.getType() != EsperEPL2GrammarLexer.EOF) {
                current.append(t.getText());
                charPos += t.getText().length();
            }
        }
    }

    if (current.toString().trim().length() > 0) {
        statements.add(new EPLModuleParseItem(current.toString().trim(), lineNum == null ? 0 : lineNum, 0, 0));
    }
    return statements;
}
From source file:com.espertech.esper.epl.db.DatabasePollingViewableFactory.java
License:Open Source License
/**
 * Lexes the sample SQL and inserts a "where 1=0" where-clause.
 * @param querySQL to inspect using lexer
 * @return sample SQL with where-clause inserted
 * @throws ExprValidationException to indicate a lexer problem
 */
protected static String lexSampleSQL(String querySQL) throws ExprValidationException {
    querySQL = querySQL.replaceAll("\\s\\s+|\\n|\\r", " ");
    StringReader reader = new StringReader(querySQL);
    CharStream input;
    try {
        input = new NoCaseSensitiveStream(reader);
    } catch (IOException ex) {
        throw new ExprValidationException("IOException lexing query SQL '" + querySQL + '\'', ex);
    }

    int whereIndex = -1;
    int groupbyIndex = -1;
    int havingIndex = -1;
    int orderByIndex = -1;
    List<Integer> unionIndexes = new ArrayList<Integer>();

    EsperEPL2GrammarLexer lex = ParseHelper.newLexer(input);
    CommonTokenStream tokens = new CommonTokenStream(lex);
    tokens.fill();
    List tokenList = tokens.getTokens();

    for (int i = 0; i < tokenList.size(); i++) {
        Token token = (Token) tokenList.get(i);
        if ((token == null) || token.getText() == null) {
            break;
        }
        String text = token.getText().toLowerCase().trim();
        if (text.equals("where")) {
            whereIndex = token.getCharPositionInLine() + 1;
        }
        if (text.equals("group")) {
            groupbyIndex = token.getCharPositionInLine() + 1;
        }
        if (text.equals("having")) {
            havingIndex = token.getCharPositionInLine() + 1;
        }
        if (text.equals("order")) {
            orderByIndex = token.getCharPositionInLine() + 1;
        }
        if (text.equals("union")) {
            unionIndexes.add(token.getCharPositionInLine() + 1);
        }
    }

    // If we have a union, break string into subselects and process each
    if (unionIndexes.size() != 0) {
        StringWriter changedSQL = new StringWriter();
        int lastIndex = 0;
        for (int i = 0; i < unionIndexes.size(); i++) {
            int index = unionIndexes.get(i);
            String fragment;
            if (i > 0) {
                fragment = querySQL.substring(lastIndex + 5, index - 1);
            } else {
                fragment = querySQL.substring(lastIndex, index - 1);
            }
            String lexedFragment = lexSampleSQL(fragment);
            if (i > 0) {
                changedSQL.append("union ");
            }
            changedSQL.append(lexedFragment);
            lastIndex = index - 1;
        }

        // last part after last union
        String fragment = querySQL.substring(lastIndex + 5, querySQL.length());
        String lexedFragment = lexSampleSQL(fragment);
        changedSQL.append("union ");
        changedSQL.append(lexedFragment);
        return changedSQL.toString();
    }

    // Found a where clause, simplest cases
    if (whereIndex != -1) {
        StringWriter changedSQL = new StringWriter();
        String prefix = querySQL.substring(0, whereIndex + 5);
        String suffix = querySQL.substring(whereIndex + 5, querySQL.length());
        changedSQL.write(prefix);
        changedSQL.write("1=0 and ");
        changedSQL.write(suffix);
        return changedSQL.toString();
    }

    // No where clause, find group-by
    int insertIndex;
    if (groupbyIndex != -1) {
        insertIndex = groupbyIndex;
    } else if (havingIndex != -1) {
        insertIndex = havingIndex;
    } else if (orderByIndex != -1) {
        insertIndex = orderByIndex;
    } else {
        StringWriter changedSQL = new StringWriter();
        changedSQL.write(querySQL);
        changedSQL.write(" where 1=0 ");
        return changedSQL.toString();
    }

    try {
        StringWriter changedSQL = new StringWriter();
        String prefix = querySQL.substring(0, insertIndex - 1);
        changedSQL.write(prefix);
        changedSQL.write("where 1=0 ");
        String suffix = querySQL.substring(insertIndex - 1, querySQL.length());
        changedSQL.write(suffix);
        return changedSQL.toString();
    } catch (Exception ex) {
        String text = "Error constructing sample SQL to retrieve metadata for JDBC-drivers that don't support metadata, consider using the "
                + SAMPLE_WHERECLAUSE_PLACEHOLDER + " placeholder or providing a sample SQL";
        log.error(text, ex);
        throw new ExprValidationException(text, ex);
    }
}
From source file:com.espertech.esper.epl.parse.ASTUtil.java
License:Open Source License
/**
 * Print the token stream to the logger.
 * @param tokens to print
 */
public static void printTokens(CommonTokenStream tokens) {
    if (log.isDebugEnabled()) {
        List tokenList = tokens.getTokens();

        StringWriter writer = new StringWriter();
        PrintWriter printer = new PrintWriter(writer);
        for (int i = 0; i < tokens.size(); i++) {
            Token t = (Token) tokenList.get(i);
            String text = t.getText();
            if (text.trim().length() == 0) {
                printer.print("'" + text + "'");
            } else {
                printer.print(text);
            }
            printer.print('[');
            printer.print(t.getType());
            printer.print(']');
            printer.print(" ");
        }
        printer.println();
        log.debug("Tokens: " + writer.toString());
    }
}
From source file:com.espertech.esper.event.property.PropertyParser.java
License:Open Source License
private synchronized static String escapeKeywords(CommonTokenStream tokens) {
    if (keywordCache == null) {
        keywordCache = new HashSet<String>();
        Set<String> keywords = ParseHelper.newParser(tokens).getKeywords();
        for (String keyword : keywords) {
            if (keyword.charAt(0) == '\'' && keyword.charAt(keyword.length() - 1) == '\'') {
                keywordCache.add(keyword.substring(1, keyword.length() - 1));
            }
        }
    }

    StringWriter writer = new StringWriter();
    for (Object token : tokens.getTokens()) { // Call getTokens first before invoking tokens.size! ANTLR problem
        Token t = (Token) token;
        if (t.getType() == EsperEPL2GrammarLexer.EOF) {
            break;
        }
        boolean isKeyword = keywordCache.contains(t.getText().toLowerCase());
        if (isKeyword) {
            writer.append('`');
            writer.append(t.getText());
            writer.append('`');
        } else {
            writer.append(t.getText());
        }
    }
    return writer.toString();
}
From source file:com.fizzed.rocker.compiler.TemplateParser.java
License:Apache License
private TemplateModel parse(ANTLRInputStream input, String packageName, String templateName, long modifiedAt)
        throws ParserException {
    // construct path for more helpful error messages
    String templatePath = packageName.replace(".", File.separator) + "/" + templateName;

    // get our lexer
    log.trace("Lexer for input stream");
    RockerLexer lexer = new RockerLexer(input);
    lexer.removeErrorListeners();
    lexer.addErrorListener(new DescriptiveErrorListener());

    //
    // lexer
    //
    CommonTokenStream tokens = null;
    try {
        // get a list of matched tokens
        log.trace("Tokenizing lexer");
        tokens = new CommonTokenStream(lexer);
    } catch (ParserRuntimeException e) {
        throw unwrapParserRuntimeException(templatePath, e);
    }

    if (log.isTraceEnabled()) {
        // just for debugging lexer
        tokens.fill();
        for (Token token : tokens.getTokens()) {
            log.trace("{}", token);
        }
    }

    //
    // parser & new model
    //
    try {
        // pass the tokens to the parser
        log.trace("Parsing tokens");
        RockerParser parser = new RockerParser(tokens);
        parser.removeErrorListeners();
        parser.addErrorListener(new DescriptiveErrorListener());

        TemplateModel model = new TemplateModel(packageName, templateName, modifiedAt, configuration.getOptions().copy());

        // walk it and attach our listener
        TemplateParserListener listener = new TemplateParserListener(input, model, templatePath);
        ParseTreeWalker walker = new ParseTreeWalker();
        log.trace("Walking parse tree");
        walker.walk(listener, parser.template());

        if (model.getOptions().getCombineAdjacentPlain()) {
            combineAdjacentPlain(model);
        }

        // discard whitespace either globally or per template-set, or also fall back
        // to the default per content-type
        if (model.getOptions().getDiscardLogicWhitespaceForContentType(model.getContentType())) {
            discardLogicWhitespace(model);
        }

        return model;
    } catch (ParserRuntimeException e) {
        throw unwrapParserRuntimeException(templatePath, e);
    }
}
From source file:com.github.jknack.css.CSS.java
License:Apache License
public StyleSheet parse(final String content) {
    CssLexer lexer = new CssLexer(new ANTLRInputStream(content));
    // lexer.removeErrorListeners();
    if (!debug) {
        lexer.addErrorListener(new ErrorReporter());
    }

    CommonTokenStream tokens = new CommonTokenStream(lexer);
    tokens.fill();

    CssParser parser = new CssParser(tokens);
    // parser.setErrorHandler(new BailErrorStrategy());
    // parser.removeParseListeners();
    if (!debug) {
        parser.addErrorListener(new ErrorReporter());
    }

    if (debug) {
        for (Token tok : tokens.getTokens()) {
            CommonToken ct = (CommonToken) tok;
            String[] tokenNames = lexer.getTokenNames();
            int type = ct.getType();
            System.out.println((type > 0 ? tokenNames[type] : "EOF") + "(" + ct.getText() + ")");
        }
    }

    StyleSheetContext tree = parser.styleSheet();
    if (debug) {
        System.out.println(tree.toStringTree(parser));
    }

    CSSBuilder builder = new CSSBuilder();
    return (StyleSheet) builder.visit(tree);
}
From source file:controle.analiseTexto.AnalisePeriodo.java
public static ArrayList<String> etiquetar(CommonTokenStream tokens) {
    ultimaPalavraSentenca = tokens.getTokens().get(tokens.getTokens().size() - 2).getText();

    for (int i = 0; i < tokens.getTokens().size() - 1; i++) {
        // Detect the tags and assign them to their respective tokens
        String palavraEtiquetada = tokens.getTokens().get(i).getText().toLowerCase();
        String etiqueta = tagger.getHashEtiquetas().get(palavraEtiquetada);
        String lemma = "";
        try {
            lemma = tagger.getLemma(palavraEtiquetada);
        } catch (Exception e) {
            e.printStackTrace();
        }

        if (Character.isDigit(palavraEtiquetada.charAt(0))) {
            ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.NUMERO);
        } else {
            switch (tagger.getHashEtiquetas().get(palavraEtiquetada).charAt(0)) {
            case 'V':
                ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.VERB);
                break;
            case 'N':
                ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.SUBS);
                break;
            case 'D':
                ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.ART);
                break;
            case 'P':
                ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.PRON);
                break;
            case 'S':
                ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.PREP);
                break;
            case 'A':
                ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.ADJ);
                break;
            case 'Z':
                ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.NUM);
                break;
            case 'C':
                ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.CONJ);
                break;
            case 'F':
                try {
                    if (tagger.getEtiquetas().get(i).charAt(1) == 'c') {
                        ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.TERMINAL);
                    } else if (tagger.getEtiquetas().get(i).charAt(1) == 'd') {
                        // no type change for this tag
                    } else {
                        ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.SIMBOLOS);
                    }
                } catch (Exception e) {
                    ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.PALAVRAESTRANGEIRA);
                }
                break;
            case 'R':
                ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.ADV);
                break;
            } // switch
        } // if charAt
    }
    return (ArrayList<String>) tagger.getEtiquetas();
}
From source file:controle.analiseTexto.AnalisePeriodo.java
public static ArrayList<String> etiquetarDescricaoFuncionalidade(CommonTokenStream tokens) {
    String verbos = "#dever#poder#";
    elementosDaFraseSRS = new ElementosFrase();
    etiquetasSentenca = "";
    ultimaPalavraSentenca = tokens.getTokens().get(tokens.getTokens().size() - 2).getText();

    for (int i = 0; i < tokens.getTokens().size() - 1; i++) {
        // Detect the tags and assign them to their respective tokens
        String palavraEtiquetada = tokens.getTokens().get(i).getText().toLowerCase();
        String lemma = "";
        try {
            lemma = tagger.getLemma(palavraEtiquetada);
        } catch (Exception e) {
            // System.out.println("erro em: " + palavraEtiquetada);
            e.printStackTrace();
        }

        switch (tagger.getHashEtiquetas().get(palavraEtiquetada).charAt(0)) {
        case 'V':
            String verbo = tokens.getTokens().get(i).getText().toLowerCase();
            verbo = Etiquetador.lemmas.get(verbo.toLowerCase());
            if (verbo.toLowerCase().equals("permitir")) {
                ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.PERMITIR);
                etiquetasSentenca = etiquetasSentenca + "PERMITIR";
                //System.out.print(palavraEtiquetada + ":" + "PERMITIR ");
                elementosDaFraseSRS.addElemento(palavraEtiquetada, "PERMITIR");
            } else {
                if (verbos.contains(verbo.toLowerCase())) {
                    ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.DEVE);
                    etiquetasSentenca = etiquetasSentenca + "DEVE";
                    // System.out.print(palavraEtiquetada + ":" + "DEVE ");
                    elementosDaFraseSRS.addElemento(palavraEtiquetada, "DEVE");
                } else {
                    ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.VERB);
                    etiquetasSentenca = etiquetasSentenca + "VERB";
                    //System.out.print(palavraEtiquetada + ":" + "VERB ");
                    elementosDaFraseSRS.addElemento(palavraEtiquetada, "VERB");
                }
                elementosDaFraseSRS.addVerbo(verbo);
            }
            break;
        case 'N':
            if (tokens.getTokens().get(i).getText().toLowerCase().equals("sistema") && i <= 2) {
                ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.SISTEMA);
                etiquetasSentenca = etiquetasSentenca + "SISTEMA";
                //System.out.print(palavraEtiquetada + ":" + "SISTEMA ");
                elementosDaFraseSRS.addElemento(palavraEtiquetada, "SISTEMA");
            } else {
                ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.SUBS);
                etiquetasSentenca = etiquetasSentenca + "SUBS";
                //System.out.print(palavraEtiquetada + ":" + "SUBS ");
                elementosDaFraseSRS.addElemento(palavraEtiquetada, "SUBS");
            }
            break;
        case 'D':
            ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.ART);
            //System.out.print(palavraEtiquetada + ":" + "ART ");
            elementosDaFraseSRS.addElemento(palavraEtiquetada, "ART");
            break;
        case 'P':
            ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.PRON);
            //System.out.print(palavraEtiquetada + ":" + "PRON ");
            elementosDaFraseSRS.addElemento(palavraEtiquetada, "PRON");
            break;
        case 'S':
            ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.PREP);
            //System.out.print(palavraEtiquetada + ":" + "PREP ");
            elementosDaFraseSRS.addElemento(palavraEtiquetada, "PREP");
            break;
        case 'A':
            ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.ADJ);
            //System.out.print(palavraEtiquetada + ":" + "ADJ ");
            elementosDaFraseSRS.addElemento(palavraEtiquetada, "ADJ");
            break;
        case 'Z':
            ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.NUM);
            //System.out.print(palavraEtiquetada + ":" + "NUM ");
            elementosDaFraseSRS.addElemento(palavraEtiquetada, "NUM");
            break;
        case 'C':
            ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.CONJ);
            //System.out.print(palavraEtiquetada + ":" + "CONJ ");
            elementosDaFraseSRS.addElemento(palavraEtiquetada, "CONJ");
            break;
        case 'F':
            try {
                if (tagger.getHashEtiquetas().get(palavraEtiquetada).charAt(1) == 'c') {
                    ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.SEPARADOR);
                    //System.out.print(palavraEtiquetada + ":" + "SEPARADOR ");
                    elementosDaFraseSRS.addElemento(palavraEtiquetada, "SEPARADOR");
                } else if (tagger.getHashEtiquetas().get(palavraEtiquetada).charAt(1) == 'd') {
                    // no type change for this tag
                } else {
                    if (tokens.getTokens().get(i).getText().toLowerCase().equals(".")) {
                        ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.PONTO);
                        //System.out.print(palavraEtiquetada + ":" + "PONTO ");
                        elementosDaFraseSRS.addElemento(palavraEtiquetada, "PONTO");
                    } else {
                        ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.SIMBOLOS);
                        //System.out.print(palavraEtiquetada + ":" + "SIMBOLOS ");
                        elementosDaFraseSRS.addElemento(palavraEtiquetada, "SIMBOLOS");
                    }
                }
            } catch (Exception e) {
                ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.PALAVRAESTRANGEIRA);
                //System.out.print(palavraEtiquetada + ":" + "PALAVRAESTRANGEIRA ");
                elementosDaFraseSRS.addElemento(palavraEtiquetada, "PALAVRAESTRANGEIRA");
            }
            break;
        case 'R':
            ((CommonToken) tokens.getTokens().get(i)).setType(SrsGrammarParser.ADV);
            //System.out.print(palavraEtiquetada + ":" + "ADV ");
            elementosDaFraseSRS.addElemento(palavraEtiquetada, "ADV");
            break;
        }
    }
    return (ArrayList<String>) tagger.getEtiquetas();
}