Example usage for org.antlr.v4.runtime CommonTokenFactory CommonTokenFactory

List of usage examples for org.antlr.v4.runtime CommonTokenFactory CommonTokenFactory

Introduction

In this page you can find the example usage for org.antlr.v4.runtime CommonTokenFactory CommonTokenFactory.

Prototype

public CommonTokenFactory(boolean copyText) 

Source Link

Document

Constructs a CommonTokenFactory with the specified value for the copyText flag.

Usage

From source file:ai.grakn.graql.internal.parser.QueryParserImpl.java

License:Open Source License

/**
 * Parses several queries from a reader into a lazy stream.
 *
 * @param reader a reader representing several queries
 * @return a lazy stream of parsed queries, consumed one query at a time
 */
@Override
public <T extends Query<?>> Stream<T> parseList(Reader reader) {
    UnbufferedCharStream charStream = new UnbufferedCharStream(reader);
    GraqlErrorListener errorListener = GraqlErrorListener.withoutQueryString();
    GraqlLexer lexer = createLexer(charStream, errorListener);

    /*
    We tell the lexer to copy the text into each generated token.
    Normally when calling `Token#getText`, it will look into the underlying `TokenStream` and call
    `TokenStream#size` to check it is in-bounds. However, `UnbufferedTokenStream#size` is not supported
    (because then it would have to read the entire input). To avoid this issue, we set this flag which will
    copy over the text into each `Token`, s.t. `Token#getText` will just look up the copied text field.
    */
    lexer.setTokenFactory(new CommonTokenFactory(true));

    // Use an unbuffered token stream so we can handle extremely large input strings
    UnbufferedTokenStream tokenStream = new UnbufferedTokenStream(ChannelTokenSource.of(lexer));

    GraqlParser parser = createParser(tokenStream, errorListener);

    /*
    The "bail" error strategy prevents us reading all the way to the end of the input, e.g.

    ```
    match $x isa person; insert $x has name "Bob"; match $x isa movie; get;
                                                   ^
    ```

    In this example, when ANTLR reaches the indicated `match`, it considers two possibilities:

    1. this is the end of the query
    2. the user has made a mistake. Maybe they accidentally pasted the `match` here.

    Because of case 2, ANTLR will parse beyond the `match` in order to produce a more helpful error message.
    This causes memory issues for very large queries, so we use the simpler "bail" strategy that will
    immediately stop when it hits `match`.
    */
    parser.setErrorHandler(new BailErrorStrategy());

    // This is a lazy iterator that will only consume a single query at a time, without parsing any further.
    // This means it can pass arbitrarily long streams of queries in constant memory!
    Iterable<T> queryIterator = () -> new AbstractIterator<T>() {
        @Nullable
        @Override
        protected T computeNext() {
            int latestToken = tokenStream.LA(1);
            if (latestToken == Token.EOF) {
                endOfData();
                return null;
            } else {
                // This will parse and consume a single query, even if it doesn't reach an EOF
                // When we next run it, it will start where it left off in the stream
                return (T) QUERY.parse(parser, errorListener);
            }
        }
    };

    return StreamSupport.stream(queryIterator.spliterator(), false);
}

From source file:com.linkedin.pinot.pql.parsers.Pql2Compiler.java

License:Apache License

/**
 * Compiles a PQL2 expression into a {@link BrokerRequest}.
 *
 * @param expression the PQL2 query text
 * @return the broker request built from the parsed query
 * @throws Pql2CompilationException when lexing, parsing or AST translation fails
 */
@Override
public BrokerRequest compileToBrokerRequest(String expression) throws Pql2CompilationException {
    try {
        // Lex with eager text copying so the unbuffered token stream can serve Token#getText later.
        CharStream input = new ANTLRInputStream(expression);
        PQL2Lexer pqlLexer = new PQL2Lexer(input);
        pqlLexer.setTokenFactory(new CommonTokenFactory(true));
        TokenStream tokens = new UnbufferedTokenStream<CommonToken>(pqlLexer);
        PQL2Parser pqlParser = new PQL2Parser(tokens);
        // Bail immediately on the first syntax error instead of attempting recovery.
        pqlParser.setErrorHandler(new BailErrorStrategy());

        // Parse the full query and walk the tree to build the AST.
        ParseTree tree = pqlParser.root();
        Pql2AstListener astListener = new Pql2AstListener();
        new ParseTreeWalker().walk(astListener, tree);

        // Translate the AST into the broker request.
        BrokerRequest request = new BrokerRequest();
        astListener.getRootNode().updateBrokerRequest(request);
        return request;
    } catch (Pql2CompilationException e) {
        throw e;
    } catch (Exception e) {
        // NOTE(review): original behavior kept — the cause is dropped here and only
        // its message survives; consider a (message, cause) constructor if one exists.
        throw new Pql2CompilationException(e.getMessage());
    }
}

From source file:io.mindmaps.graql.internal.parser.QueryParser.java

License:Open Source License

/**
 * Lazily parses patterns from an input stream, one pattern at a time.
 *
 * @param inputStream a stream representing a list of patterns
 * @return a lazy stream of parsed patterns
 */
public Stream<Pattern> parsePatterns(InputStream inputStream) {
    GraqlLexer lexer = new GraqlLexer(new UnbufferedCharStream(inputStream));
    // Copy token text into each token: an unbuffered token stream cannot serve getText() later.
    lexer.setTokenFactory(new CommonTokenFactory(true));
    UnbufferedTokenStream tokens = new UnbufferedTokenStream(lexer);

    // Create an iterable that will keep parsing until EOF
    Iterable<Pattern> iterable = () -> new Iterator<Pattern>() {

        // Caches a parsed-but-not-yet-returned pattern between hasNext() and next()
        private Pattern pattern = null;

        // Parses the next pattern if none is cached; empty once the token stream hits EOF.
        private Optional<Pattern> getNext() {

            if (pattern == null) {
                if (tokens.get(tokens.index()).getType() == Token.EOF) {
                    return Optional.empty();
                }

                pattern = parseQueryFragment(GraqlParser::patternSep, QueryVisitor::visitPatternSep, tokens);
            }
            return Optional.of(pattern);
        }

        @Override
        public boolean hasNext() {
            return getNext().isPresent();
        }

        @Override
        public Pattern next() {
            Optional<Pattern> result = getNext();
            pattern = null;
            return result.orElseThrow(NoSuchElementException::new);
        }
    };

    return StreamSupport.stream(iterable.spliterator(), false);
}

From source file:io.mindmaps.graql.QueryParser.java

License:Open Source License

/**
 * Lazily parses patterns from an input stream, producing them one at a time.
 *
 * @param inputStream a stream representing a list of patterns
 * @return a lazy stream of parsed patterns
 */
public Stream<Pattern> parsePatternsStream(InputStream inputStream) {
    GraqlLexer lexer = new GraqlLexer(new UnbufferedCharStream(inputStream));
    // Eagerly copy token text so the unbuffered token stream can serve Token#getText later.
    lexer.setTokenFactory(new CommonTokenFactory(true));
    UnbufferedTokenStream tokens = new UnbufferedTokenStream(lexer);

    // Iterate patterns until the token stream reaches EOF.
    Iterable<Pattern> patternIterable = () -> new Iterator<Pattern>() {

        // Holds a pattern that has been parsed but not yet handed out by next().
        private Pattern buffered = null;

        // Returns the next pattern, parsing one if nothing is buffered yet.
        private Optional<Pattern> peek() {
            if (buffered != null) {
                return Optional.of(buffered);
            }
            if (tokens.get(tokens.index()).getType() == Token.EOF) {
                return Optional.empty();
            }
            buffered = parseQueryFragment(GraqlParser::patternSep, QueryVisitor::visitPatternSep, tokens);
            return Optional.of(buffered);
        }

        @Override
        public boolean hasNext() {
            return peek().isPresent();
        }

        @Override
        public Pattern next() {
            Optional<Pattern> result = peek();
            buffered = null;
            return result.orElseThrow(NoSuchElementException::new);
        }
    };

    return StreamSupport.stream(patternIterable.spliterator(), false);
}

From source file:org.chocosolver.parser.flatzinc.Flatzinc.java

License:Open Source License

/**
 * Parses a FlatZinc model from the given input stream into the target model.
 *
 * @param target the model to populate
 * @param data   parsing data shared across the run
 * @param is     the FlatZinc source to read
 */
public void parse(Model target, Datas data, InputStream is) {
    // Stream the input unbuffered so very large FlatZinc files are never loaded whole.
    CharStream source = new UnbufferedCharStream(is);
    Flatzinc4Lexer fznLexer = new Flatzinc4Lexer(source);
    // Copy token text eagerly: unbuffered streams cannot look the text up afterwards.
    fznLexer.setTokenFactory(new CommonTokenFactory(true));
    TokenStream tokenStream = new UnbufferedTokenStream<CommonToken>(fznLexer);
    Flatzinc4Parser fznParser = new Flatzinc4Parser(tokenStream);
    // SLL prediction is faster; no parse tree is needed for direct model building.
    fznParser.getInterpreter().setPredictionMode(PredictionMode.SLL);
    fznParser.setBuildParseTree(false);
    fznParser.setTrimParseTree(false);
    fznParser.flatzinc_model(target, data, all, free);
}

From source file:org.eclipse.titan.common.parsers.cfg.CfgAnalyzer.java

License:Open Source License

/**
 * Parses the provided elements./*from   w  w w . jav a 2 s.c o m*/
 * If the contents of an editor are to be parsed, than the file parameter is only used to report the errors to.
 * 
 * @param file the file to parse
 * @param fileName the name of the file, to refer to.
 * @param code the contents of an editor, or null.
 */
public void directParse(final IFile file, final String fileName, final String code) {
    final Reader reader;
    final int fileLength;
    if (null != code) {
        reader = new StringReader(code);
        fileLength = code.length();
    } else if (null != file) {
        try {
            reader = new BufferedReader(new InputStreamReader(file.getContents(), StandardCharsets.UTF8));
            IFileStore store = EFS.getStore(file.getLocationURI());
            IFileInfo fileInfo = store.fetchInfo();
            fileLength = (int) fileInfo.getLength();
        } catch (CoreException e) {
            ErrorReporter.logExceptionStackTrace("Could not get the contents of `" + fileName + "'", e);
            return;
        }
    } else {
        ErrorReporter.INTERNAL_ERROR("CfgAnalyzer.directParse(): nothing to parse");
        return;
    }

    final CharStream charStream = new UnbufferedCharStream(reader);
    CfgLexer lexer = new CfgLexer(charStream);
    lexer.setTokenFactory(new CommonTokenFactory(true));
    lexer.initRootInterval(fileLength);
    lexerListener = new TitanListener();
    lexer.removeErrorListeners(); // remove ConsoleErrorListener
    lexer.addErrorListener(lexerListener);

    // 1. Previously it was UnbufferedTokenStream(lexer), but it was changed to BufferedTokenStream, because UnbufferedTokenStream seems to be unusable. It is an ANTLR 4 bug.
    // Read this: https://groups.google.com/forum/#!topic/antlr-discussion/gsAu-6d3pKU
    // pr_PatternChunk[StringBuilder builder, boolean[] uni]:
    //   $builder.append($v.text); <-- exception is thrown here: java.lang.UnsupportedOperationException: interval 85..85 not in token buffer window: 86..341
    // 2. Changed from BufferedTokenStream to CommonTokenStream, otherwise tokens with "-> channel(HIDDEN)" are not filtered out in lexer.
    final CommonTokenStream tokenStream = new CommonTokenStream(lexer);
    final CfgParser parser = new CfgParser(tokenStream);
    parser.setActualFile(file);
    //parser tree is built by default
    parserListener = new TitanListener();
    parser.removeErrorListeners(); // remove ConsoleErrorListener
    parser.addErrorListener(parserListener);
    final ParserRuleContext parseTreeRoot = parser.pr_ConfigFile();

    mCfgParseResult = parser.getCfgParseResult();
    // manually add the result parse tree, and its corresponding token stream,
    // because they logically belong to here
    mCfgParseResult.setParseTreeRoot(parseTreeRoot);
    mCfgParseResult.setTokens(tokenStream.getTokens());

    // fill handlers
    moduleParametersHandler = parser.getModuleParametersHandler();
    testportParametersHandler = parser.getTestportParametersHandler();
    componentSectionHandler = parser.getComponentSectionHandler();
    groupSectionHandler = parser.getGroupSectionHandler();
    mcSectionHandler = parser.getMcSectionHandler();
    externalCommandsSectionHandler = parser.getExternalCommandsSectionHandler();
    executeSectionHandler = parser.getExecuteSectionHandler();
    includeSectionHandler = parser.getIncludeSectionHandler();
    orderedIncludeSectionHandler = parser.getOrderedIncludeSectionHandler();
    defineSectionHandler = parser.getDefineSectionHandler();
    loggingSectionHandler = parser.getLoggingSectionHandler();

    rootInterval = lexer.getRootInterval();
}

From source file:org.eclipse.titan.designer.AST.TTCN3.definitions.Definition.java

License:Open Source License

/**
 * Parses the specification string of an attribute into an erroneous attribute specification,
 * reporting any syntax problems as on-the-fly markers on the attribute's file.
 *
 * @param aAttrSpec the attribute specification whose string is to be parsed
 * @return the parsed erroneous attribute specification, or null when there is nothing to parse
 */
private static ErroneousAttributeSpecification parseErrAttrSpecString(final AttributeSpecification aAttrSpec) {
    ErroneousAttributeSpecification returnValue = null;
    Location location = aAttrSpec.getLocation();
    String code = aAttrSpec.getSpecification();
    if (code == null) {
        return null;
    }
    // code must be transformed, according to
    // compiler2/ttcn3/charstring_la.l
    code = Ttcn3CharstringLexer.parseCharstringValue(code, location); // TODO
    Reader reader = new StringReader(code);
    CharStream charStream = new UnbufferedCharStream(reader);
    Ttcn3Lexer lexer = new Ttcn3Lexer(charStream);
    // Copy token text into each token: an unbuffered char stream cannot serve getText() later.
    lexer.setTokenFactory(new CommonTokenFactory(true));
    // needs to be shifted by one because of the \" of the string
    lexer.setCharPositionInLine(0);

    // lexer and parser listener
    TitanListener parserListener = new TitanListener();
    // remove ConsoleErrorListener
    lexer.removeErrorListeners();
    lexer.addErrorListener(parserListener);

    // 1. Previously it was UnbufferedTokenStream(lexer), but it was changed to BufferedTokenStream, because UnbufferedTokenStream seems to be unusable. It is an ANTLR 4 bug.
    // Read this: https://groups.google.com/forum/#!topic/antlr-discussion/gsAu-6d3pKU
    // pr_PatternChunk[StringBuilder builder, boolean[] uni]:
    //   $builder.append($v.text); <-- exception is thrown here: java.lang.UnsupportedOperationException: interval 85..85 not in token buffer window: 86..341
    // 2. Changed from BufferedTokenStream to CommonTokenStream, otherwise tokens with "-> channel(HIDDEN)" are not filtered out in lexer.
    final CommonTokenStream tokenStream = new CommonTokenStream(lexer);

    Ttcn3Reparser parser = new Ttcn3Reparser(tokenStream);
    IFile file = (IFile) location.getFile();
    parser.setActualFile(file);
    // offsets are shifted by one to account for the opening quote of the original string literal
    parser.setOffset(location.getOffset() + 1);
    parser.setLine(location.getLine());

    // remove ConsoleErrorListener
    parser.removeErrorListeners();
    parser.addErrorListener(parserListener);

    MarkerHandler.markMarkersForRemoval(GeneralConstants.ONTHEFLY_SYNTACTIC_MARKER, location.getFile(),
            location.getOffset(), location.getEndOffset());

    returnValue = parser.pr_ErroneousAttributeSpec().errAttrSpec;
    List<SyntacticErrorStorage> errors = parser.getErrors();
    List<TITANMarker> warnings = parser.getWarnings();
    List<TITANMarker> unsupportedConstructs = parser.getUnsupportedConstructs();

    // add markers
    if (errors != null) {
        for (int i = 0; i < errors.size(); i++) {
            Location temp = new Location(location);
            temp.setOffset(temp.getOffset() + 1);
            ParserMarkerSupport.createOnTheFlySyntacticMarker(file, errors.get(i), IMarker.SEVERITY_ERROR,
                    temp);
        }
    }
    if (warnings != null) {
        for (TITANMarker marker : warnings) {
            if (file.isAccessible()) {
                Location loc = new Location(file, marker.getLine(), marker.getOffset(), marker.getEndOffset());
                loc.reportExternalProblem(marker.getMessage(), marker.getSeverity(),
                        GeneralConstants.ONTHEFLY_SYNTACTIC_MARKER);
            }
        }
    }
    if (unsupportedConstructs != null) {
        for (TITANMarker marker : unsupportedConstructs) {
            if (file.isAccessible()) {
                Location loc = new Location(file, marker.getLine(), marker.getOffset(), marker.getEndOffset());
                loc.reportExternalProblem(marker.getMessage(), marker.getSeverity(),
                        GeneralConstants.ONTHEFLY_SYNTACTIC_MARKER);
            }
        }
    }
    return returnValue;
}

From source file:org.eclipse.titan.designer.parsers.extensionattributeparser.ExtensionAttributeAnalyzer.java

License:Open Source License

/**
 * Parses the given attribute specification into extension attributes, storing the result
 * in the {@code attributes} field and reporting any syntax errors as markers.
 *
 * @param specification the attribute specification to parse
 */
public void parse(final AttributeSpecification specification) {
    ExtensionAttributeLexer lexer;
    Location location = specification.getLocation();

    StringReader reader = new StringReader(specification.getSpecification());
    CharStream charStream = new UnbufferedCharStream(reader);
    lexer = new ExtensionAttributeLexer(charStream);
    // Copy token text into each token: an unbuffered char stream cannot serve getText() later.
    lexer.setTokenFactory(new CommonTokenFactory(true));
    TitanListener lexerListener = new TitanListener();
    lexer.removeErrorListeners();
    lexer.addErrorListener(lexerListener);

    // 1. Previously it was UnbufferedTokenStream(lexer), but it was changed to BufferedTokenStream, because UnbufferedTokenStream seems to be unusable. It is an ANTLR 4 bug.
    // Read this: https://groups.google.com/forum/#!topic/antlr-discussion/gsAu-6d3pKU
    // pr_PatternChunk[StringBuilder builder, boolean[] uni]:
    //   $builder.append($v.text); <-- exception is thrown here: java.lang.UnsupportedOperationException: interval 85..85 not in token buffer window: 86..341
    // 2. Changed from BufferedTokenStream to CommonTokenStream, otherwise tokens with "-> channel(HIDDEN)" are not filtered out in lexer.
    final CommonTokenStream tokenStream = new CommonTokenStream(lexer);

    ExtensionAttributeParser parser = new ExtensionAttributeParser(tokenStream);
    parser.setBuildParseTree(false);

    TitanListener parserListener = new TitanListener();
    parser.removeErrorListeners();
    parser.addErrorListener(parserListener);

    parser.setActualFile((IFile) location.getFile());
    parser.setLine(location.getLine());
    // offsets are shifted by one to account for the opening quote of the attribute text
    parser.setOffset(location.getOffset() + 1);

    MarkerHandler.markMarkersForRemoval(GeneralConstants.ONTHEFLY_SYNTACTIC_MARKER, location.getFile(),
            location.getOffset(), location.getEndOffset());

    attributes = null;
    attributes = parser.pr_ExtensionAttributeRoot().list;

    // Report lexer errors first, then parser errors, at the configured severity.
    reportListenerErrors(lexerListener, location);
    reportListenerErrors(parserListener, location);
}

/**
 * Creates on-the-fly markers for every error stored by the given listener, at the severity
 * configured by the REPORTERRORSINEXTENSIONSYNTAX preference. Does nothing when the listener
 * holds no errors or the preference is neither ERROR nor WARNING.
 */
private void reportListenerErrors(final TitanListener listener, final Location location) {
    if (listener.getErrorsStored().isEmpty()) {
        return;
    }
    String reportLevel = Platform.getPreferencesService().getString(ProductConstants.PRODUCT_ID_DESIGNER,
            PreferenceConstants.REPORTERRORSINEXTENSIONSYNTAX, GeneralConstants.WARNING, null);
    int errorLevel;
    if (GeneralConstants.ERROR.equals(reportLevel)) {
        errorLevel = IMarker.SEVERITY_ERROR;
    } else if (GeneralConstants.WARNING.equals(reportLevel)) {
        errorLevel = IMarker.SEVERITY_WARNING;
    } else {
        return;
    }
    for (int i = 0; i < listener.getErrorsStored().size(); i++) {
        // Markers are shifted by one to account for the opening quote of the attribute text.
        Location temp = new Location(location);
        temp.setOffset(temp.getOffset() + 1);
        ParserMarkerSupport.createOnTheFlyMixedMarker((IFile) location.getFile(),
                listener.getErrorsStored().get(i), errorLevel, temp);
    }
}

From source file:org.eclipse.titan.designer.parsers.ttcn3parser.ConditionalTransition.java

License:Open Source License

/**
 * Adds a new lexer to the lexer stack to read tokens from the included file.
 *
 * @param ppDirective the #include preprocessor directive; its {@code str} field holds the
 *                    file name parameter of the directive
 */
private void processIncludeDirective(PreprocessorDirective ppDirective) {
    if (ppDirective.str == null || "".equals(ppDirective.str)) {
        TITANMarker marker = new TITANMarker("File name was not provided", ppDirective.line, -1, -1,
                IMarker.SEVERITY_ERROR, IMarker.PRIORITY_NORMAL);
        unsupportedConstructs.add(marker);
        return;
    }
    IFile includedFile = GlobalParser.getProjectSourceParser(actualFile.getProject())
            .getTTCN3IncludeFileByName(ppDirective.str);
    if (includedFile == null) {
        TITANMarker marker = new TITANMarker(
                MessageFormat.format("Included file `{0}'' could not be found", ppDirective.str),
                ppDirective.line, -1, -1, IMarker.SEVERITY_ERROR, IMarker.PRIORITY_NORMAL);
        unsupportedConstructs.add(marker);
        return;
    }
    // check extension
    if (!GlobalParser.TTCNIN_EXTENSION.equals(includedFile.getFileExtension())) {
        TITANMarker marker = new TITANMarker(
                MessageFormat.format("File `{0}'' does not have the `{1}'' extension", ppDirective.str,
                        GlobalParser.TTCNIN_EXTENSION),
                ppDirective.line, -1, -1, IMarker.SEVERITY_WARNING, IMarker.PRIORITY_NORMAL);
        warnings.add(marker);
    }
    // check if the file is already loaded into an editor
    String code = null;
    if (EditorTracker.containsKey(includedFile)) {
        List<ISemanticTITANEditor> editors = EditorTracker.getEditor(includedFile);
        ISemanticTITANEditor editor = editors.get(0);
        IDocument document = editor.getDocument();
        code = document.get();
    }
    // create lexer and set it up
    Reader reader = null;
    CharStream charStream = null;
    Ttcn3Lexer lexer = null;
    int rootInt;
    if (code != null) {
        // editor contents are available: lex the in-memory text
        reader = new StringReader(code);
        charStream = new UnbufferedCharStream(reader);
        lexer = new Ttcn3Lexer(charStream);
        rootInt = code.length();
    } else {
        try {
            InputStreamReader temp = new InputStreamReader(includedFile.getContents());
            if (!includedFile.getCharset().equals(temp.getEncoding())) {
                // reopen with the file's declared charset when it differs from the default
                try {
                    temp.close();
                } catch (IOException e) {
                    ErrorReporter.logWarningExceptionStackTrace(e);
                }
                temp = new InputStreamReader(includedFile.getContents(), includedFile.getCharset());
            }

            reader = new BufferedReader(temp);
        } catch (CoreException e) {
            ErrorReporter.logExceptionStackTrace(e);
            return;
        } catch (UnsupportedEncodingException e) {
            ErrorReporter.logExceptionStackTrace(e);
            return;
        }
        charStream = new UnbufferedCharStream(reader);
        lexer = new Ttcn3Lexer(charStream);
        lexerListener = new TitanListener();
        lexer.removeErrorListeners(); // remove ConsoleErrorListener
        lexer.addErrorListener(lexerListener);

        IFileStore store;
        try {
            store = EFS.getStore(includedFile.getLocationURI());
        } catch (CoreException e) {
            ErrorReporter.logExceptionStackTrace(e);
            return;
        }
        IFileInfo fileInfo = store.fetchInfo();
        rootInt = (int) fileInfo.getLength();
    }
    // Copy token text into each token (unbuffered streams cannot serve getText() later).
    // Set once here for both branches; the per-branch calls were redundant and have been removed.
    lexer.setTokenFactory(new CommonTokenFactory(true));
    lexer.setTTCNPP();
    lexer.initRootInterval(rootInt);
    lexer.setActualFile(includedFile);
    // add the lexer to the stack of lexers
    tokenStreamStack.push(new TokenStreamData(lexer, includedFile, reader));
    if (parser != null) {
        parser.setActualFile(includedFile);
        parser.setLexer(lexer);
    }
    includedFiles.add(includedFile);
}

From source file:org.eclipse.titan.designer.parsers.ttcn3parser.TTCN3Analyzer.java

License:Open Source License

/**
 * Parse TTCN-3 file using ANTLR v4
 * @param aReader file to parse (cannot be null, closes aReader)
 * @param aFileLength file length
 * @param aEclipseFile Eclipse dependent resource file
 */
private void parse(final Reader aReader, final int aFileLength, IFile aEclipseFile) {
    CharStream charStream = new UnbufferedCharStream(aReader);
    Ttcn3Lexer lexer = new Ttcn3Lexer(charStream);

    lexer.setCommentTodo(true);
    // Copy token text into each token: an unbuffered char stream cannot serve getText() later.
    lexer.setTokenFactory(new CommonTokenFactory(true));
    lexer.initRootInterval(aFileLength);

    TitanListener lexerListener = new TitanListener();
    // remove ConsoleErrorListener
    lexer.removeErrorListeners();
    lexer.addErrorListener(lexerListener);

    // 1. Previously it was UnbufferedTokenStream(lexer), but it was changed to BufferedTokenStream, because UnbufferedTokenStream seems to be unusable. It is an ANTLR 4 bug.
    // Read this: https://groups.google.com/forum/#!topic/antlr-discussion/gsAu-6d3pKU
    // pr_PatternChunk[StringBuilder builder, boolean[] uni]:
    //   $builder.append($v.text); <-- exception is thrown here: java.lang.UnsupportedOperationException: interval 85..85 not in token buffer window: 86..341
    // 2. Changed from BufferedTokenStream to CommonTokenStream, otherwise tokens with "-> channel(HIDDEN)" are not filtered out in lexer.
    final CommonTokenStream tokenStream = new CommonTokenStream(lexer);

    Ttcn3Parser parser = new Ttcn3Parser(tokenStream);
    parser.setBuildParseTree(false);
    PreprocessedTokenStream preprocessor = null;

    // .ttcnpp files go through the TTCN-3 preprocessor token stream instead of the plain one
    if (aEclipseFile != null && GlobalParser.TTCNPP_EXTENSION.equals(aEclipseFile.getFileExtension())) {
        lexer.setTTCNPP();
        preprocessor = new PreprocessedTokenStream(lexer);
        preprocessor.setActualFile(aEclipseFile);
        if (aEclipseFile.getProject() != null) {
            preprocessor.setMacros(
                    PreprocessorSymbolsOptionsData.getTTCN3PreprocessorDefines(aEclipseFile.getProject()));
        }
        parser = new Ttcn3Parser(preprocessor);
        preprocessor.setActualLexer(lexer);
        preprocessor.setParser(parser);
    }

    if (aEclipseFile != null) {
        lexer.setActualFile(aEclipseFile);
        parser.setActualFile(aEclipseFile);
        parser.setProject(aEclipseFile.getProject());
    }

    parser.setLexer(lexer);
    // remove ConsoleErrorListener
    parser.removeErrorListeners();
    TitanListener parserListener = new TitanListener();
    parser.addErrorListener(parserListener);

    // This is added because of the following ANTLR 4 bug:
    // Memory Leak in PredictionContextCache #499
    // https://github.com/antlr/antlr4/issues/499
    DFA[] decisionToDFA = parser.getInterpreter().decisionToDFA;
    parser.setInterpreter(
            new ParserATNSimulator(parser, parser.getATN(), decisionToDFA, new PredictionContextCache()));

    //try SLL mode
    try {
        parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
        parser.pr_TTCN3File();
        warnings = parser.getWarnings();
        mErrorsStored = lexerListener.getErrorsStored();
        mErrorsStored.addAll(parserListener.getErrorsStored());
    } catch (RecognitionException e) {
        // quit SLL mode; the LL retry below may still succeed
    }

    if (!warnings.isEmpty() || !mErrorsStored.isEmpty()) {
        //SLL mode might have failed, try LL mode
        try {
            // NOTE(review): this re-wraps the same aReader that the SLL pass already consumed;
            // unless the reader supports re-reading, the LL pass may see no input — verify.
            CharStream charStream2 = new UnbufferedCharStream(aReader);
            lexer.setInputStream(charStream2);
            //lexer.reset();
            parser.reset();
            parserListener.reset();
            parser.getInterpreter().setPredictionMode(PredictionMode.LL);
            parser.pr_TTCN3File();
            warnings = parser.getWarnings();
            mErrorsStored = lexerListener.getErrorsStored();
            mErrorsStored.addAll(parserListener.getErrorsStored());
        } catch (RecognitionException e) {
            // intentionally ignored: errors are already collected by the listeners
        }
    }

    unsupportedConstructs = parser.getUnsupportedConstructs();
    rootInterval = lexer.getRootInterval();
    actualTtc3Module = parser.getModule();
    if (preprocessor != null) {
        // if the file was preprocessed
        mErrorsStored.addAll(preprocessor.getErrorStorage());
        warnings.addAll(preprocessor.getWarnings());
        unsupportedConstructs.addAll(preprocessor.getUnsupportedConstructs());
        if (actualTtc3Module != null) {
            actualTtc3Module.setIncludedFiles(preprocessor.getIncludedFiles());
            actualTtc3Module.setInactiveCodeLocations(preprocessor.getInactiveCodeLocations());
        }
    }
    //TODO: empty mErrorsStored not to store errors from the previous parse round in case of exception

    try {
        aReader.close();
    } catch (IOException e) {
        // intentionally ignored: nothing useful can be done if closing the reader fails
    }
}