Example usage for org.antlr.v4.runtime UnbufferedCharStream UnbufferedCharStream

List of usage examples for org.antlr.v4.runtime UnbufferedCharStream UnbufferedCharStream

Introduction

On this page you can find example usage for org.antlr.v4.runtime UnbufferedCharStream UnbufferedCharStream.

Prototype

public UnbufferedCharStream(Reader input) 

Source Link

Usage

From source file:ai.grakn.graql.internal.parser.QueryParserImpl.java

License:Open Source License

/**
 * Lazily parses a reader containing several queries into a stream of queries.
 *
 * @param reader a reader representing several queries
 * @return a lazy stream of queries, parsed and consumed one at a time
 */
@Override
public <T extends Query<?>> Stream<T> parseList(Reader reader) {
    // An unbuffered char stream avoids loading the entire input into memory.
    UnbufferedCharStream charStream = new UnbufferedCharStream(reader);
    GraqlErrorListener errorListener = GraqlErrorListener.withoutQueryString();
    GraqlLexer lexer = createLexer(charStream, errorListener);

    /*
    We tell the lexer to copy the text into each generated token.
    Normally when calling `Token#getText`, it will look into the underlying `TokenStream` and call
    `TokenStream#size` to check it is in-bounds. However, `UnbufferedTokenStream#size` is not supported
    (because then it would have to read the entire input). To avoid this issue, we set this flag which will
    copy over the text into each `Token`, s.t. `Token#getText` will just look up the copied text field.
    */
    lexer.setTokenFactory(new CommonTokenFactory(true));

    // Use an unbuffered token stream so we can handle extremely large input strings
    UnbufferedTokenStream tokenStream = new UnbufferedTokenStream(ChannelTokenSource.of(lexer));

    GraqlParser parser = createParser(tokenStream, errorListener);

    /*
    The "bail" error strategy prevents us reading all the way to the end of the input, e.g.

    ```
    match $x isa person; insert $x has name "Bob"; match $x isa movie; get;
                                                   ^
    ```

    In this example, when ANTLR reaches the indicated `match`, it considers two possibilities:

    1. this is the end of the query
    2. the user has made a mistake. Maybe they accidentally pasted the `match` here.

    Because of case 2, ANTLR will parse beyond the `match` in order to produce a more helpful error message.
    This causes memory issues for very large queries, so we use the simpler "bail" strategy that will
    immediately stop when it hits `match`.
    */
    parser.setErrorHandler(new BailErrorStrategy());

    // This is a lazy iterator that will only consume a single query at a time, without parsing any further.
    // This means it can pass arbitrarily long streams of queries in constant memory!
    Iterable<T> queryIterator = () -> new AbstractIterator<T>() {
        @Nullable
        @Override
        protected T computeNext() {
            // Peek one token ahead to decide whether any input remains.
            int latestToken = tokenStream.LA(1);
            if (latestToken == Token.EOF) {
                endOfData();
                return null;
            } else {
                // This will parse and consume a single query, even if it doesn't reach an EOF
                // When we next run it, it will start where it left off in the stream
                return (T) QUERY.parse(parser, errorListener);
            }
        }
    };

    return StreamSupport.stream(queryIterator.spliterator(), false);
}

From source file:edu.clemson.cs.rsrg.typeandpopulate.utilities.HardCoded.java

License:Open Source License

/**
 * <p>This method establishes all built-in relationships of the symbol table.</p>
 *
 * @param g The current type graph.
 * @param b The current scope repository builder.
 */
public static void addBuiltInRelationships(TypeGraph g, MathSymbolTableBuilder b) {
    try {
        // YS: Everything we build here could be written in a file called Cls_Theory,
        // but since right now the whole math theory files and type system is still
        // being hashed out, we hard code these. At some point someone should revisit
        // this and see if it can be moved to a physical file.
        // The empty UnbufferedCharStream stands in for the (nonexistent) source file.
        Location classTheoryLoc = new Location(
                new ResolveFile(new ResolveFileBasicInfo("Cls_Theory", ""), ModuleType.THEORY,
                        new UnbufferedCharStream(new StringReader("")), null, new ArrayList<String>(), ""),
                0, 0);
        ModuleDec module = new PrecisModuleDec(classTheoryLoc.clone(),
                new PosSymbol(classTheoryLoc.clone(), "Cls_Theory"), new ArrayList<ModuleParameterDec>(),
                new ArrayList<UsesItem>(), new ArrayList<Dec>(),
                new LinkedHashMap<ResolveFileBasicInfo, Boolean>());
        ScopeBuilder s = b.startModuleScope(module);

        // Since adding a binding requires something of ResolveConceptualElement,
        // we associate everything with a VarExp.
        VarExp v = new VarExp(classTheoryLoc.clone(), null,
                new PosSymbol(classTheoryLoc.clone(), "Cls_Theory"));

        // Built-in functions
        // NOTE(review): MTFunction appears to take (graph, range, domain...) —
        // e.g. the binding for "f : D2 -> R2" below is MTFunction(g, R2, D2).
        s.addBinding("Instance_Of", v, new MTFunction(g, g.BOOLEAN, g.CLS, g.ENTITY));
        s.addBinding("Powerclass", v, g.POWERCLASS);
        s.addBinding("union", v, g.UNION);
        s.addBinding("intersection", v, g.INTERSECT);
        s.addBinding("->", v, g.CLS_FUNCTION);
        s.addBinding("*", v, g.CLS_CROSS);

        // This is just a hard-coded version of this theoretical type theorem
        // that can't actually appear in a theory because it won't type-check
        // (it requires itself to typecheck):

        //Type Theorem Function_Subtypes:
        //   For all D1, R1 : Cls,
        //   For all D2 : Powerclass(D1),
        //   For all R2 : Powerclass(R1),
        //   For all f : D2 -> R2,
        //       f : D1 -> R1;
        VarExp typeTheorem1 = new VarExp(classTheoryLoc.clone(), null,
                new PosSymbol(classTheoryLoc.clone(), "Function_Subtypes"));
        ScopeBuilder typeTheorem1Scope = b.startScope(typeTheorem1);

        // Various bindings
        typeTheorem1Scope.addBinding("D1", Quantification.UNIVERSAL, v, g.CLS);
        typeTheorem1Scope.addBinding("R1", Quantification.UNIVERSAL, v, g.CLS);
        typeTheorem1Scope.addBinding("D2", Quantification.UNIVERSAL, v,
                new MTPowerclassApplication(g, new MTNamed(g, "D1")));
        typeTheorem1Scope.addBinding("R2", Quantification.UNIVERSAL, v,
                new MTPowerclassApplication(g, new MTNamed(g, "R1")));
        typeTheorem1Scope.addBinding("f", Quantification.UNIVERSAL, v,
                new MTFunction(g, new MTNamed(g, "R2"), new MTNamed(g, "D2")));

        // VarExp referring to function f
        VarExp f = new VarExp(classTheoryLoc.clone(), null, new PosSymbol(classTheoryLoc.clone(), "f"),
                Quantification.UNIVERSAL);
        f.setMathType(new MTFunction(g, new MTNamed(g, "R2"), new MTNamed(g, "D2")));

        // Add relationship and close typeTheorem1 scope
        g.addRelationship(f, new MTFunction(g, new MTNamed(g, "R1"), new MTNamed(g, "D1")), null,
                typeTheorem1Scope);
        b.endScope();

        //Type Theorem Card_Prod_Thingy:
        //   For all T1, T2 : Cls,
        //   For all R1 : Powerclass(T1),
        //   For all R2 : Powerclass(T2),
        //   For all r1 : R1,
        //   For all r2 : R2,
        //       (r1, r2) : (T1 * T2);
        VarExp typeTheorem2 = new VarExp(classTheoryLoc.clone(), null,
                new PosSymbol(classTheoryLoc.clone(), "Card_Prod_Thingy"));
        ScopeBuilder typeTheorem2Scope = b.startScope(typeTheorem2);

        // Various bindings
        typeTheorem2Scope.addBinding("T1", Quantification.UNIVERSAL, v, g.CLS);
        typeTheorem2Scope.addBinding("T2", Quantification.UNIVERSAL, v, g.CLS);
        typeTheorem2Scope.addBinding("R1", Quantification.UNIVERSAL, v,
                new MTPowerclassApplication(g, new MTNamed(g, "T1")));
        typeTheorem2Scope.addBinding("R2", Quantification.UNIVERSAL, v,
                new MTPowerclassApplication(g, new MTNamed(g, "T2")));
        typeTheorem2Scope.addBinding("r1", Quantification.UNIVERSAL, v, new MTNamed(g, "R1"));
        typeTheorem2Scope.addBinding("r2", Quantification.UNIVERSAL, v, new MTNamed(g, "R2"));

        // Binding type: the cartesian product (T1 * T2)
        List<MTCartesian.Element> bindingTypeElements = new LinkedList<>();
        bindingTypeElements.add(new MTCartesian.Element(new MTNamed(g, "T1")));
        bindingTypeElements.add(new MTCartesian.Element(new MTNamed(g, "T2")));
        MTCartesian bindingType = new MTCartesian(g, bindingTypeElements);

        // Fields inside the tuple
        List<Exp> tupleExps = new LinkedList<>();
        VarExp r1 = new VarExp(classTheoryLoc.clone(), null, new PosSymbol(classTheoryLoc.clone(), "r1"),
                Quantification.UNIVERSAL);
        r1.setMathType(new MTNamed(g, "R1"));
        tupleExps.add(r1);

        VarExp r2 = new VarExp(classTheoryLoc.clone(), null, new PosSymbol(classTheoryLoc.clone(), "r2"),
                Quantification.UNIVERSAL);
        r2.setMathType(new MTNamed(g, "R2"));
        tupleExps.add(r2);

        // Create the tuple (r1, r2) and create its type (R1 * R2)
        TupleExp tupleExp = new TupleExp(classTheoryLoc.clone(), tupleExps);
        List<MTCartesian.Element> fieldTypes = new LinkedList<>();
        fieldTypes.add(new MTCartesian.Element(new MTNamed(g, "R1")));
        fieldTypes.add(new MTCartesian.Element(new MTNamed(g, "R2")));
        MTCartesian tupleType = new MTCartesian(g, fieldTypes);
        tupleExp.setMathType(tupleType);

        // Add relationship and close typeTheorem2 scope
        g.addRelationship(tupleExp, bindingType, null, typeTheorem2Scope);
        b.endScope();

        // Close module scope
        b.endScope();
    } catch (DuplicateSymbolException dse) {
        //Not possible--we're the first ones to add anything
        throw new RuntimeException(dse);
    }
}

From source file:edu.clemson.cs.rsrg.typeandpopulate.utilities.HardCoded.java

License:Open Source License

/**
 * <p>This method establishes all built-in symbols of the symbol table.</p>
 *
 * @param g The current type graph.
 * @param b The current scope repository builder.
 */
public static void addBuiltInSymbols(TypeGraph g, ScopeBuilder b) {
    try {
        // YS: Everything we build here lives in a global namespace. This means
        // that we don't need to import anything to access any of these symbols.
        // Since adding a binding requires something of ResolveConceptualElement,
        // we associate everything with a VarExp.
        // The empty UnbufferedCharStream stands in for the (nonexistent) source file.
        Location globalSpaceLoc = new Location(
                new ResolveFile(new ResolveFileBasicInfo("Global", ""), ModuleType.THEORY,
                        new UnbufferedCharStream(new StringReader("")), null, new ArrayList<String>(), ""),
                0, 0);
        VarExp v = new VarExp(globalSpaceLoc, null, new PosSymbol(globalSpaceLoc, "Global"));

        // built-in symbols
        b.addBinding("Entity", v, g.CLS, g.ENTITY);
        b.addBinding("Element", v, g.CLS, g.ELEMENT);
        b.addBinding("Cls", v, g.CLS, g.CLS);
        b.addBinding("SSet", v, g.CLS, g.SSET);
        b.addBinding("B", v, g.SSET, g.BOOLEAN);
        b.addBinding("Empty_Class", v, g.CLS, g.EMPTY_CLASS);
        b.addBinding("Empty_Set", v, g.SSET, g.EMPTY_SET);
        b.addBinding("true", v, g.BOOLEAN);
        b.addBinding("false", v, g.BOOLEAN);

        // built-in symbols that are defined as a function
        // These must be built in for our compiler to function correctly.
        b.addBinding("Powerset", v, g.POWERSET);
        b.addBinding("->", v, g.SSET_FUNCTION);
        b.addBinding("*", v, g.SSET_CROSS);
        b.addBinding("not", v, new MTFunction(g, g.BOOLEAN, g.BOOLEAN));
        b.addBinding("and", v, new MTFunction(g, g.BOOLEAN, g.BOOLEAN, g.BOOLEAN));
        b.addBinding("or", v, new MTFunction(g, g.BOOLEAN, g.BOOLEAN, g.BOOLEAN));
        b.addBinding("=", v, new MTFunction(g, g.BOOLEAN, g.ENTITY, g.ENTITY));
        b.addBinding("/=", v, new MTFunction(g, g.BOOLEAN, g.ENTITY, g.ENTITY));
    } catch (DuplicateSymbolException dse) {
        //Not possible--we're the first ones to add anything
        throw new RuntimeException(dse);
    }
}

From source file:io.mindmaps.graql.internal.parser.QueryParser.java

License:Open Source License

/**
 * @param inputStream a stream representing a list of patterns
 * @return a stream of patterns/*  w  w w.  j  av  a2 s . c  o m*/
 */
public Stream<Pattern> parsePatterns(InputStream inputStream) {
    GraqlLexer lexer = new GraqlLexer(new UnbufferedCharStream(inputStream));
    lexer.setTokenFactory(new CommonTokenFactory(true));
    UnbufferedTokenStream tokens = new UnbufferedTokenStream(lexer);

    // Create an iterable that will keep parsing until EOF
    Iterable<Pattern> iterable = () -> new Iterator<Pattern>() {

        private Pattern pattern = null;

        private Optional<Pattern> getNext() {

            if (pattern == null) {
                if (tokens.get(tokens.index()).getType() == Token.EOF) {
                    return Optional.empty();
                }

                pattern = parseQueryFragment(GraqlParser::patternSep, QueryVisitor::visitPatternSep, tokens);
            }
            return Optional.of(pattern);
        }

        @Override
        public boolean hasNext() {
            return getNext().isPresent();
        }

        @Override
        public Pattern next() {
            Optional<Pattern> result = getNext();
            pattern = null;
            return result.orElseThrow(NoSuchElementException::new);
        }
    };

    return StreamSupport.stream(iterable.spliterator(), false);
}

From source file:io.mindmaps.graql.QueryParser.java

License:Open Source License

/**
 * Lazily parses the given input stream into a stream of patterns.
 *
 * @param inputStream the raw input containing zero or more patterns
 * @return a lazily-populated stream of parsed patterns
 */
public Stream<Pattern> parsePatternsStream(InputStream inputStream) {
    GraqlLexer lexer = new GraqlLexer(new UnbufferedCharStream(inputStream));
    // Tokens must carry their own text because the stream is unbuffered.
    lexer.setTokenFactory(new CommonTokenFactory(true));
    UnbufferedTokenStream tokens = new UnbufferedTokenStream(lexer);

    // Each call to iterator() produces a fresh one-pattern-at-a-time iterator.
    Iterable<Pattern> iterable = () -> new Iterator<Pattern>() {

        private Pattern lookahead = null;

        /** Ensures a pattern is buffered unless the stream is at EOF. */
        private Optional<Pattern> advance() {
            if (lookahead == null) {
                if (tokens.get(tokens.index()).getType() == Token.EOF) {
                    return Optional.empty();
                }
                lookahead = parseQueryFragment(GraqlParser::patternSep, QueryVisitor::visitPatternSep, tokens);
            }
            return Optional.of(lookahead);
        }

        @Override
        public boolean hasNext() {
            return advance().isPresent();
        }

        @Override
        public Pattern next() {
            Optional<Pattern> result = advance();
            lookahead = null;
            return result.orElseThrow(NoSuchElementException::new);
        }
    };

    return StreamSupport.stream(iterable.spliterator(), false);
}

From source file:org.chocosolver.parser.flatzinc.Flatzinc.java

License:Open Source License

/**
 * Parses a FlatZinc model from the given input stream into the target model.
 *
 * @param target the model to populate
 * @param data   the parsing data shared with the parser actions
 * @param is     the FlatZinc source to read
 */
public void parse(Model target, Datas data, InputStream is) {
    // Unbuffered char/token streams keep memory usage constant on large inputs.
    Flatzinc4Lexer lexer = new Flatzinc4Lexer(new UnbufferedCharStream(is));
    lexer.setTokenFactory(new CommonTokenFactory(true)); // copy text into each token
    Flatzinc4Parser parser = new Flatzinc4Parser(new UnbufferedTokenStream<CommonToken>(lexer));
    parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
    // The grammar actions build the model directly, so no parse tree is kept.
    parser.setBuildParseTree(false);
    parser.setTrimParseTree(false);
    parser.flatzinc_model(target, data, all, free);
}

From source file:org.eclipse.titan.common.parsers.cfg.CfgAnalyzer.java

License:Open Source License

/**
 * Parses the provided elements.
 * If the contents of an editor are to be parsed, then the file parameter is only used to report the errors to.
 *
 * @param file the file to parse
 * @param fileName the name of the file, to refer to.
 * @param code the contents of an editor, or null.
 */
public void directParse(final IFile file, final String fileName, final String code) {
    final Reader reader;
    final int fileLength;
    if (null != code) {
        // Editor contents take precedence over the file on disk.
        reader = new StringReader(code);
        fileLength = code.length();
    } else if (null != file) {
        try {
            // FIX: StandardCharsets has no "UTF8" constant; the correct name is UTF_8.
            reader = new BufferedReader(new InputStreamReader(file.getContents(), StandardCharsets.UTF_8));
            IFileStore store = EFS.getStore(file.getLocationURI());
            IFileInfo fileInfo = store.fetchInfo();
            fileLength = (int) fileInfo.getLength();
        } catch (CoreException e) {
            ErrorReporter.logExceptionStackTrace("Could not get the contents of `" + fileName + "'", e);
            return;
        }
    } else {
        ErrorReporter.INTERNAL_ERROR("CfgAnalyzer.directParse(): nothing to parse");
        return;
    }
    // NOTE(review): the reader is never closed here — confirm ownership before adding a close.

    final CharStream charStream = new UnbufferedCharStream(reader);
    CfgLexer lexer = new CfgLexer(charStream);
    lexer.setTokenFactory(new CommonTokenFactory(true)); // tokens carry their own text
    lexer.initRootInterval(fileLength);
    lexerListener = new TitanListener();
    lexer.removeErrorListeners(); // remove ConsoleErrorListener
    lexer.addErrorListener(lexerListener);

    // 1. Previously it was UnbufferedTokenStream(lexer), but it was changed to BufferedTokenStream, because UnbufferedTokenStream seems to be unusable. It is an ANTLR 4 bug.
    // Read this: https://groups.google.com/forum/#!topic/antlr-discussion/gsAu-6d3pKU
    // pr_PatternChunk[StringBuilder builder, boolean[] uni]:
    //   $builder.append($v.text); <-- exception is thrown here: java.lang.UnsupportedOperationException: interval 85..85 not in token buffer window: 86..341
    // 2. Changed from BufferedTokenStream to CommonTokenStream, otherwise tokens with "-> channel(HIDDEN)" are not filtered out in lexer.
    final CommonTokenStream tokenStream = new CommonTokenStream(lexer);
    final CfgParser parser = new CfgParser(tokenStream);
    parser.setActualFile(file);
    //parser tree is built by default
    parserListener = new TitanListener();
    parser.removeErrorListeners(); // remove ConsoleErrorListener
    parser.addErrorListener(parserListener);
    final ParserRuleContext parseTreeRoot = parser.pr_ConfigFile();

    mCfgParseResult = parser.getCfgParseResult();
    // manually add the result parse tree, and its corresponding token stream,
    // because they logically belong to here
    mCfgParseResult.setParseTreeRoot(parseTreeRoot);
    mCfgParseResult.setTokens(tokenStream.getTokens());

    // fill handlers
    moduleParametersHandler = parser.getModuleParametersHandler();
    testportParametersHandler = parser.getTestportParametersHandler();
    componentSectionHandler = parser.getComponentSectionHandler();
    groupSectionHandler = parser.getGroupSectionHandler();
    mcSectionHandler = parser.getMcSectionHandler();
    externalCommandsSectionHandler = parser.getExternalCommandsSectionHandler();
    executeSectionHandler = parser.getExecuteSectionHandler();
    includeSectionHandler = parser.getIncludeSectionHandler();
    orderedIncludeSectionHandler = parser.getOrderedIncludeSectionHandler();
    defineSectionHandler = parser.getDefineSectionHandler();
    loggingSectionHandler = parser.getLoggingSectionHandler();

    rootInterval = lexer.getRootInterval();
}

From source file:org.eclipse.titan.designer.AST.ASN1.definitions.SpecialASN1Module.java

License:Open Source License

/**
 * Parses the special internal assignments to build their semantic
 * representation.
 *
 * @param inputCode
 *                the code to parse.
 * @param identifier
 *                the identifier for the assignment to be created.
 *
 * @return the parsed assignment.
 */
public static ASN1Assignment parseSpecialInternalAssignment(final String inputCode,
        final Identifier identifier) {
    final CharStream charStream = new UnbufferedCharStream(new StringReader(inputCode));
    final Asn1Lexer lexer = new Asn1Lexer(charStream);
    lexer.setTokenFactory(new TokenWithIndexAndSubTokensFactory(true));

    // Replace the default ConsoleErrorListener with our own listener.
    lexer.removeErrorListeners();
    lexer.addErrorListener(new ASN1Listener());

    // Hide whitespace and comments from the parser.
    final ModuleLevelTokenStreamTracker tracker = new ModuleLevelTokenStreamTracker(lexer);
    tracker.discard(Asn1Lexer.WS);
    tracker.discard(Asn1Lexer.MULTILINECOMMENT);
    tracker.discard(Asn1Lexer.SINGLELINECOMMENT);

    final Asn1Parser parser = new Asn1Parser(tracker);
    parser.setBuildParseTree(false);
    parser.removeErrorListeners(); // remove ConsoleErrorListener
    parser.addErrorListener(new ASN1Listener(parser));

    final ASN1Assignment assignment = parser.pr_TITAN_special_Assignment(identifier).assignment;
    if (!parser.getErrorStorage().isEmpty()) {
        // These assignments are internal, so any parse error is a programming error.
        ErrorReporter.INTERNAL_ERROR(PARSINGFAILED);
        for (final SyntacticErrorStorage temp : parser.getErrorStorage()) {
            ErrorReporter.logError(temp.message);
        }
    }
    return assignment;
}

From source file:org.eclipse.titan.designer.AST.TTCN3.definitions.Definition.java

License:Open Source License

/**
 * Parses the specification text of the given attribute specification into an
 * erroneous attribute specification, reporting any syntactic problems as
 * markers on the specification's file.
 *
 * @param aAttrSpec the attribute specification whose text is parsed
 * @return the parsed erroneous attribute specification, or null if there is
 *         no specification text to parse
 */
private static ErroneousAttributeSpecification parseErrAttrSpecString(final AttributeSpecification aAttrSpec) {
    ErroneousAttributeSpecification returnValue = null;
    Location location = aAttrSpec.getLocation();
    String code = aAttrSpec.getSpecification();
    if (code == null) {
        return null;
    }
    // code must be transformed, according to
    // compiler2/ttcn3/charstring_la.l
    code = Ttcn3CharstringLexer.parseCharstringValue(code, location); // TODO
    Reader reader = new StringReader(code);
    CharStream charStream = new UnbufferedCharStream(reader);
    Ttcn3Lexer lexer = new Ttcn3Lexer(charStream);
    lexer.setTokenFactory(new CommonTokenFactory(true));
    // needs to be shifted by one because of the \" of the string
    lexer.setCharPositionInLine(0);

    // lexer and parser listener
    TitanListener parserListener = new TitanListener();
    // remove ConsoleErrorListener
    lexer.removeErrorListeners();
    lexer.addErrorListener(parserListener);

    // 1. Previously it was UnbufferedTokenStream(lexer), but it was changed to BufferedTokenStream, because UnbufferedTokenStream seems to be unusable. It is an ANTLR 4 bug.
    // Read this: https://groups.google.com/forum/#!topic/antlr-discussion/gsAu-6d3pKU
    // pr_PatternChunk[StringBuilder builder, boolean[] uni]:
    //   $builder.append($v.text); <-- exception is thrown here: java.lang.UnsupportedOperationException: interval 85..85 not in token buffer window: 86..341
    // 2. Changed from BufferedTokenStream to CommonTokenStream, otherwise tokens with "-> channel(HIDDEN)" are not filtered out in lexer.
    final CommonTokenStream tokenStream = new CommonTokenStream(lexer);

    Ttcn3Reparser parser = new Ttcn3Reparser(tokenStream);
    IFile file = (IFile) location.getFile();
    parser.setActualFile(file);
    parser.setOffset(location.getOffset() + 1);
    parser.setLine(location.getLine());

    // remove ConsoleErrorListener
    parser.removeErrorListeners();
    parser.addErrorListener(parserListener);

    MarkerHandler.markMarkersForRemoval(GeneralConstants.ONTHEFLY_SYNTACTIC_MARKER, location.getFile(),
            location.getOffset(), location.getEndOffset());

    returnValue = parser.pr_ErroneousAttributeSpec().errAttrSpec;
    List<SyntacticErrorStorage> errors = parser.getErrors();
    List<TITANMarker> warnings = parser.getWarnings();
    List<TITANMarker> unsupportedConstructs = parser.getUnsupportedConstructs();

    // add markers
    if (errors != null) {
        for (int i = 0; i < errors.size(); i++) {
            Location temp = new Location(location);
            // shifted by one to account for the leading \" of the string
            temp.setOffset(temp.getOffset() + 1);
            ParserMarkerSupport.createOnTheFlySyntacticMarker(file, errors.get(i), IMarker.SEVERITY_ERROR,
                    temp);
        }
    }
    if (warnings != null) {
        for (TITANMarker marker : warnings) {
            if (file.isAccessible()) {
                Location loc = new Location(file, marker.getLine(), marker.getOffset(), marker.getEndOffset());
                loc.reportExternalProblem(marker.getMessage(), marker.getSeverity(),
                        GeneralConstants.ONTHEFLY_SYNTACTIC_MARKER);
            }
        }
    }
    if (unsupportedConstructs != null) {
        for (TITANMarker marker : unsupportedConstructs) {
            if (file.isAccessible()) {
                Location loc = new Location(file, marker.getLine(), marker.getOffset(), marker.getEndOffset());
                loc.reportExternalProblem(marker.getMessage(), marker.getSeverity(),
                        GeneralConstants.ONTHEFLY_SYNTACTIC_MARKER);
            }
        }
    }
    return returnValue;
}

From source file:org.eclipse.titan.designer.editors.asn1editor.ASN1ReferenceParser.java

License:Open Source License

/**
 * Parses a textual reference into its semantic representation.
 *
 * @param file   the file the input belongs to (used for project/file context)
 * @param input  the text to parse as a reference
 * @param line   the line to report positions relative to
 * @param offset the offset to report positions relative to
 * @return the parsed reference
 */
private Reference parseReference(final IFile file, final String input, final int line, final int offset) {
    final CharStream charStream = new UnbufferedCharStream(new StringReader(input));
    final Asn1Lexer lexer = new Asn1Lexer(charStream);
    lexer.setTokenFactory(new TokenWithIndexAndSubTokensFactory(true));
    lexer.removeErrorListeners(); // drop the default ConsoleErrorListener
    lexer.addErrorListener(new ASN1Listener());

    // Strip whitespace and comments before the tokens reach the parser.
    final ModuleLevelTokenStreamTracker tracker = new ModuleLevelTokenStreamTracker(lexer);
    tracker.discard(Asn1Lexer.WS);
    tracker.discard(Asn1Lexer.MULTILINECOMMENT);
    tracker.discard(Asn1Lexer.SINGLELINECOMMENT);

    final Asn1Parser parser = new Asn1Parser(tracker);
    parser.setProject(file.getProject());
    parser.setActualFile(file);
    parser.setLine(line);
    parser.setOffset(offset);
    parser.setBuildParseTree(false);
    parser.removeErrorListeners(); // drop the default ConsoleErrorListener
    parser.addErrorListener(new ASN1Listener());

    return parser.pr_parseReference().reference;
}