Example usage for edu.stanford.nlp.trees GrammaticalStructure typedDependencies

List of usage examples for edu.stanford.nlp.trees GrammaticalStructure typedDependencies

Introduction

In this page you can find the example usage for edu.stanford.nlp.trees GrammaticalStructure typedDependencies.

Prototype

List typedDependencies

To view the source code for edu.stanford.nlp.trees GrammaticalStructure typedDependencies, click the Source Link below.

Click Source Link

Usage

From source file:ConstituencyParse.java

License:Apache License

/**
 * Computes, for each token, the index of its governor in the basic typed-dependency
 * graph of the given parse tree.
 *
 * @param tree   constituency parse of the sentence
 * @param tokens tokens of the sentence; the result is parallel to this list
 * @return array where entry i holds the 1-based index of token i's governor,
 *         0 if the governor is the artificial root, or -1 if token i never
 *         appears as a dependent
 */
public int[] depTreeParents(Tree tree, List<HasWord> tokens) {
    GrammaticalStructure gs = gsf.newGrammaticalStructure(tree);
    Collection<TypedDependency> tdl = gs.typedDependencies();
    int len = tokens.size();
    int[] parents = new int[len];
    for (int i = 0; i < len; i++) {
        // -1 marks "no parent assigned"; tokens never mentioned as a
        // dependent keep this value.
        parents[i] = -1;
    }

    for (TypedDependency td : tdl) {
        // Dependency indices are 1-based; the artificial root has index 0,
        // so storing the governor index directly lets 0 denote the root.
        int child = td.dep().index();
        int parent = td.gov().index();
        // BUG FIX: guard against dependent indices outside the token range
        // (e.g. copy nodes introduced by some dependency representations),
        // which previously caused ArrayIndexOutOfBoundsException.
        if (child >= 1 && child <= len) {
            parents[child - 1] = parent;
        }
    }

    return parents;
}

From source file:de.tudarmstadt.ukp.dkpro.core.stanfordnlp.StanfordDependencyConverter.java

License:Open Source License

/**
 * Creates dependency annotations in the CAS from the given parse tree, using the
 * dependency variant selected by {@code mode}.
 */
protected void doCreateDependencyTags(JCas aJCas, TreebankLanguagePack aLP, Tree parseTree,
        List<Token> tokens) {
    // Build the grammatical structure; bail out quietly if the language pack
    // has no dependency support (the model provider already warned about it).
    final GrammaticalStructure gs;
    try {
        gs = aLP.grammaticalStructureFactory(aLP.punctuationWordRejectFilter(), aLP.typedDependencyHeadFinder())
                .newGrammaticalStructure(parseTree);
    } catch (UnsupportedOperationException e) {
        return;
    }

    // Select the dependency representation requested by the component's mode.
    Collection<TypedDependency> deps = null;
    switch (mode) {
    case BASIC:
        deps = gs.typedDependencies();
        break;
    case NON_COLLAPSED:
        deps = gs.allTypedDependencies();
        break;
    case COLLAPSED_WITH_EXTRA:
        deps = gs.typedDependenciesCollapsed(true);
        break;
    case COLLAPSED:
        deps = gs.typedDependenciesCollapsed(false);
        break;
    case CC_PROPAGATED:
        deps = gs.typedDependenciesCCprocessed(true);
        break;
    case CC_PROPAGATED_NO_EXTRA:
        deps = gs.typedDependenciesCCprocessed(false);
        break;
    case TREE:
        deps = gs.typedDependenciesCollapsedTree();
        break;
    }

    for (TypedDependency dep : deps) {
        int govIdx = dep.gov().index();
        if (govIdx == 0) {
            // Skip relations headed by the artificial ROOT-0 node, which is
            // not a token at all.
            continue;
        }
        Token governor = tokens.get(govIdx - 1);
        Token dependent = tokens.get(dep.dep().index() - 1);
        StanfordAnnotator.createDependencyAnnotation(aJCas, dep.reln(), governor, dependent);
    }
}

From source file:de.tudarmstadt.ukp.dkpro.core.stanfordnlp.StanfordParser.java

License:Open Source License

/**
 * Derives dependency annotations from the given parse tree and registers them via
 * the annotator, using the dependency variant selected by {@code mode}.
 */
protected void doCreateDependencyTags(ParserGrammar aParser, StanfordAnnotator sfAnnotator, Tree parseTree,
        List<Token> tokens) {
    final GrammaticalStructure grammaticalStructure;
    try {
        TreebankLanguagePack langPack = aParser.getTLPParams().treebankLanguagePack();
        grammaticalStructure = langPack
                .grammaticalStructureFactory(langPack.punctuationWordRejectFilter(),
                        langPack.typedDependencyHeadFinder())
                .newGrammaticalStructure(parseTree);
    } catch (UnsupportedOperationException e) {
        // Dependencies unsupported for this model; the model provider already
        // warned, so simply skip dependency creation.
        return;
    }

    // Pick the dependency representation matching the configured mode.
    Collection<TypedDependency> selected = null;
    switch (mode) {
    case BASIC:
        selected = grammaticalStructure.typedDependencies();
        break;
    case NON_COLLAPSED:
        selected = grammaticalStructure.allTypedDependencies();
        break;
    case COLLAPSED_WITH_EXTRA:
        selected = grammaticalStructure.typedDependenciesCollapsed(true);
        break;
    case COLLAPSED:
        selected = grammaticalStructure.typedDependenciesCollapsed(false);
        break;
    case CC_PROPAGATED:
        selected = grammaticalStructure.typedDependenciesCCprocessed(true);
        break;
    case CC_PROPAGATED_NO_EXTRA:
        selected = grammaticalStructure.typedDependenciesCCprocessed(false);
        break;
    case TREE:
        selected = grammaticalStructure.typedDependenciesCollapsedTree();
        break;
    }

    for (TypedDependency typedDep : selected) {
        int governorIndex = typedDep.gov().index();
        if (governorIndex == 0) {
            // ROOT-0 is an artificial node with no corresponding token; skip it.
            continue;
        }
        Token governor = tokens.get(governorIndex - 1);
        Token dependent = tokens.get(typedDep.dep().index() - 1);
        sfAnnotator.createDependencyAnnotation(typedDep.reln(), governor, dependent);
    }
}

From source file:edu.albany.cubism.util.StanfordChineseParser.java

/**
 * Parses the given sentence, printing the Penn-Treebank structure and the typed
 * dependencies, and returns both as a {@link ParserParts}.
 *
 * NOTE(review): on a parse failure the exception is swallowed and the method
 * falls through to read the fields {@code t}/{@code tdl}, which may be null on
 * the first call or stale from a previous sentence — confirm this is intended.
 */
public ParserParts parse(String sentence) {
    try {
        System.out.println("--------- Start Parsing ---------");

        System.out.println("Parse sentence: " + sentence);
        // Blank input: nothing to parse, return an empty result.
        if (sentence.trim().length() == 0) {
            return new ParserParts("", "");
        }
        List tokens = tf.getTokenizer(new StringReader(sentence)).tokenize();
        t = (Tree) lp.apply(tokens);
        System.out.println("penn Structure: ");
        t.pennPrint();
        GrammaticalStructure gs = gsf.newGrammaticalStructure(t);
        // Basic (uncollapsed) typed dependencies.
        tdl = gs.typedDependencies();
        System.out.println("Relationship: " + tdl);
        System.out.println("--------- End Parsing --------- " + count);
    } catch (Exception e) {
        System.out.println("count is while exception: " + sentence);
        e.printStackTrace();
    }
    return new ParserParts(tdl.toString(), t.toString());
}

From source file:edu.nus.comp.nlp.stanford.UtilParser.java

License:Open Source License

/**
 * Builds a dependency tree from a constituency parse tree, keeping punctuation.
 */
public static Tree getDepTree(Tree parseTree) {
    // First pass: keep punctuation-attached dependencies (the collapsed
    // variants would eat prepositions etc., hence plain typedDependencies()).
    GrammaticalStructure withPunct = new EnglishGrammaticalStructure(parseTree,
            new PennTreebankLanguagePack().punctuationWordAcceptFilter());
    Collection<TypedDependency> deps = withPunct.typedDependencies();
    // Second pass: merge in the dependencies from the default filter as well.
    GrammaticalStructure plain = new EnglishGrammaticalStructure(parseTree);
    deps.addAll(plain.typedDependencies());
    return makeTreeRobust(deps);
}

From source file:edu.umn.biomedicus.gpl.stanford.parser.StanfordDependencyParserModel.java

License:Open Source License

/**
 * Parses the tokens into a grammatical structure and renders its basic typed
 * dependencies as a string.
 */
public String parseSentence(List<ParseToken> tokens, List<PosTag> posTags) {
    return parseToGrammaticalStructure(tokens, posTags).typedDependencies().toString();
}

From source file:Engines.Test.StanfordParser.TreeHandling.java

License:Open Source License

/**
 * Extracts typed dependencies from a constituency parse tree, including
 * punctuation-attached relations.
 */
public static Collection<TypedDependency> getDeps(Tree parseTree) {
    // Use the punctuation-accepting filter first; the collapsed variants would
    // eat prepositions etc., so plain typedDependencies() is used throughout.
    GrammaticalStructure punctuationAware = new EnglishGrammaticalStructure(parseTree,
            new PennTreebankLanguagePack().punctuationWordAcceptFilter());
    Collection<TypedDependency> dependencies = punctuationAware.typedDependencies();
    // Merge in the dependencies produced with the default filter as well.
    GrammaticalStructure standard = new EnglishGrammaticalStructure(parseTree);
    dependencies.addAll(standard.typedDependencies());
    return dependencies;
}

From source file:gate.stanford.DependencyMode.java

License:Open Source License

/**
 * Returns the typed dependencies of {@code gs} in the representation selected by
 * {@code mode}, or {@code null} for an unrecognized mode.
 *
 * @param includeExtras only consulted for the Typed, TypedCollapsed and
 *        TypedCCprocessed variants; AllTyped ignores it
 */
protected static Collection<TypedDependency> getDependencies(GrammaticalStructure gs, DependencyMode mode,
        boolean includeExtras) {
    if (mode.equals(Typed)) {
        return gs.typedDependencies(includeExtras);
    }
    if (mode.equals(AllTyped)) {
        return gs.allTypedDependencies();
    }
    if (mode.equals(TypedCollapsed)) {
        return gs.typedDependenciesCollapsed(includeExtras);
    }
    if (mode.equals(TypedCCprocessed)) {
        return gs.typedDependenciesCCprocessed(includeExtras);
    }
    // Unknown mode: preserve the original behavior of returning null.
    return null;
}

From source file:gov.llnl.ontology.text.parse.StanfordParser.java

License:Open Source License

/**
 * Parses a tokenized sentence into a list of dependency-tree nodes.
 * Returns an empty list when the sentence is empty, longer than 100 tokens,
 * or fails to parse.
 */
private List<SimpleDependencyTreeNode> parseTokens(String header, List<HasWord> sentence) {
    List<SimpleDependencyTreeNode> treeNodes = Lists.newArrayList();

    // Bail out for empty, overly long, or unparseable sentences.
    if (sentence.size() == 0 || sentence.size() > 100 || !parser.parse(sentence))
        return treeNodes;

    // Best parse plus POS-tagged surface forms for the sentence.
    Tree parseTree = parser.getBestParse();
    List<TaggedWord> taggedWords = parseTree.taggedYield();

    // Convert the constituency tree into typed-dependency links.
    GrammaticalStructure structure = gsf.newGrammaticalStructure(parseTree);

    List<Link> dependencyLinks = Lists.newArrayList();

    for (TypedDependency typedDep : structure.typedDependencies()) {
        // Dependency indices are 1-based relative to the tagged yield.
        int dependentIdx = typedDep.dep().index();
        int governorIdx = typedDep.gov().index();
        TaggedWord tagged = taggedWords.get(dependentIdx - 1);

        treeNodes.add(new SimpleDependencyTreeNode(tagged.word(), tagged.tag(), dependentIdx));
        dependencyLinks.add(new Link(dependentIdx, typedDep.reln().toString(), governorIdx));
    }

    Link.addLinksToTree(treeNodes, dependencyLinks);
    return treeNodes;
}

From source file:ipgraph.datastructure.DTree.java

License:Open Source License

/** **************************************************************
 * Builds a dependency tree from a plain sentence: constituency-parse it,
 * convert to CoNLL-X typed dependencies, then load that as a DTree.
 */
public static DTree buildTree(String s) {
    Tree parseTree = new StanfordPCFGParser("", false).getLexicalizedParser().parse(s);

    // false => keep copula verbs as head
    SemanticHeadFinder headFinder = new SemanticHeadFinder(false);
    GrammaticalStructure structure = new EnglishGrammaticalStructure(parseTree, string -> true, headFinder, true);

    // notes: typedDependencies() (the basic representation) is suggested here.
    String conllx = EnglishGrammaticalStructure.dependenciesToString(structure, structure.typedDependencies(),
            parseTree, true, true);

    return LangTools.getDTreeFromCoNLLXString(conllx, true);
}