Example usage for edu.stanford.nlp.trees GrammaticalStructure typedDependenciesCCprocessed

List of usage examples for edu.stanford.nlp.trees GrammaticalStructure typedDependenciesCCprocessed

Introduction

In this page you can find the example usage for edu.stanford.nlp.trees GrammaticalStructure typedDependenciesCCprocessed.

Prototype

@Deprecated
public List<TypedDependency> typedDependenciesCCprocessed(boolean includeExtras) 

Source Link

Usage

From source file:csav2.pkg0.ParserTagging.java

/**
 * Annotates the given text with the configured CoreNLP pipeline, then parses
 * each sentence and renders its CC-processed typed dependencies, one sentence
 * per line.
 *
 * @param text raw input text to analyse
 * @return the concatenated dependency lists, one "\n"-terminated line per sentence
 */
public String dependency(String text) {
    // StringBuilder instead of String += : avoids O(n^2) copying across sentences.
    StringBuilder output = new StringBuilder();
    Annotation document = new Annotation(text);
    try {
        pipeline.annotate(document);
    } catch (Exception e) {
        System.out.println("Exception while calling method annotate(Method dependency):" + e);
    }
    List<CoreMap> sentences = document.get(CoreAnnotations.SentencesAnnotation.class);
    // The tokenizer factory is stateless configuration; hoist it out of the loop.
    TokenizerFactory<CoreLabel> tokenizerFactory = PTBTokenizer.factory(new CoreLabelTokenFactory(), "");
    for (CoreMap sentence : sentences) {
        // Re-tokenize the sentence text and parse it with the lexicalized parser.
        List<CoreLabel> wordList = tokenizerFactory.getTokenizer(new StringReader(sentence.toString())).tokenize();
        Tree tree = lp.apply(wordList);
        GrammaticalStructure gs = gsf.newGrammaticalStructure(tree);
        // true -> include extra (non-tree) dependencies in the CC-processed output.
        Collection<TypedDependency> cc = gs.typedDependenciesCCprocessed(true);
        output.append(cc).append('\n');
    }
    return output.toString();
}

From source file:de.tudarmstadt.ukp.dkpro.core.stanfordnlp.StanfordDependencyConverter.java

License:Open Source License

/**
 * Converts the dependencies of the given parse tree into CAS dependency
 * annotations, honouring the configured {@code mode}.
 *
 * @param aJCas the CAS to write annotations into
 * @param aLP language pack used to build the grammatical structure
 * @param parseTree constituency parse to extract dependencies from
 * @param tokens tokens of the sentence, aligned with the tree leaves
 */
protected void doCreateDependencyTags(JCas aJCas, TreebankLanguagePack aLP, Tree parseTree,
        List<Token> tokens) {
    GrammaticalStructure grammaticalStructure;
    try {
        grammaticalStructure = aLP
                .grammaticalStructureFactory(aLP.punctuationWordRejectFilter(), aLP.typedDependencyHeadFinder())
                .newGrammaticalStructure(parseTree);
    } catch (UnsupportedOperationException e) {
        // We already warned in the model provider if dependencies are not supported, so here
        // we just do nothing and skip the dependencies.
        return;
    }

    // Select the dependency variant requested by the configured mode.
    Collection<TypedDependency> dependencies = null;
    switch (mode) {
    case BASIC:
        dependencies = grammaticalStructure.typedDependencies();
        break;
    case NON_COLLAPSED:
        dependencies = grammaticalStructure.allTypedDependencies();
        break;
    case COLLAPSED_WITH_EXTRA:
        dependencies = grammaticalStructure.typedDependenciesCollapsed(true);
        break;
    case COLLAPSED:
        dependencies = grammaticalStructure.typedDependenciesCollapsed(false);
        break;
    case CC_PROPAGATED:
        dependencies = grammaticalStructure.typedDependenciesCCprocessed(true);
        break;
    case CC_PROPAGATED_NO_EXTRA:
        dependencies = grammaticalStructure.typedDependenciesCCprocessed(false);
        break;
    case TREE:
        dependencies = grammaticalStructure.typedDependenciesCollapsedTree();
        break;
    }

    for (TypedDependency dependency : dependencies) {
        int governorIndex = dependency.gov().index();
        // Index 0 is the artificial ROOT node produced by CoreNLP; it has no
        // corresponding token, so relations anchored on it are skipped.
        if (governorIndex == 0) {
            continue;
        }
        Token governor = tokens.get(governorIndex - 1);
        Token dependent = tokens.get(dependency.dep().index() - 1);
        StanfordAnnotator.createDependencyAnnotation(aJCas, dependency.reln(), governor, dependent);
    }
}

From source file:de.tudarmstadt.ukp.dkpro.core.stanfordnlp.StanfordParser.java

License:Open Source License

/**
 * Extracts dependencies from the parse tree according to the configured
 * {@code mode} and hands each one to the annotator.
 *
 * @param aParser parser whose language pack builds the grammatical structure
 * @param sfAnnotator annotator that materialises the dependency annotations
 * @param parseTree constituency parse to extract dependencies from
 * @param tokens tokens of the sentence, aligned with the tree leaves
 */
protected void doCreateDependencyTags(ParserGrammar aParser, StanfordAnnotator sfAnnotator, Tree parseTree,
        List<Token> tokens) {
    GrammaticalStructure structure;
    try {
        TreebankLanguagePack langPack = aParser.getTLPParams().treebankLanguagePack();
        structure = langPack
                .grammaticalStructureFactory(langPack.punctuationWordRejectFilter(),
                        langPack.typedDependencyHeadFinder())
                .newGrammaticalStructure(parseTree);
    } catch (UnsupportedOperationException e) {
        // We already warned in the model provider if dependencies are not supported, so here
        // we just do nothing and skip the dependencies.
        return;
    }

    // Pick the dependency representation matching the configured mode.
    Collection<TypedDependency> typedDeps = null;
    switch (mode) {
    case BASIC:
        typedDeps = structure.typedDependencies();
        break;
    case NON_COLLAPSED:
        typedDeps = structure.allTypedDependencies();
        break;
    case COLLAPSED_WITH_EXTRA:
        typedDeps = structure.typedDependenciesCollapsed(true);
        break;
    case COLLAPSED:
        typedDeps = structure.typedDependenciesCollapsed(false);
        break;
    case CC_PROPAGATED:
        typedDeps = structure.typedDependenciesCCprocessed(true);
        break;
    case CC_PROPAGATED_NO_EXTRA:
        typedDeps = structure.typedDependenciesCCprocessed(false);
        break;
    case TREE:
        typedDeps = structure.typedDependenciesCollapsedTree();
        break;
    }

    for (TypedDependency typedDep : typedDeps) {
        int govIdx = typedDep.gov().index();
        int depIdx = typedDep.dep().index();
        // Governor index 0 denotes the artificial ROOT-0 node, which is not a
        // real token, so such relations are skipped.
        if (govIdx != 0) {
            sfAnnotator.createDependencyAnnotation(typedDep.reln(), tokens.get(govIdx - 1),
                    tokens.get(depIdx - 1));
        }
    }
}

From source file:DependencyParser.Parser.java

/**
 * Parses the given text, prints its CC-processed typed dependencies, writes
 * the Penn-style/collapsed-dependency rendering to a text file, and renders
 * the parse as an image.
 *
 * @param text the sentence(s) to parse
 */
public void CallParser(String text) // start of the main method

{
    // Keep the output directory in one place so the file and image paths agree.
    final String outputDir = "H:\\Thesis Development\\Thesis\\NLP\\src\\nlp\\";
    final String imagePath = outputDir + "image.png";
    try {
        TreebankLanguagePack tlp = new PennTreebankLanguagePack();
        GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
        LexicalizedParser lp = LexicalizedParser
                .loadModel("edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz");
        lp.setOptionFlags(new String[] { "-maxLength", "500", "-retainTmpSubcategories" });
        TokenizerFactory<CoreLabel> tokenizerFactory = PTBTokenizer.factory(new CoreLabelTokenFactory(), "");
        List<CoreLabel> wordList = tokenizerFactory.getTokenizer(new StringReader(text)).tokenize();
        Tree tree = lp.apply(wordList);

        GrammaticalStructure gs = gsf.newGrammaticalStructure(tree);
        Collection<TypedDependency> tdl = gs.typedDependenciesCCprocessed(true);
        System.out.println(tdl);

        // try-with-resources guarantees the writer is closed even if printing fails.
        try (PrintWriter pw = new PrintWriter(outputDir + "Text-Parsed.txt")) {
            TreePrint tp = new TreePrint("penn,typedDependenciesCollapsed");
            tp.printTree(tree, pw);
        }
        Main.writeImage(tree, tdl, imagePath, 3);
        // BUG FIX: the original asserted "image.png" relative to the working
        // directory, but the image is written to imagePath above.
        assert (new File(imagePath).exists());
    } catch (FileNotFoundException f) {
        // BUG FIX: this was previously swallowed silently, hiding e.g. a
        // missing output directory; log it like the generic handler below.
        Logger.getLogger(Parser.class.getName()).log(Level.SEVERE, null, f);
    } catch (Exception ex) {
        Logger.getLogger(Parser.class.getName()).log(Level.SEVERE, null, ex);
    }

}

From source file:Engines.Test.StanfordParser.TreeHandling.java

License:Open Source License

/**
 * Parses the given text and prints its CC-processed typed dependencies.
 *
 * @param text the sentence(s) to parse
 */
public static void test(String text) {
    TreebankLanguagePack tlp = new PennTreebankLanguagePack();
    GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
    LexicalizedParser lp = LexicalizedParser.loadModel("edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz");
    lp.setOptionFlags(new String[] { "-maxLength", "500", "-retainTmpSubcategories" });
    TokenizerFactory<CoreLabel> tokenizerFactory = PTBTokenizer.factory(new CoreLabelTokenFactory(), "");
    List<CoreLabel> wordList = tokenizerFactory.getTokenizer(new StringReader(text)).tokenize();
    Tree tree = lp.apply(wordList);
    GrammaticalStructure gs = gsf.newGrammaticalStructure(tree);
    Collection<TypedDependency> tdl = gs.typedDependenciesCCprocessed(true);
    // BUG FIX: the dependencies were computed and then discarded, leaving this
    // test method with no observable effect; report them as the other
    // parser examples do.
    System.out.println(tdl);
}

From source file:gate.stanford.DependencyMode.java

License:Open Source License

/**
 * Returns the typed dependencies of {@code gs} in the representation selected
 * by {@code mode}, or {@code null} for an unrecognised mode.
 *
 * @param gs the grammatical structure to read dependencies from
 * @param mode which dependency representation to produce
 * @param includeExtras whether to include extra (non-tree) dependencies where
 *            the representation supports it
 * @return the selected dependencies, or {@code null} if the mode matched none
 *         of the known variants
 */
protected static Collection<TypedDependency> getDependencies(GrammaticalStructure gs, DependencyMode mode,
        boolean includeExtras) {
    if (mode.equals(Typed)) {
        return gs.typedDependencies(includeExtras);
    }
    if (mode.equals(AllTyped)) {
        // allTypedDependencies() has no includeExtras variant; the flag is
        // not applicable here.
        return gs.allTypedDependencies();
    }
    if (mode.equals(TypedCollapsed)) {
        return gs.typedDependenciesCollapsed(includeExtras);
    }
    if (mode.equals(TypedCCprocessed)) {
        return gs.typedDependenciesCCprocessed(includeExtras);
    }
    return null;
}

From source file:org.apdplat.qa.questiontypeanalysis.patternbased.MainPartExtracter.java

License:Open Source License

/**
 * Extracts the main part (core structure) of a question from its
 * CC-processed typed dependencies.
 *
 * @param question the question text
 * @param words the question tokenised as Stanford {@code Word} tokens
 * @return the analysed question structure, or {@code null} if no grammatical
 *         structure could be built
 */
public QuestionStructure getMainPart(String question, List<edu.stanford.nlp.ling.Word> words) {
    // Parse the question, collect its typed dependencies, and derive the
    // question's "main part" via the top/root extraction patterns.
    QuestionStructure questionStructure = new QuestionStructure();
    questionStructure.setQuestion(question);

    Tree tree = LP.apply(words);
    LOG.info("?: ");
    tree.pennPrint();
    questionStructure.setTree(tree);

    GrammaticalStructure gs = GSF.newGrammaticalStructure(tree);
    if (gs == null) {
        return null;
    }
    // CC-processed typed dependencies; true includes extra (non-tree) deps.
    Collection<TypedDependency> tdls = gs.typedDependenciesCCprocessed(true);
    questionStructure.setTdls(tdls);
    // governor -> dependent(s); multiple dependents of one governor are
    // joined with ':' in the else-branch below.
    Map<String, String> map = new HashMap<>();
    String top = null;
    String root = null;
    LOG.info("???");
    // Keep the printed form of every dependency, and remember the "top" and
    // "root" relations that anchor the two extraction patterns.
    List<String> dependencies = new ArrayList<>();
    for (TypedDependency tdl : tdls) {
        String item = tdl.toString();
        dependencies.add(item);
        LOG.info("\t" + item);
        if (item.startsWith("top")) {
            top = item;
        }
        if (item.startsWith("root")) {
            root = item;
        }
        // A dependency prints as reln(gov-i, dep-j): strip the relation name
        // and parentheses, then split into governor and dependent.
        // NOTE(review): assumes neither token itself contains ',' — confirm
        // for the expected tokenised input.
        int start = item.indexOf("(");
        int end = item.lastIndexOf(")");
        item = item.substring(start + 1, end);
        String[] attr = item.split(",");
        String k = attr[0].trim();
        String v = attr[1].trim();
        String value = map.get(k);
        if (value == null) {
            map.put(k, v);
        } else {
            // Governor already seen: append this dependent, ':'-separated.
            value += ":";
            value += v;
            map.put(k, value);
        }
    }
    questionStructure.setDependencies(dependencies);

    // Apply both extraction strategies; QuestionStructure.getMainPart()
    // decides which result is used.
    String mainPartForTop = null;
    String mainPartForRoot = null;
    if (top != null) {
        mainPartForTop = topPattern(top, map);
    }
    if (root != null) {
        mainPartForRoot = rootPattern(root, map);
    }
    questionStructure.setMainPartForTop(mainPartForTop);
    questionStructure.setMainPartForRoot(mainPartForRoot);

    if (questionStructure.getMainPart() == null) {
        LOG.error("" + question);
    } else {
        LOG.info("" + questionStructure.getMainPart());
    }
    return questionStructure;
}

From source file:org.linuxkernel.proof.digger.questiontypeanalysis.patternbased.MainPartExtracter.java

License:Open Source License

/**
 * Extracts the main part (core structure) of a question from its
 * CC-processed typed dependencies.
 *
 * @param question the question text
 * @param words the question tokenised as Stanford {@code Word} tokens
 * @return the analysed question structure, or {@code null} if no grammatical
 *         structure could be built
 */
public QuestionStructure getMainPart(String question, List<Word> words) {
    // Parse the question, collect its typed dependencies, and derive the
    // question's "main part" via the top/root extraction patterns.
    QuestionStructure questionStructure = new QuestionStructure();
    questionStructure.setQuestion(question);

    Tree tree = lp.apply(words);
    LOG.info("?: ");
    tree.pennPrint();
    questionStructure.setTree(tree);

    GrammaticalStructure gs = gsf.newGrammaticalStructure(tree);
    if (gs == null) {
        return null;
    }
    // CC-processed typed dependencies; true includes extra (non-tree) deps.
    Collection<TypedDependency> tdls = gs.typedDependenciesCCprocessed(true);
    questionStructure.setTdls(tdls);
    // governor -> dependent(s); multiple dependents of one governor are
    // joined with ':' in the else-branch below.
    Map<String, String> map = new HashMap<>();
    String top = null;
    String root = null;
    LOG.info("???");
    // Keep the printed form of every dependency, and remember the "top" and
    // "root" relations that anchor the two extraction patterns.
    List<String> dependencies = new ArrayList<>();
    for (TypedDependency tdl : tdls) {
        String item = tdl.toString();
        dependencies.add(item);
        LOG.info("\t" + item);
        if (item.startsWith("top")) {
            top = item;
        }
        if (item.startsWith("root")) {
            root = item;
        }
        // A dependency prints as reln(gov-i, dep-j): strip the relation name
        // and parentheses, then split into governor and dependent.
        // NOTE(review): assumes neither token itself contains ',' — confirm
        // for the expected tokenised input.
        int start = item.indexOf("(");
        int end = item.lastIndexOf(")");
        item = item.substring(start + 1, end);
        String[] attr = item.split(",");
        String k = attr[0].trim();
        String v = attr[1].trim();
        String value = map.get(k);
        if (value == null) {
            map.put(k, v);
        } else {
            // Governor already seen: append this dependent, ':'-separated.
            value += ":";
            value += v;
            map.put(k, value);
        }
    }
    questionStructure.setDependencies(dependencies);

    // Apply both extraction strategies; QuestionStructure.getMainPart()
    // decides which result is used.
    String mainPartForTop = null;
    String mainPartForRoot = null;
    if (top != null) {
        mainPartForTop = topPattern(top, map);
    }
    if (root != null) {
        mainPartForRoot = rootPattern(root, map);
    }
    questionStructure.setMainPartForTop(mainPartForTop);
    questionStructure.setMainPartForRoot(mainPartForRoot);

    if (questionStructure.getMainPart() == null) {
        LOG.error("" + question);
    } else {
        LOG.info("" + questionStructure.getMainPart());
    }
    return questionStructure;
}