Example usage for edu.stanford.nlp.trees GrammaticalRelation valueOf

List of usage examples for edu.stanford.nlp.trees GrammaticalRelation valueOf

Introduction

This page collects usage examples for edu.stanford.nlp.trees GrammaticalRelation.valueOf.

Prototype

public static GrammaticalRelation valueOf(String s) 
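
Before the excerpts below, here is a minimal, self-contained sketch of calling valueOf directly. The class name ValueOfDemo and the label "nsubj" are only illustrative; the point is that valueOf resolves a dependency label (the relation's short name) to a GrammaticalRelation object, which the source-file examples then pass to SemanticGraph methods such as getChildrenWithReln.

import edu.stanford.nlp.trees.GrammaticalRelation;

public class ValueOfDemo {
    public static void main(String[] args) {
        // Resolve a dependency label (the relation's short name) to its GrammaticalRelation object.
        GrammaticalRelation nsubj = GrammaticalRelation.valueOf("nsubj");

        System.out.println(nsubj.getShortName()); // prints "nsubj"
        System.out.println(nsubj.getLongName());  // prints the descriptive name, e.g. "nominal subject"
    }
}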

Usage

From source file: ca.ualberta.exemplar.core.RelationExtraction.java

License: Open Source License

private static void addModifiers(List<IndexedWord> words, IndexedWord word, SemanticGraph dependencies) {
    List<IndexedWord> adjs = dependencies.getChildrenWithReln(word, GrammaticalRelation.valueOf("amod"));
    List<IndexedWord> nns = dependencies.getChildrenWithReln(word, GrammaticalRelation.valueOf("nn"));
    List<IndexedWord> negs = dependencies.getChildrenWithReln(word, GrammaticalRelation.valueOf("neg"));
    List<IndexedWord> prts = dependencies.getChildrenWithReln(word, GrammaticalRelation.valueOf("prt")); // phrasal verb particle, e.g. "shut down"

    List<IndexedWord> newWords = new ArrayList<IndexedWord>();
    if (adjs != null)
        newWords.addAll(adjs);
    if (nns != null)
        newWords.addAll(nns);
    if (negs != null)
        newWords.addAll(negs);
    if (prts != null)
        newWords.addAll(prts);

    for (IndexedWord newWord : newWords) {

        if (Math.abs(word.index() - newWord.index()) > 5) {
            // If a modifier is too far away from the trigger (> 5 tokens), ignore it, since it is probably a mistake
            continue;
        }

        if (!newWord.ner().equals("PERSON") && !newWord.ner().equals("ORGANIZATION")
                && !newWord.ner().equals("LOCATION") && !newWord.ner().equals("MISC")) {
            words.add(newWord);
        }
    }

}

From source file: count_dep.Count_dep.java

public LinkedList<Event> GetEvents(SemanticGraph dependencies, CoreMap sentence) {
    LinkedList<Event> res = new LinkedList<>();
    LinkedList<IndexedWord> roots = new LinkedList<>();
    List<CoreLabel> words = sentence.get(TokensAnnotation.class);
    List<GrammaticalRelation> senserel = new LinkedList<>();
    senserel.add(GrammaticalRelation.valueOf("nsubj"));
    senserel.add(GrammaticalRelation.valueOf("dobj"));
    for (CoreLabel word : words) {
        if (word.tag().length() >= 2
                && ("VB".equals(word.tag().substring(0, 2)) || "NN".equals(word.tag().substring(0, 2)))) {
            IndexedWord iword = new IndexedWord(word);
            roots.add(iword);
        }
    }
    for (IndexedWord word : roots) {
        Event e = new Event();
        e.trigger = word.word();
        try {
            Set<IndexedWord> children = dependencies.getChildren(word);
            children.stream().forEach((iw) -> {
                e.arguments.add(new EventArgument(iw.word(), ""));
            });
            if (dependencies.inDegree(word) > 0) {
                IndexedWord parent = dependencies.getParent(word);
                if (parent.tag().length() >= 2 && "VB".equals(parent.tag().substring(0, 2))) {
                    Set<IndexedWord> children1 = dependencies.getChildrenWithRelns(parent, senserel);
                    children1.remove(word);
                    children1.stream().forEach((iw) -> {
                        e.arguments.add(new EventArgument(iw.word(), ""));
                    });
                } else {
                    e.arguments.add(new EventArgument(dependencies.getParent(word).word(), ""));
                }
            }
        } catch (java.lang.IllegalArgumentException error) {
            continue;
        }
        res.add(e);
    }
    return res;
}

From source file: de.tudarmstadt.ukp.dkpro.core.corenlp.internal.DKPro2CoreNlp.java

License: Open Source License

public Annotation convert(JCas aSource, Annotation aTarget) {
    // Document annotation
    aTarget.set(CoreAnnotations.TextAnnotation.class, aSource.getDocumentText());

    // Sentences
    List<CoreMap> sentences = new ArrayList<>();
    for (Sentence s : select(aSource, Sentence.class)) {
        if (StringUtils.isBlank(s.getCoveredText())) {
            continue;
        }

        String sentenceText = s.getCoveredText();
        if (encoding != null && !"UTF-8".equals(encoding.name())) {
            sentenceText = new String(sentenceText.getBytes(StandardCharsets.UTF_8), encoding);
        }

        Annotation sentence = new Annotation(sentenceText);
        sentence.set(CharacterOffsetBeginAnnotation.class, s.getBegin());
        sentence.set(CharacterOffsetEndAnnotation.class, s.getEnd());
        sentence.set(SentenceIndexAnnotation.class, sentences.size());

        // Tokens
        Map<Token, IndexedWord> idxTokens = new HashMap<>();
        List<CoreLabel> tokens = new ArrayList<>();
        for (Token t : selectCovered(Token.class, s)) {
            String tokenText = t.getCoveredText();
            if (encoding != null && !"UTF-8".equals(encoding.name())) {
                tokenText = new String(tokenText.getBytes(StandardCharsets.UTF_8), encoding);
            }

            CoreLabel token = tokenFactory.makeToken(tokenText, t.getBegin(), t.getEnd() - t.getBegin());
            // Add the token first so that tokens.size() yields the 1-based index required
            // by IndexAnnotation
            tokens.add(token);
            token.set(SentenceIndexAnnotation.class, sentences.size());
            token.set(IndexAnnotation.class, tokens.size());
            token.set(TokenKey.class, t);
            idxTokens.put(t, new IndexedWord(token));

            // POS tags
            if (readPos && t.getPos() != null) {
                token.set(PartOfSpeechAnnotation.class, t.getPos().getPosValue());
            }

            // Lemma
            if (t.getLemma() != null) {
                token.set(LemmaAnnotation.class, t.getLemma().getValue());
            }

            // Stem
            if (t.getStem() != null) {
                token.set(StemAnnotation.class, t.getStem().getValue());
            }

            // NamedEntity
            // TODO: only token-based NEs are supported, but not multi-token NEs
            // Supporting multi-token NEs via selectCovering would be very slow. To support
            // them, another approach would need to be implemented, e.g. via indexCovering.
            List<NamedEntity> nes = selectCovered(NamedEntity.class, t);
            if (nes.size() > 0) {
                token.set(NamedEntityTagAnnotation.class, nes.get(0).getValue());
            } else {
                token.set(NamedEntityTagAnnotation.class, "O");
            }
        }

        // Constituents
        for (ROOT r : selectCovered(ROOT.class, s)) {
            Tree tree = createStanfordTree(r, idxTokens);
            tree.indexSpans();
            sentence.set(TreeAnnotation.class, tree);
        }

        // Dependencies
        List<TypedDependency> dependencies = new ArrayList<>();
        for (Dependency d : selectCovered(Dependency.class, s)) {
            TypedDependency dep = new TypedDependency(GrammaticalRelation.valueOf(d.getDependencyType()),
                    idxTokens.get(d.getGovernor()), idxTokens.get(d.getDependent()));
            if (DependencyFlavor.ENHANCED.equals(d.getFlavor())) {
                dep.setExtra();
            }
            dependencies.add(dep);
        }
        sentence.set(EnhancedDependenciesAnnotation.class, new SemanticGraph(dependencies));

        if (ptb3Escaping) {
            tokens = applyPtbEscaping(tokens, quoteBegin, quoteEnd);
        }

        sentence.set(TokensAnnotation.class, tokens);
        sentences.add(sentence);
    }
    aTarget.set(SentencesAnnotation.class, sentences);

    return aTarget;
}

From source file: edu.jhu.agiga.StanfordAgigaSentence.java

License: Open Source License

public List<TypedDependency> getStanfordTypedDependencies(DependencyForm form) {
    List<TypedDependency> dependencies = new ArrayList<TypedDependency>();
    if (this.nodes == null)
        nodes = getStanfordTreeGraphNodes(form);

    List<AgigaTypedDependency> agigaDeps = getAgigaDeps(form);
    for (AgigaTypedDependency agigaDep : agigaDeps) {
        // Add one, since the tokens are zero-indexed but the TreeGraphNodes are one-indexed
        TreeGraphNode gov = nodes.get(agigaDep.getGovIdx() + 1);
        TreeGraphNode dep = nodes.get(agigaDep.getDepIdx() + 1);
        // Create the typed dependency
        TypedDependency typedDep = new TypedDependency(GrammaticalRelation.valueOf(agigaDep.getType()), gov,
                dep);
        dependencies.add(typedDep);
    }
    return dependencies;
}

From source file: sleventextraction.SLEventExtraction.java

public static String GetNPstring(IndexedWord curr, SemanticGraph dependencies, Set<IndexedWord> done_tokens) {
    String res = "";
    LinkedList<IndexedWord> children = new LinkedList<>();
    children.addAll(dependencies.getChildrenWithReln(curr, GrammaticalRelation.valueOf("amod")));
    children.addAll(dependencies.getChildrenWithReln(curr, GrammaticalRelation.valueOf("det")));
    children.addAll(dependencies.getChildrenWithReln(curr, GrammaticalRelation.valueOf("nn")));
    children.addAll(dependencies.getChildrenWithReln(curr, GrammaticalRelation.valueOf("num")));
    children.addAll(dependencies.getChildrenWithReln(curr, GrammaticalRelation.valueOf("poss")));
    children.add(curr);
    Collections.sort(children, new Comparator<IndexedWord>() {

        @Override
        public int compare(IndexedWord o1, IndexedWord o2) {
            return o1.index() - o2.index();
        }
    });
    int start = children.indexOf(curr);
    int end = start;
    while (start > 0 && children.get(start - 1).index() == children.get(start).index() - 1) {
        start--;
    }
    while (end < children.size() - 1 && children.get(end + 1).index() == children.get(end).index() + 1) {
        end++;
    }
    for (int i = start; i <= end; i++) {
        done_tokens.add(children.get(i));
        res += children.get(i).word();
        if (i != end) {
            res += ' ';
        }
    }

    return res;
}