Example usage for edu.stanford.nlp.semgraph SemanticGraphEdge getTarget

List of usage examples for edu.stanford.nlp.semgraph SemanticGraphEdge getTarget

Introduction

In this page you can find the example usage for edu.stanford.nlp.semgraph SemanticGraphEdge getTarget.

Prototype

public IndexedWord getTarget() 

Source Link

Usage

From source file:context.core.tokenizer.SemanticAnnotation.java

License: Open Source License

/**
 *
 * @param text/*from w  w  w  .  j av a 2s. co m*/
 * @param docId
 * @return
 */
public static Map<String, CustomEdge> tokenize(String text, String docId) {
    Map<String, CustomEdge> customEdges = new LinkedHashMap<>();
    Annotation document = new Annotation(text);
    pipeline.annotate(document);

    List<CoreMap> sentences = document.get(CoreAnnotations.SentencesAnnotation.class);
    int sentIndex = 0;
    for (CoreMap sentence : sentences) {
        // traversing the words in the current sentence
        // a CoreLabel is a CoreMap with additional token-specific methods
        int index = 0;

        SemanticGraph dependencies = sentence
                .get(SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation.class);
        //            System.out.println(dependencies);
        for (SemanticGraphEdge edge : dependencies.edgeListSorted()) {

            CustomEdge cedge = new CustomEdge();
            cedge.setDocId(docId);
            cedge.setSentenceIndex(sentIndex);
            cedge.setIndex(index);
            cedge.setWord1(removePOS(edge.getSource() + ""));
            cedge.setWord2(removePOS(edge.getTarget() + ""));
            cedge.setType(edge.getRelation() + "");
            //                System.out.println(edge + " >d: " + edge.getDependent() + " >g: " + edge.getGovernor() + " > " + edge.getRelation() + "> " + edge.getSource() + " > " + edge.getTarget() + " >w: " + edge.getWeight());
            customEdges.put(cedge.getWord1() + "/" + cedge.getWord2() + "/" + cedge.getDocId() + "/"
                    + cedge.getSentenceIndex(), cedge);
            index++;
        }

        //            Collection<TypedDependency> deps = dependencies.typedDependencies();
        //            for (TypedDependency typedDep : deps) {
        //                GrammaticalRelation reln = typedDep.reln();
        //                String type = reln.toString();
        //                System.out.println("type=" + type + " >> " + typedDep);
        //            }
        //            Tree tree = sentence.get(TreeCoreAnnotations.TreeAnnotation.class);
        //            
        sentIndex++;
    }
    return customEdges;
}

From source file:context.core.tokenizer.SPOExtractor.java

License: Open Source License

/**
 * Builds subject–predicate–object triples for one parsed sentence.
 * Verbs come from {@code extractVerbs}; each verb's {@code nsubj} edges supply
 * subjects and its {@code dobj} edges supply objects, with a prepositional edge
 * as an object fallback. Only verbs with at least one subject AND one object
 * yield an {@link SPOStructure}.
 *
 * @param sentence  CoreNLP sentence fragment to read dependencies from
 * @param docId     document identifier stamped onto intermediate edges
 * @param sentIndex index of the sentence within the document
 * @return SPO structures found in the sentence (possibly empty)
 */
static List<SPOStructure> extractSPOs(CoreMap sentence, String docId, int sentIndex) {
    Map<String, CustomEdge> edgesByKey = new LinkedHashMap<>();
    SemanticGraph dependencies = sentence
            .get(SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation.class);
    int edgeIndex = 0;
    for (SemanticGraphEdge edge : dependencies.edgeListSorted()) {
        CustomEdge cedge = new CustomEdge();
        cedge.setDocId(docId);
        cedge.setSentenceIndex(sentIndex);
        cedge.setIndex(edgeIndex++);
        cedge.setWord1(edge.getSource().originalText());
        cedge.setWord2(edge.getTarget().originalText());
        cedge.setType(edge.getRelation() + "");
        edgesByKey.put(cedge.getWord1() + "/" + cedge.getWord2() + "/" + cedge.getDocId() + "/"
                + cedge.getSentenceIndex(), cedge);
    }

    List<SPOStructure> spos_list = new ArrayList<>();
    for (String verb : extractVerbs(edgesByKey.values())) {
        SPOStructure spo = new SPOStructure();
        for (CustomEdge cedge : edgesByKey.values()) {
            if (!cedge.getWord1().equals(verb)) {
                continue; // only edges governed by this verb matter
            }
            if (cedge.getType().equals("nsubj")) {
                CustomToken subject = new CustomToken();
                subject.setWord(expandNoun(cedge.getWord2(), edgesByKey.values()));
                spo.addSubject(subject);
            } else if (cedge.getType().equals("dobj")) {
                CustomToken object = new CustomToken();
                object.setWord(expandNoun(cedge.getWord2(), edgesByKey.values()));
                spo.addObject(object);
            }
        }
        // No direct object found: fall back to the first prepositional dependent.
        if (spo.getObjects().isEmpty()) {
            for (CustomEdge cedge : edgesByKey.values()) {
                if (cedge.getType().contains("prep") && cedge.getWord1().equals(verb)) {
                    CustomToken object = new CustomToken();
                    object.setWord(expandNoun(cedge.getWord2(), edgesByKey.values()));
                    spo.addObject(object);
                    break;
                }
            }
        }
        if (!spo.getSubjects().isEmpty() && !spo.getObjects().isEmpty()) {
            CustomToken predicate = new CustomToken();
            predicate.setWord(verb);
            spo.setPredicate(predicate);
            spos_list.add(spo);
        }
    }
    return spos_list;
}

From source file:edu.cmu.geolocator.nlp.StanfordCoreTools.StanfordNLP.java

License: Apache License

/**
 * Populates two lookup maps from a dependency parse.
 *
 * @param tree     dependency graph whose edge set is traversed (unordered)
 * @param edgeMap  out-param: target word → edge's string form; a word that is
 *                 the target of several edges keeps only the last one visited
 * @param childMap out-param: source word → POS tags of its dependents
 */
public void createEdgeMap(SemanticGraph tree, Map<String, String> edgeMap,
        Map<String, ArrayList<String>> childMap) {
    for (SemanticGraphEdge edge : tree.getEdgeSet()) {
        edgeMap.put(edge.getTarget().word(), edge.toString());
        // computeIfAbsent replaces the containsKey/put/get triple of the original.
        childMap.computeIfAbsent(edge.getSource().word(), k -> new ArrayList<String>())
                .add(edge.getTarget().tag());
    }
}

From source file:edu.jhu.hlt.concrete.stanford.PreNERCoreMapWrapper.java

License: Open Source License

/**
 * Converts a CoreNLP dependency graph into Concrete {@link Dependency} objects.
 * Roots become governor-less entries (mirroring CoreNLP's own handling); every
 * edge becomes a (dependent, relation, governor) triple with 0-based indices.
 *
 * @param graph parsed dependency graph for one sentence
 * @return dependency list: roots first, then edges in sorted order
 */
private List<Dependency> makeDependencies(SemanticGraph graph) {
    List<Dependency> depList = new ArrayList<>();
    // Whitespace is squeezed out of relation names; the root label is loop-invariant.
    String rootRel = GrammaticalRelation.ROOT.getLongName().replaceAll("\\s+", "");
    for (IndexedWord root : graph.getRoots()) {
        depList.add(DependencyFactory.create(root.index() - 1, rootRel));
    }
    for (SemanticGraphEdge edge : graph.edgeListSorted()) {
        String rel = edge.getRelation().toString().replaceAll("\\s+", "");
        depList.add(DependencyFactory.create(edge.getTarget().index() - 1, rel,
                edge.getSource().index() - 1));
    }
    return depList;
}

From source file:nlp.service.implementation.DefaultGrammarService.java

/**
 * Indexes the grammatical relations of a parsed sentence by target token index.
 * Relation names with a subtype (e.g. {@code "nmod:of"}) keep only the part
 * after the colon; names with no matching {@link GrammaticalDependency} enum
 * constant degrade to {@code unknown} instead of failing.
 *
 * @param graph dependency graph produced by the parser
 */
public DefaultGrammarService(SemanticGraph graph) {
    targetMap = new HashSetValuedHashMap<>();

    rootIndex = graph.getFirstRoot().index();

    for (SemanticGraphEdge edge : graph.edgeIterable()) {

        // Keep only the subtype after ':' — single-argument substring replaces
        // the redundant substring(i, name.length()) of the original.
        String name = edge.getRelation().toString();
        if (name.contains(":")) {
            name = name.substring(name.indexOf(':') + 1);
        }

        GrammaticalDependency dependency;
        try {
            // "case" is special-cased, presumably because it is a Java reserved
            // word and cannot be an enum constant name — confirm against the enum.
            dependency = name.equals("case")
                    ? GrammaticalDependency.casemarker
                    : GrammaticalDependency.valueOf(name);
        } catch (IllegalArgumentException e) {
            // Unmapped relation names fall back to a catch-all value.
            dependency = GrammaticalDependency.unknown;
        }

        GrammaticalRelation<Integer> relation = new GrammaticalRelation<>(dependency, edge.getTarget().index(),
                edge.getSource().index());
        targetMap.put(relation.getTarget(), relation);
    }
}

From source file:shef.mt.tools.ParsingProcessor.java

/**
 * Annotates one sentence with CoreNLP and attaches two resources to it:
 * a POS-tag list aligned to the sentence's own tokenization ({@code "postags"})
 * and, when {@code requiresDepCounts} is set, a map from token index to the
 * number of dependency edges touching that token ({@code "depcounts"}).
 *
 * @param s sentence to annotate; modified in place via {@code setValue}
 */
@Override
public void processNextSentence(Sentence s) {
    //Create resource objects:
    ArrayList<String> POSData = new ArrayList<>();
    HashMap<Integer, Integer> depData = new HashMap<>();

    //Get sentences' tokens:
    String[] sentenceTokens = s.getTokens();

    //Create and annotate content object:
    Annotation document = new Annotation(s.getText());
    pipeline.annotate(document);

    //Offset of the current CoreNLP sentence fragment within the flat token list:
    int shift = 0;

    //Get sentence fragments:
    List<CoreMap> sentences = document.get(SentencesAnnotation.class);

    //Re-align CoreNLP tokens with the sentence's own tokenization by accumulating
    //fragments until they match the next expected token:
    String buffer = "";
    int index = 0;
    for (CoreMap sentence : sentences) {
        //Get tokens from sentence fragment:
        List<CoreLabel> tokens = sentence.get(TokensAnnotation.class);

        //Add tokens to resulting POS tag list:
        for (CoreLabel token : tokens) {
            String[] fragments = new String[] { token.originalText() };
            if (token.originalText().contains(" ")) {
                fragments = token.originalText().split(" ");
            }
            String pos = token.get(PartOfSpeechAnnotation.class);
            for (String fragment : fragments) {
                buffer += fragment;
                if (buffer.trim().equals(sentenceTokens[index])) {
                    POSData.add(pos);
                    index += 1;
                    buffer = "";
                }
            }
        }

        //Check for dependency parsing requirement:
        if (this.requiresDepCounts) {
            //Get dependency relations:
            SemanticGraph dependencies = sentence.get(CollapsedCCProcessedDependenciesAnnotation.class);

            //For each edge, add 1 to occurrences of source and target indexes:
            for (SemanticGraphEdge sge : deps(dependencies)) {
                int sourceIndex = shift + sge.getSource().index() - 1;
                int targetIndex = shift + sge.getTarget().index() - 1;
                //merge() replaces the null-check/put pattern of the original:
                depData.merge(sourceIndex, 1, Integer::sum);
                depData.merge(targetIndex, 1, Integer::sum);
            }
        }

        //BUG FIX: the original incremented the shift inside the per-edge loop,
        //inflating it once per dependency edge; it must advance exactly once per
        //sentence fragment so later fragments map to the right token offsets.
        shift += tokens.size();
    }

    //Add resources to sentence:
    s.setValue("postags", POSData);
    s.setValue("depcounts", depData);
}

/** Returns the sorted edge list of a fragment's dependency graph. */
private static List<SemanticGraphEdge> deps(SemanticGraph dependencies) {
    return dependencies.edgeListSorted();
}