Example usage for edu.stanford.nlp.pipeline StanfordCoreNLP process

List of usage examples for edu.stanford.nlp.pipeline StanfordCoreNLP process

Introduction

In this page you can find the example usage for edu.stanford.nlp.pipeline StanfordCoreNLP process.

Prototype

public Annotation process(String text) 

Source Link

Document

Runs the entire annotation pipeline on the given text.

Usage

From source file:analyzer.SentimentAnalyzerBean.java

@Override
public Sentiment findSentiment(final String text) {
    // Pipeline that tokenizes, splits sentences, parses and scores sentiment.
    final Properties properties = new Properties();
    properties.setProperty("annotators", "tokenize, ssplit, parse, sentiment");
    final StanfordCoreNLP nlp = new StanfordCoreNLP(properties);

    // The sentiment of the longest sentence wins; empty input maps to 0.
    int dominantSentiment = 0;
    if (text != null && text.length() > 0) {
        final Annotation annotated = nlp.process(text);
        int longestSoFar = 0;
        for (CoreMap sentence : annotated.get(CoreAnnotations.SentencesAnnotation.class)) {
            final Tree sentimentTree = sentence.get(SentimentCoreAnnotations.SentimentAnnotatedTree.class);
            final int sentenceSentiment = RNNCoreAnnotations.getPredictedClass(sentimentTree);
            final int sentenceLength = sentence.toString().length();
            if (sentenceLength > longestSoFar) {
                dominantSentiment = sentenceSentiment;
                longestSoFar = sentenceLength;
            }
        }
    }

    return Sentiment.getFromValue(dominantSentiment);
}

From source file:bi.meteorite.sentiment.NLPStep.java

License:Apache License

/**
 * Scores each sentence of the document with the CoreNLP sentiment model, then
 * rolls the per-sentence results up into one document-level sentiment using the
 * rollup strategy configured in the step metadata.
 *
 * @param document raw text to analyse; each sentence is scored independently
 * @return the name of the rolled-up sentiment, or null when the configured
 *         analysis type matches no known rollup strategy
 */
private String processString(String document) {
    Properties props = new Properties();
    // We need a tokenized, sentence-split, parsed document to extract sentiment.
    props.setProperty("annotators", "tokenize, ssplit, parse, sentiment");
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
    Annotation annotation = pipeline.process(document);

    List<Sentence> sentences = new ArrayList<Sentence>();
    // Collect the predicted class and the full class-probability vector for
    // every sentence; the probability-based rollups need the whole distribution.
    for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
        Tree sentimentTree = sentence.get(SentimentCoreAnnotations.AnnotatedTree.class);
        int sentimentClassIdx = RNNCoreAnnotations.getPredictedClass(sentimentTree);
        SentimentClass sentimentClass = SentimentClass.getSpecific(sentimentClassIdx);

        double[] probs = new double[SentimentClass.values().length];
        SimpleMatrix mat = RNNCoreAnnotations.getPredictions(sentimentTree);
        for (int i = 0; i < SentimentClass.values().length; ++i) {
            probs[i] = mat.get(i);
        }

        String sentenceStr = AnnotationUtils.sentenceToString(sentence).replace("\n", "");
        sentences.add(new Sentence(probs, sentenceStr, sentimentClass));
    }

    // Read the configured strategy once instead of once per comparison branch.
    String analysisType = meta.getAnalysisType();
    SentimentClass sentimentClass = null;
    if (analysisType.equals("Wilson Score")) {
        sentimentClass = SentimentRollup.WILSON_SCORE.apply(sentences);
    } else if (analysisType.equals("Simple Vote Rollup")) {
        sentimentClass = SentimentRollup.SIMPLE_VOTE.apply(sentences);
    } else if (analysisType.equals("Longest Sentence Wins")) {
        sentimentClass = SentimentRollup.LONGEST_SENTENCE_WINS.apply(sentences);
    } else if (analysisType.equals("Last Sentence Wins")) {
        sentimentClass = SentimentRollup.LAST_SENTENCE_WINS.apply(sentences);
    } else if (analysisType.equals("Average Probabilities Rollup")) {
        sentimentClass = SentimentRollup.AVERAGE_PROBABILITIES.apply(sentences);
    }

    // Unknown analysis types deliberately yield null, matching caller expectations.
    if (sentimentClass != null) {
        return sentimentClass.toString();
    }
    return null;
}

From source file:ch.zhaw.parallelComputing.model.sentiment.TweetMapper.java

License:Open Source License

private Integer findSentiment(String line) {
    // Build a sentiment-capable pipeline: tokenize, sentence-split, parse, sentiment.
    Properties properties = new Properties();
    properties.setProperty("annotators", "tokenize, ssplit, parse, sentiment");
    StanfordCoreNLP nlp = new StanfordCoreNLP(properties);

    // Keep the sentiment of the longest sentence; empty/null input yields 0.
    int dominantSentiment = 0;
    if (line != null && line.length() > 0) {
        Annotation annotated = nlp.process(line);
        int longestSoFar = 0;
        for (CoreMap sentence : annotated.get(CoreAnnotations.SentencesAnnotation.class)) {
            Tree sentimentTree = sentence.get(SentimentCoreAnnotations.AnnotatedTree.class);
            int sentenceSentiment = RNNCoreAnnotations.getPredictedClass(sentimentTree);
            int sentenceLength = sentence.toString().length();
            if (sentenceLength > longestSoFar) {
                dominantSentiment = sentenceSentiment;
                longestSoFar = sentenceLength;
            }
        }
    }
    return dominantSentiment;
}

From source file:com.raythos.sentilexo.trident.twitter.sentiment.CalculateNLPSentiment.java

License:Apache License

private String calcSentimentForTweetMessage(String statusText) {
    // Reuse the shared singleton pipeline instead of building one per call.
    StanfordCoreNLP nlp = CoreNLPSentimentClassifier.getInstance().getPipeline();

    // The sentiment of the longest sentence wins; blank input stays at 0.
    int dominantSentiment = 0;
    if (statusText != null && statusText.length() > 0) {
        Annotation annotated = nlp.process(statusText);
        int longestSoFar = 0;
        for (CoreMap sentence : annotated.get(CoreAnnotations.SentencesAnnotation.class)) {
            Tree sentimentTree = sentence.get(SentimentCoreAnnotations.AnnotatedTree.class);
            int sentenceSentiment = RNNCoreAnnotations.getPredictedClass(sentimentTree);
            int sentenceLength = sentence.toString().length();
            if (sentenceLength > longestSoFar) {
                dominantSentiment = sentenceSentiment;
                longestSoFar = sentenceLength;
            }
        }
    }
    return textSentiment(dominantSentiment);
}

From source file:gr.aueb.cs.nlp.bioasq.classifiers.Baseline.java

/**
 * Lemmatizes the given text with a tokenize/ssplit/pos/lemma CoreNLP pipeline.
 *
 * @param documentText raw text to lemmatize
 * @return the lemma of every token, in document order
 */
public static ArrayList<String> lemmatize(String documentText) {
    ArrayList<String> lemmas = new ArrayList<String>();
    Properties props = new Properties();
    // POS tags are required upstream of the lemma annotator.
    props.put("annotators", "tokenize, ssplit, pos, lemma");
    // NOTE(review): second ctor arg appears to be enforceRequirements=false — confirm against the CoreNLP version in use.
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props, false);
    Annotation document = pipeline.process(documentText);
    for (CoreMap sentence : document.get(SentencesAnnotation.class)) {
        for (CoreLabel token : sentence.get(TokensAnnotation.class)) {
            // Collect only the lemma; the surface form is not needed here.
            lemmas.add(token.get(LemmaAnnotation.class));
        }
    }
    return lemmas;
}

From source file:gr.aueb.cs.nlp.bioasq.classifiers.Features.java

/**
 * Lemmatizes the given text with a tokenize/ssplit/pos/lemma CoreNLP pipeline.
 *
 * @param documentText raw text to lemmatize
 * @return the lemma of every token, in document order
 */
public static ArrayList<String> lemmatize(String documentText) {
    ArrayList<String> lemmas = new ArrayList<String>();
    Properties props = new Properties();
    // POS tags are required upstream of the lemma annotator.
    props.put("annotators", "tokenize, ssplit, pos, lemma");
    // NOTE(review): second ctor arg appears to be enforceRequirements=false — confirm against the CoreNLP version in use.
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props, false);
    Annotation document = pipeline.process(documentText);
    for (CoreMap sentence : document.get(CoreAnnotations.SentencesAnnotation.class)) {
        for (CoreLabel token : sentence.get(CoreAnnotations.TokensAnnotation.class)) {
            // Collect only the lemma; the surface form is not needed here.
            lemmas.add(token.get(CoreAnnotations.LemmaAnnotation.class));
        }
    }
    return lemmas;
}

From source file:graphene.augment.snlp.services.SentimentAnalyzerImpl.java

License:Apache License

@Override
public StringWithSentiment findSentiment(String line) {
    // Sentiment requires tokenization, sentence splitting and parsing.
    Properties properties = new Properties();
    properties.setProperty("annotators", "tokenize,ssplit,parse,sentiment");
    StanfordCoreNLP nlp = new StanfordCoreNLP(properties);

    // The sentiment of the longest sentence wins.
    int dominantSentiment = 0;
    if (ValidationUtils.isValid(line)) {
        Annotation annotated = nlp.process(line);
        int longestSoFar = 0;
        for (CoreMap sentence : annotated.get(CoreAnnotations.SentencesAnnotation.class)) {
            Tree sentimentTree = sentence.get(SentimentCoreAnnotations.AnnotatedTree.class);
            int sentenceSentiment = RNNCoreAnnotations.getPredictedClass(sentimentTree);
            int sentenceLength = sentence.toString().length();
            if (sentenceLength > longestSoFar) {
                dominantSentiment = sentenceSentiment;
                longestSoFar = sentenceLength;
            }
        }
    }

    // NOTE(review): 2 appears to be the neutral class — neutral and out-of-range
    // scores yield null rather than a StringWithSentiment.
    if (dominantSentiment == 2 || dominantSentiment > 4 || dominantSentiment < 0) {
        return null;
    }
    return new StringWithSentiment(line, toCss(dominantSentiment));
}

From source file:ml.arunreddy.research.sentiment.stanford.impl.StanfordSentimentAnalyzer.java

License:Open Source License

public double getSentimentScore(String text) {
    // Sentiment pipeline: tokenize, sentence-split, parse, sentiment.
    Properties properties = new Properties();
    properties.setProperty("annotators", "tokenize, ssplit, parse, sentiment");
    StanfordCoreNLP nlp = new StanfordCoreNLP(properties);

    // The sentiment of the longest sentence wins; empty input stays at 0.
    int dominantSentiment = 0;
    if (text != null && !text.isEmpty()) {
        Annotation annotated = nlp.process(text);
        int longestSoFar = 0;
        for (CoreMap sentence : annotated.get(CoreAnnotations.SentencesAnnotation.class)) {
            Tree sentimentTree = sentence.get(SentimentCoreAnnotations.AnnotatedTree.class);
            int sentenceSentiment = RNNCoreAnnotations.getPredictedClass(sentimentTree);
            int sentenceLength = sentence.toString().length();
            if (sentenceLength > longestSoFar) {
                dominantSentiment = sentenceSentiment;
                longestSoFar = sentenceLength;
            }
        }
    }

    // Normalize the class index into [0.0, 1.0].
    assert dominantSentiment >= 0;
    assert dominantSentiment <= 4;
    return (double) dominantSentiment / 4.0;
}

From source file:ml.arunreddy.tl.classifier.sentiment.MultiDomainStanfordSentimentClassifier.java

License:Open Source License

public double getSentimentScore(String text) {
    // Sentiment pipeline: tokenize, sentence-split, parse, sentiment.
    Properties properties = new Properties();
    properties.setProperty("annotators", "tokenize, ssplit, parse, sentiment");
    StanfordCoreNLP nlp = new StanfordCoreNLP(properties);

    // The sentiment of the longest sentence wins; empty input stays at 0.
    int dominantSentiment = 0;
    if (text != null && !text.isEmpty()) {
        Annotation annotated = nlp.process(text);
        int longestSoFar = 0;
        for (CoreMap sentence : annotated.get(CoreAnnotations.SentencesAnnotation.class)) {
            Tree sentimentTree = sentence.get(SentimentCoreAnnotations.AnnotatedTree.class);
            int sentenceSentiment = RNNCoreAnnotations.getPredictedClass(sentimentTree);
            int sentenceLength = sentence.toString().length();
            if (sentenceLength > longestSoFar) {
                dominantSentiment = sentenceSentiment;
                longestSoFar = sentenceLength;
            }
        }
    }

    // Normalize the class index into [0.0, 1.0].
    assert dominantSentiment >= 0;
    assert dominantSentiment <= 4;
    return (double) dominantSentiment / 4.0;
}

From source file:nlp.classify1.java

public static void main(String args[]) {
    String text = "hi dude";
    // Sentiment needs tokenization, sentence splitting, POS tagging and parsing.
    Properties properties = new Properties();
    properties.setProperty("annotators", "tokenize,ssplit,pos,parse,sentiment");
    StanfordCoreNLP nlp = new StanfordCoreNLP(properties);

    edu.stanford.nlp.pipeline.Annotation annotated = nlp.process(text);
    // Print each sentence's sentiment class name alongside the sentence itself.
    for (CoreMap sentence : annotated.get(CoreAnnotations.SentencesAnnotation.class)) {
        String sentiment = sentence.get(SentimentCoreAnnotations.ClassName.class);
        System.out.println(sentiment + "\t" + sentence);
    }
}