Example usage for edu.stanford.nlp.parser.nndep DependencyParser loadFromModelFile

List of usage examples for edu.stanford.nlp.parser.nndep DependencyParser loadFromModelFile

Introduction

On this page you can find example usage for edu.stanford.nlp.parser.nndep DependencyParser loadFromModelFile.

Prototype

public static DependencyParser loadFromModelFile(String modelFile) 

Source Link

Document

Convenience method; see #loadFromModelFile(String, java.util.Properties).
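
A minimal sketch of both overloads (not taken from the source files below; it simply loads the default model bundled with CoreNLP, assuming the models jar is on the classpath):

import java.util.Properties;

import edu.stanford.nlp.parser.nndep.DependencyParser;

public class LoadModelSketch {
    public static void main(String[] args) {
        // Convenience overload: load the bundled default model with default properties.
        DependencyParser parser = DependencyParser.loadFromModelFile(DependencyParser.DEFAULT_MODEL);

        // Equivalent explicit overload; an empty Properties object keeps the defaults.
        DependencyParser parser2 = DependencyParser.loadFromModelFile(DependencyParser.DEFAULT_MODEL,
                new Properties());
    }
}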

Usage

From source file:Dependency.java

public static void main(String[] args) {
    String modelPath = DependencyParser.DEFAULT_MODEL;
    String taggerPath = "edu/stanford/nlp/models/pos-tagger/english-left3words/english-left3words-distsim.tagger";
    Scanner sc = new Scanner(System.in);

    String text = "";
    text = sc.nextLine();
    // while (!text.equals("exit")) {

    MaxentTagger tagger = new MaxentTagger(taggerPath);
    DependencyParser parser = DependencyParser.loadFromModelFile(modelPath);

    DocumentPreprocessor tokenizer = new DocumentPreprocessor(new StringReader(text));
    for (List<HasWord> sentence : tokenizer) {

        List<TaggedWord> tagged = tagger.tagSentence(sentence);
        Object[] x = tagged.toArray();
        GrammaticalStructure gs = parser.predict(tagged);
        //System.out.println();

        Collection<TypedDependency> s = gs.typedDependenciesCollapsedTree();
        Object[] z = s.toArray();

        System.out.println(tagged.toString());
        String token[] = new String[z.length];
        String pos[] = new String[z.length];
        int k = 0;
        for (Object i : x) {
            String str = i.toString();
            /*String temp0="(.*?)(?=\\/)";
            String temp1="\\/(.*)";
                    
            System.out.println(str);
            Pattern t0 = Pattern.compile("(.*?)(?=\\/)");
            Pattern t1 = Pattern.compile("\\/(.*)");
            Matcher m0 = t0.matcher(str);
            Matcher m1 = t1.matcher(str);*/
            int index = str.lastIndexOf('/');
            token[k] = str.substring(0, index);
            pos[k] = str.substring(index + 1);
            //System.out.println(pos[k]);
            k++;
        }
        String rels[] = new String[z.length];
        String word1[] = new String[z.length];
        String word2[] = new String[z.length];
        int j = 0;
        for (Object i : z) {
            System.out.println(i);
            String temp = i.toString();
            String pattern0 = "(.*)(?=\\()";
            String pattern1 = "(?<=\\()(.*?)(?=-)";
            String pattern2 = "(?<=, )(.*)(?=-)";
            Pattern r0 = Pattern.compile(pattern0);
            Pattern r1 = Pattern.compile(pattern1);
            Pattern r2 = Pattern.compile(pattern2);
            Matcher m0 = r0.matcher(temp);
            Matcher m1 = r1.matcher(temp);
            Matcher m2 = r2.matcher(temp);
            if (m0.find()) {
                rels[j] = m0.group(0);
                //System.out.println(rels[j]);
            }
            if (m1.find()) {
                word1[j] = m1.group(0);
            }
            if (m2.find()) {
                word2[j] = m2.group(0);
            }
            j++;
        }
        //System.out.println(s);
        //Rules for feature extraction.
        //rule1:::::::::::::::::
        //System.out.println("1");
        int[] q = toIntArray(grRecog(rels, "nsubj"));
        //System.out.println("2");
        if (q.length != 0) {
            //System.out.println("3");
            if (posrecog(token, pos, word2[q[0]]).equals("NN")) {
                //System.out.println("4");
                int[] w = toIntArray(grRecog(rels, "compound"));
                //System.out.println("5");
                if (w.length != 0) {
                    System.out.println("6");
                    System.out.println(word1[q[0]] + "," + word2[q[0]] + "," + word2[w[0]]);
                } else {
                    int conj_and_index = compgrRecog(rels, word1, word2, "conj:and", word2[q[0]]);
                    if (conj_and_index != -1) {
                        System.out.println(
                                word1[conj_and_index] + "," + word2[conj_and_index] + "," + word2[q[0]]);
                    } else
                        System.out.println(word1[q[0]] + "," + word2[q[0]]);
                }
            }
            //RULE 2:::::::::::::
            else if (posrecog(token, pos, word1[q[0]]).equals("JJ")) {
                //System.out.println("aaaaa_JJ");
                int a = compgrRecog(rels, word1, word2, "xcomp", word1[q[0]]);
                if (a != -1) {
                    int b = compgrRecog(rels, word1, word2, "dobj", word2[a]);
                    if (b != -1) {
                        int c = compgrRecog(rels, word1, word2, "compound", word2[b]);
                        if (c != -1) {
                            System.out.println(word1[q[0]] + "," + word1[c] + "," + word2[c]);
                        }
                    }
                }
                //RULE 3::::::::::
                else {
                    int b[] = toIntArray(grRecog(rels, "ccomp"));
                    if (b.length != 0) {
                        System.out.println(word1[q[1]] + "," + word2[q[1]] + "," + word1[b[0]]);
                    }

                }
            }
            //RULE 4::::::::::
            else if (posrecog(token, pos, word1[q[0]]).equals("VBZ")) {
                //System.out.println("aaaaa");
                int vbp_dobj_index = compgrRecog(rels, word1, word2, "dobj", word2[q[0]]);
                if (vbp_dobj_index != -1) {
                    System.out.println(word1[vbp_dobj_index] + "," + word2[vbp_dobj_index]);
                } else {
                    int vbp_xcomp_index = compgrRecog(rels, word1, word2, "xcomp", word1[q[0]]);
                    if (vbp_xcomp_index != -1) {

                        System.out.println(word1[vbp_xcomp_index] + "," + word2[vbp_xcomp_index]);
                    } else {
                        int vbp_acomp_index = compgrRecog(rels, word1, word2, "acomp", word1[q[0]]);
                        if (vbp_acomp_index != -1) {

                            System.out.println(
                                    word1[q[0]] + "," + word1[vbp_acomp_index] + "," + word2[vbp_acomp_index]);
                        } else
                            System.out.println(word1[q[0]]);

                    }

                }

            }
            int[] f = toIntArray(grRecog(rels, "amod"));
            if (f.length != 0) {
                for (int i : f) {
                    System.out.println(word1[i] + "," + word2[i]);
                }
                int cj[] = toIntArray(grRecog(rels, "conj:and"));
                if (cj.length != 0) {
                    for (int i : cj) {
                        System.out.println(word1[i] + "," + word2[i]);
                    }
                }
            }
            int[] neg = toIntArray(grRecog(rels, "neg"));
            if (neg.length != 0) {
                for (int i : neg) {
                    System.out.println(word1[i] + "," + word2[i]);
                }

            }

        } else {
            int[] f = toIntArray(grRecog(rels, "amod"));
            if (f.length != 0) {
                for (int i : f) {
                    System.out.println(word1[i] + "," + word2[i]);
                }
                int cj[] = toIntArray(grRecog(rels, "conj:and"));
                if (cj.length != 0) {
                    for (int i : cj) {
                        System.out.println(word2[i]);

                    }
                }
            }
            int[] neg = toIntArray(grRecog(rels, "neg"));
            if (neg.length != 0) {
                for (int i : neg) {
                    System.out.println(word1[i] + "," + word2[i]);
                }

            }

        }

        //RULE 2:::::::::::::

    }

    //  text=sc.nextLine();
    //}
}
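
The regular expressions above recover the relation name, governor, and dependent by parsing each TypedDependency's toString() output. A minimal alternative sketch (not part of the original source): the same fields can be read directly from the TypedDependency accessors, so a hypothetical helper like the one below could replace pattern0/pattern1/pattern2.

import java.util.Collection;

import edu.stanford.nlp.trees.GrammaticalStructure;
import edu.stanford.nlp.trees.TypedDependency;

public class DependencyFields {
    // Prints "reln(governor, dependent)" for each dependency, using the
    // structured accessors instead of regex string parsing.
    public static void printDependencies(GrammaticalStructure gs) {
        Collection<TypedDependency> deps = gs.typedDependenciesCollapsedTree();
        for (TypedDependency td : deps) {
            System.out.println(td.reln().toString() + "(" + td.gov().value() + ", " + td.dep().value() + ")");
        }
    }
}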

From source file:DependencyParse.java

License:Apache License

public static void main(String[] args) throws Exception {
    Properties props = StringUtils.argsToProperties(args);
    if (!props.containsKey("tokpath") || !props.containsKey("parentpath") || !props.containsKey("relpath")) {
        System.err.println(
                "usage: java DependencyParse [-tokenize] -tokpath <tokpath> -parentpath <parentpath> -relpath <relpath>");
        System.exit(1);
    }

    boolean tokenize = false;
    if (props.containsKey("tokenize")) {
        tokenize = true;
    }

    String tokPath = props.getProperty("tokpath");
    String parentPath = props.getProperty("parentpath");
    String relPath = props.getProperty("relpath");

    BufferedWriter tokWriter = new BufferedWriter(new FileWriter(tokPath));
    BufferedWriter parentWriter = new BufferedWriter(new FileWriter(parentPath));
    BufferedWriter relWriter = new BufferedWriter(new FileWriter(relPath));

    MaxentTagger tagger = new MaxentTagger(TAGGER_MODEL);
    DependencyParser parser = DependencyParser.loadFromModelFile(PARSER_MODEL);
    Scanner stdin = new Scanner(System.in);
    int count = 0;
    long start = System.currentTimeMillis();
    while (stdin.hasNextLine()) {
        String line = stdin.nextLine();
        List<HasWord> tokens = new ArrayList<>();
        if (tokenize) {
            PTBTokenizer<Word> tokenizer = new PTBTokenizer(new StringReader(line), new WordTokenFactory(), "");
            for (Word label; tokenizer.hasNext();) {
                tokens.add(tokenizer.next());
            }
        } else {
            for (String word : line.split(" ")) {
                tokens.add(new Word(word));
            }
        }

        List<TaggedWord> tagged = tagger.tagSentence(tokens);

        int len = tagged.size();
        Collection<TypedDependency> tdl = parser.predict(tagged).typedDependencies();
        int[] parents = new int[len];
        for (int i = 0; i < len; i++) {
            // if a node has a parent of -1 at the end of parsing, then the node
            // has no parent.
            parents[i] = -1;
        }

        String[] relns = new String[len];
        for (TypedDependency td : tdl) {
            // let root have index 0
            int child = td.dep().index();
            int parent = td.gov().index();
            relns[child - 1] = td.reln().toString();
            parents[child - 1] = parent;
        }

        // print tokens
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < len - 1; i++) {
            if (tokenize) {
                sb.append(PTBTokenizer.ptbToken2Text(tokens.get(i).word()));
            } else {
                sb.append(tokens.get(i).word());
            }
            sb.append(' ');
        }
        if (tokenize) {
            sb.append(PTBTokenizer.ptbToken2Text(tokens.get(len - 1).word()));
        } else {
            sb.append(tokens.get(len - 1).word());
        }
        sb.append('\n');
        tokWriter.write(sb.toString());

        // print parent pointers
        sb = new StringBuilder();
        for (int i = 0; i < len - 1; i++) {
            sb.append(parents[i]);
            sb.append(' ');
        }
        sb.append(parents[len - 1]);
        sb.append('\n');
        parentWriter.write(sb.toString());

        // print relations
        sb = new StringBuilder();
        for (int i = 0; i < len - 1; i++) {
            sb.append(relns[i]);
            sb.append(' ');
        }
        sb.append(relns[len - 1]);
        sb.append('\n');
        relWriter.write(sb.toString());

        count++;
        if (count % 1000 == 0) {
            double elapsed = (System.currentTimeMillis() - start) / 1000.0;
            System.err.printf("Parsed %d lines (%.2fs)\n", count, elapsed);
        }
    }

    long totalTimeMillis = System.currentTimeMillis() - start;
    System.err.printf("Done: %d lines in %.2fs (%.1fms per line)\n", count, totalTimeMillis / 1000.0,
            totalTimeMillis / (double) count);
    tokWriter.close();
    parentWriter.close();
    relWriter.close();
}
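
The three writers above are only closed at the very end of main, so they remain open if an exception interrupts the loop. A minimal sketch of the try-with-resources alternative (not from the original source; "tokens.txt" is a placeholder path):

import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;

public class WriterSketch {
    public static void main(String[] args) throws IOException {
        // The writer is flushed and closed automatically, even if an exception is thrown.
        try (BufferedWriter tokWriter = new BufferedWriter(new FileWriter("tokens.txt"))) {
            tokWriter.write("example line\n");
        }
    }
}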

From source file:DependencyParserDemo.java

public static void main(String[] args) {
    String modelPath = DependencyParser.DEFAULT_MODEL;
    String taggerPath = "edu/stanford/nlp/models/pos-tagger/english-left3words/english-left3words-distsim.tagger";

    for (int argIndex = 0; argIndex < args.length;) {
        switch (args[argIndex]) {
        case "-tagger":
            taggerPath = args[argIndex + 1];
            argIndex += 2;
            break;
        case "-model":
            modelPath = args[argIndex + 1];
            argIndex += 2;
            break;
        default:
            throw new RuntimeException("Unknown argument " + args[argIndex]);
        }
    }

    String text = "I can almost always tell when movies use fake dinosaurs.";

    MaxentTagger tagger = new MaxentTagger(taggerPath);
    DependencyParser parser = DependencyParser.loadFromModelFile(modelPath);

    DocumentPreprocessor tokenizer = new DocumentPreprocessor(new StringReader(text));
    for (List<HasWord> sentence : tokenizer) {
        List<TaggedWord> tagged = tagger.tagSentence(sentence);
        GrammaticalStructure gs = parser.predict(tagged);

        // Print typed dependencies
        System.err.println(gs);
    }
}

From source file:Dependency2.java

public static void main(String[] args) {
    String modelPath = DependencyParser.DEFAULT_MODEL;
    String taggerPath = "edu/stanford/nlp/models/pos-tagger/english-left3words/english-left3words-distsim.tagger";
    Scanner sc = new Scanner(System.in);

    readCsv();
    String text = "";
    text = sc.nextLine();
    if (multifeatures(text)) {
        System.out.println("Multiple features present");
        MaxentTagger tagger = new MaxentTagger(taggerPath);
        DependencyParser parser = DependencyParser.loadFromModelFile(modelPath);

        DocumentPreprocessor tokenizer = new DocumentPreprocessor(new StringReader(text));
        for (List<HasWord> sentence : tokenizer) {
            List<TaggedWord> tagged = tagger.tagSentence(sentence);
            GrammaticalStructure gs = parser.predict(tagged);

            Collection<TypedDependency> s = gs.typedDependenciesCollapsedTree();
            Map<Character, Pair<Character, Character>> map = new HashMap<Character, Pair<Character, Character>>();
            Object[] z = s.toArray();
            String rels[] = new String[z.length];
            String word1[] = new String[z.length];
            String word2[] = new String[z.length];
            int j = 0;
            String f, f1, f2;
            for (Object i : z) {
                //System.out.println(i);
                String temp = i.toString();
                System.out.println(temp);
                String pattern0 = "(.*)(?=\\()";
                String pattern1 = "(?<=\\()(.*?)(?=-)";
                String pattern2 = "(?<=,)(.*)(?=-)";
                Pattern r0 = Pattern.compile(pattern0);
                Pattern r1 = Pattern.compile(pattern1);
                Pattern r2 = Pattern.compile(pattern2);
                Matcher m0 = r0.matcher(temp);
                Matcher m1 = r1.matcher(temp);
                Matcher m2 = r2.matcher(temp);
                if (m0.find())
                    rels[j] = m0.group(0);
                if (m1.find())
                    word1[j] = m1.group(0);
                if (m2.find())
                    word2[j] = m2.group(0);
                if ("amod".equals(rels[j])) { // null-safe: rels[j] is null when the regex did not match
                    f1 = getFeature(word1[j]);
                    f2 = getFeature(word2[j]);
                    f = (f1 != null) ? f1 : f2;
                    if (f != null) {
                        System.out.println("Feature: " + f);

                    }

                }

                j++;
            }
            //System.out.println(Arrays.toString(rels));
        }
    } else {
        //sentence score is feature score
    }

}

From source file:RestServices.GetTargetedSentimentResource.java

/**
 * Retrieves representation of an instance of RestServices.GetTargetedSentimentResource
 * @return an instance of java.lang.String
 */
@GET
@Produces("application/json")
public String getJson(@QueryParam("data") String datas) {
    System.out.println("Working Directory = " + System.getProperty("user.dir"));
    JSONObject objOuter = new JSONObject();
    try {
        JSONObject inputJsonObject = new JSONObject(datas);
        String targetPhrase = inputJsonObject.getString("target");
        String contextText = inputJsonObject.getString("data");

        String modelPath = DependencyParser.DEFAULT_MODEL;

        MaxentTagger tagger = new MaxentTagger(GlobalVarsStore.taggerPath);
        DependencyParser parser = DependencyParser.loadFromModelFile(modelPath);

        DocumentPreprocessor tokenizer = new DocumentPreprocessor(new StringReader(contextText));
        ArrayList<TypedDependency> td = new ArrayList<TypedDependency>();
        for (List<HasWord> sentence : tokenizer) {
            List<TaggedWord> tagged = tagger.tagSentence(sentence);
            GrammaticalStructure gs = parser.predict(tagged);
            td.addAll(gs.typedDependencies());
        }

        BaselineAnalysisTools bat = new BaselineAnalysisTools();
        bat.prepareTools();
        String[] bag = clear(contextText).split("(?:(?:[^a-zA-Z]+')|(?:'[^a-zA-Z]+))|(?:[^a-zA-Z']+)");
        double sent = 0.0;
        ArrayList<Double> weights = new ArrayList<Double>();
        int pos = 0;
        int neg = 0;
        int neu = 0;
        double confidenceContainment = 0.0;
        double confidenceClassification = 0.0;
        for (int i = 0; i < bag.length; i++) {
            Double res = GlobalVarsStore.lex.testWord(bag[i]);
            if (res != null) {
                weights.add(res);
                confidenceContainment++;
            }
        }
        Double totalSent = 0.0;
        for (int i = 0; i < weights.size(); i++) {
            totalSent += weights.get(i);
            if (weights.get(i) > 0) {
                pos++;
            } else if (weights.get(i) < 0) {
                neg++;
            } else {
                neu++;
            }
        }
        if (weights.size() > 0) {
            sent = totalSent / weights.size();
            confidenceContainment = confidenceContainment / bag.length;
            if (sent > 0) {
                confidenceClassification = pos / (double) (pos + neg + neu);
            } else if (sent < 0) {
                confidenceClassification = neg / (double) (pos + neg + neu);
            } else {
                confidenceClassification = neu / (double) (pos + neg + neu);
            }
        }

        String[] targets;
        if (targetPhrase.contains(" ")) {
            targets = targetPhrase.split(" ");
        } else {
            targets = new String[1];
            targets[0] = targetPhrase;
        }
        double tempSent = 0;
        double tSent = 0;
        double tSize = 0;
        double sentMod = 1;
        for (int i = 0; i < targets.length; i++) {
            for (int j = 0; j < td.size(); j++) {
                String secondLevel = null;
                double secondSentMod = 1;
                if (targets[i].equals(td.get(j).gov().value())) {
                    if (td.get(j).reln().getShortName().equals("neg")) {
                        sentMod = -1;
                    } else {
                        if (td.get(j).reln().getShortName().equals("dobj")) {
                            secondLevel = td.get(j).dep().value();
                        }
                        tSize++;
                        // testWord may return null (see the null check on 'res' above); avoid unboxing NPE
                        Double depScore = GlobalVarsStore.lex.testWord(td.get(j).dep().value());
                        tempSent = (depScore != null) ? depScore : 0.0;
                    }
                } else if (targets[i].equals(td.get(j).dep().value())) {
                    if (td.get(j).reln().getShortName().equals("neg")) {
                        sentMod = -1;
                    } else {
                        if (td.get(j).reln().getShortName().equals("dobj")) {
                            secondLevel = td.get(j).gov().value();
                        }
                        tSize++;
                        Double govScore = GlobalVarsStore.lex.testWord(td.get(j).gov().value());
                        tempSent = (govScore != null) ? govScore : 0.0; // same null guard as above
                    }
                }
                if (secondLevel != null) {
                    for (int k = 0; k < td.size(); k++) {
                        if (!targets[i].equals(td.get(k).dep().value())
                                && secondLevel.equals(td.get(k).gov().value())) {
                            if (td.get(k).reln().getShortName().equals("neg")) {
                                secondSentMod = -1;
                            }
                        } else if (!targets[i].equals(td.get(k).gov().value())
                                && secondLevel.equals(td.get(k).dep().value())) {
                            if (td.get(k).reln().getShortName().equals("neg")) {
                                secondSentMod = -1;
                            }
                        }
                    }
                }
                tSent += tempSent * secondSentMod;
            }
        }
        if (tSize > 0) {
            tSent /= tSize;
        }
        if (tSent == 0) {
            tSent = sent * sentMod;
        }

        objOuter.put("SOS", tSent);
        objOuter.put("CONFIDENCE",
                (confidenceClassification * (1 - GlobalVarsStore.containmentConfidenceWeight))
                        + (confidenceContainment * GlobalVarsStore.containmentConfidenceWeight));
    } catch (JSONException ex) {
        Logger.getLogger(GetBatchSentimentResource.class.getName()).log(Level.SEVERE, null, ex);
    }
    return objOuter.toString();
}
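
The CONFIDENCE value above blends two scores with a single weight. A small worked sketch of that arithmetic (the numbers are assumptions for illustration; containmentConfidenceWeight stands in for GlobalVarsStore.containmentConfidenceWeight):

public class ConfidenceSketch {
    public static void main(String[] args) {
        double confidenceClassification = 0.8;    // fraction of lexicon words agreeing with the overall sign
        double confidenceContainment = 0.5;       // fraction of tokens found in the lexicon
        double containmentConfidenceWeight = 0.3; // assumed weight

        double confidence = confidenceClassification * (1 - containmentConfidenceWeight)
                + confidenceContainment * containmentConfidenceWeight;
        System.out.printf("%.2f%n", confidence); // 0.80*0.70 + 0.50*0.30 = 0.71
    }
}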

From source file:RestServices.Scoresservice.java

public static void main(String[] args) throws ParseException {
    /* String scenario="transportation";
     GlobalVarsStore.lexicon="wordnet";
     int objective=6;
             
     //Reading source file
     CrawlersConnector ccn=new CrawlersConnector();
     ArrayList<CustomStatus> scenarioTweets = null;
     try {
    scenarioTweets = ccn.readScenario(scenario);
     } catch (IOException ex) {
    System.out.println("<xml><result>Error: "+ex.getMessage()+"</result></xml>");
     } catch (JSONException ex) {
    System.out.println("<xml><result>Error: "+ex.getMessage()+"</result></xml>");
     }
     System.out.println("Parsed " + scenarioTweets.size() + " documents.");
     ArrayList<String> objectiveNames=new ArrayList<String>();
     objectiveNames.add("Change in Level of Service");
     objectiveNames.add("% change of Accidents cost");
     objectiveNames.add("% change of Air pollution (external) cost");
     objectiveNames.add("% change of Noise (external) cost");
     objectiveNames.add("User convenience in using the RP system");
     objectiveNames.add("Availability of alternative routes and modes");
            
     //Calculating SOF
     DecimalFormat df = new DecimalFormat("#.####");
     ArrayList<String> keys=null;
     if(scenario.equalsIgnoreCase("transportation")){
    if(objective==1){keys=ccn.transportKeywords1;}
    else if(objective==2){keys=ccn.transportKeywords2;}
    else if(objective==3){keys=ccn.transportKeywords3;}
    else if(objective==4){keys=ccn.transportKeywords4;}
    else if(objective==5){keys=ccn.transportKeywords5;}
    else if(objective==6){keys=ccn.transportKeywords6;}
     }else if(scenario.equalsIgnoreCase("biofuel")){
    if(objective==1){keys=ccn.biofuelKeywords1;}
    else if(objective==2){keys=ccn.biofuelKeywords2;}
    else if(objective==3){keys=ccn.biofuelKeywords3;}
    else if(objective==4){keys=ccn.biofuelKeywords4;}
     }
     System.out.println("Calculating Score Of Frequency...");
     TopicAnalysisTools tat = new TopicAnalysisTools();
     ArrayList<Double> sofs = new ArrayList<Double>();
     for (int i = 0; i < keys.size(); i++) {
    sofs.add(tat.countFrequency(scenarioTweets, keys.get(i)));
     }
     Double sof=0.0;
     for (int i = 0; i < sofs.size(); i++) {
    sof+=sofs.get(i);
     }
     if(sofs.size()>0) sof=sof/sofs.size();
     System.out.println("Score of Frequency for objective '" + objectiveNames.get(objective-1) + "' is " + df.format(sof));
            
     //Calculating SOS
     System.out.println("Calculating Score Of Sentiment...");
     BaselineAnalysisTools bat = new BaselineAnalysisTools();
     bat.prepareTools();
     ArrayList<CustomStatus> tweets=null;
     Double sos=0.0;
     ArrayList<Double> soses = new ArrayList<Double>();
     for (int i = 0; i < keys.size(); i++) {
    try {
        tweets=ccn.readKeyword(keys.get(i));
        soses.add(bat.SentiWordNetMeanAnalysis(tweets,keys.get(i)));
    } catch (IOException ex) { ex.printStackTrace(); soses.add(0.0);
    } catch (JSONException ex) { ex.printStackTrace();soses.add(0.0);
    }
     }
     for (int i = 0; i < soses.size(); i++) {
    sos+=soses.get(i);
     }
     if(soses.size()>0) sos=sos/soses.size();
     System.out.println("Score of Sentiment for objective '" + objectiveNames.get(objective-1) + "' is " + df.format(sos));*/

    //String tweetDate="Thu Jul 23 00:00:00 CEST 2015";
    //DateFormat df = new SimpleDateFormat("EEE MMM dd kk:mm:ss z yyyy", Locale.ENGLISH);
    //Date result =  df.parse(tweetDate);
    //tweetDate = (result.getYear()+1900)+"-"+(result.getMonth()+1)+"-"+result.getDate();
    //System.out.println(tweetDate);

    String modelPath = DependencyParser.DEFAULT_MODEL;
    String taggerPath = "C:\\Users\\ViP\\Copy\\NTUA\\Code\\ConsensusPublicOpinion\\models\\english-left3words-distsim.tagger";

    for (int argIndex = 0; argIndex < args.length;) {
        switch (args[argIndex]) {
        case "-tagger":
            taggerPath = args[argIndex + 1];
            argIndex += 2;
            break;
        case "-model":
            modelPath = args[argIndex + 1];
            argIndex += 2;
            break;
        default:
            throw new RuntimeException("Unknown argument " + args[argIndex]);
        }
    }

    String text = "I love apples and do not hate oranges";

    MaxentTagger tagger = new MaxentTagger(taggerPath);
    DependencyParser parser = DependencyParser.loadFromModelFile(modelPath);

    DocumentPreprocessor tokenizer = new DocumentPreprocessor(new StringReader(text));
    for (List<HasWord> sentence : tokenizer) {
        List<TaggedWord> tagged = tagger.tagSentence(sentence);
        GrammaticalStructure gs = parser.predict(tagged);

        // Print typed dependencies
        System.out.println(gs);
        ArrayList<TypedDependency> cd = (ArrayList<TypedDependency>) gs.typedDependencies();
        for (int i = 0; i < cd.size(); i++) {
            System.out.println(String.format("%1$" + 10 + "s", cd.get(i).gov().value()) + "\t"
                    + String.format("%1$" + 10 + "s", cd.get(i).dep().value()) + "\t"
                    + cd.get(i).reln().getShortName() + "\t" + cd.get(i).reln().getLongName());
        }
    }
}