List of usage examples for edu.stanford.nlp.trees Tree pennPrint
public void pennPrint()
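pennPrint() writes the tree in indented Penn Treebank bracket notation to System.out (overloads taking a PrintWriter or PrintStream also exist). A minimal self-contained sketch, assuming the stock englishPCFG model shipped with the parser (class and variable names here are illustrative):

import edu.stanford.nlp.parser.lexparser.LexicalizedParser;
import edu.stanford.nlp.trees.Tree;

public class PennPrintDemo {
    public static void main(String[] args) {
        LexicalizedParser lp =
                LexicalizedParser.loadModel("edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz");
        Tree tree = lp.parse("The quick brown fox jumps over the lazy dog.");
        tree.pennPrint();                     // indented bracket notation to System.out
        String asString = tree.pennString();  // the same rendering as a String
        System.out.println(asString);
    }
}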
From source file:com.parse.Dependency.java
public static void main(String[] args) {
    LexicalizedParser lp = LexicalizedParser.loadModel("edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz");
    lp.setOptionFlags(new String[] { "-maxLength", "80", "-retainTmpSubcategories" });

    String[] sent = { "This", "is", "an", "easy", "sentence", "." };
    List<CoreLabel> rawWords = Sentence.toCoreLabelList(sent);
    Tree parse = lp.apply(rawWords);
    parse.pennPrint();
    System.out.println();

    TreebankLanguagePack tlp = new PennTreebankLanguagePack();
    GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
    GrammaticalStructure gs = gsf.newGrammaticalStructure(parse);
    List<TypedDependency> tdl = gs.typedDependenciesCCprocessed();
    System.out.println(tdl);
    // System.out.println();
    // TreePrint tp = new TreePrint("penn,typedDependenciesCollapsed");
    // tp.printTree(parse);

    String sentence = "which movies were directed by Christopher Nolan";
    Tree t2 = lp.parse(sentence);
    System.out.println(t2.firstChild().toString());
    gs = gsf.newGrammaticalStructure(t2);
    tdl = gs.typedDependenciesCCprocessed();
    System.out.println(tdl);
    System.out.println(tdl.get(0).dep().nodeString());
}
From source file:englishparser.EnglishParser.java
private static void display(Tree t) {
    t.pennPrint();
    for (Tree child : t.children()) {
        if (!child.isLeaf()) {
            display(child);
        }
    }
}
From source file:englishparser.EnglishParser.java
/**
 * demoAPI demonstrates other ways of calling the parser with already
 * tokenized text, or in some cases, raw text that needs to be tokenized as
 * a single sentence. Output is handled with a TreePrint object. Note that
 * the options used when creating the TreePrint can determine what results
 * to print out. Once again, one can capture the output by passing a
 * PrintWriter to TreePrint.printTree.
 */
public static void demoAPI(LexicalizedParser lp) {
    // This option shows parsing a list of correctly tokenized words
    String[] sent = { "This", "is", "an", "easy", "sentence", "." };
    List<CoreLabel> rawWords = Sentence.toCoreLabelList(sent);
    Tree parse = lp.apply(rawWords);
    parse.pennPrint();
    System.out.println();

    // This option shows loading and using an explicit tokenizer
    String sent2 = "This is another sentence.";
    TokenizerFactory<CoreLabel> tokenizerFactory = PTBTokenizer.factory(new CoreLabelTokenFactory(), "");
    Tokenizer<CoreLabel> tok = tokenizerFactory.getTokenizer(new StringReader(sent2));
    List<CoreLabel> rawWords2 = tok.tokenize();
    parse = lp.apply(rawWords2);

    TreebankLanguagePack tlp = new PennTreebankLanguagePack();
    GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
    GrammaticalStructure gs = gsf.newGrammaticalStructure(parse);
    List<TypedDependency> tdl = gs.typedDependenciesCCprocessed();
    System.out.println(tdl);
    System.out.println();

    // You can also use a TreePrint object to print trees and dependencies
    TreePrint tp = new TreePrint("penn,typedDependenciesCollapsed");
    tp.printTree(parse);
}
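The Javadoc above mentions that output can be captured by passing a PrintWriter to TreePrint.printTree, but the method body never shows that variant. A minimal sketch, assuming the same parse and TreePrint configuration as in demoAPI (buffer and pw are illustrative names; requires java.io.PrintWriter and java.io.StringWriter):

// Continues from demoAPI: capture the output in a String instead of
// letting TreePrint write to System.out.
StringWriter buffer = new StringWriter();
PrintWriter pw = new PrintWriter(buffer);
TreePrint tp = new TreePrint("penn,typedDependenciesCollapsed");
tp.printTree(parse, pw);   // same formats as above, written to pw
parse.pennPrint(pw);       // Tree.pennPrint also accepts a PrintWriter
pw.flush();
String captured = buffer.toString();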
From source file:org.apdplat.qa.questiontypeanalysis.patternbased.MainPartExtracter.java
License:Open Source License
/**
 * Extracts the main part (subject / predicate / object) of a question.
 *
 * @param question the question text
 * @param words the question tokenized into words (HasWord)
 * @return the question structure, or null if no grammatical structure could be built
 */
public QuestionStructure getMainPart(String question, List<edu.stanford.nlp.ling.Word> words) {
    QuestionStructure questionStructure = new QuestionStructure();
    questionStructure.setQuestion(question);

    Tree tree = LP.apply(words);
    LOG.info("parse tree: ");
    tree.pennPrint();
    questionStructure.setTree(tree);

    GrammaticalStructure gs = GSF.newGrammaticalStructure(tree);
    if (gs == null) {
        return null;
    }
    // typed dependencies (CC-processed)
    Collection<TypedDependency> tdls = gs.typedDependenciesCCprocessed(true);
    questionStructure.setTdls(tdls);
    Map<String, String> map = new HashMap<>();
    String top = null;
    String root = null;
    LOG.info("typed dependencies: ");
    // collect the dependency strings and index governor -> dependents
    List<String> dependencies = new ArrayList<>();
    for (TypedDependency tdl : tdls) {
        String item = tdl.toString();
        dependencies.add(item);
        LOG.info("\t" + item);
        if (item.startsWith("top")) {
            top = item;
        }
        if (item.startsWith("root")) {
            root = item;
        }
        int start = item.indexOf("(");
        int end = item.lastIndexOf(")");
        item = item.substring(start + 1, end);
        String[] attr = item.split(",");
        String k = attr[0].trim();
        String v = attr[1].trim();
        String value = map.get(k);
        if (value == null) {
            map.put(k, v);
        } else {
            // value += ":";
            value += v;
            map.put(k, value);
        }
    }
    questionStructure.setDependencies(dependencies);
    String mainPartForTop = null;
    String mainPartForRoot = null;
    if (top != null) {
        mainPartForTop = topPattern(top, map);
    }
    if (root != null) {
        mainPartForRoot = rootPattern(root, map);
    }
    questionStructure.setMainPartForTop(mainPartForTop);
    questionStructure.setMainPartForRoot(mainPartForRoot);
    if (questionStructure.getMainPart() == null) {
        LOG.error("failed to extract main part of question: " + question);
    } else {
        LOG.info("main part: " + questionStructure.getMainPart());
    }
    return questionStructure;
}
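A hypothetical call sketch for getMainPart (how MainPartExtracter is constructed and how its parser and grammatical-structure factory are initialised is project-specific and assumed here; the question text and whitespace tokenization are only illustrations):

// Sketch only: assumes MainPartExtracter can be instantiated directly and that
// its parser (LP) and grammatical-structure factory (GSF) are set up internally.
MainPartExtracter extracter = new MainPartExtracter();
String question = "Who wrote Hamlet ?";
List<edu.stanford.nlp.ling.Word> words = new ArrayList<>();
for (String token : question.split(" ")) {
    words.add(new edu.stanford.nlp.ling.Word(token));
}
QuestionStructure qs = extracter.getMainPart(question, words);
if (qs != null) {
    System.out.println(qs.getMainPart()); // subject-predicate(-object) core of the question
}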
From source file:org.linuxkernel.proof.digger.questiontypeanalysis.patternbased.MainPartExtracter.java
License:Open Source License
/**
 * Extracts the main part (subject / predicate / object) of a question.
 *
 * @param question the question text
 * @param words the question tokenized into words (HasWord)
 * @return the question structure, or null if no grammatical structure could be built
 */
public QuestionStructure getMainPart(String question, List<Word> words) {
    QuestionStructure questionStructure = new QuestionStructure();
    questionStructure.setQuestion(question);

    Tree tree = lp.apply(words);
    LOG.info("parse tree: ");
    tree.pennPrint();
    questionStructure.setTree(tree);

    GrammaticalStructure gs = gsf.newGrammaticalStructure(tree);
    if (gs == null) {
        return null;
    }
    // typed dependencies (CC-processed)
    Collection<TypedDependency> tdls = gs.typedDependenciesCCprocessed(true);
    questionStructure.setTdls(tdls);
    Map<String, String> map = new HashMap<>();
    String top = null;
    String root = null;
    LOG.info("typed dependencies: ");
    // collect the dependency strings and index governor -> dependents
    List<String> dependencies = new ArrayList<>();
    for (TypedDependency tdl : tdls) {
        String item = tdl.toString();
        dependencies.add(item);
        LOG.info("\t" + item);
        if (item.startsWith("top")) {
            top = item;
        }
        if (item.startsWith("root")) {
            root = item;
        }
        int start = item.indexOf("(");
        int end = item.lastIndexOf(")");
        item = item.substring(start + 1, end);
        String[] attr = item.split(",");
        String k = attr[0].trim();
        String v = attr[1].trim();
        String value = map.get(k);
        if (value == null) {
            map.put(k, v);
        } else {
            // value += ":";
            value += v;
            map.put(k, value);
        }
    }
    questionStructure.setDependencies(dependencies);
    String mainPartForTop = null;
    String mainPartForRoot = null;
    if (top != null) {
        mainPartForTop = topPattern(top, map);
    }
    if (root != null) {
        mainPartForRoot = rootPattern(root, map);
    }
    questionStructure.setMainPartForTop(mainPartForTop);
    questionStructure.setMainPartForRoot(mainPartForRoot);
    if (questionStructure.getMainPart() == null) {
        LOG.error("failed to extract main part of question: " + question);
    } else {
        LOG.info("main part: " + questionStructure.getMainPart());
    }
    return questionStructure;
}
From source file:qmul.util.parse.StanfordParser.java
License:Open Source License
public static void main(String a[]) {
    StanfordParser pw = new StanfordParser();
    Tree t = pw.parse(
            "this sentence is false. this isn't bad..... I am a good boy. You can't kill my mother. I won't let you. You think this is a game? I think I'll have to kill you first.");
    t.pennPrint();
}
From source file:qmul.util.treekernel.TreeKernel.java
License:Open Source License
/**
 * Main method for testing
 *
 * @param args
 */
public static void main(String[] args) {
    StanfordParser pw = new StanfordParser();
    Tree t1 = pw.parse("I loves Mary.");
    Tree t2 = pw.parse("John hates you");
    // Tree t1 = pw.parse("John.");
    // Tree t2 = pw.parse("John is the nicest person I have ever met and I dare you to say anything else, as time will show");
    t1.pennPrint();
    t2.pennPrint();
    setIncludeWords(false);
    setKernelNormalisation(true);
    System.out.println(TreeKernel.resetAndCompute(t1, t2, 0));
    System.out.println(TreeKernel.resetAndCompute(t1, t2, 1));
    System.out.println(TreeKernel.resetAndCompute(t1, t2, 2));
    System.out.println(TreeKernel.resetAndCompute(t2, t1, 0));
    System.out.println(TreeKernel.resetAndCompute(t2, t1, 1));
    System.out.println(TreeKernel.resetAndCompute(t2, t1, 2));
    addBannedProduction("NP:PRP");
    addBannedProduction("NP:NNP");
    System.out.println(TreeKernel.resetAndCompute(t1, t2, 0));
    System.out.println(TreeKernel.resetAndCompute(t1, t2, 1));
    System.out.println(TreeKernel.resetAndCompute(t1, t2, 2));
    System.out.println(TreeKernel.resetAndCompute(t2, t1, 0));
    System.out.println(TreeKernel.resetAndCompute(t2, t1, 1));
    System.out.println(TreeKernel.resetAndCompute(t2, t1, 2));
}
From source file:wtute.parser.EssayParser.java
private String parse(Tree toParse) {
    GrammaticalStructure gs = gsf.newGrammaticalStructure(toParse);
    List<TypedDependency> tdl = gs.typedDependenciesCCprocessed();
    toParse.pennPrint();
    System.out.println(tdl);
    return toParse.pennString() + "\n" + tdl + "\n" + toParse.taggedYield() + "\n\n";
}