List of usage examples for edu.stanford.nlp.trees TypedDependency toString
Method under discussion: `@Override public String toString()`
From source file:ie.pars.bnc.preprocess.ProcessNLP.java
License:Open Source License
public static void handleDependencies(Tree tree, ParserGrammar parser, String arg, OutputStream outStream, String commandArgs) throws IOException { GrammaticalStructure gs = parser.getTLPParams().getGrammaticalStructure(tree, parser.treebankLanguagePack().punctuationWordRejectFilter(), parser.getTLPParams().typedDependencyHeadFinder()); Collection<TypedDependency> deps = gs.typedDependenciesCollapsedTree(); // SemanticGraph sg = new SemanticGraph(deps); OutputStreamWriter osw = new OutputStreamWriter(outStream, "utf-8"); for (TypedDependency dep : deps) { String t = dep.dep().word() + "\t" + dep.dep().lemma() + "\t" + dep.dep().tag() + "\t"; System.out.println(t);//w w w . j av a 2 s . c o m osw.write(dep.toString()); osw.write("\n"); } osw.flush(); }
From source file:knowledgeextraction.EntityAttributeGraph.java
static HashMap<String, HashMap<String, List<String>>> GetTree(Object[] list) { HashMap<String, HashMap<String, List<String>>> tree = new HashMap<>(); TypedDependency typedDependency; for (Object object : list) { typedDependency = (TypedDependency) object; String line = typedDependency.toString(); int i = 0; char[] str = line.toCharArray(); String reln = "", first = "", second = ""; for (; i < str.length; i++) { if (str[i] == '(') break; reln += str[i];//from w w w. jav a 2 s . c o m } for (i++; i < str.length; i++) { if (str[i] == '-') break; first += str[i]; } for (i++; i < str.length; i++) { if (str[i] == '-') break; second += str[i]; } if (tree.containsKey(first) == false) { tree.put(first, new HashMap<String, List<String>>()); } if (tree.get(first).containsKey(reln) == false) { tree.get(first).put(reln, new ArrayList<String>()); } tree.get(first).get(reln).add(second); } return tree; }
From source file:org.apdplat.qa.questiontypeanalysis.patternbased.MainPartExtracter.java
License:Open Source License
/** * ???// w w w.j a va 2 s .c o m * * @param question * @param words HasWord * @return */ public QuestionStructure getMainPart(String question, List<edu.stanford.nlp.ling.Word> words) { QuestionStructure questionStructure = new QuestionStructure(); questionStructure.setQuestion(question); Tree tree = LP.apply(words); LOG.info("?: "); tree.pennPrint(); questionStructure.setTree(tree); GrammaticalStructure gs = GSF.newGrammaticalStructure(tree); if (gs == null) { return null; } //?? Collection<TypedDependency> tdls = gs.typedDependenciesCCprocessed(true); questionStructure.setTdls(tdls); Map<String, String> map = new HashMap<>(); String top = null; String root = null; LOG.info("???"); //? List<String> dependencies = new ArrayList<>(); for (TypedDependency tdl : tdls) { String item = tdl.toString(); dependencies.add(item); LOG.info("\t" + item); if (item.startsWith("top")) { top = item; } if (item.startsWith("root")) { root = item; } int start = item.indexOf("("); int end = item.lastIndexOf(")"); item = item.substring(start + 1, end); String[] attr = item.split(","); String k = attr[0].trim(); String v = attr[1].trim(); String value = map.get(k); if (value == null) { map.put(k, v); } else { // value += ":"; value += v; map.put(k, value); } } questionStructure.setDependencies(dependencies); String mainPartForTop = null; String mainPartForRoot = null; if (top != null) { mainPartForTop = topPattern(top, map); } if (root != null) { mainPartForRoot = rootPattern(root, map); } questionStructure.setMainPartForTop(mainPartForTop); questionStructure.setMainPartForRoot(mainPartForRoot); if (questionStructure.getMainPart() == null) { LOG.error("" + question); } else { LOG.info("" + questionStructure.getMainPart()); } return questionStructure; }
From source file:org.linuxkernel.proof.digger.questiontypeanalysis.patternbased.MainPartExtracter.java
License:Open Source License
/** * ???/* w w w .ja v a2 s. co m*/ * * @param question * @param words HashWord * @return */ public QuestionStructure getMainPart(String question, List<Word> words) { QuestionStructure questionStructure = new QuestionStructure(); questionStructure.setQuestion(question); Tree tree = lp.apply(words); LOG.info("?: "); tree.pennPrint(); questionStructure.setTree(tree); GrammaticalStructure gs = gsf.newGrammaticalStructure(tree); if (gs == null) { return null; } //?? Collection<TypedDependency> tdls = gs.typedDependenciesCCprocessed(true); questionStructure.setTdls(tdls); Map<String, String> map = new HashMap<>(); String top = null; String root = null; LOG.info("???"); //? List<String> dependencies = new ArrayList<>(); for (TypedDependency tdl : tdls) { String item = tdl.toString(); dependencies.add(item); LOG.info("\t" + item); if (item.startsWith("top")) { top = item; } if (item.startsWith("root")) { root = item; } int start = item.indexOf("("); int end = item.lastIndexOf(")"); item = item.substring(start + 1, end); String[] attr = item.split(","); String k = attr[0].trim(); String v = attr[1].trim(); String value = map.get(k); if (value == null) { map.put(k, v); } else { // value += ":"; value += v; map.put(k, value); } } questionStructure.setDependencies(dependencies); String mainPartForTop = null; String mainPartForRoot = null; if (top != null) { mainPartForTop = topPattern(top, map); } if (root != null) { mainPartForRoot = rootPattern(root, map); } questionStructure.setMainPartForTop(mainPartForTop); questionStructure.setMainPartForRoot(mainPartForRoot); if (questionStructure.getMainPart() == null) { LOG.error("" + question); } else { LOG.info("" + questionStructure.getMainPart()); } return questionStructure; }
From source file:sentenceParser.sentenceParser.java
/** * * @param input//www. j av a 2 s . c o m * @return */ public List<String> sentenceDeparser(String input) { //String sent2 = ("animals were divided into three groups: 1) rats with alloxan-induced diabetes; 2) diabetic rats treated with isophane insulin (2 iu/day); and 3) matching controls"); // Use the default tokenizer for this TreebankLanguagePack Tokenizer<? extends HasWord> toke = tlp.getTokenizerFactory().getTokenizer(new StringReader(input)); List<? extends HasWord> sentence = toke.tokenize(); Tree parseTree = lp.parse(sentence); GrammaticalStructure gs = gsf.newGrammaticalStructure(parseTree); List<TypedDependency> tdl = gs.typedDependenciesCCprocessed(); List<String> val = new ArrayList<String>(); ; for (TypedDependency tmp : tdl) { val.add(tmp.toString()); } return val; }