List of usage examples for org.apache.lucene.util BytesRefHash get
public BytesRef get(int bytesID, BytesRef ref)
From source file: com.sindicetech.siren.search.node.NodeConstantScoreAutoRewrite.java
License: Open Source License
@Override public Query rewrite(final IndexReader reader, final MultiNodeTermQuery query) throws IOException { // Disabled cutoffs final int docCountCutoff = Integer.MAX_VALUE; final int termCountLimit = Integer.MAX_VALUE; final CutOffTermCollector col = new CutOffTermCollector(docCountCutoff, termCountLimit); this.collectTerms(reader, query, col); final int size = col.pendingTerms.size(); if (col.hasCutOff) { return MultiNodeTermQuery.CONSTANT_SCORE_FILTER_REWRITE.rewrite(reader, query); } else if (size == 0) { return this.getTopLevelQuery(query); } else {//from w w w .j a v a 2 s . com final NodeBooleanQuery bq = this.getTopLevelQuery(query); final BytesRefHash pendingTerms = col.pendingTerms; final int sort[] = pendingTerms.sort(col.termsEnum.getComparator()); for (int i = 0; i < size; i++) { final int pos = sort[i]; // docFreq is not used for constant score here, we pass 1 // to explicitely set a fake value, so it's not calculated this.addClause(bq, new Term(query.field, pendingTerms.get(pos, new BytesRef())), 1, 1.0f, col.array.termState[pos]); } // Strip scores final NodeQuery result = new NodeConstantScoreQuery(bq); result.setBoost(query.getBoost()); return result; } }
From source file: de.unihildesheim.iw.lucene.util.BytesRefUtils.java
License: Open Source License
/** * Convert a {@link BytesRefHash} to a {@link BytesRefArray}. * * @param bh Hash/*from w w w . jav a2 s . c o m*/ * @return Array */ public static BytesRefArray hashToArray(@NotNull final BytesRefHash bh) { final BytesRefArray ba = new BytesRefArray(Counter.newCounter(false)); final BytesRef br = new BytesRef(); for (int i = bh.size() - 1; i >= 0; i--) { ba.append(bh.get(i, br)); } return ba; }
From source file: de.unihildesheim.iw.lucene.util.BytesRefUtils.java
License: Open Source License
/**
 * Decodes the contents of a {@link BytesRefHash} into a set of UTF-8 strings.
 *
 * @param bh source hash, may be null
 * @return set of decoded strings, or {@code null} iff {@code bh} was null
 */
@Nullable
@Contract("null -> null; !null -> !null")
public static Set<String> hashToSet(@Nullable final BytesRefHash bh) {
    if (bh == null) {
        return null;
    }
    final Set<String> result = new HashSet<>(bh.size());
    final BytesRef spare = new BytesRef();
    for (int id = bh.size() - 1; id >= 0; id--) {
        result.add(bh.get(id, spare).utf8ToString());
    }
    return result;
}
From source file: org.elasticsearch.common.util.BytesRefHashTests.java
License: Apache License
private void assertAllIn(Set<String> strings, BytesRefHash hash) { BytesRef ref = new BytesRef(); BytesRef scratch = new BytesRef(); long count = hash.size(); for (String string : strings) { ref.copyChars(string);/*from w w w . j a va 2 s . c o m*/ long key = hash.add(ref); // add again to check duplicates assertEquals(string, hash.get((-key) - 1, scratch).utf8ToString()); assertEquals(count, hash.size()); assertTrue("key: " + key + " count: " + count + " string: " + string, key < count); } }
From source file: org.meresco.lucene.search.MerescoVector.java
License: Open Source License
public void printVector(BytesRefHash hash) { Iterator iter = entries.iterator(); while (iter.hasNext()) { iter.advance();// w w w .j a v a 2 s. c o m if (iter.value() > 0) { BytesRef b = new BytesRef(); hash.get(iter.key(), b); System.out.print(b.utf8ToString() + ":" + iter.value() + " "); } } System.out.println(); }
From source file: org.sindice.siren.search.node.NodeConstantScoreAutoRewrite.java
License: Apache License
@Override public Query rewrite(final IndexReader reader, final MultiNodeTermQuery query) throws IOException { // Disabled cutoffs final int docCountCutoff = Integer.MAX_VALUE; final int termCountLimit = Integer.MAX_VALUE; final CutOffTermCollector col = new CutOffTermCollector(docCountCutoff, termCountLimit); this.collectTerms(reader, query, col); final int size = col.pendingTerms.size(); if (col.hasCutOff) { return MultiNodeTermQuery.CONSTANT_SCORE_FILTER_REWRITE.rewrite(reader, query); } else if (size == 0) { return this.getTopLevelQuery(); } else {//from w w w . j ava2 s .co m final NodeBooleanQuery bq = this.getTopLevelQuery(); final BytesRefHash pendingTerms = col.pendingTerms; final int sort[] = pendingTerms.sort(col.termsEnum.getComparator()); for (int i = 0; i < size; i++) { final int pos = sort[i]; // docFreq is not used for constant score here, we pass 1 // to explicitely set a fake value, so it's not calculated this.addClause(bq, new Term(query.field, pendingTerms.get(pos, new BytesRef())), 1, 1.0f, col.array.termState[pos]); } // Strip scores final NodeQuery result = new NodeConstantScoreQuery(bq); result.setBoost(query.getBoost()); // set level and node constraints result.setLevelConstraint(query.getLevelConstraint()); result.setNodeConstraint(query.getNodeConstraint()[0], query.getNodeConstraint()[1]); // set ancestor result.setAncestorPointer(query.ancestor); return result; } }
From source file: solutions.siren.join.action.terms.collector.BytesRefTermsSet.java
License: Open Source License
@Override protected void addAll(TermsSet terms) { if (!(terms instanceof BytesRefTermsSet)) { throw new UnsupportedOperationException("Invalid type: BytesRefTermsSet expected."); }//w ww .j a v a2s .com BytesRefHash input = ((BytesRefTermsSet) terms).set; BytesRef reusable = new BytesRef(); for (int i = 0; i < input.size(); i++) { input.get(i, reusable); set.add(reusable); } }