List of usage examples for org.apache.lucene.analysis TokenStream getAttribute
public final <T extends Attribute> T getAttribute(Class<T> attClass)
The caller must pass in a Class<? extends Attribute> value. When the stream does not contain the requested attribute, older Lucene versions throw an IllegalArgumentException while newer ones return null; use addAttribute(Class) instead when the attribute should be created on demand.
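Before the per-project examples, here is a minimal, self-contained sketch of the usual consume loop (assuming a recent Lucene, roughly 5.x or later, where StandardAnalyzer has a no-argument constructor and TokenStream is AutoCloseable; the field name and input text are placeholders):

import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class GetAttributeExample {
    public static void main(String[] args) throws IOException {
        try (Analyzer analyzer = new StandardAnalyzer();
                TokenStream stream = analyzer.tokenStream("body", "The quick brown fox")) {
            // getAttribute() only looks up an attribute the tokenizer already
            // registered; addAttribute() would create it on demand instead.
            CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
            stream.reset(); // mandatory before the first incrementToken()
            while (stream.incrementToken()) {
                System.out.println(termAtt.toString());
            }
            stream.end(); // records end-of-stream state (e.g. final offset)
        }
    }
}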
From source file:org.eclipse.recommenders.test.codesearch.rcp.indexer.analyzer.AnalysisTestBase.java
License:Open Source License
private List<String> parseKeywords(Analyzer analyzer, String field, String keywords) {
    List<String> result = Lists.newArrayList();
    TokenStream stream = analyzer.tokenStream(field, new StringReader(keywords));
    try {
        while (stream.incrementToken()) {
            // TermAttribute.term() returns the term text (pre-Lucene-4 API)
            result.add(stream.getAttribute(TermAttribute.class).term());
        }
        stream.close();
    } catch (IOException e) {
        // not thrown b/c we're using a string reader...
    }
    return result;
}
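Note: TermAttribute and its term() method are the old token attribute API; TermAttribute was deprecated in favor of CharTermAttribute (whose toString() yields the term text) in Lucene 3.1 and removed in Lucene 4.0. The remaining examples on this page use CharTermAttribute.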
From source file:org.elasticsearch.index.analysis.CustomWBAnalysisTests.java
License:Apache License
public static void assertSimpleTSOutput(TokenStream stream, String[] expected) throws IOException {
    stream.reset();
    CharTermAttribute termAttr = stream.getAttribute(CharTermAttribute.class);
    assertThat(termAttr, notNullValue());
    int i = 0;
    while (stream.incrementToken()) {
        assertThat(expected.length, greaterThan(i));
        assertThat("expected different term at index " + i, expected[i++], equalTo(termAttr.toString()));
    }
    assertThat("not all tokens produced", i, equalTo(expected.length));
}
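A hypothetical call site for this helper, with placeholder analyzer, field name, input, and expected terms (the helper performs the reset() itself):

assertSimpleTSOutput(analyzer.tokenStream("field", new StringReader("quick brown")), new String[] { "quick", "brown" });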
From source file:org.elasticsearch.index.analysis.morphology.SimpleMorphologyAnalysisTests.java
License:Apache License
public static void assertSimpleTSOutput(TokenStream stream, String[] expected) throws IOException {
    stream.reset();
    CharTermAttribute termAttr = stream.getAttribute(CharTermAttribute.class);
    Assert.assertNotNull(termAttr);
    int i = 0;
    while (stream.incrementToken()) {
        Assert.assertTrue("got extra term: " + termAttr.toString(), i < expected.length);
        Assert.assertEquals("expected different term at index " + i, termAttr.toString(), expected[i]);
        i++;
    }
    Assert.assertEquals("not all tokens produced", i, expected.length);
}
From source file:org.elasticsearch.index.analysis.PaodingAnalysisTests.java
License:Apache License
public List getname(String param) throws IOException {
    System.setProperty("paoding.dic.home.config-first",
            "D:/Projects/Java Related/ElasticSearch/plugins/elasticsearch-analysis-paoding/config/paoding/dic");
    // Create the (Paoding) analyzer and tokenize the input
    Analyzer ika = new PaodingAnalyzer();
    List<String> keys = new ArrayList<String>();
    TokenStream ts = null;
    try {
        Reader r = new StringReader(param);
        ts = ika.tokenStream("TestField", r);
        CharTermAttribute termAtt = (CharTermAttribute) ts.getAttribute(CharTermAttribute.class);
        TypeAttribute typeAtt = (TypeAttribute) ts.getAttribute(TypeAttribute.class);
        String key = null;
        while (ts.incrementToken()) {
            if ("word".equals(typeAtt.type())) {
                key = termAtt.toString();
                if (key.length() >= 2) {
                    keys.add(key);
                }
            }
        }
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        if (ts != null) {
            ts.close();
        }
    }
    // Count how often each keyword occurs
    Map<String, Integer> keyMap = new HashMap<String, Integer>();
    Integer $ = null;
    for (String key : keys) {
        keyMap.put(key, ($ = keyMap.get(key)) == null ? 1 : $ + 1);
    }
    // Sort the entries by frequency, descending
    List<Map.Entry<String, Integer>> keyList = new ArrayList<Map.Entry<String, Integer>>(keyMap.entrySet());
    Collections.sort(keyList, new Comparator<Map.Entry<String, Integer>>() {
        public int compare(Map.Entry<String, Integer> o1, Map.Entry<String, Integer> o2) {
            return (o2.getValue() - o1.getValue());
        }
    });
    // Collect the keyword part of each "key=count" entry
    String id = null;
    String str = "";
    List list = new ArrayList();
    if (keyList.size() > 0) {
        for (int i = 0; i < keyList.size(); i++) {
            id = keyList.get(i).toString();
            String[] strs = id.split("\\=");
            str = strs[0];
            list.add(strs[0]);
            System.out.println("id:" + id);
        }
    }
    return list;
}
From source file:org.elasticsearch.index.analysis.split.AnalysisTests.java
License:Apache License
@Test
public void testTokenFilter() throws IOException {
    String[] strings = new String[] {
            "<em>abc</em>def",
            "<em>this is just a NGram</em>abc<em>def</em>This is",
            "? dong ai hua <em>just</em> the NGram",
            "xsflsy02.sa.nhnsystem.com",
            "nomad::Job::ReturnAnswer:163",
            "2013-01-10 06:29:07 +0000",
            "<123>456",
            "<em>NNB</em>" + "=ULPUSFCVXBNFC "
                    + "NB=GYYDSNJYGIYTEMBW npic="
                    + "<em>SFCuenZVHbx0RFZFoh+a0WALs7qRAYM/3vD26gfSTs4O8u/7rIqsl9I5OnJV9LgnCA</em> "
                    + "page_uid=RT+2zc5Y7tlssvof8wCsssssstZ-140745 "
                    + "BMR= nid_inf=438366115 NID_MATCH_M=1 "
                    + "NID_AUT=<em>95R9DDUsQ6SpQrid2Qpfe0s5BsyH6VRO0jBmpZ/Nmq4TrgPddxY8gUzhTVFyhECwFBBH6tnpd8YslNUK+ARdKEOJSwxM7HOspmslEoVHHHDgTqdfF60lI8opO9JKWVFaAnswVnIFNHTdHUdaCeFvSQ<em> "
                    + "NID_SES=<em>AAABRQjdQk1opAW5ceebr50CEmXN7HrzMrImW4FrXlJACJ1QU2fYDyjIpO/cO/2/+iM0BwZTnLgX4EClbkFwar9MJDr/+0dfX91dMvXV+8WuyiCiWxCWd4FwrsCMHcXthwQGV+C1bCrbU+5C/qeOeGuJCGwVt769y8+Tuy+KBuTGbKDMuUF/SyRq5IwNQ3YL1pMGs+cAnFN2xqFplgJtZvlhI8+8f3GfMxZqEHlXmSSlSpCWkZZYzz9wx2WarU+WtU4WGpnW0Y+Kc347mW2mNaVIDq+AHf4HXE8JHsPqvlzNWlkyS5AHw3tc5bWFy0MhxngOnyG7VqTheb4yxPRhTY0D6fF4TDPr7fjsJ5tuA9oxH+BGuoy6uYIs8uoRI1+HULgI0WCQpiNeVtI1eskacsENBnqECJ3OOyAFzAcr9msv7pr8LYtx0TsNVlLWVS7ug1uH5w</em> "
                    + "ncvid=#vid#_118.217.45.216p3Lj WMONID=DEZg20K2BGS ncvc2=7c1ce4c094a2a31133c5b88ab14e2e56eda35ebba8bf21da60ba865aeeca2ee728d016cd172bbf93e37c2bf73b9136e8073a1f11e2d0ab9cf43394518fbf0ec3adaba8a9b6abb4aba4a0a3a4a1a6b615 nci4=0337dafeeaa7c87a25cb8c9b96771b78d997768ada8665b7478abf4dfaff3ac3c336f650f4ba5c697e8fb3613570e67cd88ff44bafb0f9e0ca00aa61b78337fa95b1bc9bba8bb9b7b691b485cdbeae8da997b3aba285a091e6919cbc98a9ea9c93b78ebff2838aad88b9878b82a580ce8083848988888b8cb9 JSESSIONID=E365D0634FED26492BFFD5DEEE789B66 personaconmain|ektmfrl645=AE8BC98FD74D619FF7B13C83191E1F5EAFCD0F25C43D6BDC693E26D777419A2F845E79DA02B04219 personacon|ektmfrl645= cafeCookieToken=5KCBru-K8k8aHwkbio4dPmLlMyK6WlPYqN0319U4UeImDS9UVPpo70IVLHK9eybq6eJc-rNfllMgB5Fk_i2j-rKM1mCuoOqZ ncu=82b94171693746ae8766724d5696dc1a83e17aed" };
    String[] expected = new String[] {
            "<em>abc</em><i>def</i>",
            "<em>this is just a NGram</em><i>abc</i><em>def</em>This is",
            "<i></i><i></i><i>?</i> <i>dong</i> <i>ai</i> <i>hua</i> <em>just</em> the <i>NGram</i>",
            "<i>xsflsy02</i>.<i>sa.nhnsystem.com</i>",
            "<i>nomad</i>::<i>Job</i>::<i>ReturnAnswer</i>:<i>163</i>",
            "<i>2013</i>-<i>01</i>-<i>10</i> <i>06</i>:<i>29</i>:<i>07</i> +<i>0000</i>",
            "<<i>123</i>><i>456</i>",
            "<em>NNB</em>=<i>ULPUSFCVXBNFC</i> <i>NB</i>=<i>GYYDSNJYGIYTEMBW</i> <i>npic</i>=<em>SFCuenZVHbx0RFZFoh+a0WALs7qRAYM/3vD26gfSTs4O8u/7rIqsl9I5OnJV9LgnCA</em> <i>page_uid</i>=<i>RT</i>+<i>2zc5Y7tlssvof8wCsssssstZ</i>-<i>140745</i> <i>BMR</i>= <i>nid_inf</i>=<i>438366115</i> <i>NID_MATCH_M</i>=<i>1</i> <i>NID_AUT</i>= <i>ncvid</i>=#<i>vid</i>#<i>_118.217.45.216p3Lj</i> <i>WMONID</i>=<i>DEZg20K2BGS</i> <i>ncvc2</i>=<i>7c1ce4c094a2a31133c5b88ab14e2e56eda35ebba8bf21da60ba865aeeca2ee728d016cd172bbf93e37c2bf73b9136e8073a1f11e2d0ab9cf43394518fbf0ec3adaba8a9b6abb4aba4a0a3a4a1a6b615</i> <i>nci4</i>=<i>0337dafeeaa7c87a25cb8c9b96771b78d997768ada8665b7478abf4dfaff3ac3c336f650f4ba5c697e8fb3613570e67cd88ff44bafb0f9e0ca00aa61b78337fa95b1bc9bba8bb9b7b691b485cdbeae8da997b3aba285a091e6919cbc98a9ea9c93b78ebff2838aad88b9878b82a580ce8083848988888b8cb9</i> <i>JSESSIONID</i>=<i>E365D0634FED26492BFFD5DEEE789B66</i> <i>personaconmain</i>|<i>ektmfrl645</i>=<i>AE8BC98FD74D619FF7B13C83191E1F5EAFCD0F25C43D6BDC693E26D777419A2F845E79DA02B04219</i> <i>personacon</i>|<i>ektmfrl645</i>= <i>cafeCookieToken</i>=<i>5KCBru</i>-<i>K8k8aHwkbio4dPmLlMyK6WlPYqN0319U4UeImDS9UVPpo70IVLHK9eybq6eJc</i>-<i>rNfllMgB5Fk_i2j</i>-<i>rKM1mCuoOqZ</i> <i>ncu</i>=<i>82b94171693746ae8766724d5696dc1a83e17aed</i>" };
    Analyzer analyzer = new SplitAnalyzer(Lucene.ANALYZER_VERSION);
    for (int i = 0, len = strings.length; i < len; i++) {
        StringReader sr = new StringReader(strings[i]);
        TokenStream stream = analyzer.tokenStream("f", sr);
        stream.reset();
        List<String> list = new ArrayList<String>();
        while (stream.incrementToken()) {
            CharTermAttribute ta = stream.getAttribute(CharTermAttribute.class);
            list.add(ta.toString());
            System.out.println(ta.toString());
        }
        Joiner joiner = Joiner.on("");
        System.out.println("Result:" + joiner.join(list));
        Assert.assertEquals(joiner.join(list), expected[i]);
    }
}
From source file:org.elasticsearch.index.mapper.hashsplitter.HashSplitterFieldMapper.java
License:Apache License
@Override
public Query fieldQuery(String value, @Nullable QueryParseContext context) {
    // Use HashSplitterSearch* analysis and post-process it to create the real query
    TokenStream tok = null;
    try {
        tok = indexAnalyzer.reusableTokenStream(names().indexNameClean(), new FastStringReader(value));
        tok.reset();
    } catch (IOException e) {
        return null;
    }
    CharTermAttribute termAtt = tok.getAttribute(CharTermAttribute.class);
    BooleanQuery q = new BooleanQuery();
    try {
        while (tok.incrementToken()) {
            Term term = names().createIndexNameTerm(termAtt.toString());
            q.add(new TermQuery(term), BooleanClause.Occur.MUST);
        }
        tok.end();
        tok.close();
    } catch (IOException e) {
        e.printStackTrace();
        q = null;
    }
    return q;
}
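Note: Analyzer.reusableTokenStream, used here and in the remaining HashSplitterFieldMapper examples, is a Lucene 3.x API; it was removed in Lucene 4.0, where Analyzer.tokenStream itself manages per-thread reuse.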
From source file:org.elasticsearch.index.mapper.hashsplitter.HashSplitterFieldMapper.java
License:Apache License
@Override
public Filter fieldFilter(String value, @Nullable QueryParseContext context) {
    // Use HashSplitterSearch* analysis and post-process it to create the real query
    TokenStream tok = null;
    try {
        tok = indexAnalyzer.reusableTokenStream(names().indexNameClean(), new FastStringReader(value));
        tok.reset();
    } catch (IOException e) {
        return null;
    }
    CharTermAttribute termAtt = tok.getAttribute(CharTermAttribute.class);
    BooleanFilter f = new BooleanFilter();
    try {
        while (tok.incrementToken()) {
            Term term = names().createIndexNameTerm(termAtt.toString());
            f.add(new TermFilter(term), BooleanClause.Occur.MUST);
        }
        tok.end();
        tok.close();
    } catch (IOException e) {
        e.printStackTrace();
        f = null;
    }
    return f;
}
From source file:org.elasticsearch.index.mapper.hashsplitter.HashSplitterFieldMapper.java
License:Apache License
@Override
public Query prefixQuery(String value, @Nullable MultiTermQuery.RewriteMethod method,
        @Nullable QueryParseContext context) {
    // Use HashSplitterSearch* analysis and post-process it to create the real query
    TokenStream tok = null;
    try {
        tok = indexAnalyzer.reusableTokenStream(names().indexNameClean(), new FastStringReader(value));
        tok.reset();
    } catch (IOException e) {
        return null;
    }
    CharTermAttribute termAtt = tok.getAttribute(CharTermAttribute.class);
    BooleanQuery q = new BooleanQuery();
    try {
        int remainingSize = sizeIsVariable ? 0 : sizeValue; // note: prefixes are not included
        while (tok.incrementToken()) {
            Term term = names().createIndexNameTerm(termAtt.toString());
            if (termAtt.length() < 1 + chunkLength) {
                if (remainingSize > 0) { // implies size is fixed
                    if (remainingSize < chunkLength)
                        q.add(new PrefixLengthQuery(term, 1 + remainingSize, 1 + remainingSize),
                                BooleanClause.Occur.MUST);
                    else
                        q.add(new PrefixLengthQuery(term, 1 + chunkLength, 1 + chunkLength),
                                BooleanClause.Occur.MUST);
                } else { // varying size: only limit to the chunkLength
                    q.add(new PrefixLengthQuery(term, 0, 1 + chunkLength), BooleanClause.Occur.MUST);
                }
            } else {
                q.add(new TermQuery(term), BooleanClause.Occur.MUST);
            }
            remainingSize -= termAtt.length() - 1; // termAtt contains the prefix, remainingSize doesn't take it into account
        }
        tok.end();
        tok.close();
    } catch (IOException e) {
        e.printStackTrace();
        q = null;
    }
    return q;
}
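As the inline comments hint, this mapper's analyzer appears to emit each hash chunk as a token of at most 1 + chunkLength characters, where the first character is a positional prefix; the recurring 1 + ... lengths and the remainingSize -= termAtt.length() - 1 bookkeeping both account for that extra prefix character. This reading is inferred from the code shown here, not from HashSplitter documentation.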
From source file:org.elasticsearch.index.mapper.hashsplitter.HashSplitterFieldMapper.java
License:Apache License
@Override
public Filter prefixFilter(String value, @Nullable QueryParseContext context) {
    // Use HashSplitterSearch* analysis and post-process it to create the real filter
    TokenStream tok = null;
    try {
        tok = indexAnalyzer.reusableTokenStream(names().indexNameClean(), new FastStringReader(value));
        tok.reset();
    } catch (IOException e) {
        return null;
    }
    CharTermAttribute termAtt = tok.getAttribute(CharTermAttribute.class);
    BooleanFilter f = new BooleanFilter();
    try {
        int remainingSize = sizeIsVariable ? 0 : sizeValue; // note: prefixes are not included
        while (tok.incrementToken()) {
            Term term = names().createIndexNameTerm(termAtt.toString());
            if (termAtt.length() < 1 + chunkLength) {
                if (remainingSize > 0) { // implies size is fixed
                    if (remainingSize < chunkLength)
                        f.add(new PrefixLengthFilter(term, 1 + remainingSize, 1 + remainingSize),
                                BooleanClause.Occur.MUST);
                    else
                        f.add(new PrefixLengthFilter(term, 1 + chunkLength, 1 + chunkLength),
                                BooleanClause.Occur.MUST);
                } else { // varying size: only limit to the chunkLength
                    f.add(new PrefixLengthFilter(term, 0, 1 + chunkLength), BooleanClause.Occur.MUST);
                }
            } else {
                f.add(new TermFilter(term), BooleanClause.Occur.MUST);
            }
            remainingSize -= termAtt.length() - 1; // termAtt contains the prefix, remainingSize doesn't take it into account
        }
        tok.end();
        tok.close();
    } catch (IOException e) {
        e.printStackTrace();
        f = null;
    }
    return f;
}
From source file:org.elasticsearch.index.mapper.hashsplitter.HashSplitterFieldMapper.java
License:Apache License
@Override
public Filter rangeFilter(String lowerTerm, String upperTerm, boolean includeLower, boolean includeUpper,
        @Nullable QueryParseContext context) {
    // Special case: -infinity to +infinity
    if (lowerTerm == null && upperTerm == null) {
        if (sizeIsVariable)
            return null;
        StringBuilder sbWildcardPart = new StringBuilder();
        for (int i = 0; i < chunkLength; i++)
            sbWildcardPart.append(wildcardOne);
        String wildcardPart = sbWildcardPart.toString();
        BooleanFilter filter = new BooleanFilter();
        for (int i = sizeValue / chunkLength - 1; i >= 0; i--) {
            filter.add(new WildcardFilter(names().createIndexNameTerm(prefixes.charAt(i) + wildcardPart)),
                    BooleanClause.Occur.MUST);
        }
        if (sizeValue % chunkLength != 0) {
            // If the size is not divisible by chunkLength,
            // we still have a last chunk, but it has a shorter length
            filter.add(new WildcardFilter(names().createIndexNameTerm(prefixes.charAt(sizeValue / chunkLength + 1)
                    + wildcardPart.substring(0, sizeValue % chunkLength))), BooleanClause.Occur.MUST);
        }
        return filter;
    }
    // Check for emptiness
    if (lowerTerm != null && upperTerm != null) {
        int cmp = lowerTerm.compareTo(upperTerm);
        // Inverted bounds
        if (cmp > 0)
            return MatchNoDocsFilter.INSTANCE;
        // Equal bounds
        if (cmp == 0) {
            // and both inclusive bounds: singleton
            if (includeLower && includeUpper) {
                // Special case: equal terms
                return fieldFilter(lowerTerm, context);
            }
            // otherwise, empty range
            return MatchNoDocsFilter.INSTANCE;
        }
    }
    // Analyze lower and upper terms
    List<String> lowerTerms = new LinkedList<String>();
    List<String> upperTerms = new LinkedList<String>();
    if (lowerTerm != null) {
        TokenStream tok = null;
        try {
            tok = indexAnalyzer.reusableTokenStream(names().indexNameClean(), new FastStringReader(lowerTerm));
            tok.reset();
        } catch (IOException e) {
            return null;
        }
        CharTermAttribute termAtt = tok.getAttribute(CharTermAttribute.class);
        try {
            while (tok.incrementToken())
                lowerTerms.add(termAtt.toString());
            tok.end();
            tok.close();
        } catch (IOException e) {
            return null;
        }
    }
    if (upperTerm != null) {
        TokenStream tok = null;
        try {
            tok = indexAnalyzer.reusableTokenStream(names().indexNameClean(), new FastStringReader(upperTerm));
            tok.reset();
        } catch (IOException e) {
            return null;
        }
        CharTermAttribute termAtt = tok.getAttribute(CharTermAttribute.class);
        try {
            while (tok.incrementToken())
                upperTerms.add(termAtt.toString());
            tok.end();
            tok.close();
        } catch (IOException e) {
            return null;
        }
    }
    // Generate the filter
    BooleanFilter topLevelAndFilter = new BooleanFilter();
    Iterator<String> lowers = lowerTerms.iterator();
    Iterator<String> uppers = upperTerms.iterator();
    String currLower = null;
    String currUpper = null;
    int remainingLowerSize = sizeIsVariable ? 0 : sizeValue;
    int remainingUpperSize = sizeIsVariable ? 0 : sizeValue;
    // First, the common prefix
    while (lowers.hasNext() && uppers.hasNext()) {
        currLower = lowers.next();
        currUpper = uppers.next();
        // The last part cannot be part of the prefix
        // because that special case has already been handled
        if (!lowers.hasNext() || !uppers.hasNext())
            break;
        if (!currLower.equals(currUpper))
            break;
        topLevelAndFilter.add(new TermFilter(names().createIndexNameTerm(currLower)), BooleanClause.Occur.MUST);
        remainingLowerSize -= currLower.length() - 1;
        remainingUpperSize -= currUpper.length() - 1;
    }
    String subPrefixLower = currLower;
    BooleanFilter secondLevelOrFilter = new BooleanFilter();
    BooleanFilter lastFilter;
    // Add the range part of the query (secondLevelOrFilter); the prefix part is already in topLevelAndFilter
    topLevelAndFilter.add(secondLevelOrFilter, BooleanClause.Occur.MUST);
    // We still have secondLevelOrFilter to populate
    lastFilter = new BooleanFilter();
    // Handle the first diverging token of the lowerTerm (if it's not also the last available!)
    if (lowers.hasNext()) {
        lastFilter.add(new TermFilter(names().createIndexNameTerm(currLower)), BooleanClause.Occur.MUST);
        remainingLowerSize -= currLower.length() - 1;
        currLower = lowers.next();
    }
    secondLevelOrFilter.add(lastFilter, BooleanClause.Occur.SHOULD);
    // Then get to the last token of the lowerTerm
    while (lowers.hasNext()) {
        BooleanFilter orFilter = new BooleanFilter();
        lastFilter.add(orFilter, BooleanClause.Occur.MUST);
        orFilter.add(new TermRangeLengthFilter(names().indexName(), currLower, luceneTermUpperBound(currLower),
                false, false, 1 + chunkLength, 1 + chunkLength), BooleanClause.Occur.SHOULD);
        BooleanFilter nextFilter = new BooleanFilter();
        nextFilter.add(new TermFilter(names().createIndexNameTerm(currLower)), BooleanClause.Occur.MUST);
        orFilter.add(nextFilter, BooleanClause.Occur.SHOULD);
        lastFilter = nextFilter;
        remainingLowerSize -= currLower.length() - 1;
        currLower = lowers.next();
    }
    // Handle the last token of the lowerTerm
    if (remainingLowerSize < 0)
        lastFilter.add(new TermRangeLengthFilter(names().indexName(), currLower, luceneTermUpperBound(currLower),
                includeLower, false, 0, 1 + chunkLength), BooleanClause.Occur.MUST);
    else if (remainingLowerSize < chunkLength)
        lastFilter.add(new TermRangeLengthFilter(names().indexName(), currLower, luceneTermUpperBound(currLower),
                includeLower, false, 1 + remainingLowerSize, 1 + remainingLowerSize), BooleanClause.Occur.MUST);
    else
        lastFilter.add(new TermRangeLengthFilter(names().indexName(), currLower, luceneTermUpperBound(currLower),
                includeLower, false, 1 + chunkLength, 1 + chunkLength), BooleanClause.Occur.MUST);
    // Range from the non-prefix part of the lowerTerm to the non-prefix part of the upperTerm
    if (remainingUpperSize < 0)
        secondLevelOrFilter.add(new TermRangeLengthFilter(names().indexName(), subPrefixLower, currUpper,
                false, false, 0, 1 + chunkLength), BooleanClause.Occur.SHOULD);
    else if (remainingUpperSize < chunkLength)
        secondLevelOrFilter.add(new TermRangeLengthFilter(names().indexName(), subPrefixLower, currUpper,
                false, false, 1 + remainingUpperSize, 1 + remainingUpperSize), BooleanClause.Occur.SHOULD);
    else
        secondLevelOrFilter.add(new TermRangeLengthFilter(names().indexName(), subPrefixLower, currUpper,
                false, false, 1 + chunkLength, 1 + chunkLength), BooleanClause.Occur.SHOULD);
    lastFilter = new BooleanFilter();
    // Handle the first diverging token of the upperTerm (if it's not also the last available!)
    if (uppers.hasNext()) {
        lastFilter.add(new TermFilter(names().createIndexNameTerm(currUpper)), BooleanClause.Occur.MUST);
        remainingUpperSize -= currUpper.length() - 1;
        currUpper = uppers.next();
    }
    secondLevelOrFilter.add(lastFilter, BooleanClause.Occur.SHOULD);
    // Then get to the last token of the upperTerm
    while (uppers.hasNext()) {
        BooleanFilter orFilter = new BooleanFilter();
        lastFilter.add(orFilter, BooleanClause.Occur.MUST);
        orFilter.add(new TermRangeLengthFilter(names().indexName(), luceneTermLowerBound(currUpper), currUpper,
                false, false, 1 + chunkLength, 1 + chunkLength), BooleanClause.Occur.SHOULD);
        BooleanFilter nextFilter = new BooleanFilter();
        nextFilter.add(new TermFilter(names().createIndexNameTerm(currUpper)), BooleanClause.Occur.MUST);
        orFilter.add(nextFilter, BooleanClause.Occur.SHOULD);
        lastFilter = nextFilter;
        remainingUpperSize -= currUpper.length() - 1;
        currUpper = uppers.next();
    }
    // Handle the last token of the upperTerm
    if (remainingUpperSize < 0)
        lastFilter.add(new TermRangeLengthFilter(names().indexName(), luceneTermLowerBound(currUpper), currUpper,
                false, includeUpper, 0, 1 + chunkLength), BooleanClause.Occur.MUST);
    else if (remainingUpperSize < chunkLength)
        lastFilter.add(new TermRangeLengthFilter(names().indexName(), luceneTermLowerBound(currUpper), currUpper,
                false, includeUpper, 1 + remainingUpperSize, 1 + remainingUpperSize), BooleanClause.Occur.MUST);
    else
        lastFilter.add(new TermRangeLengthFilter(names().indexName(), luceneTermLowerBound(currUpper), currUpper,
                false, includeUpper, 1 + chunkLength, 1 + chunkLength), BooleanClause.Occur.MUST);
    return topLevelAndFilter;
}