List of usage examples for org.apache.lucene.index.LeafReader#close()
Method signature:

    @Override
    public final synchronized void close() throws IOException
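Before the project-sourced examples below, here is a minimal sketch of the usual lifecycle. It is not taken from any of the projects listed here, and the index path is a placeholder: LeafReader extends IndexReader and implements Closeable, so try-with-resources works, and closing the top-level DirectoryReader also closes the LeafReaders it owns. The examples below instead close a standalone wrapped LeafReader explicitly in a finally block.

    import java.io.IOException;
    import java.nio.file.Paths;

    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.LeafReader;
    import org.apache.lucene.index.LeafReaderContext;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    public class LeafReaderCloseSketch {
      public static void main(String[] args) throws IOException {
        // "/tmp/index" is a hypothetical path; any existing index works.
        try (Directory dir = FSDirectory.open(Paths.get("/tmp/index"));
            DirectoryReader reader = DirectoryReader.open(dir)) {
          for (LeafReaderContext ctx : reader.leaves()) {
            LeafReader leaf = ctx.reader();
            System.out.println("segment docs: " + leaf.maxDoc());
          }
        } // closing the DirectoryReader here closes its LeafReaders too
      }
    }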
From source file:com.meizu.nlp.classification.ClassificationTestBase.java
License:Apache License
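This test helper trains a Classifier over a SlowCompositeReaderWrapper view of the test index, asserts the assigned class and a positive score, and releases the wrapped LeafReader in a finally block.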
    protected void checkCorrectClassification(Classifier<T> classifier, String inputDoc, T expectedResult,
        Analyzer analyzer, String textFieldName, String classFieldName, Query query) throws Exception {
      LeafReader leafReader = null;
      try {
        populateSampleIndex(analyzer);
        leafReader = SlowCompositeReaderWrapper.wrap(indexWriter.getReader());
        classifier.train(leafReader, textFieldName, classFieldName, analyzer, query);
        ClassificationResult<T> classificationResult = classifier.assignClass(inputDoc);
        assertNotNull(classificationResult.getAssignedClass());
        assertEquals("got an assigned class of " + classificationResult.getAssignedClass(), expectedResult,
            classificationResult.getAssignedClass());
        assertTrue("got a not positive score " + classificationResult.getScore(),
            classificationResult.getScore() > 0);
      } finally {
        if (leafReader != null) {
          leafReader.close();
        }
      }
    }
From source file:com.meizu.nlp.classification.ClassificationTestBase.java
License:Apache License
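A variant of the previous helper for online classification: after training and a first round of assertions, it updates the sample index and checks that a second classification of the same document is stable; the LeafReader is again closed in the finally block.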
    protected void checkOnlineClassification(Classifier<T> classifier, String inputDoc, T expectedResult,
        Analyzer analyzer, String textFieldName, String classFieldName, Query query) throws Exception {
      LeafReader leafReader = null;
      try {
        populateSampleIndex(analyzer);
        leafReader = SlowCompositeReaderWrapper.wrap(indexWriter.getReader());
        classifier.train(leafReader, textFieldName, classFieldName, analyzer, query);
        ClassificationResult<T> classificationResult = classifier.assignClass(inputDoc);
        assertNotNull(classificationResult.getAssignedClass());
        assertEquals("got an assigned class of " + classificationResult.getAssignedClass(), expectedResult,
            classificationResult.getAssignedClass());
        assertTrue("got a not positive score " + classificationResult.getScore(),
            classificationResult.getScore() > 0);
        updateSampleIndex();
        ClassificationResult<T> secondClassificationResult = classifier.assignClass(inputDoc);
        assertEquals(classificationResult.getAssignedClass(), secondClassificationResult.getAssignedClass());
        assertEquals(Double.valueOf(classificationResult.getScore()),
            Double.valueOf(secondClassificationResult.getScore()));
      } finally {
        if (leafReader != null) {
          leafReader.close();
        }
      }
    }
From source file:com.meizu.nlp.classification.ClassificationTestBase.java
License:Apache License
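A performance check from the same test base: it trains the classifier against the wrapped reader, asserts that training finishes within two minutes, and closes the LeafReader in the finally block.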
    protected void checkPerformance(Classifier<T> classifier, Analyzer analyzer, String classFieldName)
        throws Exception {
      LeafReader leafReader = null;
      long trainStart = System.currentTimeMillis();
      try {
        populatePerformanceIndex(analyzer);
        leafReader = SlowCompositeReaderWrapper.wrap(indexWriter.getReader());
        classifier.train(leafReader, textFieldName, classFieldName, analyzer);
        long trainEnd = System.currentTimeMillis();
        long trainTime = trainEnd - trainStart;
        assertTrue("training took more than 2 mins : " + trainTime / 1000 + "s", trainTime < 120000);
      } finally {
        if (leafReader != null) {
          leafReader.close();
        }
      }
    }
From source file:org.apache.solr.index.TestSlowCompositeReaderWrapper.java
License:Apache License
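This Solr test registers CoreClosedListeners (including duplicate registrations) on a SlowCompositeReaderWrapper, removes a random subset, and verifies that the remaining listeners fire exactly once whether the wrapped DirectoryReader or the LeafReader itself is closed.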
    public void testCoreListenerOnSlowCompositeReaderWrapper() throws IOException {
      RandomIndexWriter w = new RandomIndexWriter(random(), newDirectory());
      final int numDocs = TestUtil.nextInt(random(), 1, 5);
      for (int i = 0; i < numDocs; ++i) {
        w.addDocument(new Document());
        if (random().nextBoolean()) {
          w.commit();
        }
      }
      w.commit();
      w.close();

      final IndexReader reader = DirectoryReader.open(w.w.getDirectory());
      final LeafReader leafReader = SlowCompositeReaderWrapper.wrap(reader);
      final int numListeners = TestUtil.nextInt(random(), 1, 10);
      final List<LeafReader.CoreClosedListener> listeners = new ArrayList<>();
      AtomicInteger counter = new AtomicInteger(numListeners);

      for (int i = 0; i < numListeners; ++i) {
        CountCoreListener listener = new CountCoreListener(counter, leafReader.getCoreCacheKey());
        listeners.add(listener);
        leafReader.addCoreClosedListener(listener);
      }
      for (int i = 0; i < 100; ++i) {
        leafReader.addCoreClosedListener(listeners.get(random().nextInt(listeners.size())));
      }
      final int removed = random().nextInt(numListeners);
      Collections.shuffle(listeners, random());
      for (int i = 0; i < removed; ++i) {
        leafReader.removeCoreClosedListener(listeners.get(i));
      }
      assertEquals(numListeners, counter.get());
      // make sure listeners are registered on the wrapped reader and that closing any of them has the same effect
      if (random().nextBoolean()) {
        reader.close();
      } else {
        leafReader.close();
      }
      assertEquals(removed, counter.get());
      w.w.getDirectory().close();
    }
From source file:org.apache.solr.uninverting.TestFieldCacheVsDocValues.java
License:Apache License
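This test indexes documents with very large binary doc values (bailing out early if the codec rejects values over 32K), wraps the reader as a LeafReader, verifies every value through the FieldCache, and closes the LeafReader before closing the directory.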
    public void testHugeBinaryValues() throws Exception {
      Analyzer analyzer = new MockAnalyzer(random());
      // FSDirectory because SimpleText will consume gobs of
      // space when storing big binary values:
      Directory d = newFSDirectory(createTempDir("hugeBinaryValues"));
      boolean doFixed = random().nextBoolean();
      int numDocs;
      int fixedLength = 0;
      if (doFixed) {
        // Sometimes make all values fixed length since some
        // codecs have different code paths for this:
        numDocs = TestUtil.nextInt(random(), 10, 20);
        fixedLength = TestUtil.nextInt(random(), 65537, 256 * 1024);
      } else {
        numDocs = TestUtil.nextInt(random(), 100, 200);
      }
      IndexWriter w = new IndexWriter(d, newIndexWriterConfig(analyzer));

      List<byte[]> docBytes = new ArrayList<>();
      long totalBytes = 0;
      for (int docID = 0; docID < numDocs; docID++) {
        // we don't use RandomIndexWriter because it might add
        // more docvalues than we expect !!!!

        // Must be > 64KB in size to ensure more than 2 pages in
        // PagedBytes would be needed:
        int numBytes;
        if (doFixed) {
          numBytes = fixedLength;
        } else if (docID == 0 || random().nextInt(5) == 3) {
          numBytes = TestUtil.nextInt(random(), 65537, 3 * 1024 * 1024);
        } else {
          numBytes = TestUtil.nextInt(random(), 1, 1024 * 1024);
        }
        totalBytes += numBytes;
        if (totalBytes > 5 * 1024 * 1024) {
          break;
        }

        byte[] bytes = new byte[numBytes];
        random().nextBytes(bytes);
        docBytes.add(bytes);
        Document doc = new Document();
        BytesRef b = new BytesRef(bytes);
        b.length = bytes.length;
        doc.add(new BinaryDocValuesField("field", b));
        doc.add(new StringField("id", "" + docID, Field.Store.YES));
        try {
          w.addDocument(doc);
        } catch (IllegalArgumentException iae) {
          if (iae.getMessage().indexOf("is too large") == -1) {
            throw iae;
          } else {
            // OK: some codecs can't handle binary DV > 32K
            assertFalse(codecAcceptsHugeBinaryValues("field"));
            w.rollback();
            d.close();
            return;
          }
        }
      }

      DirectoryReader r;
      try {
        r = DirectoryReader.open(w);
      } catch (IllegalArgumentException iae) {
        if (iae.getMessage().indexOf("is too large") == -1) {
          throw iae;
        } else {
          assertFalse(codecAcceptsHugeBinaryValues("field"));
          // OK: some codecs can't handle binary DV > 32K
          w.rollback();
          d.close();
          return;
        }
      }
      w.close();

      LeafReader ar = SlowCompositeReaderWrapper.wrap(r);
      TestUtil.checkReader(ar);

      BinaryDocValues s = FieldCache.DEFAULT.getTerms(ar, "field");
      for (int docID = 0; docID < docBytes.size(); docID++) {
        Document doc = ar.document(docID);
        assertEquals(docID, s.nextDoc());
        BytesRef bytes = s.binaryValue();
        byte[] expected = docBytes.get(Integer.parseInt(doc.get("id")));
        assertEquals(expected.length, bytes.length);
        assertEquals(new BytesRef(expected), bytes);
      }

      assertTrue(codecAcceptsHugeBinaryValues("field"));

      ar.close();
      d.close();
    }
From source file:org.apache.solr.uninverting.TestFieldCacheVsDocValues.java
License:Apache License
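The same pattern for codecs that do enforce a maximum binary field length: value sizes are capped at LARGE_BINARY_FIELD_LENGTH, verified through the FieldCache, and the LeafReader is closed before the directory at the end.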
    public void testHugeBinaryValueLimit() throws Exception {
      // We only test DVFormats that have a limit
      assumeFalse("test requires codec with limits on max binary field length",
          codecAcceptsHugeBinaryValues("field"));
      Analyzer analyzer = new MockAnalyzer(random());
      // FSDirectory because SimpleText will consume gobs of
      // space when storing big binary values:
      Directory d = newFSDirectory(createTempDir("hugeBinaryValues"));
      boolean doFixed = random().nextBoolean();
      int numDocs;
      int fixedLength = 0;
      if (doFixed) {
        // Sometimes make all values fixed length since some
        // codecs have different code paths for this:
        numDocs = TestUtil.nextInt(random(), 10, 20);
        fixedLength = LARGE_BINARY_FIELD_LENGTH;
      } else {
        numDocs = TestUtil.nextInt(random(), 100, 200);
      }
      IndexWriter w = new IndexWriter(d, newIndexWriterConfig(analyzer));

      List<byte[]> docBytes = new ArrayList<>();
      long totalBytes = 0;
      for (int docID = 0; docID < numDocs; docID++) {
        // we don't use RandomIndexWriter because it might add
        // more docvalues than we expect !!!!

        // Must be > 64KB in size to ensure more than 2 pages in
        // PagedBytes would be needed:
        int numBytes;
        if (doFixed) {
          numBytes = fixedLength;
        } else if (docID == 0 || random().nextInt(5) == 3) {
          numBytes = LARGE_BINARY_FIELD_LENGTH;
        } else {
          numBytes = TestUtil.nextInt(random(), 1, LARGE_BINARY_FIELD_LENGTH);
        }
        totalBytes += numBytes;
        if (totalBytes > 5 * 1024 * 1024) {
          break;
        }

        byte[] bytes = new byte[numBytes];
        random().nextBytes(bytes);
        docBytes.add(bytes);
        Document doc = new Document();
        BytesRef b = new BytesRef(bytes);
        b.length = bytes.length;
        doc.add(new BinaryDocValuesField("field", b));
        doc.add(new StringField("id", "" + docID, Field.Store.YES));
        w.addDocument(doc);
      }

      DirectoryReader r = DirectoryReader.open(w);
      w.close();

      LeafReader ar = SlowCompositeReaderWrapper.wrap(r);
      TestUtil.checkReader(ar);

      BinaryDocValues s = FieldCache.DEFAULT.getTerms(ar, "field");
      for (int docID = 0; docID < docBytes.size(); docID++) {
        assertEquals(docID, s.nextDoc());
        Document doc = ar.document(docID);
        BytesRef bytes = s.binaryValue();
        byte[] expected = docBytes.get(Integer.parseInt(doc.get("id")));
        assertEquals(expected.length, bytes.length);
        assertEquals(new BytesRef(expected), bytes);
      }

      ar.close();
      d.close();
    }