List of usage examples for org.apache.lucene.store.RAMDirectory.close()
@Override public void close()
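Before the collected examples, here is a minimal sketch of the pattern they all share: write through an IndexOutput into a RAMDirectory, read the data back through an IndexInput, and close every input and output before closing the directory itself. It assumes the Lucene 4.x-era API used by the examples below (RAMDirectory, IOContext, createOutput/openInput); the class name and the file name "hello.bin" are illustrative only.

import java.io.IOException;

import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMDirectory;

public class RAMDirectoryCloseSketch {
    public static void main(String[] args) throws IOException {
        // All data lives on the heap; nothing is written to disk.
        RAMDirectory directory = new RAMDirectory();

        // Write a small "file" into the in-memory directory.
        IndexOutput output = directory.createOutput("hello.bin", IOContext.DEFAULT);
        output.writeByte((byte) 42);
        output.close();

        // Read it back.
        IndexInput input = directory.openInput("hello.bin", IOContext.DEFAULT);
        byte value = input.readByte();
        input.close();

        // Closing the directory releases its in-memory buffers;
        // close inputs and outputs first, as the examples below do.
        directory.close();

        System.out.println("read back: " + value);
    }
}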
From source file:org.apache.blur.store.blockcache_v2.CacheIndexOutputTest.java
License:Apache License
@Test
public void test1() throws IOException {
    Random random = new Random(seed);
    RAMDirectory directory = new RAMDirectory();
    Cache cache = CacheIndexInputTest.getCache();
    CacheIndexOutput indexOutput = new CacheIndexOutput(null, "test", cache, directory, IOContext.DEFAULT);
    indexOutput.writeByte((byte) 1);
    indexOutput.writeByte((byte) 2);
    byte[] b = new byte[16000];
    random.nextBytes(b);
    indexOutput.writeBytes(b, 16000);
    indexOutput.close();

    IndexInput input = directory.openInput("test", IOContext.DEFAULT);
    assertEquals(16002, input.length());
    assertEquals(1, input.readByte());
    assertEquals(2, input.readByte());
    byte[] buf = new byte[16000];
    input.readBytes(buf, 0, 16000);
    input.close();

    assertArrayEquals(b, buf);
    directory.close();
}
From source file:org.apache.blur.store.blockcache_v2.CacheIndexOutputTest.java
License:Apache License
@Test
public void test2() throws IOException {
    Cache cache = CacheIndexInputTest.getCache();
    RAMDirectory directory = new RAMDirectory();
    RAMDirectory directory2 = new RAMDirectory();
    Random random = new Random(seed);
    String name = "test2";
    long size = (10 * 1024 * 1024) + 13;

    IndexOutput output = directory.createOutput(name, IOContext.DEFAULT);
    CacheIndexOutput cacheIndexOutput = new CacheIndexOutput(null, name, cache, directory2, IOContext.DEFAULT);
    CacheIndexInputTest.writeRandomData(size, random, output, cacheIndexOutput);
    output.close();
    cacheIndexOutput.close();

    IndexInput input = directory.openInput(name, IOContext.DEFAULT);
    IndexInput testInput = directory2.openInput(name, IOContext.DEFAULT);
    CacheIndexInputTest.readRandomData(input, testInput, random, sampleSize, maxBufSize, maxOffset);
    testInput.close();
    input.close();

    directory.close();
    directory2.close();
}
From source file:org.apache.maven.index.context.TrackingLockFactoryTest.java
License:Apache License
@Test
public void testLockUnlock() throws IOException {
    final TrackingLockFactory lf = new TrackingLockFactory(new SingleInstanceLockFactory());
    final RAMDirectory ram = new RAMDirectory(lf);
    final Lock foo = ram.obtainLock("foo");
    final Lock bar = ram.obtainLock("bar");
    bar.close();
    foo.close();
    ram.close();
}
From source file:org.apache.maven.index.context.TrackingLockFactoryTest.java
License:Apache License
@Test
public void testLockLocked() throws IOException {
    final TrackingLockFactory lf = new TrackingLockFactory(new SingleInstanceLockFactory());
    final RAMDirectory ram = new RAMDirectory(lf);
    final Lock foo = ram.obtainLock("foo");
    boolean thrownLOFE = false;
    try {
        ram.obtainLock("foo");
    } catch (LockObtainFailedException e) {
        thrownLOFE = true;
    }
    assertTrue(thrownLOFE);
    foo.close();

    final Lock foo2 = ram.obtainLock("foo");
    foo2.close();
    ram.close();
}
From source file:org.crosswire.jsword.index.lucene.LuceneIndex.java
License:Open Source License
/**
 * Generate an index to use, telling the job about progress as you go.
 *
 * @throws BookException
 *             If we fail to read the index files
 */
public LuceneIndex(Book book, URI storage, boolean create) throws BookException {
    assert create;

    this.book = book;
    File finalPath = null;
    try {
        finalPath = NetUtil.getAsFile(storage);
        this.path = finalPath.getCanonicalPath();
    } catch (IOException ex) {
        // TRANSLATOR: Error condition: Could not initialize a search index. Lucene is the name of the search technology being used.
        throw new BookException(UserMsg.gettext("Failed to initialize Lucene search engine."), ex);
    }

    // Indexing the book is a good way to police data errors.
    DataPolice.setBook(book.getBookMetaData());

    // TRANSLATOR: Progress label indicating the start of indexing. {0} is a placeholder for the book's short name.
    String jobName = UserMsg.gettext("Creating index. Processing {0}", book.getInitials());
    Progress job = JobManager.createJob(jobName, Thread.currentThread());
    job.beginJob(jobName);

    IndexStatus finalStatus = IndexStatus.UNDONE;
    Analyzer analyzer = new LuceneAnalyzer(book);
    List<Key> errors = new ArrayList<Key>();
    File tempPath = new File(path + '.' + IndexStatus.CREATING.toString());

    try {
        synchronized (CREATING) {
            book.setIndexStatus(IndexStatus.CREATING);

            // An index is created by opening an IndexWriter with the create argument set to true.
            // IndexWriter writer = new IndexWriter(tempPath.getCanonicalPath(), analyzer, true);

            // Create the index in core.
            final RAMDirectory ramDir = new RAMDirectory();
            IndexWriter writer = new IndexWriter(ramDir, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED);

            generateSearchIndexImpl(job, errors, writer, book.getGlobalKeyList(), 0);

            // TRANSLATOR: Progress label for optimizing a search index. This may take a bit of time, so we have a label for it.
            job.setSectionName(UserMsg.gettext("Optimizing"));
            job.setWork(95);

            // Consolidate the index into the minimum number of files.
            // writer.optimize(); /* Optimize is done by addIndexes */
            writer.close();

            // Write the core index to disk.
            final Directory destination = FSDirectory.open(new File(tempPath.getCanonicalPath()));
            IndexWriter fsWriter = new IndexWriter(destination, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED);
            fsWriter.addIndexesNoOptimize(new Directory[] { ramDir });
            fsWriter.optimize();
            fsWriter.close();

            // Free up the space used by the ram directory
            ramDir.close();

            job.setCancelable(false);
            if (!job.isFinished()) {
                if (!tempPath.renameTo(finalPath)) {
                    // TRANSLATOR: The search index could not be moved to its final location.
                    throw new BookException(UserMsg.gettext("Installation failed."));
                }
            }

            if (finalPath.exists()) {
                finalStatus = IndexStatus.DONE;
            }

            if (!errors.isEmpty()) {
                StringBuilder buf = new StringBuilder();
                for (Key error : errors) {
                    buf.append(error);
                    buf.append('\n');
                }
                // TRANSLATOR: It is likely that one or more verses could not be indexed due to errors in those verses.
                // This message gives a listing of them to the user.
                Reporter.informUser(this, UserMsg.gettext("The following verses have errors and could not be indexed\n{0}", buf));
            }
        }
    } catch (IOException ex) {
        job.cancel();
        // TRANSLATOR: Common error condition: Some error happened while creating a search index.
        throw new BookException(UserMsg.gettext("Failed to initialize Lucene search engine."), ex);
    } finally {
        book.setIndexStatus(finalStatus);
        job.done();
    }
}
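The RAMDirectory lifecycle buried in the example above can be distilled into the short sketch below: build the index in memory, copy it to disk in one pass, then close the RAMDirectory to release its heap buffers. This is not the original JSword code; it uses the same Lucene 3.x-era calls the example relies on (the four-argument IndexWriter constructor and addIndexesNoOptimize), while the class name, StandardAnalyzer choice, and "/tmp/index" path are placeholders.

import java.io.File;
import java.io.IOException;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

public class RamThenDiskSketch {
    public static void main(String[] args) throws IOException {
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_30); // illustrative analyzer choice

        // Build the index in memory first.
        RAMDirectory ramDir = new RAMDirectory();
        IndexWriter ramWriter = new IndexWriter(ramDir, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED);
        // ... add documents to ramWriter here ...
        ramWriter.close();

        // Copy the in-memory index to its on-disk destination in one pass.
        Directory destination = FSDirectory.open(new File("/tmp/index")); // hypothetical path
        IndexWriter fsWriter = new IndexWriter(destination, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED);
        fsWriter.addIndexesNoOptimize(new Directory[] { ramDir });
        fsWriter.optimize();
        fsWriter.close();

        // Once copied, close the RAMDirectory to free its heap buffers.
        ramDir.close();
        destination.close();
    }
}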
From source file:org.elasticsearch.search.suggest.completion.CompletionPostingsFormatTest.java
License:Apache License
@Test
public void testCompletionPostingsFormat() throws IOException {
    AnalyzingCompletionLookupProviderV1 providerV1 = new AnalyzingCompletionLookupProviderV1(true, false, true, true);
    AnalyzingCompletionLookupProvider currentProvider = new AnalyzingCompletionLookupProvider(true, false, true, true);
    List<Completion090PostingsFormat.CompletionLookupProvider> providers = Lists.newArrayList(providerV1, currentProvider);
    Completion090PostingsFormat.CompletionLookupProvider randomProvider = providers
            .get(getRandom().nextInt(providers.size()));

    RAMDirectory dir = new RAMDirectory();
    writeData(dir, randomProvider);

    IndexInput input = dir.openInput("foo.txt", IOContext.DEFAULT);
    LookupFactory load = currentProvider.load(input);
    PostingsFormatProvider format = new PreBuiltPostingsFormatProvider(new Elasticsearch090PostingsFormat());
    NamedAnalyzer analyzer = new NamedAnalyzer("foo", new StandardAnalyzer(TEST_VERSION_CURRENT));
    Lookup lookup = load.getLookup(
            new CompletionFieldMapper(new Names("foo"), analyzer, analyzer, format, null, true, true, true,
                    Integer.MAX_VALUE, AbstractFieldMapper.MultiFields.empty(), null),
            new CompletionSuggestionContext(null));
    List<LookupResult> result = lookup.lookup("ge", false, 10);
    assertThat(result.get(0).key.toString(), equalTo("Generator - Foo Fighters"));
    assertThat(result.get(0).payload.utf8ToString(), equalTo("id:10"));
    dir.close();
}
From source file:org.elasticsearch.search.suggest.completion.CompletionPostingsFormatTest.java
License:Apache License
@Test
public void testProviderBackwardCompatibilityForVersion1() throws IOException {
    AnalyzingCompletionLookupProviderV1 providerV1 = new AnalyzingCompletionLookupProviderV1(true, false, true, true);
    AnalyzingCompletionLookupProvider currentProvider = new AnalyzingCompletionLookupProvider(true, false, true, true);

    RAMDirectory dir = new RAMDirectory();
    writeData(dir, providerV1);

    IndexInput input = dir.openInput("foo.txt", IOContext.DEFAULT);
    LookupFactory load = currentProvider.load(input);
    PostingsFormatProvider format = new PreBuiltPostingsFormatProvider(new Elasticsearch090PostingsFormat());
    NamedAnalyzer analyzer = new NamedAnalyzer("foo", new StandardAnalyzer(TEST_VERSION_CURRENT));
    AnalyzingCompletionLookupProvider.AnalyzingSuggestHolder analyzingSuggestHolder = load
            .getAnalyzingSuggestHolder(new CompletionFieldMapper(new Names("foo"), analyzer, analyzer, format,
                    null, true, true, true, Integer.MAX_VALUE, AbstractFieldMapper.MultiFields.empty(), null));
    assertThat(analyzingSuggestHolder.sepLabel, is(AnalyzingCompletionLookupProviderV1.SEP_LABEL));
    assertThat(analyzingSuggestHolder.payloadSep, is(AnalyzingCompletionLookupProviderV1.PAYLOAD_SEP));
    assertThat(analyzingSuggestHolder.endByte, is(AnalyzingCompletionLookupProviderV1.END_BYTE));
    dir.close();
}
From source file:org.elasticsearch.search.suggest.completion.CompletionPostingsFormatTest.java
License:Apache License
@Test
public void testProviderVersion2() throws IOException {
    AnalyzingCompletionLookupProvider currentProvider = new AnalyzingCompletionLookupProvider(true, false, true, true);

    RAMDirectory dir = new RAMDirectory();
    writeData(dir, currentProvider);

    IndexInput input = dir.openInput("foo.txt", IOContext.DEFAULT);
    LookupFactory load = currentProvider.load(input);
    PostingsFormatProvider format = new PreBuiltPostingsFormatProvider(new Elasticsearch090PostingsFormat());
    NamedAnalyzer analyzer = new NamedAnalyzer("foo", new StandardAnalyzer(TEST_VERSION_CURRENT));
    AnalyzingCompletionLookupProvider.AnalyzingSuggestHolder analyzingSuggestHolder = load
            .getAnalyzingSuggestHolder(new CompletionFieldMapper(new Names("foo"), analyzer, analyzer, format,
                    null, true, true, true, Integer.MAX_VALUE, AbstractFieldMapper.MultiFields.empty(), null));
    assertThat(analyzingSuggestHolder.sepLabel, is(XAnalyzingSuggester.SEP_LABEL));
    assertThat(analyzingSuggestHolder.payloadSep, is(XAnalyzingSuggester.PAYLOAD_SEP));
    assertThat(analyzingSuggestHolder.endByte, is(XAnalyzingSuggester.END_BYTE));
    dir.close();
}
From source file:org.elasticsearch.search.suggest.completion.CompletionPostingsFormatTest.java
License:Apache License
public Lookup buildAnalyzingLookup(final CompletionFieldMapper mapper, String[] terms, String[] surfaces,
        long[] weights) throws IOException {
    RAMDirectory dir = new RAMDirectory();
    FilterCodec filterCodec = new FilterCodec("filtered", Codec.getDefault()) {
        public PostingsFormat postingsFormat() {
            return mapper.postingsFormatProvider().get();
        }
    };
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, mapper.indexAnalyzer());
    indexWriterConfig.setCodec(filterCodec);
    IndexWriter writer = new IndexWriter(dir, indexWriterConfig);

    for (int i = 0; i < weights.length; i++) {
        Document doc = new Document();
        BytesRef payload = mapper.buildPayload(new BytesRef(surfaces[i]), weights[i],
                new BytesRef(Long.toString(weights[i])));
        doc.add(mapper.getCompletionField(terms[i], payload));
        if (randomBoolean()) {
            writer.commit();
        }
        writer.addDocument(doc);
    }
    writer.commit();
    writer.forceMerge(1);
    writer.commit();

    DirectoryReader reader = DirectoryReader.open(writer, true);
    assertThat(reader.leaves().size(), equalTo(1));
    assertThat(reader.leaves().get(0).reader().numDocs(), equalTo(weights.length));
    AtomicReaderContext atomicReaderContext = reader.leaves().get(0);
    Terms luceneTerms = atomicReaderContext.reader().terms(mapper.name());
    Lookup lookup = ((Completion090PostingsFormat.CompletionTerms) luceneTerms).getLookup(mapper,
            new CompletionSuggestionContext(null));
    reader.close();
    writer.close();
    dir.close();
    return lookup;
}
From source file:org.elasticsearch.search.suggest.completion.CompletionPostingsFormatTest.java
License:Apache License
@Test
public void testNoDocs() throws IOException {
    AnalyzingCompletionLookupProvider provider = new AnalyzingCompletionLookupProvider(true, false, true, true);
    RAMDirectory dir = new RAMDirectory();
    IndexOutput output = dir.createOutput("foo.txt", IOContext.DEFAULT);
    FieldsConsumer consumer = provider.consumer(output);
    FieldInfo fieldInfo = new FieldInfo("foo", true, 1, false, true, true,
            IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, DocValuesType.SORTED, DocValuesType.BINARY,
            new HashMap<String, String>());
    TermsConsumer addField = consumer.addField(fieldInfo);
    addField.finish(0, 0, 0);
    consumer.close();
    output.close();

    IndexInput input = dir.openInput("foo.txt", IOContext.DEFAULT);
    LookupFactory load = provider.load(input);
    PostingsFormatProvider format = new PreBuiltPostingsFormatProvider(new Elasticsearch090PostingsFormat());
    NamedAnalyzer analyzer = new NamedAnalyzer("foo", new StandardAnalyzer(TEST_VERSION_CURRENT));
    assertNull(load.getLookup(
            new CompletionFieldMapper(new Names("foo"), analyzer, analyzer, format, null, true, true, true,
                    Integer.MAX_VALUE, AbstractFieldMapper.MultiFields.empty(), null),
            new CompletionSuggestionContext(null)));
    dir.close();
}