List of usage examples for org.apache.lucene.index IndexWriter rollback
@Override public void rollback() throws IOException

Declared in: org.apache.lucene.index.IndexWriter

Closes the IndexWriter without committing any changes that have occurred since the last commit (or since it was opened, if commit hasn't been called).
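Before the collected examples, a minimal sketch of the basic pattern (hypothetical names such as directory, analyzer and doc; note that rollback() also closes the writer, so no separate close() is needed on the failure path):

IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(analyzer));
try {
    writer.addDocument(doc);  // any number of adds, updates, deletes
    writer.commit();          // make the pending changes durable
} catch (IOException e) {
    writer.rollback();        // discard everything since the last commit; also closes the writer
    throw e;
}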
From source file: org.elasticsearch.index.engine.internal.AsynchronousEngine.java
License: Apache License

@Override
public void start() throws EngineException {
    store.incRef();
    try (InternalLock _ = writeLock.acquire()) {
        if (indexWriter != null) {
            throw new EngineAlreadyStartedException(shardId);
        }
        if (closed) {
            throw new EngineClosedException(shardId);
        }
        if (logger.isDebugEnabled()) {
            logger.debug("starting AsynchronousEngine");
        }
        try {
            this.indexWriter = createWriter();
            mergeScheduler.removeListener(this.throttle);
            this.throttle = new IndexThrottle(mergeScheduler, logger, indexingService);
            mergeScheduler.addListener(throttle);
        } catch (IOException e) {
            maybeFailEngine(e, "start");
            if (this.indexWriter != null) {
                try {
                    IndexWriter pending = indexWriter;
                    indexWriter = null;
                    pending.rollback();
                } catch (IOException e1) {
                    e.addSuppressed(e1);
                }
            }
            throw new EngineCreationFailureException(shardId, "failed to create engine", e);
        }
        try {
            // commit on a just opened writer will commit even if there are no changes done to it
            // we rely on that for the commit data translog id key
            if (Lucene.indexExists(store.directory())) {
                Map<String, String> commitUserData = Lucene.readSegmentInfos(store.directory()).getUserData();
                if (commitUserData.containsKey(Translog.TRANSLOG_ID_KEY)) {
                    translogIdGenerator.set(Long.parseLong(commitUserData.get(Translog.TRANSLOG_ID_KEY)));
                } else {
                    translogIdGenerator.set(System.currentTimeMillis());
                    indexWriter.setCommitData(Collections.singletonMap(Translog.TRANSLOG_ID_KEY,
                            Long.toString(translogIdGenerator.get())));
                    indexWriter.commit();
                }
            } else {
                translogIdGenerator.set(System.currentTimeMillis());
                indexWriter.setCommitData(Collections.singletonMap(Translog.TRANSLOG_ID_KEY,
                        Long.toString(translogIdGenerator.get())));
                indexWriter.commit();
            }
            translog.newTranslog(translogIdGenerator.get());
            this.searcherManager = buildSearchManager(indexWriter);
            versionMap.setManager(searcherManager);
            readLastCommittedSegmentsInfo();
        } catch (IOException e) {
            maybeFailEngine(e, "start");
            try {
                indexWriter.rollback();
            } catch (IOException e1) {
                // ignore
            } finally {
                IOUtils.closeWhileHandlingException(indexWriter);
            }
            throw new EngineCreationFailureException(shardId, "failed to open reader on writer", e);
        }
    } finally {
        store.decRef();
    }
}
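The failure handling in start() is worth calling out: the field is cleared before rollback() so no other thread can observe a writer that is about to be closed, and a secondary failure from the rollback itself is attached to the primary exception rather than masking it. A minimal sketch of just that pattern (field and variable names as in the example above):

IndexWriter pending = indexWriter;
indexWriter = null;          // detach first: nobody else may use the dying writer
try {
    pending.rollback();      // discards uncommitted changes and closes the writer
} catch (IOException e1) {
    e.addSuppressed(e1);     // keep the original failure as the primary exception
}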
From source file: org.elasticsearch.index.engine.internal.AsynchronousEngine.java
License: Apache License

@Override
public void flush(Flush flush) throws EngineException {
    ensureOpen();
    if (flush.type() == Flush.Type.NEW_WRITER || flush.type() == Flush.Type.COMMIT_TRANSLOG) {
        // check outside the lock as well so we can check without blocking on the write lock
        if (onGoingRecoveries.get() > 0) {
            throw new FlushNotAllowedEngineException(shardId,
                    "recovery is in progress, flush [" + flush.type() + "] is not allowed");
        }
    }
    int currentFlushing = flushing.incrementAndGet();
    if (currentFlushing > 1 && !flush.waitIfOngoing()) {
        flushing.decrementAndGet();
        throw new FlushNotAllowedEngineException(shardId, "already flushing...");
    }
    flushLock.lock();
    try {
        if (flush.type() == Flush.Type.NEW_WRITER) {
            try (InternalLock _ = writeLock.acquire()) {
                if (onGoingRecoveries.get() > 0) {
                    throw new FlushNotAllowedEngineException(shardId, "Recovery is in progress, flush is not allowed");
                }
                // disable refreshing, not dirty
                dirty = false;
                try {
                    { // commit and close the current writer - we write the current translog ID just in case
                        final long translogId = translog.currentId();
                        indexWriter.setCommitData(Collections.singletonMap(Translog.TRANSLOG_ID_KEY, Long.toString(translogId)));
                        indexWriter.commit();
                        indexWriter.rollback();
                    }
                    indexWriter = createWriter();
                    mergeScheduler.removeListener(this.throttle);
                    this.throttle = new IndexThrottle(mergeScheduler, this.logger, indexingService);
                    mergeScheduler.addListener(throttle);
                    // commit on a just opened writer will commit even if there are no changes done to it
                    // we rely on that for the commit data translog id key
                    if (flushNeeded || flush.force()) {
                        flushNeeded = false;
                        long translogId = translogIdGenerator.incrementAndGet();
                        indexWriter.setCommitData(Collections.singletonMap(Translog.TRANSLOG_ID_KEY, Long.toString(translogId)));
                        indexWriter.commit();
                        translog.newTranslog(translogId);
                    }
                    SearcherManager current = this.searcherManager;
                    this.searcherManager = buildSearchManager(indexWriter);
                    versionMap.setManager(searcherManager);
                    try {
                        IOUtils.close(current);
                    } catch (Throwable t) {
                        logger.warn("Failed to close current SearcherManager", t);
                    }
                    maybePruneDeletedTombstones();
                } catch (Throwable t) {
                    throw new FlushFailedEngineException(shardId, t);
                }
            }
        } else if (flush.type() == Flush.Type.COMMIT_TRANSLOG) {
            try (InternalLock _ = readLock.acquire()) {
                final IndexWriter indexWriter = currentIndexWriter();
                if (onGoingRecoveries.get() > 0) {
                    throw new FlushNotAllowedEngineException(shardId, "Recovery is in progress, flush is not allowed");
                }
                if (flushNeeded || flush.force()) {
                    flushNeeded = false;
                    try {
                        long translogId = translogIdGenerator.incrementAndGet();
                        translog.newTransientTranslog(translogId);
                        indexWriter.setCommitData(Collections.singletonMap(Translog.TRANSLOG_ID_KEY, Long.toString(translogId)));
                        indexWriter.commit();
                        // we need to refresh in order to clear older version values
                        refresh(new Refresh("version_table_flush").force(true));
                        // we need to move transient to current only after we refresh
                        // so items added to current will still be around for realtime get
                        // when trans overrides it
                        translog.makeTransientCurrent();
                    } catch (Throwable e) {
                        translog.revertTransient();
                        throw new FlushFailedEngineException(shardId, e);
                    }
                }
            }
            // We don't have to do this here; we do it defensively to make sure that even if wall clock time is misbehaving
            // (e.g., moves backwards) we will at least still sometimes prune deleted tombstones:
            if (enableGcDeletes) {
                pruneDeletedTombstones();
            }
        } else if (flush.type() == Flush.Type.COMMIT) {
            // note, its ok to just commit without cleaning the translog, its perfectly fine to replay a
            // translog on an index that was opened on a committed point in time that is "in the future"
            // of that translog
            try (InternalLock _ = readLock.acquire()) {
                final IndexWriter indexWriter = currentIndexWriter();
                // we allow to *just* commit if there is an ongoing recovery happening...
                // its ok to use this, only a flush will cause a new translogId, and we are locked here from
                // other flushes use flushLock
                try {
                    long translogId = translog.currentId();
                    indexWriter.setCommitData(Collections.singletonMap(Translog.TRANSLOG_ID_KEY, Long.toString(translogId)));
                    indexWriter.commit();
                } catch (Throwable e) {
                    throw new FlushFailedEngineException(shardId, e);
                }
            }
            // We don't have to do this here; we do it defensively to make sure that even if wall clock time is misbehaving
            // (e.g., moves backwards) we will at least still sometimes prune deleted tombstones:
            if (enableGcDeletes) {
                pruneDeletedTombstones();
            }
        } else {
            throw new ElasticsearchIllegalStateException("flush type [" + flush.type() + "] not supported");
        }
        // reread the last committed segment infos
        try (InternalLock _ = readLock.acquire()) {
            ensureOpen();
            readLastCommittedSegmentsInfo();
        } catch (Throwable e) {
            if (!closed) {
                logger.warn("failed to read latest segment infos on flush", e);
                if (Lucene.isCorruptionException(e)) {
                    throw new FlushFailedEngineException(shardId, e);
                }
            }
        }
    } catch (FlushFailedEngineException ex) {
        maybeFailEngine(ex, "flush");
        throw ex;
    } finally {
        flushLock.unlock();
        flushing.decrementAndGet();
    }
}
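In the NEW_WRITER branch above, rollback() directly follows commit(). That is deliberate: immediately after a commit there is nothing left to discard, so rollback() degenerates into a plain close of the old writer, without the implicit commit that close() performs on the Lucene 4.x line this engine uses. A sketch of the idiom (names as in the example):

indexWriter.setCommitData(commitData);  // stamp the translog id into the commit point
indexWriter.commit();                   // durable commit
indexWriter.rollback();                 // nothing pending anymore: this just closes the writer
indexWriter = createWriter();           // swap in a fresh writer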
From source file: org.elasticsearch.index.engine.internal.ZyucInternalEngine.java
License: Apache License

@Override
public void start() throws EngineException {
    store.incRef();
    try (InternalLock _ = writeLock.acquire()) {
        if (indexWriter != null) {
            throw new EngineAlreadyStartedException(shardId);
        }
        if (closed) {
            throw new EngineClosedException(shardId);
        }
        if (logger.isDebugEnabled()) {
            logger.debug("starting engine");
        }
        try {
            this.indexWriter = createWriter();
            mergeScheduler.removeListener(this.throttle);
            this.throttle = new IndexThrottle(mergeScheduler, logger, indexingService);
            mergeScheduler.addListener(throttle);
        } catch (IOException e) {
            maybeFailEngine(e, "start");
            if (this.indexWriter != null) {
                try {
                    IndexWriter pending = indexWriter;
                    indexWriter = null;
                    pending.rollback();
                } catch (IOException e1) {
                    e.addSuppressed(e1);
                }
            }
            throw new EngineCreationFailureException(shardId, "failed to create engine", e);
        }
        try {
            // commit on a just opened writer will commit even if there are no changes done to it
            // we rely on that for the commit data translog id key
            if (Lucene.indexExists(store.directory())) {
                Map<String, String> commitUserData = Lucene.readSegmentInfos(store.directory()).getUserData();
                if (commitUserData.containsKey(Translog.TRANSLOG_ID_KEY)) {
                    translogIdGenerator.set(Long.parseLong(commitUserData.get(Translog.TRANSLOG_ID_KEY)));
                } else {
                    translogIdGenerator.set(System.currentTimeMillis());
                    indexWriter.setCommitData(Collections.singletonMap(Translog.TRANSLOG_ID_KEY,
                            Long.toString(translogIdGenerator.get())));
                    indexWriter.commit();
                }
            } else {
                translogIdGenerator.set(System.currentTimeMillis());
                indexWriter.setCommitData(Collections.singletonMap(Translog.TRANSLOG_ID_KEY,
                        Long.toString(translogIdGenerator.get())));
                indexWriter.commit();
            }
            translog.newTranslog(translogIdGenerator.get());
            this.searcherManager = buildSearchManager(indexWriter);
            versionMap.setManager(searcherManager);
            readLastCommittedSegmentsInfo();
        } catch (IOException e) {
            maybeFailEngine(e, "start");
            try {
                indexWriter.rollback();
            } catch (IOException e1) {
                // ignore
            } finally {
                IOUtils.closeWhileHandlingException(indexWriter);
            }
            throw new EngineCreationFailureException(shardId, "failed to open reader on writer", e);
        }
    } finally {
        store.decRef();
    }
}
From source file: org.elasticsearch.index.engine.internal.ZyucInternalEngine.java
License: Apache License

@Override
public void flush(Flush flush) throws EngineException {
    ensureOpen();
    if (flush.type() == Flush.Type.NEW_WRITER || flush.type() == Flush.Type.COMMIT_TRANSLOG) {
        // check outside the lock as well so we can check without blocking on the write lock
        if (onGoingRecoveries.get() > 0) {
            throw new FlushNotAllowedEngineException(shardId,
                    "recovery is in progress, flush [" + flush.type() + "] is not allowed");
        }
    }
    int currentFlushing = flushing.incrementAndGet();
    if (currentFlushing > 1 && !flush.waitIfOngoing()) {
        flushing.decrementAndGet();
        throw new FlushNotAllowedEngineException(shardId, "already flushing...");
    }
    flushLock.lock();
    try {
        if (flush.type() == Flush.Type.NEW_WRITER) {
            try (InternalLock _ = writeLock.acquire()) {
                if (onGoingRecoveries.get() > 0) {
                    throw new FlushNotAllowedEngineException(shardId, "Recovery is in progress, flush is not allowed");
                }
                // disable refreshing, not dirty
                dirty = false;
                try {
                    { // commit and close the current writer - we write the current translog ID just in case
                        final long translogId = translog.currentId();
                        indexWriter.setCommitData(Collections.singletonMap(Translog.TRANSLOG_ID_KEY, Long.toString(translogId)));
                        indexWriter.commit();
                        indexWriter.rollback();
                    }
                    indexWriter = createWriter();
                    mergeScheduler.removeListener(this.throttle);
                    this.throttle = new IndexThrottle(mergeScheduler, this.logger, indexingService);
                    mergeScheduler.addListener(throttle);
                    // commit on a just opened writer will commit even if there are no changes done to it
                    // we rely on that for the commit data translog id key
                    if (flushNeeded || flush.force()) {
                        flushNeeded = false;
                        long translogId = translogIdGenerator.incrementAndGet();
                        indexWriter.setCommitData(Collections.singletonMap(Translog.TRANSLOG_ID_KEY, Long.toString(translogId)));
                        indexWriter.commit();
                        translog.newTranslog(translogId);
                    }
                    SearcherManager current = this.searcherManager;
                    this.searcherManager = buildSearchManager(indexWriter);
                    versionMap.setManager(searcherManager);
                    try {
                        IOUtils.close(current);
                    } catch (Throwable t) {
                        logger.warn("Failed to close current SearcherManager", t);
                    }
                    maybePruneDeletedTombstones();
                } catch (Throwable t) {
                    throw new FlushFailedEngineException(shardId, t);
                }
            }
        } else if (flush.type() == Flush.Type.COMMIT_TRANSLOG) {
            try (InternalLock _ = readLock.acquire()) {
                final IndexWriter indexWriter = currentIndexWriter();
                if (onGoingRecoveries.get() > 0) {
                    throw new FlushNotAllowedEngineException(shardId, "Recovery is in progress, flush is not allowed");
                }
                if (flushNeeded || flush.force()) {
                    flushNeeded = false;
                    try {
                        long translogId = translogIdGenerator.incrementAndGet();
                        translog.newTransientTranslog(translogId);
                        // indexWriter.setCommitData(Collections.singletonMap(Translog.TRANSLOG_ID_KEY, Long.toString(translogId)));
                        indexWriter.commit();
                        // we need to refresh in order to clear older version values
                        refresh(new Refresh("version_table_flush").force(true));
                        // we need to move transient to current only after we refresh
                        // so items added to current will still be around for realtime get
                        // when trans overrides it
                        translog.makeTransientCurrent();
                    } catch (Throwable e) {
                        translog.revertTransient();
                        throw new FlushFailedEngineException(shardId, e);
                    }
                }
            }
            // We don't have to do this here; we do it defensively to make sure that even if wall clock time is misbehaving
            // (e.g., moves backwards) we will at least still sometimes prune deleted tombstones:
            if (enableGcDeletes) {
                pruneDeletedTombstones();
            }
        } else if (flush.type() == Flush.Type.COMMIT) {
            // note, its ok to just commit without cleaning the translog, its perfectly fine to replay a
            // translog on an index that was opened on a committed point in time that is "in the future"
            // of that translog
            try (InternalLock _ = readLock.acquire()) {
                final IndexWriter indexWriter = currentIndexWriter();
                // we allow to *just* commit if there is an ongoing recovery happening...
                // its ok to use this, only a flush will cause a new translogId, and we are locked here from
                // other flushes use flushLock
                try {
                    long translogId = translog.currentId();
                    // indexWriter.setCommitData(Collections.singletonMap(Translog.TRANSLOG_ID_KEY, Long.toString(translogId)));
                    indexWriter.commit();
                } catch (Throwable e) {
                    throw new FlushFailedEngineException(shardId, e);
                }
            }
            // We don't have to do this here; we do it defensively to make sure that even if wall clock time is misbehaving
            // (e.g., moves backwards) we will at least still sometimes prune deleted tombstones:
            if (enableGcDeletes) {
                pruneDeletedTombstones();
            }
        } else {
            throw new ElasticsearchIllegalStateException("flush type [" + flush.type() + "] not supported");
        }
        // reread the last committed segment infos
        try (InternalLock _ = readLock.acquire()) {
            ensureOpen();
            readLastCommittedSegmentsInfo();
        } catch (Throwable e) {
            if (!closed) {
                logger.warn("failed to read latest segment infos on flush", e);
                if (Lucene.isCorruptionException(e)) {
                    throw new FlushFailedEngineException(shardId, e);
                }
            }
        }
    } catch (FlushFailedEngineException ex) {
        maybeFailEngine(ex, "flush");
        throw ex;
    } finally {
        flushLock.unlock();
        flushing.decrementAndGet();
    }
}
From source file: org.jabylon.index.properties.jobs.impl.ReorgIndexJob.java
License: Open Source License

public static void indexWorkspace(RepositoryConnector connector, IProgressMonitor monitor)
        throws CorruptIndexException, IOException {
    long time = System.currentTimeMillis();
    logger.info("Reorg of search index started");
    IndexWriter writer = null;
    CDONet4jSession session = null;
    SubMonitor submon = SubMonitor.convert(monitor, 100);
    try {
        writer = IndexActivator.getDefault().obtainIndexWriter();
        writer.deleteAll();
        session = connector.createSession();
        CDOView view = connector.openView(session);
        CDOResource resource = view.getResource(ServerConstants.WORKSPACE_RESOURCE);
        Workspace workspace = (Workspace) resource.getContents().get(0);
        indexWorkspace(workspace, writer, submon.newChild(95));
        indexTMX(writer, submon.newChild(5));
        writer.commit();
    } catch (OutOfMemoryError error) {
        logger.error("Out of memory during index reorg", error);
        // As suggested by lucene documentation
        writer.close();
    } catch (Exception e) {
        logger.error("Exception during index reorg. Rolling back", e);
        if (writer != null)
            writer.rollback();
        throw new IllegalStateException("Failed to write index", e);
    } finally {
        if (monitor != null)
            monitor.done();
        if (session != null) {
            session.close();
        }
        IndexActivator.getDefault().returnIndexWriter(writer);
    }
    long duration = (System.currentTimeMillis() - time) / 1000;
    logger.info("Search Index Reorg finished. Took {} seconds", duration);
}
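This reorg relies on commit/rollback making the whole rebuild atomic: deleteAll() and all re-added documents stay invisible to readers until the single commit() at the end, so a rollback() on failure leaves the previously committed index untouched. Schematically (indexEverything is a hypothetical stand-in for the indexing calls above):

writer.deleteAll();         // staged only; readers still see the old index
indexEverything(writer);    // may throw at any point
writer.commit();            // the delete and the re-added documents become visible atomically
// on any failure before commit(): writer.rollback() restores the last committed state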
From source file: org.ms123.common.data.lucene.LuceneServiceImpl.java
License: Open Source License

public synchronized void addToIndex(LuceneSession session, Object obj) {
    IndexWriter iw = session.getIndexWriter();
    StoreDesc sdesc = session.getStoreDesc();
    String namespace = sdesc.getNamespace();
    try {
        String entityName = getEntityName(obj);
        Object id = getId(obj, session.getPrimaryKey());
        //System.out.println("addToIndex:" + id + "/iw:" + iw);
        Map p = new HashMap();
        p.put("view", "global-search");
        p.put(ENTITY, entityName);
        p.put(StoreDesc.STORE_ID, sdesc.getStoreId());
        p.put(StoreDesc.PACK, sdesc.getPack());
        List<Map> searchFields = m_settingService.getFieldsForEntityView(sdesc.getNamespace(), entityName, "global-search");
        Document doc = new Document();
        Field field = new Field(ENTITY, entityName, Field.Store.YES, Field.Index.NOT_ANALYZED);
        doc.add(field);
        if (id instanceof Long) {
            NumericField nfield = new NumericField("id_numeric", Field.Store.YES, true);
            nfield = nfield.setLongValue((Long) id);
            doc.add(nfield);
        }
        field = new Field("id", id.toString(), Field.Store.YES, Field.Index.NOT_ANALYZED);
        doc.add(field);
        populate(obj, searchFields, doc);
        String[] _id = new String[2];
        _id[0] = id.toString();
        _id[1] = entityName;
        session.addId(_id);
        iw.addDocument(doc);
        iw.commit();
    } catch (Exception e) {
        try {
            iw.rollback();
        } catch (Exception x) {
        }
        e.printStackTrace();
        throw new RuntimeException(e);
    }
}
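One caveat on the error handling above: the empty catch around iw.rollback() silently drops a rollback failure. Since Java 7 it can be preserved on the original exception instead; a hedged variant (not the project's code):

try {
    iw.addDocument(doc);
    iw.commit();
} catch (Exception e) {
    try {
        iw.rollback();
    } catch (Exception rollbackFailure) {
        e.addSuppressed(rollbackFailure);  // keep both failures visible in the stack trace
    }
    throw new RuntimeException(e);
}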
From source file: org.ms123.common.data.lucene.LuceneServiceImpl.java
License: Open Source License

public synchronized void deleteFromIndex(LuceneSession session, Object obj) {
    StoreDesc sdesc = session.getStoreDesc();
    String namespace = sdesc.getNamespace();
    IndexWriter iw = getRealIndexWriter(namespace);
    try {
        String entityName = getEntityName(obj);
        Object id = PropertyUtils.getProperty(obj, session.getPrimaryKey());
        Term term1 = new Term("id", id.toString());
        Term term2 = new Term(ENTITY, entityName);
        TermQuery query1 = new TermQuery(term1);
        TermQuery query2 = new TermQuery(term2);
        BooleanQuery and = new BooleanQuery();
        and.add(query1, BooleanClause.Occur.MUST);
        and.add(query2, BooleanClause.Occur.MUST);
        IndexSearcher is = getIndexSearcher(namespace, iw);
        TopDocs hits = is.search(and, 10);
        if (hits.scoreDocs.length == 0) {
            System.out.println("LuceneServiceImpl.deleteFromIndex:id(" + id + ") not found");
        } else if (hits.scoreDocs.length == 1) {
            System.out.println("LuceneServiceImpl.deleting:" + and);
            iw.deleteDocuments(and);
        } else if (hits.scoreDocs.length > 1) {
            throw new IllegalArgumentException(
                    "LuceneServiceImpl.delete:Term (" + and + ") matches more than 1 document in the index.");
        }
        iw.commit();
    } catch (Exception e) {
        try {
            iw.rollback();
        } catch (Exception x) {
        }
        e.printStackTrace();
        throw new RuntimeException(e);
    }
}
From source file: perf.IDPerfTest.java
License: Apache License

private static Result testOne(String indexPath, String desc, IDIterator ids, final int minTermsInBlock,
        final int maxTermsInBlock) throws IOException {
    System.out.println("\ntest: " + desc + " termBlocks=" + minTermsInBlock + "/" + maxTermsInBlock);
    Directory dir = FSDirectory.open(new File(indexPath));
    //IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_48, new StandardAnalyzer(Version.LUCENE_48));
    IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_4_8, new StandardAnalyzer(Version.LUCENE_4_8));
    iwc.setMergeScheduler(new SerialMergeScheduler());
    iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    // So I can walk the files and get the *.tip sizes:
    iwc.setUseCompoundFile(false);
    iwc.setCodec(new Lucene53Codec() {
        @Override
        public PostingsFormat getPostingsFormatForField(String field) {
            return new Lucene50PostingsFormat(minTermsInBlock, maxTermsInBlock);
        }
    });
    // 7/7/7 segment structure:
    iwc.setMaxBufferedDocs(ID_COUNT / 777);
    iwc.setRAMBufferSizeMB(-1);
    //iwc.setInfoStream(new PrintStreamInfoStream(System.out));
    //iwc.setMergePolicy(new LogDocMergePolicy());
    ((TieredMergePolicy) iwc.getMergePolicy()).setFloorSegmentMB(.001);
    ((TieredMergePolicy) iwc.getMergePolicy()).setNoCFSRatio(0.0);
    //((LogDocMergePolicy) iwc.getMergePolicy()).setMinMergeDocs(1000);
    iwc.getMergePolicy().setNoCFSRatio(0.0);
    IndexWriter w = new IndexWriter(dir, iwc);
    Document doc = new Document();
    FieldType ft = new FieldType(StringField.TYPE_NOT_STORED);
    ft.setTokenized(true);
    ft.freeze();
    BytesRef idValue = new BytesRef(64);
    Field idField = new Field("id", new BinaryTokenStream(idValue), ft);
    doc.add(idField);
    long t0 = System.nanoTime();
    BytesRef[] lookupIDs = new BytesRef[ID_SEARCH_COUNT];
    Random random = new Random(17);
    int lookupCount = 0;
    double rate = 1.01 * ((double) ID_SEARCH_COUNT) / ID_COUNT;
    for (int i = 0; i < ID_COUNT; i++) {
        ids.next(idValue);
        if (lookupCount < lookupIDs.length && random.nextDouble() <= rate) {
            lookupIDs[lookupCount++] = BytesRef.deepCopyOf(idValue);
        }
        // Trickery: the idsIter changed the idValue which the BinaryTokenStream reuses for each added doc
        w.addDocument(doc);
    }
    if (lookupCount < lookupIDs.length) {
        throw new RuntimeException("didn't get enough lookup ids: " + lookupCount + " vs " + lookupIDs.length);
    }
    long indexTime = System.nanoTime() - t0;
    System.out.println("  indexing done; waitForMerges...");
    w.waitForMerges();
    IndexReader r = DirectoryReader.open(w, true);
    System.out.println("  reader=" + r);
    shuffle(random, lookupIDs);
    shuffle(random, lookupIDs);
    long bestTime = Long.MAX_VALUE;
    long checksum = 0;
    List<AtomicReaderContext> leaves = new ArrayList<>(r.leaves());
    // Sort largest to smallest:
    Collections.sort(leaves, new Comparator<AtomicReaderContext>() {
        @Override
        public int compare(AtomicReaderContext c1, AtomicReaderContext c2) {
            return c2.reader().maxDoc() - c1.reader().maxDoc();
        }
    });
    TermsEnum[] termsEnums = new TermsEnum[leaves.size()];
    DocsEnum[] docsEnums = new DocsEnum[leaves.size()];
    int[] docBases = new int[leaves.size()];
    for (int i = 0; i < leaves.size(); i++) {
        //System.out.println("i=" + i + " count=" + leaves.get(i).reader().maxDoc());
        termsEnums[i] = leaves.get(i).reader().fields().terms("id").iterator(null);
        docBases[i] = leaves.get(i).docBase;
    }
    long rawLookupCount = 0;
    int countx = 0;
    for (int iter = 0; iter < 5; iter++) {
        t0 = System.nanoTime();
        BlockTreeTermsReader.seekExactFastNotFound = 0;
        BlockTreeTermsReader.seekExactFastRootNotFound = 0;
        rawLookupCount = 0;
        for (BytesRef id : lookupIDs) {
            if (countx++ < 50) {
                System.out.println("  id=" + id);
            }
            boolean found = false;
            for (int seg = 0; seg < termsEnums.length; seg++) {
                rawLookupCount++;
                if (termsEnums[seg].seekExact(id)) {
                    docsEnums[seg] = termsEnums[seg].docs(null, docsEnums[seg], 0);
                    int docID = docsEnums[seg].nextDoc();
                    if (docID == DocsEnum.NO_MORE_DOCS) {
                        // uh-oh!
                        throw new RuntimeException("id not found: " + id);
                    }
                    // paranoia:
                    checksum += docID + docBases[seg];
                    found = true;
                    // Optimization vs MultiFields: we don't need to check any more segments since id is PK
                    break;
                }
            }
            if (found == false) {
                // uh-oh!
                throw new RuntimeException("id not found: " + id);
            }
        }
        long lookupTime = System.nanoTime() - t0;
        System.out.println(String.format(Locale.ROOT, "  iter=" + iter + " lookupTime=%.3f sec", lookupTime / 1000000000.0));
        if (lookupTime < bestTime) {
            bestTime = lookupTime;
            System.out.println("  **");
        }
    }
    long totalBytes = 0;
    long termsIndexTotalBytes = 0;
    for (String fileName : dir.listAll()) {
        long bytes = dir.fileLength(fileName);
        totalBytes += bytes;
        if (fileName.endsWith(".tip")) {
            termsIndexTotalBytes += bytes;
        }
    }
    r.close();
    w.rollback();
    dir.close();
    return new Result(desc, ID_COUNT / (indexTime / 1000000.0), lookupIDs.length / (bestTime / 1000000.0),
            totalBytes, termsIndexTotalBytes, checksum, BlockTreeTermsReader.seekExactFastNotFound,
            BlockTreeTermsReader.seekExactFastRootNotFound, rawLookupCount, minTermsInBlock, maxTermsInBlock);
}
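Note the teardown at the end: the benchmark never needs the index again, so w.rollback() serves as a fast close that skips the final commit a close() would otherwise perform; only the already-committed state remains on disk. The same teardown in isolation:

r.close();      // release the reader first
w.rollback();   // close the writer without committing; buffered changes are discarded
dir.close();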
From source file: perf.Indexer.java
License: Apache License

private static void _main(String[] clArgs) throws Exception {
    Args args = new Args(clArgs);
    // EG: -facets Date -facets characterCount ...
    FacetsConfig facetsConfig = new FacetsConfig();
    facetsConfig.setHierarchical("Date", true);
    final Set<String> facetFields = new HashSet<String>();
    if (args.hasArg("-facets")) {
        for (String arg : args.getStrings("-facets")) {
            facetFields.add(arg);
        }
    }
    final String dirImpl = args.getString("-dirImpl");
    final String dirPath = args.getString("-indexPath") + "/index";
    final Directory dir;
    OpenDirectory od = OpenDirectory.get(dirImpl);
    dir = od.open(Paths.get(dirPath));
    final String analyzer = args.getString("-analyzer");
    final Analyzer a;
    if (analyzer.equals("EnglishAnalyzer")) {
        a = new EnglishAnalyzer();
    } else if (analyzer.equals("StandardAnalyzer")) {
        a = new StandardAnalyzer();
    } else if (analyzer.equals("StandardAnalyzerNoStopWords")) {
        a = new StandardAnalyzer(CharArraySet.EMPTY_SET);
    } else if (analyzer.equals("ShingleStandardAnalyzer")) {
        a = new ShingleAnalyzerWrapper(new StandardAnalyzer(), 2, 2);
    } else if (analyzer.equals("ShingleStandardAnalyzerNoStopWords")) {
        a = new ShingleAnalyzerWrapper(new StandardAnalyzer(CharArraySet.EMPTY_SET), 2, 2);
    } else {
        throw new RuntimeException("unknown analyzer " + analyzer);
    }
    final String lineFile = args.getString("-lineDocsFile");
    // -1 means all docs in the line file:
    final int docCountLimit = args.getInt("-docCountLimit");
    final int numThreads = args.getInt("-threadCount");
    final boolean doForceMerge = args.getFlag("-forceMerge");
    final boolean verbose = args.getFlag("-verbose");
    String indexSortField = null;
    SortField.Type indexSortType = null;
    if (args.hasArg("-indexSort")) {
        indexSortField = args.getString("-indexSort");
        int i = indexSortField.indexOf(':');
        if (i == -1) {
            throw new IllegalArgumentException("-indexSort should have form field:type; got: " + indexSortField);
        }
        String typeString = indexSortField.substring(i + 1, indexSortField.length());
        if (typeString.equals("long")) {
            indexSortType = SortField.Type.LONG;
        } else if (typeString.equals("string")) {
            indexSortType = SortField.Type.STRING;
        } else {
            throw new IllegalArgumentException("-indexSort can only handle 'long' sort; got: " + typeString);
        }
        indexSortField = indexSortField.substring(0, i);
    } else {
        indexSortType = null;
    }
    final double ramBufferSizeMB = args.getDouble("-ramBufferMB");
    final int maxBufferedDocs = args.getInt("-maxBufferedDocs");
    final String defaultPostingsFormat = args.getString("-postingsFormat");
    final boolean doDeletions = args.getFlag("-deletions");
    final boolean printDPS = args.getFlag("-printDPS");
    final boolean waitForMerges = args.getFlag("-waitForMerges");
    final boolean waitForCommit = args.getFlag("-waitForCommit");
    final String mergePolicy = args.getString("-mergePolicy");
    final Mode mode;
    final boolean doUpdate = args.getFlag("-update");
    if (doUpdate) {
        mode = Mode.UPDATE;
    } else {
        mode = Mode.valueOf(args.getString("-mode", "add").toUpperCase(Locale.ROOT));
    }
    int randomDocIDMax;
    if (mode == Mode.UPDATE) {
        randomDocIDMax = args.getInt("-randomDocIDMax");
    } else {
        randomDocIDMax = -1;
    }
    final String idFieldPostingsFormat = args.getString("-idFieldPostingsFormat");
    final boolean addGroupingFields = args.getFlag("-grouping");
    final boolean useCFS = args.getFlag("-cfs");
    final boolean storeBody = args.getFlag("-store");
    final boolean tvsBody = args.getFlag("-tvs");
    final boolean bodyPostingsOffsets = args.getFlag("-bodyPostingsOffsets");
    final int maxConcurrentMerges = args.getInt("-maxConcurrentMerges");
    final boolean addDVFields = args.getFlag("-dvfields");
    final boolean doRandomCommit = args.getFlag("-randomCommit");
    final boolean useCMS = args.getFlag("-useCMS");
    final boolean disableIOThrottle = args.getFlag("-disableIOThrottle");
    if (waitForCommit == false && waitForMerges) {
        throw new RuntimeException("pass -waitForCommit if you pass -waitForMerges");
    }
    if (waitForCommit == false && doForceMerge) {
        throw new RuntimeException("pass -waitForCommit if you pass -forceMerge");
    }
    if (waitForCommit == false && doDeletions) {
        throw new RuntimeException("pass -waitForCommit if you pass -deletions");
    }
    if (useCMS == false && disableIOThrottle) {
        throw new RuntimeException("-disableIOThrottle only makes sense with -useCMS");
    }
    final double nrtEverySec;
    if (args.hasArg("-nrtEverySec")) {
        nrtEverySec = args.getDouble("-nrtEverySec");
    } else {
        nrtEverySec = -1.0;
    }
    // True to start back at the beginning if we run out of
    // docs from the line file source:
    final boolean repeatDocs = args.getFlag("-repeatDocs");
    final String facetDVFormatName;
    if (facetFields.isEmpty()) {
        facetDVFormatName = "Lucene54";
    } else {
        facetDVFormatName = args.getString("-facetDVFormat");
    }
    if (addGroupingFields && docCountLimit == -1) {
        a.close();
        throw new RuntimeException("cannot add grouping fields unless docCount is set");
    }
    args.check();
    System.out.println("Dir: " + dirImpl);
    System.out.println("Index path: " + dirPath);
    System.out.println("Analyzer: " + analyzer);
    System.out.println("Line file: " + lineFile);
    System.out.println("Doc count limit: " + (docCountLimit == -1 ? "all docs" : "" + docCountLimit));
    System.out.println("Threads: " + numThreads);
    System.out.println("Force merge: " + (doForceMerge ? "yes" : "no"));
    System.out.println("Verbose: " + (verbose ? "yes" : "no"));
    System.out.println("RAM Buffer MB: " + ramBufferSizeMB);
    System.out.println("Max buffered docs: " + maxBufferedDocs);
    System.out.println("Default postings format: " + defaultPostingsFormat);
    System.out.println("Do deletions: " + (doDeletions ? "yes" : "no"));
    System.out.println("Wait for merges: " + (waitForMerges ? "yes" : "no"));
    System.out.println("Wait for commit: " + (waitForCommit ? "yes" : "no"));
    System.out.println("IO throttle: " + (disableIOThrottle ? "no" : "yes"));
    System.out.println("Merge policy: " + mergePolicy);
    System.out.println("Mode: " + mode);
    if (mode == Mode.UPDATE) {
        System.out.println("DocIDMax: " + randomDocIDMax);
    }
    System.out.println("ID field postings format: " + idFieldPostingsFormat);
    System.out.println("Add grouping fields: " + (addGroupingFields ? "yes" : "no"));
    System.out.println("Compound file format: " + (useCFS ? "yes" : "no"));
    System.out.println("Store body field: " + (storeBody ? "yes" : "no"));
    System.out.println("Term vectors for body field: " + (tvsBody ? "yes" : "no"));
    System.out.println("Facet DV Format: " + facetDVFormatName);
    System.out.println("Facet fields: " + facetFields);
    System.out.println("Body postings offsets: " + (bodyPostingsOffsets ? "yes" : "no"));
    System.out.println("Max concurrent merges: " + maxConcurrentMerges);
    System.out.println("Add DocValues fields: " + addDVFields);
    System.out.println("Use ConcurrentMergeScheduler: " + useCMS);
    if (nrtEverySec > 0.0) {
        System.out.println("Open & close NRT reader every: " + nrtEverySec + " sec");
    } else {
        System.out.println("Open & close NRT reader every: never");
    }
    System.out.println("Repeat docs: " + repeatDocs);
    if (verbose) {
        InfoStream.setDefault(new PrintStreamInfoStream(System.out));
    }
    final IndexWriterConfig iwc = new IndexWriterConfig(a);
    if (indexSortField != null) {
        iwc.setIndexSort(new Sort(new SortField(indexSortField, indexSortType)));
    }
    if (mode == Mode.UPDATE) {
        iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    } else {
        iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    }
    iwc.setMaxBufferedDocs(maxBufferedDocs);
    iwc.setRAMBufferSizeMB(ramBufferSizeMB);
    // So flushed segments do/don't use CFS:
    iwc.setUseCompoundFile(useCFS);
    final AtomicBoolean indexingFailed = new AtomicBoolean();
    iwc.setMergeScheduler(getMergeScheduler(indexingFailed, useCMS, maxConcurrentMerges, disableIOThrottle));
    iwc.setMergePolicy(getMergePolicy(mergePolicy, useCFS));
    // Keep all commit points:
    if (doDeletions || doForceMerge) {
        iwc.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE);
    }
    final Codec codec = new Lucene62Codec() {
        @Override
        public PostingsFormat getPostingsFormatForField(String field) {
            return PostingsFormat.forName(field.equals("id") ? idFieldPostingsFormat : defaultPostingsFormat);
        }

        private final DocValuesFormat facetsDVFormat = DocValuesFormat.forName(facetDVFormatName);
        //private final DocValuesFormat lucene42DVFormat = DocValuesFormat.forName("Lucene42");
        //private final DocValuesFormat diskDVFormat = DocValuesFormat.forName("Disk");
        //private final DocValuesFormat lucene45DVFormat = DocValuesFormat.forName("Lucene45");
        private final DocValuesFormat directDVFormat = DocValuesFormat.forName("Direct");

        @Override
        public DocValuesFormat getDocValuesFormatForField(String field) {
            if (facetFields.contains(field) || field.equals("$facets")) {
                return facetsDVFormat;
                //} else if (field.equals("$facets_sorted_doc_values")) {
                //    return diskDVFormat;
            } else {
                // Use default DVFormat for all else:
                //System.out.println("DV: field=" + field + " format=" + super.getDocValuesFormatForField(field));
                return super.getDocValuesFormatForField(field);
            }
        }
    };
    iwc.setCodec(codec);
    System.out.println("IW config=" + iwc);
    IndexWriter w = new IndexWriter(dir, iwc);
    System.out.println("Index has " + w.maxDoc() + " docs");
    final TaxonomyWriter taxoWriter;
    if (facetFields.isEmpty() == false) {
        taxoWriter = new DirectoryTaxonomyWriter(od.open(Paths.get(args.getString("-indexPath"), "facets")),
                IndexWriterConfig.OpenMode.CREATE);
    } else {
        taxoWriter = null;
    }
    // Fixed seed so group field values are always consistent:
    final Random random = new Random(17);
    LineFileDocs lineFileDocs = new LineFileDocs(lineFile, repeatDocs, storeBody, tvsBody, bodyPostingsOffsets,
            false, taxoWriter, facetFields, facetsConfig, addDVFields);
    float docsPerSecPerThread = -1f;
    //float docsPerSecPerThread = 100f;
    IndexThreads threads = new IndexThreads(random, w, indexingFailed, lineFileDocs, numThreads, docCountLimit,
            addGroupingFields, printDPS, mode, docsPerSecPerThread, null, nrtEverySec, randomDocIDMax);
    System.out.println("\nIndexer: start");
    final long t0 = System.currentTimeMillis();
    threads.start();
    while (!threads.done() && indexingFailed.get() == false) {
        Thread.sleep(100);
        // Commits once per minute on average:
        if (doRandomCommit && random.nextInt(600) == 17) {
            System.out.println("Indexer: now commit");
            long commitStartNS = System.nanoTime();
            w.commit();
            System.out.println(String.format(Locale.ROOT, "Indexer: commit took %.1f msec",
                    (System.nanoTime() - commitStartNS) / 1000000.));
        }
    }
    threads.stop();
    final long t1 = System.currentTimeMillis();
    System.out.println("\nIndexer: indexing done (" + (t1 - t0) + " msec); total " + w.maxDoc() + " docs");
    // if we update we can not tell how many docs
    if (threads.failed.get()) {
        throw new RuntimeException("exceptions during indexing");
    }
    if (mode != Mode.UPDATE && docCountLimit != -1 && w.maxDoc() != docCountLimit) {
        throw new RuntimeException("w.maxDoc()=" + w.maxDoc() + " but expected " + docCountLimit);
    }
    final Map<String, String> commitData = new HashMap<String, String>();
    if (waitForMerges) {
        w.close();
        IndexWriterConfig iwc2 = new IndexWriterConfig(a);
        iwc2.setMergeScheduler(getMergeScheduler(indexingFailed, useCMS, maxConcurrentMerges, disableIOThrottle));
        iwc2.setMergePolicy(getMergePolicy(mergePolicy, useCFS));
        iwc2.setCodec(codec);
        iwc2.setUseCompoundFile(useCFS);
        iwc2.setMaxBufferedDocs(maxBufferedDocs);
        iwc2.setRAMBufferSizeMB(ramBufferSizeMB);
        if (indexSortField != null) {
            iwc2.setIndexSort(new Sort(new SortField(indexSortField, indexSortType)));
        }
        w = new IndexWriter(dir, iwc2);
        long t2 = System.currentTimeMillis();
        System.out.println("\nIndexer: waitForMerges done (" + (t2 - t1) + " msec)");
    }
    if (waitForCommit) {
        commitData.put("userData", "multi");
        w.setLiveCommitData(commitData.entrySet());
        long t2 = System.currentTimeMillis();
        w.commit();
        long t3 = System.currentTimeMillis();
        System.out.println("\nIndexer: commit multi (took " + (t3 - t2) + " msec)");
    } else {
        w.rollback();
        w = null;
    }
    if (doForceMerge) {
        long forceMergeStartMSec = System.currentTimeMillis();
        w.forceMerge(1);
        long forceMergeEndMSec = System.currentTimeMillis();
        System.out.println("\nIndexer: force merge done (took " + (forceMergeEndMSec - forceMergeStartMSec) + " msec)");
        commitData.put("userData", "single");
        w.setLiveCommitData(commitData.entrySet());
        w.commit();
        final long t5 = System.currentTimeMillis();
        System.out.println("\nIndexer: commit single done (took " + (t5 - forceMergeEndMSec) + " msec)");
    }
    if (doDeletions) {
        final long t5 = System.currentTimeMillis();
        // Randomly delete 5% of the docs
        final Set<Integer> deleted = new HashSet<Integer>();
        final int maxDoc = w.maxDoc();
        final int toDeleteCount = (int) (maxDoc * 0.05);
        System.out.println("\nIndexer: delete " + toDeleteCount + " docs");
        while (deleted.size() < toDeleteCount) {
            final int id = random.nextInt(maxDoc);
            if (!deleted.contains(id)) {
                deleted.add(id);
                w.deleteDocuments(new Term("id", LineFileDocs.intToID(id)));
            }
        }
        final long t6 = System.currentTimeMillis();
        System.out.println("\nIndexer: deletes done (took " + (t6 - t5) + " msec)");
        commitData.put("userData", doForceMerge ? "delsingle" : "delmulti");
        w.setLiveCommitData(commitData.entrySet());
        w.commit();
        final long t7 = System.currentTimeMillis();
        System.out.println("\nIndexer: commit delmulti done (took " + (t7 - t6) + " msec)");
        if (doUpdate || w.numDocs() != maxDoc - toDeleteCount) {
            throw new RuntimeException("count mismatch: w.numDocs()=" + w.numDocs() + " but expected "
                    + (maxDoc - toDeleteCount));
        }
    }
    if (taxoWriter != null) {
        System.out.println("Taxonomy has " + taxoWriter.getSize() + " ords");
        taxoWriter.commit();
        taxoWriter.close();
    }
    final long tCloseStart = System.currentTimeMillis();
    if (w != null) {
        w.close();
        w = null;
    }
    if (waitForCommit) {
        System.out.println("\nIndexer: at close: " + SegmentInfos.readLatestCommit(dir));
        System.out.println("\nIndexer: close took " + (System.currentTimeMillis() - tCloseStart) + " msec");
    }
    dir.close();
    final long tFinal = System.currentTimeMillis();
    System.out.println("\nIndexer: net bytes indexed " + threads.getBytesIndexed());
    final long indexingTime;
    if (waitForCommit) {
        indexingTime = tFinal - t0;
        System.out.println("\nIndexer: finished (" + indexingTime + " msec)");
    } else {
        indexingTime = t1 - t0;
        System.out.println("\nIndexer: finished (" + indexingTime + " msec), excluding commit");
    }
    System.out.println("\nIndexer: " + (threads.getBytesIndexed() / 1024. / 1024. / 1024. / (indexingTime / 3600000.))
            + " GB/hour plain text");
}
From source file: perf.NRTPerfTest.java
License: Apache License

public static void main(String[] args) throws Exception {
    final String dirImpl = args[0];
    final String dirPath = args[1];
    final String commit = args[2];
    final String lineDocFile = args[3];
    final long seed = Long.parseLong(args[4]);
    final double docsPerSec = Double.parseDouble(args[5]);
    final double runTimeSec = Double.parseDouble(args[6]);
    final int numSearchThreads = Integer.parseInt(args[7]);
    int numIndexThreads = Integer.parseInt(args[8]);
    if (numIndexThreads > docsPerSec) {
        System.out.println("INFO: numIndexThreads higher than docsPerSec, adjusting numIndexThreads");
        numIndexThreads = (int) Math.max(1, docsPerSec);
    }
    final double reopenPerSec = Double.parseDouble(args[9]);
    final Mode mode = Mode.valueOf(args[10].toUpperCase(Locale.ROOT));
    statsEverySec = Integer.parseInt(args[11]);
    final boolean doCommit = args[12].equals("yes");
    final double mergeMaxWriteMBPerSec = Double.parseDouble(args[13]);
    if (mergeMaxWriteMBPerSec != 0.0) {
        throw new IllegalArgumentException("mergeMaxWriteMBPerSec must be 0.0 until LUCENE-3202 is done");
    }
    final String tasksFile = args[14];
    if (Files.notExists(Paths.get(tasksFile))) {
        throw new FileNotFoundException("tasks file not found " + tasksFile);
    }
    final boolean hasProcMemInfo = Files.exists(Paths.get("/proc/meminfo"));
    System.out.println("DIR=" + dirImpl);
    System.out.println("Index=" + dirPath);
    System.out.println("Commit=" + commit);
    System.out.println("LineDocs=" + lineDocFile);
    System.out.println("Docs/sec=" + docsPerSec);
    System.out.println("Run time sec=" + runTimeSec);
    System.out.println("NumSearchThreads=" + numSearchThreads);
    System.out.println("NumIndexThreads=" + numIndexThreads);
    System.out.println("Reopen/sec=" + reopenPerSec);
    System.out.println("Mode=" + mode);
    System.out.println("tasksFile=" + tasksFile);
    System.out.println("Record stats every " + statsEverySec + " seconds");
    final int count = (int) ((runTimeSec / statsEverySec) + 2);
    docsIndexedByTime = new AtomicInteger[count];
    searchesByTime = new AtomicInteger[count];
    totalUpdateTimeByTime = new AtomicLong[count];
    final AtomicInteger reopensByTime[] = new AtomicInteger[count];
    for (int i = 0; i < count; i++) {
        docsIndexedByTime[i] = new AtomicInteger();
        searchesByTime[i] = new AtomicInteger();
        totalUpdateTimeByTime[i] = new AtomicLong();
        reopensByTime[i] = new AtomicInteger();
    }
    System.out.println("Max merge MB/sec = " + (mergeMaxWriteMBPerSec <= 0.0 ? "unlimited" : mergeMaxWriteMBPerSec));
    final Random random = new Random(seed);
    final LineFileDocs docs = new LineFileDocs(lineDocFile, true, false, false, false, false, null,
            new HashSet<String>(), null, true);
    final Directory dir0;
    if (dirImpl.equals("MMapDirectory")) {
        dir0 = new MMapDirectory(Paths.get(dirPath));
    } else if (dirImpl.equals("NIOFSDirectory")) {
        dir0 = new NIOFSDirectory(Paths.get(dirPath));
    } else if (dirImpl.equals("SimpleFSDirectory")) {
        dir0 = new SimpleFSDirectory(Paths.get(dirPath));
    } else {
        docs.close();
        throw new RuntimeException("unknown directory impl \"" + dirImpl + "\"");
    }
    //final NRTCachingDirectory dir = new NRTCachingDirectory(dir0, 10, 200.0, mergeMaxWriteMBPerSec);
    final NRTCachingDirectory dir = new NRTCachingDirectory(dir0, 20, 400.0);
    //final MergeScheduler ms = dir.getMergeScheduler();
    //final Directory dir = dir0;
    //final MergeScheduler ms = new ConcurrentMergeScheduler();
    final String field = "body";
    // Open an IW on the requested commit point, but, don't
    // delete other (past or future) commit points:
    // TODO take Analyzer as parameter
    StandardAnalyzer analyzer = new StandardAnalyzer(CharArraySet.EMPTY_SET);
    final IndexWriterConfig conf = new IndexWriterConfig(analyzer);
    conf.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE);
    conf.setRAMBufferSizeMB(256.0);
    //iwc.setMergeScheduler(ms);
    final Codec codec = new Lucene62Codec() {
        @Override
        public PostingsFormat getPostingsFormatForField(String field) {
            if (field.equals("id")) {
                return PostingsFormat.forName("Memory");
            } else {
                return PostingsFormat.forName("Lucene50");
            }
        }

        private final DocValuesFormat direct = DocValuesFormat.forName("Direct");

        @Override
        public DocValuesFormat getDocValuesFormatForField(String field) {
            return direct;
        }
    };
    conf.setCodec(codec);
    /*
    iwc.setMergePolicy(new LogByteSizeMergePolicy());
    ((LogMergePolicy) iwc.getMergePolicy()).setUseCompoundFile(false);
    ((LogMergePolicy) iwc.getMergePolicy()).setMergeFactor(30);
    ((LogByteSizeMergePolicy) iwc.getMergePolicy()).setMaxMergeMB(10000.0);
    System.out.println("USING LOG BS MP");
    */
    TieredMergePolicy tmp = new TieredMergePolicy();
    tmp.setNoCFSRatio(0.0);
    tmp.setMaxMergedSegmentMB(1000000.0);
    //tmp.setReclaimDeletesWeight(3.0);
    //tmp.setMaxMergedSegmentMB(7000.0);
    conf.setMergePolicy(tmp);
    if (!commit.equals("none")) {
        conf.setIndexCommit(PerfUtils.findCommitPoint(commit, dir));
    }
    // Make sure merges run @ higher prio than indexing:
    final ConcurrentMergeScheduler cms = (ConcurrentMergeScheduler) conf.getMergeScheduler();
    cms.setMaxMergesAndThreads(4, 1);
    conf.setMergedSegmentWarmer(new MergedReaderWarmer(field));
    final IndexWriter w = new IndexWriter(dir, conf);
    //w.setInfoStream(System.out);
    IndexThreads.UpdatesListener updatesListener = new IndexThreads.UpdatesListener() {
        long startTimeNS;

        @Override
        public void beforeUpdate() {
            startTimeNS = System.nanoTime();
        }

        @Override
        public void afterUpdate() {
            int idx = currentQT.get();
            totalUpdateTimeByTime[idx].addAndGet(System.nanoTime() - startTimeNS);
            docsIndexedByTime[idx].incrementAndGet();
        }
    };
    IndexThreads indexThreads = new IndexThreads(random, w, new AtomicBoolean(false), docs, numIndexThreads, -1,
            false, false, mode, (float) (docsPerSec / numIndexThreads), updatesListener, -1.0, w.maxDoc());
    //NativePosixUtil.mlockTermsDict(startR, "id");
    final SearcherManager manager = new SearcherManager(w, null);
    IndexSearcher s = manager.acquire();
    try {
        System.out.println("Reader=" + s.getIndexReader());
    } finally {
        manager.release(s);
    }
    final DirectSpellChecker spellChecker = new DirectSpellChecker();
    final IndexState indexState = new IndexState(manager, null, field, spellChecker, "PostingsHighlighter", null);
    final QueryParser qp = new QueryParser(field, analyzer);
    TaskParser taskParser = new TaskParser(indexState, qp, field, 10, random, true);
    final TaskSource tasks = new RandomTaskSource(taskParser, tasksFile, random) {
        @Override
        public void taskDone(Task task, long queueTimeNS, int totalHitCount) {
            searchesByTime[currentQT.get()].incrementAndGet();
        }
    };
    System.out.println("Task repeat count 1");
    System.out.println("Tasks file " + tasksFile);
    System.out.println("Num task per cat 20");
    final TaskThreads taskThreads = new TaskThreads(tasks, indexState, numSearchThreads);
    final ReopenThread reopenThread = new ReopenThread(reopenPerSec, manager, reopensByTime, runTimeSec);
    reopenThread.setName("ReopenThread");
    reopenThread.setPriority(4 + Thread.currentThread().getPriority());
    System.out.println("REOPEN PRI " + reopenThread.getPriority());
    indexThreads.start();
    reopenThread.start();
    taskThreads.start();
    Thread.currentThread().setPriority(5 + Thread.currentThread().getPriority());
    System.out.println("TIMER PRI " + Thread.currentThread().getPriority());
    //System.out.println("Start: " + new Date());
    final long startMS = System.currentTimeMillis();
    final long stopMS = startMS + (long) (runTimeSec * 1000);
    int lastQT = -1;
    while (true) {
        final long t = System.currentTimeMillis();
        if (t >= stopMS) {
            break;
        }
        final int qt = (int) ((t - startMS) / statsEverySec / 1000);
        currentQT.set(qt);
        if (qt != lastQT) {
            final int prevQT = lastQT;
            lastQT = qt;
            if (prevQT > 0) {
                final String other;
                if (hasProcMemInfo) {
                    other = " D=" + getLinuxDirtyBytes();
                } else {
                    other = "";
                }
                int prev = prevQT - 1;
                System.out.println(String.format("QT %d searches=%d docs=%d reopens=%s totUpdateTime=%d",
                        prev, searchesByTime[prev].get(), docsIndexedByTime[prev].get(),
                        reopensByTime[prev].get() + other,
                        TimeUnit.NANOSECONDS.toMillis(totalUpdateTimeByTime[prev].get())));
            }
        }
        Thread.sleep(25);
    }
    taskThreads.stop();
    reopenThread.join();
    indexThreads.stop();
    System.out.println("By time:");
    for (int i = 0; i < searchesByTime.length - 2; i++) {
        System.out.println(String.format("  %d searches=%d docs=%d reopens=%d totUpdateTime=%d",
                i * statsEverySec, searchesByTime[i].get(), docsIndexedByTime[i].get(), reopensByTime[i].get(),
                TimeUnit.NANOSECONDS.toMillis(totalUpdateTimeByTime[i].get())));
    }
    manager.close();
    if (doCommit) {
        w.close();
    } else {
        w.rollback();
    }
}
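The doCommit toggle at the end of this last example captures the contrast running through all of these snippets in two lines. As a rule of thumb (with a default IndexWriterConfig, where close() commits pending changes):

if (doCommit) {
    w.close();      // commit whatever is pending, then close
} else {
    w.rollback();   // drop everything since the last commit, then close
}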