List of usage examples for org.apache.lucene.index IndexWriter addIndexes
public long addIndexes(CodecReader... readers) throws IOException
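Since Lucene 5, addIndexes(CodecReader...) merges the given readers directly into the writer's index, and the long return value (per the signature above, Lucene 7+) is the operation's sequence number. Below is a minimal, self-contained sketch of calling this overload against the current API; the paths "source-index" and "target-index" are hypothetical, and SlowCodecReaderWrapper adapts each source segment to a CodecReader. Note that most of the examples that follow were written against Lucene 3.x/4.x, where addIndexes took IndexReader or Directory arguments instead.

import java.nio.file.Paths;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.CodecReader;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SlowCodecReaderWrapper;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class AddIndexesSketch {
    public static void main(String[] args) throws Exception {
        try (Directory source = FSDirectory.open(Paths.get("source-index"));     // hypothetical path
                Directory target = FSDirectory.open(Paths.get("target-index"));  // hypothetical path
                DirectoryReader reader = DirectoryReader.open(source);
                IndexWriter writer = new IndexWriter(target, new IndexWriterConfig(new StandardAnalyzer()))) {
            // addIndexes(CodecReader...) works at the segment level, so wrap each
            // leaf (segment) of the source reader as a CodecReader.
            CodecReader[] segments = new CodecReader[reader.leaves().size()];
            for (LeafReaderContext ctx : reader.leaves()) {
                segments[ctx.ord] = SlowCodecReaderWrapper.wrap(ctx.reader());
            }
            writer.addIndexes(segments); // merges the source segments; deleted docs are dropped
            writer.commit();
        }
    }
}

When the source index can be copied as-is, the Directory overload, writer.addIndexes(Directory...), imports the segments without rewriting them and is usually cheaper than a reader-based merge.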
From source file: org.apache.solr.update.SolrIndexSplitter.java
License: Apache License
public void split() throws IOException {
    List<AtomicReaderContext> leaves = searcher.getTopReaderContext().leaves();
    List<OpenBitSet[]> segmentDocSets = new ArrayList<OpenBitSet[]>(leaves.size());

    log.info("SolrIndexSplitter: partitions=" + numPieces + " segments=" + leaves.size());

    for (AtomicReaderContext readerContext : leaves) {
        assert readerContext.ordInParent == segmentDocSets.size(); // make sure we're going in order
        OpenBitSet[] docSets = split(readerContext);
        segmentDocSets.add(docSets);
    }

    // would it be more efficient to write segment-at-a-time to each new index?
    // - need to worry about number of open descriptors
    // - need to worry about if IW.addIndexes does a sync or not...
    // - would be more efficient on the read side, but prob less efficient merging

    IndexReader[] subReaders = new IndexReader[leaves.size()];
    for (int partitionNumber = 0; partitionNumber < numPieces; partitionNumber++) {
        log.info("SolrIndexSplitter: partition #" + partitionNumber
                + (ranges != null ? " range=" + ranges.get(partitionNumber) : ""));

        for (int segmentNumber = 0; segmentNumber < subReaders.length; segmentNumber++) {
            subReaders[segmentNumber] = new LiveDocsReader(leaves.get(segmentNumber),
                    segmentDocSets.get(segmentNumber)[partitionNumber]);
        }

        boolean success = false;

        RefCounted<IndexWriter> iwRef = null;
        IndexWriter iw = null;
        if (cores != null) {
            SolrCore subCore = cores.get(partitionNumber);
            iwRef = subCore.getUpdateHandler().getSolrCoreState().getIndexWriter(subCore);
            iw = iwRef.get();
        } else {
            SolrCore core = searcher.getCore();
            String path = paths.get(partitionNumber);
            iw = SolrIndexWriter.create(
                    "SplittingIndexWriter" + partitionNumber
                            + (ranges != null ? " " + ranges.get(partitionNumber) : ""),
                    path, core.getDirectoryFactory(), true, core.getLatestSchema(),
                    core.getSolrConfig().indexConfig, core.getDeletionPolicy(), core.getCodec());
        }

        try {
            // This merges the subreaders and will thus remove deletions (i.e. no optimize needed)
            iw.addIndexes(subReaders);
            success = true;
        } finally {
            if (iwRef != null) {
                iwRef.decref();
            } else {
                if (success) {
                    IOUtils.close(iw);
                } else {
                    IOUtils.closeWhileHandlingException(iw);
                }
            }
        }
    }
}
From source file: org.archive.nutchwax.IndexMerger.java
License: Apache License
/**
 * Merge all input indexes into a single output index.
 */
public void merge(IndexReader[] readers, Path outputIndex, Path localWorkingDir, boolean parallel)
        throws IOException {
    LOG.info("merging indexes to: " + outputIndex);

    FileSystem localFs = FileSystem.getLocal(getConf());
    if (localFs.exists(localWorkingDir)) {
        localFs.delete(localWorkingDir, true);
    }
    localFs.mkdirs(localWorkingDir);

    // Get local output target
    //
    FileSystem fs = FileSystem.get(getConf());
    if (fs.exists(outputIndex)) {
        throw new FileAlreadyExistsException("Output directory " + outputIndex + " already exists!");
    }

    Path tmpLocalOutput = new Path(localWorkingDir, "merge-output");
    Path localOutput = fs.startLocalOutput(outputIndex, tmpLocalOutput);

    //
    // Merge indices
    //
    IndexWriter writer = new IndexWriter(localOutput.toString(), null, true);
    writer.setMergeFactor(getConf().getInt("indexer.mergeFactor", IndexWriter.DEFAULT_MERGE_FACTOR));
    writer.setMaxBufferedDocs(getConf().getInt("indexer.minMergeDocs", IndexWriter.DEFAULT_MAX_BUFFERED_DOCS));
    writer.setMaxMergeDocs(getConf().getInt("indexer.maxMergeDocs", IndexWriter.DEFAULT_MAX_MERGE_DOCS));
    writer.setTermIndexInterval(
            getConf().getInt("indexer.termIndexInterval", IndexWriter.DEFAULT_TERM_INDEX_INTERVAL));
    writer.setInfoStream(LogUtil.getDebugStream(LOG));
    writer.setUseCompoundFile(false);
    writer.setSimilarity(new NutchSimilarity());
    writer.addIndexes(readers);
    writer.close();

    //
    // Put target back
    //
    fs.completeLocalOutput(outputIndex, tmpLocalOutput);

    LOG.info("done merging");
}
From source file: org.archive.tnh.tools.IndexMerger.java
License: Apache License
public static void main(String[] args) throws Exception {
    if (args.length < 2) {
        System.err.println("IndexMerger [-v|-o|-f] <dest> <source>...");
        System.exit(1);
    }

    boolean verbose = false;
    boolean optimize = false;
    boolean force = false;

    int i = 0;
    for (; i < args.length; i++) {
        if ("-o".equals(args[i])) {
            optimize = true;
        } else if ("-f".equals(args[i])) {
            force = true;
        } else if ("-v".equals(args[i])) {
            verbose = true;
        } else {
            break;
        }
    }

    if ((args.length - i) < (2 - (optimize ? 1 : 0))) {
        System.err.println("Error: no source files!");
        System.err.println("IndexMerger [-v|-o|-f] <dest> <source>...");
        System.exit(1);
    }

    File dest = new File(args[i++]);
    if (!force && dest.exists()) {
        System.err.println("Destination exists, use -f to force merging into existing index: " + dest);
        System.exit(2);
    }

    IndexReader[] ir = new IndexReader[args.length - i];
    for (int j = i; j < args.length; j++) {
        ir[j - i] = IndexReader.open(new MMapDirectory(new File(args[j])), true /* read-only */);
    }

    IndexWriter w = null;
    try {
        // Configure the IndexWriter.
        IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_35, null);
        config.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);

        w = new IndexWriter(new MMapDirectory(dest), config);

        if (verbose) {
            w.setInfoStream(System.out);
        }

        if (ir.length > 0) {
            w.addIndexes(ir);
        }

        if (optimize) {
            w.optimize();
        }

        w.commit();
        w.close();
    } catch (IOException ioe) {
        System.err.println("Error: " + args[0] + " " + ioe);
        if (w != null) {
            w.close();
        }
    }
}
From source file: org.columba.mail.folder.search.LuceneQueryEngine.java
License: Mozilla Public License
protected void mergeRAMtoIndex() throws IOException {
    IndexReader ramReader = getRAMReader();
    IndexReader fileReader = getFileReader();

    LOG.fine("Lucene: Merging RAMIndex to FileIndex");

    /*
     * Document doc;
     * for (int i = 0; i < ramReader.numDocs(); i++) {
     *     doc = ramReader.document(i);
     *     if (!deleted.contains(new Integer(ramReader.document(i).getField("uid").stringValue()))) {
     *         fileIndex.addDocument(doc);
     *     }
     * }
     */
    ListIterator it = deleted.listIterator();
    while (it.hasNext()) {
        String uid = it.next().toString();
        if (ramReader.delete(new Term("uid", uid)) == 0) {
            fileReader.delete(new Term("uid", uid));
        }
    }

    fileReader.close();
    ramReader.close();

    IndexWriter fileIndex = new IndexWriter(luceneIndexDir, analyzer, false);
    fileIndex.addIndexes(new Directory[] { ramIndexDir });
    fileIndex.optimize();
    fileIndex.close();

    initRAMDir();
    deleted.clear();
}
From source file: org.deshang.content.indexing.util.lucene.LuceneIndexUtil.java
License: Apache License
public synchronized void storeIndex(Directory index, String path) throws IOException {
    LOGGER.debug("Enter storeIndex(Directory, String)");

    File dir = new File(path);
    if (dir.exists() && !dir.isDirectory()) {
        throw new IOException("Specified path name [" + path + "] is not a directory");
    } else if (dir.exists()) {
        deleteFile(dir);
    }
    dir.mkdirs();

    FSDirectory fs = FSDirectory.open(dir);
    IndexWriter writer = new IndexWriter(fs, getIndexWriterConfig());
    writer.addIndexes(index);
    writer.close();

    LOGGER.debug("Exit storeIndex(Directory, String)");
}
From source file: org.eu.bitzone.Leia.java
License: Apache License
/**
 * Open indicated index and re-initialize all GUI and plugins.
 *
 * @param name path to index
 * @param force if true, and the index is locked, unlock it first. If false, and the index is
 *          locked, an error will be reported.
 * @param ro open in read-only mode, and disallow modifications.
 */
public void openIndex(final String name, final boolean force, final String dirImpl, final boolean ro,
        final boolean ramdir, final boolean keepCommits, final IndexCommit point, final int tiiDivisor) {
    pName = name;
    readOnly = ro;
    removeAll();
    final File baseFileDir = new File(name);
    this.baseDir = baseFileDir.toString();
    addComponent(this, "/xml/luke.xml", null, null);
    statmsg = find("statmsg");
    if (dir != null) {
        try {
            if (ir != null) {
                ir.close();
            }
            if (ar != null) {
                ar.close();
            }
        } catch (final Exception e) {
        }
        try {
            if (dir != null) {
                dir.close();
            }
        } catch (final Exception e) {
        }
    }
    final ArrayList<Directory> dirs = new ArrayList<Directory>();
    Throwable lastException = null;
    try {
        Directory d = openDirectory(dirImpl, pName, false);
        if (IndexWriter.isLocked(d)) {
            if (!ro) {
                if (force) {
                    IndexWriter.unlock(d);
                } else {
                    errorMsg("Index is locked. Try 'Force unlock' when opening.");
                    d.close();
                    d = null;
                    return;
                }
            }
        }
        boolean existsSingle = false;
        // IR.indexExists doesn't report the cause of error
        try {
            new SegmentInfos().read(d);
            existsSingle = true;
        } catch (final Throwable e) {
            e.printStackTrace();
            lastException = e;
        }
        if (!existsSingle) { // try multi
            final File[] files = baseFileDir.listFiles();
            for (final File f : files) {
                if (f.isFile()) {
                    continue;
                }
                Directory d1 = openDirectory(dirImpl, f.toString(), false);
                if (IndexWriter.isLocked(d1)) {
                    if (!ro) {
                        if (force) {
                            IndexWriter.unlock(d1);
                        } else {
                            errorMsg("Index is locked. Try 'Force unlock' when opening.");
                            d1.close();
                            d1 = null;
                            return;
                        }
                    }
                }
                existsSingle = false;
                try {
                    new SegmentInfos().read(d1);
                    existsSingle = true;
                } catch (final Throwable e) {
                    lastException = e;
                    e.printStackTrace();
                }
                if (!existsSingle) {
                    d1.close();
                    continue;
                }
                dirs.add(d1);
            }
        } else {
            dirs.add(d);
        }

        if (dirs.size() == 0) {
            if (lastException != null) {
                errorMsg("Invalid directory at the location, check console for more information. Last exception:\n"
                        + lastException.toString());
            } else {
                errorMsg("No valid directory at the location, try another location.\nCheck console for other possible causes.");
            }
            return;
        }

        if (ramdir) {
            showStatus("Loading index into RAMDirectory ...");
            final Directory dir1 = new RAMDirectory();
            final IndexWriterConfig cfg = new IndexWriterConfig(Version.LUCENE_40,
                    new WhitespaceAnalyzer(Version.LUCENE_40));
            final IndexWriter iw1 = new IndexWriter(dir1, cfg);
            iw1.addIndexes(dirs.toArray(new Directory[dirs.size()]));
            iw1.close();
            showStatus("RAMDirectory loading done!");
            if (dir != null) {
                dir.close();
            }
            dirs.clear();
            dirs.add(dir1);
        }

        IndexDeletionPolicy policy;
        if (keepCommits) {
            policy = new KeepAllIndexDeletionPolicy();
        } else {
            policy = new KeepLastIndexDeletionPolicy();
        }
        final ArrayList<DirectoryReader> readers = new ArrayList<DirectoryReader>();
        for (final Directory dd : dirs) {
            DirectoryReader reader;
            if (tiiDivisor > 1) {
                reader = DirectoryReader.open(dd, tiiDivisor);
            } else {
                reader = DirectoryReader.open(dd);
            }
            readers.add(reader);
        }
        if (readers.size() == 1) {
            ir = readers.get(0);
            dir = ((DirectoryReader) ir).directory();
        } else {
            ir = new MultiReader(readers.toArray(new IndexReader[readers.size()]));
        }
        is = new IndexSearcher(ir);
        // XXX
        slowAccess = false;
        initOverview();
        initPlugins();
        showStatus("Index successfully open.");
    } catch (final Exception e) {
        e.printStackTrace();
        errorMsg(e.getMessage());
        return;
    }
}
From source file: org.exoplatform.services.jcr.impl.core.query.lucene.IndexMigration.java
License: Apache License
/**
 * Checks if the given <code>index</code> needs to be migrated.
 *
 * @param index the index to check and migrate if needed.
 * @param directoryManager the directory manager.
 * @throws IOException if an error occurs while migrating the index.
 */
public static void migrate(PersistentIndex index, DirectoryManager directoryManager) throws IOException {
    Directory indexDir = index.getDirectory();
    log.debug("Checking {} ...", indexDir);

    ReadOnlyIndexReader reader = index.getReadOnlyIndexReader();
    try {
        if (IndexFormatVersion.getVersion(reader).getVersion() >= IndexFormatVersion.V3.getVersion()) {
            // index was created with Jackrabbit 1.5 or higher
            // no need for migration
            log.debug("IndexFormatVersion >= V3, no migration needed");
            return;
        }
        // assert: there is at least one node in the index, otherwise the
        // index format version would be at least V3
        TermEnum terms = reader.terms(new Term(FieldNames.PROPERTIES, ""));
        try {
            Term t = terms.term();
            if (t.text().indexOf('\uFFFF') == -1) {
                log.debug("Index already migrated");
                return;
            }
        } finally {
            terms.close();
        }
    } finally {
        reader.release();
        index.releaseWriterAndReaders();
    }

    // if we get here then the index must be migrated
    log.debug("Index requires migration {}", indexDir);

    String migrationName = index.getName() + "_v2.3";
    if (directoryManager.hasDirectory(migrationName)) {
        directoryManager.delete(migrationName);
    }

    Directory migrationDir = directoryManager.getDirectory(migrationName);
    try {
        IndexWriter writer = new IndexWriter(migrationDir, new JcrStandartAnalyzer(),
                IndexWriter.MaxFieldLength.UNLIMITED);
        try {
            IndexReader r = new MigrationIndexReader(IndexReader.open(index.getDirectory()));
            try {
                writer.addIndexes(new IndexReader[] { r });
                writer.close();
            } finally {
                r.close();
            }
        } finally {
            writer.close();
        }
    } finally {
        migrationDir.close();
    }
    directoryManager.delete(index.getName());
    if (!directoryManager.rename(migrationName, index.getName())) {
        throw new IOException("failed to move migrated directory " + migrationDir);
    }
    log.info("Migrated " + index.getName());
}
From source file: org.genemania.completion.lucene.IndexBuilder.java
License: Open Source License
void write(String path, Directory source, Analyzer analyzer, MaxFieldLength length) throws IOException {
    FSDirectory directory = FSDirectory.open(new File(path));
    try {
        IndexReader reader = IndexReader.open(source, true);
        try {
            IndexWriter writer = new IndexWriter(directory, analyzer, length);
            try {
                writer.addIndexes(new IndexReader[] { reader });
            } finally {
                writer.close();
            }
        } finally {
            reader.close();
        }
    } finally {
        directory.close();
    }
}
From source file: org.genemania.mediator.lucene.exporter.Generic2LuceneExporter.java
License: Open Source License
public void export() throws Exception {
    final ExportProfile profile = createExportProfile(basePath, profileName);
    Analyzer analyzer = createAnalyzer();
    try {
        final Map<String, Long> namingSourceIds = new HashMap<String, Long>();

        File indexFile = new File(makeIndexPath("base"));
        FSDirectory directory = FSDirectory.open(indexFile);
        final IndexWriter indexWriter = new IndexWriter(directory, analyzer, true, MaxFieldLength.UNLIMITED);

        processFile(genericDbPath, "GENE_NAMING_SOURCES.txt", new FileHandler() {
            @Override
            public boolean process(String line) throws IOException {
                String[] parts = line.split("\t", -1);
                exportNamingSource(indexWriter, parts);
                namingSourceIds.put(parts[1], Long.parseLong(parts[0]));
                return true;
            }
        });

        processFile(genericDbPath, "TAGS.txt", new FileHandler() {
            @Override
            public boolean process(String line) throws IOException {
                String[] parts = line.split("\t", -1);
                exportTag(indexWriter, parts);
                return true;
            }
        });

        processFile(genericDbPath, "ONTOLOGIES.txt", new FileHandler() {
            @Override
            public boolean process(String line) throws IOException {
                String[] parts = line.split("\t", -1);
                exportOntologies(indexWriter, parts);
                return true;
            }
        });

        processFile(genericDbPath, "ONTOLOGY_CATEGORIES.txt", new FileHandler() {
            @Override
            public boolean process(String line) throws IOException {
                String[] parts = line.split("\t", -1);
                exportOntologyCategories(indexWriter, parts);
                return true;
            }
        });

        exportStatistics(indexWriter);
        indexWriter.close();

        String[] organisms = config.getSection("Organisms").getEntry("organisms").split("\\s*,\\s*");
        for (final String organismId : organisms) {
            Section organismSection = config.getSection(organismId);
            final String shortName = organismSection.getEntry("short_name");
            System.out.println(shortName);

            RAMDirectory ramDirectory = new RAMDirectory();
            final IndexWriter writer = new IndexWriter(ramDirectory, analyzer, true, MaxFieldLength.UNLIMITED);

            final Organism organism = new Organism();
            processFile(genericDbPath, "ORGANISMS.txt", new FileHandler() {
                @Override
                public boolean process(String line) throws IOException {
                    String[] parts = line.split("\t", -1);
                    if (parts[1].equals(shortName)) {
                        exportOrganism(writer, parts);
                        populateOrganism(organism, parts);
                        return false;
                    }
                    return true;
                }
            });

            final Long entrezNamingSourceId = namingSourceIds.get("Entrez Gene ID");
            final Map<Long, String> externalIds = new HashMap<Long, String>();
            final Map<Long, Long> externalNamingSourceIds = new HashMap<Long, Long>();
            final Set<Long> nodes = new HashSet<Long>();
            processFile(genericDbPath, "GENES.txt", new FileHandler() {
                @Override
                public boolean process(String line) throws IOException {
                    String[] parts = line.split("\t", -1);
                    long organismId = Long.parseLong(parts[5]);
                    if (organismId == organism.getId()) {
                        exportGene(writer, parts);
                        long nodeId = Long.parseLong(parts[4]);
                        nodes.add(nodeId);
                        long namingSourceId = Long.parseLong(parts[3]);
                        if (namingSourceId == entrezNamingSourceId) {
                            externalIds.put(nodeId, parts[1]);
                            externalNamingSourceIds.put(nodeId, namingSourceId);
                        }
                    }
                    return true;
                }
            });

            final Map<Long, Long> geneDataToNodeIds = new HashMap<Long, Long>();
            processFile(genericDbPath, "NODES.txt", new FileHandler() {
                @Override
                public boolean process(String line) throws IOException {
                    String[] parts = line.split("\t", -1);
                    long nodeId = Long.parseLong(parts[0]);
                    if (nodes.contains(nodeId)) {
                        exportNode(writer, parts, String.valueOf(organism.getId()));
                        geneDataToNodeIds.put(Long.parseLong(parts[2]), nodeId);
                    }
                    return true;
                }
            });

            processFile(genericDbPath, "GENE_DATA.txt", new FileHandler() {
                @Override
                public boolean process(String line) throws IOException {
                    String[] parts = line.split("\t", -1);
                    long geneDataId = Long.parseLong(parts[0]);
                    Long nodeId = geneDataToNodeIds.get(geneDataId);
                    if (nodeId != null) {
                        String externalId = externalIds.get(nodeId);
                        long namingSourceId = -1;
                        if (externalId != null) {
                            namingSourceId = externalNamingSourceIds.get(nodeId);
                        }
                        exportGeneData(writer, parts, externalId, namingSourceId);
                    }
                    return true;
                }
            });

            final Set<Long> groups = new HashSet<Long>();
            processFile(genericDbPath, "NETWORK_GROUPS.txt", new FileHandler() {
                @Override
                public boolean process(String line) throws IOException {
                    String[] parts = line.split("\t", -1);
                    long organismId = Long.parseLong(parts[4]);
                    if (organismId == organism.getId()) {
                        exportGroup(writer, parts);
                        groups.add(Long.parseLong(parts[0]));
                    }
                    return true;
                }
            });

            final Set<Long> metadata = new HashSet<Long>();
            final Set<Long> networks = new HashSet<Long>();
            processFile(genericDbPath, "NETWORKS.txt", new FileHandler() {
                @Override
                public boolean process(String line) throws IOException {
                    String[] parts = line.split("\t", -1);
                    long groupId = Long.parseLong(parts[5]);
                    long networkId = Long.parseLong(parts[0]);
                    if (groups.contains(groupId) && profile.includesNetwork(parts)) {
                        exportNetwork(writer, parts);
                        long metadataId = Long.parseLong(parts[2]);
                        metadata.add(metadataId);
                        networks.add(networkId);
                    }
                    return true;
                }
            });

            processFile(genericDbPath, "NETWORK_METADATA.txt", new FileHandler() {
                @Override
                public boolean process(String line) throws IOException {
                    String[] parts = line.split("\t", -1);
                    long metadataId = Long.parseLong(parts[0]);
                    if (metadata.contains(metadataId)) {
                        exportNetworkMetadata(writer, parts);
                    }
                    return true;
                }
            });

            processFile(genericDbPath, "NETWORK_TAG_ASSOC.txt", new FileHandler() {
                @Override
                public boolean process(String line) throws IOException {
                    String[] parts = line.split("\t", -1);
                    long networkId = Long.parseLong(parts[1]);
                    if (networks.contains(networkId)) {
                        exportNetworkTagAssoc(writer, parts);
                    }
                    return true;
                }
            });

            final Set<Long> attribute_groups = new HashSet<Long>();
            processFile(genericDbPath, "ATTRIBUTE_GROUPS.txt", new FileHandler() {
                @Override
                public boolean process(String line) throws IOException {
                    String[] parts = line.split("\t", -1);
                    long organismId = Long.parseLong(parts[1]);
                    if (organismId == organism.getId()) {
                        exportAttributeGroup(writer, parts);
                        long group_id = Long.parseLong(parts[0]);
                        attribute_groups.add(group_id);
                    }
                    return true;
                }
            });

            final Set<Long> attributes = new HashSet<Long>();
            processFile(genericDbPath, "ATTRIBUTES.txt", new FileHandler() {
                @Override
                public boolean process(String line) throws IOException {
                    String[] parts = line.split("\t", -1);
                    long group_id = Long.parseLong(parts[1]);
                    if (attribute_groups.contains(group_id)) {
                        exportAttribute(writer, parts);
                        long attribute_id = Long.parseLong(parts[0]);
                        attributes.add(attribute_id);
                    }
                    return true;
                }
            });

            writer.close();

            String gmOrganismId = organismSection.getEntry("gm_organism_id");
            File organismFile = new File(makeIndexPath(String.format("%s", gmOrganismId)));
            FSDirectory fileDirectory = FSDirectory.open(organismFile);
            IndexWriter organismWriter = new IndexWriter(fileDirectory, analyzer, true, MaxFieldLength.UNLIMITED);
            IndexReader reader = IndexReader.open(ramDirectory);
            organismWriter.addIndexes(new IndexReader[] { reader });
            organismWriter.close();
            fileDirectory.close();
            ramDirectory.close();

            Properties properties = new Properties();
            properties.put("short_name", shortName);
            properties.put("common_name", organismSection.getEntry("common_name"));
            properties.put("organism_id", gmOrganismId);
            String propertyPath = String.format("%s%smetadata.xml", gmOrganismId, File.separator);
            FileOutputStream out = new FileOutputStream(makeIndexPath(propertyPath));
            try {
                properties.storeToXML(out, null, "UTF-8");
            } finally {
                out.close();
            }
        }
    } finally {
        close();
    }
}
From source file: org.getopt.luke.Luke.java
License: Apache License
/**
 * Open indicated index and re-initialize all GUI and plugins.
 *
 * @param name path to index
 * @param force if true, and the index is locked, unlock it first. If false, and
 *          the index is locked, an error will be reported.
 * @param ro open in read-only mode, and disallow modifications.
 */
public void openIndex(String name, boolean force, String dirImpl, boolean ro, boolean ramdir,
        boolean keepCommits, IndexCommit point, int tiiDivisor) {
    pName = name;
    readOnly = ro;
    removeAll();
    File baseFileDir = new File(name);
    this.baseDir = baseFileDir.toString();
    addComponent(this, "/xml/luke.xml", null, null);
    statmsg = find("statmsg");
    if (dir != null) {
        try {
            if (ir != null)
                ir.close();
            if (ar != null)
                ar.close();
        } catch (Exception e) {
        }
        try {
            if (dir != null)
                dir.close();
        } catch (Exception e) {
        }
    }
    ArrayList<Directory> dirs = new ArrayList<Directory>();
    Throwable lastException = null;
    try {
        Directory d = openDirectory(dirImpl, pName, false);
        if (IndexWriter.isLocked(d)) {
            if (!ro) {
                if (force) {
                    IndexWriter.unlock(d);
                } else {
                    errorMsg("Index is locked. Try 'Force unlock' when opening.");
                    d.close();
                    d = null;
                    return;
                }
            }
        }
        boolean existsSingle = false;
        // IR.indexExists doesn't report the cause of error
        try {
            new SegmentInfos().read(d);
            existsSingle = true;
        } catch (Throwable e) {
            e.printStackTrace();
            lastException = e;
        }
        if (!existsSingle) { // try multi
            File[] files = baseFileDir.listFiles();
            for (File f : files) {
                if (f.isFile()) {
                    continue;
                }
                Directory d1 = openDirectory(dirImpl, f.toString(), false);
                if (IndexWriter.isLocked(d1)) {
                    if (!ro) {
                        if (force) {
                            IndexWriter.unlock(d1);
                        } else {
                            errorMsg("Index is locked. Try 'Force unlock' when opening.");
                            d1.close();
                            d1 = null;
                            return;
                        }
                    }
                }
                existsSingle = false;
                try {
                    new SegmentInfos().read(d1);
                    existsSingle = true;
                } catch (Throwable e) {
                    lastException = e;
                    e.printStackTrace();
                }
                if (!existsSingle) {
                    d1.close();
                    continue;
                }
                dirs.add(d1);
            }
        } else {
            dirs.add(d);
        }

        if (dirs.size() == 0) {
            if (lastException != null) {
                errorMsg("Invalid directory at the location, check console for more information. Last exception:\n"
                        + lastException.toString());
            } else {
                errorMsg("No valid directory at the location, try another location.\nCheck console for other possible causes.");
            }
            return;
        }

        if (ramdir) {
            showStatus("Loading index into RAMDirectory ...");
            Directory dir1 = new RAMDirectory();
            IndexWriterConfig cfg = new IndexWriterConfig(LV, new WhitespaceAnalyzer(LV));
            IndexWriter iw1 = new IndexWriter(dir1, cfg);
            iw1.addIndexes((Directory[]) dirs.toArray(new Directory[dirs.size()]));
            iw1.close();
            showStatus("RAMDirectory loading done!");
            if (dir != null)
                dir.close();
            dirs.clear();
            dirs.add(dir1);
        }

        IndexDeletionPolicy policy;
        if (keepCommits) {
            policy = new KeepAllIndexDeletionPolicy();
        } else {
            policy = new KeepLastIndexDeletionPolicy();
        }
        ArrayList<DirectoryReader> readers = new ArrayList<DirectoryReader>();
        for (Directory dd : dirs) {
            DirectoryReader reader;
            if (tiiDivisor > 1) {
                reader = DirectoryReader.open(dd, tiiDivisor);
            } else {
                reader = DirectoryReader.open(dd);
            }
            readers.add(reader);
        }
        if (readers.size() == 1) {
            ir = readers.get(0);
            dir = ((DirectoryReader) ir).directory();
        } else {
            ir = new MultiReader((IndexReader[]) readers.toArray(new IndexReader[readers.size()]));
        }
        is = new IndexSearcher(ir);
        // XXX
        slowAccess = false;
        initOverview();
        initPlugins();
        showStatus("Index successfully open.");
    } catch (Exception e) {
        e.printStackTrace();
        errorMsg(e.getMessage());
        return;
    }
}