List of usage examples for org.apache.lucene.index.IndexWriter.getDirectory()
public Directory getDirectory()
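Before the project examples, a minimal sketch of the call itself. This assumes a Lucene 5+ API; the class name GetDirectoryDemo and the path /tmp/example-index are illustrative placeholders, not taken from any of the projects below. getDirectory() simply reports the Directory the writer was opened on.

import java.nio.file.Paths;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class GetDirectoryDemo {
    public static void main(String[] args) throws Exception {
        // Placeholder index location.
        Directory dir = FSDirectory.open(Paths.get("/tmp/example-index"));
        try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            // getDirectory() hands back the Directory the writer was opened on;
            // as the examples below show, this is mostly used for log messages,
            // test assertions, and lock housekeeping around close().
            System.out.println("Writer is backed by: " + writer.getDirectory());
        }
        dir.close();
    }
}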
From source file:it.doqui.index.ecmengine.business.personalization.multirepository.index.lucene.IndexInfoProxyServiceNoTC.java
License:Open Source License
/**
 * Private utility method that inserts a single Lucene document into the index
 * the writer is currently open on.
 *
 * @param doc the Lucene document to insert
 * @param writer the writer to use for the insert
 * @throws LuceneIndexException
 * @throws IOException
 */
private synchronized void insert(Document doc, IndexWriter writer) throws LuceneIndexException, IOException {
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("[IndexInfoProxyServiceNoTC::insert] (" + this + ") Writing doc: " + doc.getField("ID")
                + " [To directory: " + writer.getDirectory() + "]");
        //s_logger.debug("[IndexInfoProxyServiceNoTC::insert] " + "Document: [" + doc.toString().replaceAll(" ", "\r\n") + "]");
    }
    writer.addDocument(doc);
}
From source file:net.tooan.ynpay.third.mongodb.lucene.BuguIndex.java
License:Apache License
public void close() {
    try {
        if (executor != null) {
            executor.shutdown();
            executor.awaitTermination(5, TimeUnit.SECONDS);
        }
        if (scheduler != null) {
            scheduler.shutdown();
            scheduler.awaitTermination(5, TimeUnit.SECONDS);
        }
        if (clusterConfig != null) {
            clusterConfig.invalidate();
        }
    } catch (InterruptedException ex) {
        logger.error(ex.getMessage(), ex);
    }
    Map<String, IndexWriter> map = IndexWriterCache.getInstance().getAll();
    for (IndexWriter writer : map.values()) {
        if (writer != null) {
            Directory dir = writer.getDirectory();
            try {
                writer.commit();
                writer.close(true);
            } catch (CorruptIndexException ex) {
                logger.error("Can not commit and close the lucene index", ex);
            } catch (IOException ex) {
                logger.error("Can not commit and close the lucene index", ex);
            } finally {
                try {
                    if (dir != null && IndexWriter.isLocked(dir)) {
                        IndexWriter.unlock(dir);
                    }
                } catch (IOException ex) {
                    logger.error("Can not unlock the lucene index", ex);
                }
            }
        }
    }
}
From source file:net.ymate.platform.module.search.support.IndexHelper.java
License:Apache License
public void release() {
    Searchs.__doStopSafed(__scheduler);
    // _LOG.debug("Release IndexHelper");
    for (IndexWriter writer : Searchs.__WRITER_CACHES.values()) {
        if (writer != null) {
            Directory dir = writer.getDirectory();
            try {
                writer.commit();
                writer.close(true);
            } catch (Exception ex) {
                _LOG.error("Commit And Close IndexWriter Error", RuntimeUtils.unwrapThrow(ex));
            } finally {
                try {
                    if (dir != null && IndexWriter.isLocked(dir)) {
                        IndexWriter.unlock(dir);
                    }
                } catch (IOException ex) {
                    _LOG.error("Unlock IndexWriter", RuntimeUtils.unwrapThrow(ex));
                }
            }
        }
    }
}
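Both cleanup routines above follow the same pattern: the Directory is captured via writer.getDirectory() before commit/close, so the finally block can still release the index lock even when commit() or close() throws. Note that writer.close(true), IndexWriter.isLocked(Directory), and IndexWriter.unlock(Directory) are pre-5.0 Lucene APIs; later releases removed them in favor of obtaining locks through the Directory itself.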
From source file:org.apache.geode.cache.lucene.internal.PartitionedRepositoryManagerJUnitTest.java
License:Apache License
protected void checkRepository(IndexRepositoryImpl repo0, int bucketId) {
    IndexWriter writer0 = repo0.getWriter();
    RegionDirectory dir0 = (RegionDirectory) writer0.getDirectory();
    assertEquals(fileBuckets.get(bucketId), dir0.getFileSystem().getFileRegion());
    assertEquals(chunkBuckets.get(bucketId), dir0.getFileSystem().getChunkRegion());
    assertEquals(serializer, repo0.getSerializer());
}
From source file:org.apache.geode.cache.lucene.internal.RawLuceneRepositoryManagerJUnitTest.java
License:Apache License
@Override
protected void checkRepository(IndexRepositoryImpl repo0, int bucketId) {
    IndexWriter writer0 = repo0.getWriter();
    Directory dir0 = writer0.getDirectory();
    assertTrue(dir0 instanceof NIOFSDirectory);
}
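Both Geode tests use getDirectory() purely for assertions about the backing store: the partitioned repository should sit on Geode's RegionDirectory, while the raw repository should sit on a plain NIOFSDirectory.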
From source file:org.apache.jena.larq.IndexBuilderBase.java
License:Apache License
/** Manage a Lucene index that has already been created */
public IndexBuilderBase(IndexWriter existingWriter) {
    dir = existingWriter.getDirectory();
    indexWriter = existingWriter;
}
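Here getDirectory() lets the builder adopt an already-open writer without asking the caller to pass the Directory separately, so the two fields stay consistent by construction.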
From source file:org.apache.maven.index.updater.IndexDataReader.java
License:Apache License
public IndexDataReadResult readIndex(IndexWriter w, IndexingContext context) throws IOException {
    long timestamp = readHeader();
    Date date = null;
    if (timestamp != -1) {
        date = new Date(timestamp);
        IndexUtils.updateTimestamp(w.getDirectory(), date);
    }
    int n = 0;
    Document doc;
    Set<String> rootGroups = new LinkedHashSet<>();
    Set<String> allGroups = new LinkedHashSet<>();
    while ((doc = readDocument()) != null) {
        ArtifactInfo ai = IndexUtils.constructArtifactInfo(doc, context);
        if (ai != null) {
            w.addDocument(IndexUtils.updateDocument(doc, context, false, ai));
            rootGroups.add(ai.getRootGroup());
            allGroups.add(ai.getGroupId());
        } else {
            w.addDocument(doc);
        }
        n++;
    }
    w.commit();
    IndexDataReadResult result = new IndexDataReadResult();
    result.setDocumentCount(n);
    result.setTimestamp(date);
    result.setRootGroups(rootGroups);
    result.setAllGroups(allGroups);
    return result;
}
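The writer's Directory also serves as the handle for index metadata here: IndexUtils.updateTimestamp(w.getDirectory(), date) records the update timestamp against the same Directory that holds the index files.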
From source file:org.apache.solr.cloud.hdfs.HdfsWriteToMultipleCollectionsTest.java
License:Apache License
@Override
public void doTest() throws Exception {
    int docCount = random().nextInt(1313) + 1;
    int cnt = random().nextInt(4) + 1;
    for (int i = 0; i < cnt; i++) {
        createCollection(ACOLLECTION + i, 2, 2, 9);
    }
    for (int i = 0; i < cnt; i++) {
        waitForRecoveriesToFinish(ACOLLECTION + i, false);
    }
    List<CloudSolrServer> cloudServers = new ArrayList<>();
    List<StopableIndexingThread> threads = new ArrayList<>();
    for (int i = 0; i < cnt; i++) {
        CloudSolrServer server = new CloudSolrServer(zkServer.getZkAddress());
        server.setDefaultCollection(ACOLLECTION + i);
        cloudServers.add(server);
        StopableIndexingThread indexThread = new StopableIndexingThread(null, server, "1", true, docCount);
        threads.add(indexThread);
        indexThread.start();
    }
    int addCnt = 0;
    for (StopableIndexingThread thread : threads) {
        thread.join();
        addCnt += thread.getNumAdds() - thread.getNumDeletes();
    }
    long collectionsCount = 0;
    for (CloudSolrServer server : cloudServers) {
        server.commit();
        collectionsCount += server.query(new SolrQuery("*:*")).getResults().getNumFound();
    }
    for (CloudSolrServer server : cloudServers) {
        server.shutdown();
    }
    assertEquals(addCnt, collectionsCount);

    BlockCache lastBlockCache = null;
    // assert that we are using the block directory and that write and read caching are being used
    for (JettySolrRunner jetty : jettys) {
        CoreContainer cores = ((SolrDispatchFilter) jetty.getDispatchFilter().getFilter()).getCores();
        Collection<SolrCore> solrCores = cores.getCores();
        for (SolrCore core : solrCores) {
            if (core.getCoreDescriptor().getCloudDescriptor().getCollectionName().startsWith(ACOLLECTION)) {
                assertTrue(core.getDirectoryFactory() instanceof HdfsDirectoryFactory);
                RefCounted<IndexWriter> iwRef = core.getUpdateHandler().getSolrCoreState().getIndexWriter(core);
                try {
                    IndexWriter iw = iwRef.get();
                    NRTCachingDirectory directory = (NRTCachingDirectory) iw.getDirectory();
                    BlockDirectory blockDirectory = (BlockDirectory) directory.getDelegate();
                    assertTrue(blockDirectory.isBlockCacheReadEnabled());
                    assertTrue(blockDirectory.isBlockCacheWriteEnabled());
                    Cache cache = blockDirectory.getCache();
                    // we know it's a BlockDirectoryCache, but future proof
                    assertTrue(cache instanceof BlockDirectoryCache);
                    BlockCache blockCache = ((BlockDirectoryCache) cache).getBlockCache();
                    if (lastBlockCache != null) {
                        if (Boolean.getBoolean(SOLR_HDFS_BLOCKCACHE_GLOBAL)) {
                            assertEquals(lastBlockCache, blockCache);
                        } else {
                            assertNotSame(lastBlockCache, blockCache);
                        }
                    }
                    lastBlockCache = blockCache;
                } finally {
                    iwRef.decref();
                }
            }
        }
    }
}
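In this test, getDirectory() returns an NRTCachingDirectory wrapper rather than a raw directory; the test unwraps it with getDelegate() to reach the BlockDirectory underneath and verify that read and write block caching are enabled.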
From source file:org.apache.solr.core.SolrCore.java
License:Apache License
/**
 * Creates a new core and registers it in the list of cores.
 * If a core with the same name already exists, it will be stopped and replaced by this one.
 *
 * @param dataDir the index directory
 * @param config a solr config instance
 * @param schema a solr schema instance
 *
 * @since solr 1.3
 */
public SolrCore(String name, String dataDir, SolrConfig config, IndexSchema schema, CoreDescriptor cd,
        UpdateHandler updateHandler, IndexDeletionPolicyWrapper delPolicy, SolrCore prev) {
    coreDescriptor = cd;
    this.setName(name);
    resourceLoader = config.getResourceLoader();
    this.solrConfig = config;

    if (updateHandler == null) {
        initDirectoryFactory();
    }

    if (dataDir == null) {
        if (cd.usingDefaultDataDir())
            dataDir = config.getDataDir();
        if (dataDir == null) {
            try {
                dataDir = cd.getDataDir();
                if (!directoryFactory.isAbsolute(dataDir)) {
                    dataDir = directoryFactory.getDataHome(cd);
                }
            } catch (IOException e) {
                throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, null, e);
            }
        }
    }
    dataDir = SolrResourceLoader.normalizeDir(dataDir);

    log.info(logid + "Opening new SolrCore at " + resourceLoader.getInstanceDir() + ", dataDir=" + dataDir);

    if (null != cd && null != cd.getCloudDescriptor()) {
        // we are evidently running in cloud mode.
        //
        // In cloud mode, version field is required for correct consistency
        // ideally this check would be more fine grained, and individual features
        // would assert it when they initialize, but DistributedUpdateProcessor
        // is currently a big ball of wax that does more than just distributing
        // updates (ie: partial document updates), so it needs to work in no cloud
        // mode as well, and can't assert version field support on init.
        try {
            VersionInfo.getAndCheckVersionField(schema);
        } catch (SolrException e) {
            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                    "Schema will not work with SolrCloud mode: " + e.getMessage(), e);
        }
    }

    // Initialize JMX
    if (config.jmxConfig.enabled) {
        infoRegistry = new JmxMonitoredMap<String, SolrInfoMBean>(name, String.valueOf(this.hashCode()),
                config.jmxConfig);
    } else {
        log.info("JMX monitoring not detected for core: " + name);
        infoRegistry = new ConcurrentHashMap<String, SolrInfoMBean>();
    }

    infoRegistry.put("fieldCache", new SolrFieldCacheMBean());

    if (schema == null) {
        schema = IndexSchemaFactory.buildIndexSchema(IndexSchema.DEFAULT_SCHEMA_FILE, config);
    }
    this.schema = schema;
    final SimilarityFactory similarityFactory = schema.getSimilarityFactory();
    if (similarityFactory instanceof SolrCoreAware) {
        // Similarity needs SolrCore before inform() is called on all registered SolrCoreAware listeners below
        ((SolrCoreAware) similarityFactory).inform(this);
    }

    this.dataDir = dataDir;
    this.startTime = System.currentTimeMillis();
    this.maxWarmingSearchers = config.maxWarmingSearchers;

    booleanQueryMaxClauseCount();

    final CountDownLatch latch = new CountDownLatch(1);

    try {
        initListeners();

        if (delPolicy == null) {
            initDeletionPolicy();
        } else {
            this.solrDelPolicy = delPolicy;
        }

        this.codec = initCodec(solrConfig, schema);

        if (updateHandler == null) {
            solrCoreState = new DefaultSolrCoreState(getDirectoryFactory());
        } else {
            solrCoreState = updateHandler.getSolrCoreState();
            directoryFactory = solrCoreState.getDirectoryFactory();
            this.isReloaded = true;
        }

        initIndex(prev != null);

        initWriters();
        initQParsers();
        initValueSourceParsers();
        initTransformerFactories();

        this.searchComponents = Collections.unmodifiableMap(loadSearchComponents());

        // Processors initialized before the handlers
        updateProcessorChains = loadUpdateProcessorChains();
        reqHandlers = new RequestHandlers(this);
        reqHandlers.initHandlersFromConfig(solrConfig);

        // Handle things that should eventually go away
        initDeprecatedSupport();

        // cause the executor to stall so firstSearcher events won't fire
        // until after inform() has been called for all components.
        // searchExecutor must be single-threaded for this to work
        searcherExecutor.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                latch.await();
                return null;
            }
        });

        // use the (old) writer to open the first searcher
        RefCounted<IndexWriter> iwRef = null;
        if (prev != null) {
            iwRef = prev.getUpdateHandler().getSolrCoreState().getIndexWriter(null);
            if (iwRef != null) {
                final IndexWriter iw = iwRef.get();
                final SolrCore core = this;
                newReaderCreator = new Callable<DirectoryReader>() {
                    // this is used during a core reload
                    @Override
                    public DirectoryReader call() throws Exception {
                        if (getSolrConfig().nrtMode) {
                            // if in NRT mode, need to open from the previous writer
                            return indexReaderFactory.newReader(iw, core);
                        } else {
                            // if not NRT, need to create a new reader from the directory
                            return indexReaderFactory.newReader(iw.getDirectory(), core);
                        }
                    }
                };
            }
        }

        String updateHandlerClass = solrConfig.getUpdateHandlerInfo().className;
        if (updateHandler == null) {
            this.updateHandler = createUpdateHandler(
                    updateHandlerClass == null ? DirectUpdateHandler2.class.getName() : updateHandlerClass);
        } else {
            this.updateHandler = createUpdateHandler(
                    updateHandlerClass == null ? DirectUpdateHandler2.class.getName() : updateHandlerClass,
                    updateHandler);
        }
        infoRegistry.put("updateHandler", this.updateHandler);

        try {
            getSearcher(false, false, null, true);
        } finally {
            newReaderCreator = null;
            if (iwRef != null) iwRef.decref();
        }

        // Finally tell anyone who wants to know
        resourceLoader.inform(resourceLoader);
        resourceLoader.inform(this); // last call before the latch is released.
    } catch (Throwable e) {
        // release the latch, otherwise we block trying to do the close. This should be fine,
        // since counting down on a latch of 0 is still fine
        latch.countDown();
        // close down the searcher and any other resources, if it exists, as this is not recoverable
        close();
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e.getMessage(), e);
    } finally {
        // allow firstSearcher events to fire and make sure it is released
        latch.countDown();
    }

    infoRegistry.put("core", this);

    // register any SolrInfoMBeans SolrResourceLoader initialized
    //
    // this must happen after the latch is released, because a JMX server impl may
    // choose to block on registering until properties can be fetched from an MBean,
    // and a SolrCoreAware MBean may have properties that depend on getting a Searcher
    // from the core.
    resourceLoader.inform(infoRegistry);

    CoreContainer cc = cd.getCoreContainer();
    if (cc != null && cc.isZooKeeperAware()
            && Slice.CONSTRUCTION.equals(cd.getCloudDescriptor().getShardState())) {
        // set update log to buffer before publishing the core
        getUpdateHandler().getUpdateLog().bufferUpdates();
        cd.getCloudDescriptor().setShardState(null);
        cd.getCloudDescriptor().setShardRange(null);
        cd.getCloudDescriptor().setShardParent(null);
    }

    // For debugging
    // numOpens.incrementAndGet();
    // openHandles.put(this, new RuntimeException("unclosed core - name:" + getName() + " refs: " + refCount.get()));
}
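The getDirectory() call here sits inside newReaderCreator: when a core is reloaded in non-NRT mode, the first searcher's DirectoryReader is opened from the previous writer's Directory rather than from the writer itself.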
From source file:org.apache.uima.lucas.indexer.IndexWriterProviderImplTest.java
License:Apache License
@Test
public void testLoadData() throws IOException, ResourceInitializationException {
    expect(dataResource.getInputStream()).andReturn(propertiesInputStream);
    replay(dataResource);

    indexWriterProviderImpl.load(dataResource);
    IndexWriter indexWriter = indexWriterProviderImpl.getIndexWriter();
    FSDirectory fsDirectory = (FSDirectory) indexWriter.getDirectory();
    String hostname = getHostName();
    String pid = getPID();
    String writerPath = fsDirectory.getFile().getAbsolutePath();
    assertTrue(writerPath.contains(TEST_INDEX + "-" + hostname + "-" + pid));
    assertEquals(513, indexWriter.getRAMBufferSizeMB(), 0.5);
    assertEquals(9999, indexWriter.getMaxFieldLength(), 0.5);
    String random = writerPath.substring(writerPath.lastIndexOf("-") + 1);
    assertTrue(random.matches("\\d+"));
}
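Casting the result of getDirectory() to FSDirectory gives the test access to the on-disk path (via FSDirectory.getFile(), an older Lucene 2.x/3.x API) so it can verify the hostname-PID-random naming scheme of the generated index directory.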