List of usage examples for org.apache.lucene.index.IndexWriter.WRITE_LOCK_NAME

Field declaration: public static final String WRITE_LOCK_NAME. The constant holds the name of the write-lock file ("write.lock") that IndexWriter creates in an index directory to enforce a single active writer.
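Before the project examples, here is a minimal, self-contained sketch of the typical use of the constant: obtaining the lock named by WRITE_LOCK_NAME tells you whether another IndexWriter currently owns the index. It assumes the Lucene 5+ Directory API; the index path is a hypothetical placeholder.

import java.nio.file.Paths;

import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;

public class WriteLockProbe {
    public static void main(String[] args) throws Exception {
        // "/tmp/example-index" is a placeholder path for illustration only.
        try (Directory dir = FSDirectory.open(Paths.get("/tmp/example-index"))) {
            // IndexWriter.WRITE_LOCK_NAME ("write.lock") names the file that
            // IndexWriter locks to guarantee a single writer per directory.
            try (Lock lock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
                System.out.println("No IndexWriter holds the lock; safe to open a writer.");
            } catch (LockObtainFailedException e) {
                System.out.println("Another IndexWriter already holds write.lock.");
            }
        }
    }
}

The examples below use the same constant for three recurring tasks: acquiring the lock explicitly, clearing or deleting stale lock files, and filtering the lock file out of copy and consistency checks.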
From source file: org.compass.core.lucene.engine.manager.DefaultLuceneSearchEngineIndexManager.java
License: Apache License

protected void doOperate(final IndexOperationCallback callback) throws SearchEngineException {
    // first acquire the write lock for all the sub-indexes
    String[] subIndexes = searchEngineStore.getSubIndexes();
    if (callback instanceof IndexOperationPlan) {
        IndexOperationPlan plan = (IndexOperationPlan) callback;
        subIndexes = searchEngineStore.polyCalcSubIndexes(plan.getSubIndexes(), plan.getAliases(),
                plan.getTypes());
    }
    final Lock[] writerLocks = new Lock[subIndexes.length];
    try {
        if (log.isDebugEnabled()) {
            log.debug("Trying to obtain write locks");
        }
        final String[] finalSubIndexes = subIndexes;
        searchEngineFactory.getTransactionContext().execute(new TransactionContextCallback<Object>() {
            public Object doInTransaction() throws CompassException {
                for (int i = 0; i < finalSubIndexes.length; i++) {
                    Directory dir = getDirectory(finalSubIndexes[i]);
                    writerLocks[i] = dir.makeLock(IndexWriter.WRITE_LOCK_NAME);
                    try {
                        writerLocks[i].obtain(luceneSettings.getTransactionLockTimout());
                    } catch (IOException e) {
                        throw new SearchEngineException("Failed to retrieve transaction locks", e);
                    }
                }
                return null;
            }
        });
        if (log.isDebugEnabled()) {
            log.debug("Obtained write locks");
        }
        if (log.isDebugEnabled()) {
            log.debug("Calling callback first step");
        }
        // call the first step
        boolean continueToSecondStep = callback.firstStep();
        if (!continueToSecondStep) {
            return;
        }
        // perform the replace operation
        // TODO here we need to make sure that no read operations will happen as well
        // tell everybody who is using the index to clear the cache
        clearCache();
        notifyAllToClearCache();
        if (waitForCacheInvalidationBeforeSecondStep != 0
                && luceneSettings.isWaitForCacheInvalidationOnIndexOperation()) {
            // now wait for the cache invalidation
            try {
                if (log.isDebugEnabled()) {
                    log.debug("Waiting [" + waitForCacheInvalidationBeforeSecondStep
                            + "ms] for global cache invalidation");
                }
                Thread.sleep(waitForCacheInvalidationBeforeSecondStep);
            } catch (InterruptedException e) {
                log.debug("Interrupted while waiting for cache invalidation", e);
                throw new SearchEngineException("Interrupted while waiting for cache invalidation", e);
            }
        }
        if (log.isDebugEnabled()) {
            log.debug("Calling callback second step");
        }
        // call the second step
        callback.secondStep();
    } finally {
        searchEngineFactory.getTransactionContext().execute(new TransactionContextCallback<Object>() {
            public Object doInTransaction() throws CompassException {
                LuceneUtils.clearLocks(writerLocks);
                return null;
            }
        });
    }
}
From source file: org.deeplearning4j.text.invertedindex.LuceneInvertedIndex.java
License: Apache License

private IndexWriter tryCreateWriter(IndexWriterConfig iwc) {
    try {
        dir.close();
        dir = null;
        FileUtils.deleteDirectory(new File(indexPath));
        ensureDirExists();
        if (lockFactory == null)
            lockFactory = new NativeFSLockFactory(new File(indexPath));
        lockFactory.clearLock(IndexWriter.WRITE_LOCK_NAME);
        return new IndexWriter(dir, iwc);
    } catch (Exception e) {
        String id = UUID.randomUUID().toString();
        indexPath = id;
        log.warn("Setting index path to " + id);
        log.warn("Couldn't create index ", e);
        return null;
    }
}
From source file: org.eclipse.dltk.internal.core.index.lucene.IndexContainer.java
License: Open Source License

private void purgeLocks(Path path) {
    /*
     * Checks if any write locks exist (they might not have been removed if
     * the JVM crashed or was terminated abnormally) and simply deletes them.
     */
    Path writeLockPath = path.resolve(IndexWriter.WRITE_LOCK_NAME);
    if (writeLockPath.toFile().exists()) {
        try {
            Files.delete(writeLockPath);
        } catch (IOException e) {
            Logger.logException(e);
        }
    }
}
From source file: org.elasticsearch.bwcompat.OldIndexBackwardsCompatibilityTests.java
License: Apache License

void copyIndex(final Path src, final String indexName, final Path... dests) throws IOException {
    for (Path dest : dests) {
        Path indexDir = dest.resolve(indexName);
        assertFalse(Files.exists(indexDir));
        Files.createDirectories(indexDir);
    }
    Files.walkFileTree(src, new SimpleFileVisitor<Path>() {
        @Override
        public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
            Path relativeDir = src.relativize(dir);
            for (Path dest : dests) {
                Path destDir = dest.resolve(indexName).resolve(relativeDir);
                Files.createDirectories(destDir);
            }
            return FileVisitResult.CONTINUE;
        }

        @Override
        public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
            if (file.getFileName().toString().equals(IndexWriter.WRITE_LOCK_NAME)) {
                // skip lock file, we don't need it
                logger.trace("Skipping lock file: " + file.toString());
                return FileVisitResult.CONTINUE;
            }
            Path relativeFile = src.relativize(file);
            Path destFile = dests[randomInt(dests.length - 1)].resolve(indexName).resolve(relativeFile);
            logger.trace("--> Moving " + relativeFile.toString() + " to " + destFile.toString());
            Files.move(file, destFile);
            assertFalse(Files.exists(file));
            assertTrue(Files.exists(destFile));
            return FileVisitResult.CONTINUE;
        }
    });
}
From source file: org.elasticsearch.common.util.MultiDataPathUpgrader.java
License: Apache License

/**
 * Upgrades the given shard Id from multiple shard paths into the given target path.
 *
 * @see #pickShardPath(org.elasticsearch.index.shard.ShardId)
 */
public void upgrade(ShardId shard, ShardPath targetPath) throws IOException {
    final Path[] paths = nodeEnvironment.availableShardPaths(shard); // custom data path doesn't need upgrading
    if (isTargetPathConfigured(paths, targetPath) == false) {
        throw new IllegalArgumentException("shard path must be one of the shards data paths");
    }
    assert needsUpgrading(shard) : "Should not upgrade a path that needs no upgrading";
    logger.info("{} upgrading multi data dir to {}", shard, targetPath.getDataPath());
    final ShardStateMetaData loaded = ShardStateMetaData.FORMAT.loadLatestState(logger, paths);
    if (loaded == null) {
        throw new IllegalStateException(shard + " no shard state found in any of: " + Arrays.toString(paths)
                + " please check and remove them if possible");
    }
    logger.info("{} loaded shard state {}", shard, loaded);
    ShardStateMetaData.FORMAT.write(loaded, loaded.version, targetPath.getShardStatePath());
    Files.createDirectories(targetPath.resolveIndex());
    try (SimpleFSDirectory directory = new SimpleFSDirectory(targetPath.resolveIndex())) {
        try (final Lock lock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
            upgradeFiles(shard, targetPath, targetPath.resolveIndex(), ShardPath.INDEX_FOLDER_NAME, paths);
        } catch (LockObtainFailedException ex) {
            throw new IllegalStateException("Can't obtain lock on " + targetPath.resolveIndex(), ex);
        }
    }
    upgradeFiles(shard, targetPath, targetPath.resolveTranslog(), ShardPath.TRANSLOG_FOLDER_NAME, paths);
    logger.info("{} wipe upgraded directories", shard);
    for (Path path : paths) {
        if (path.equals(targetPath.getShardStatePath()) == false) {
            logger.info("{} wipe shard directories: [{}]", shard, path);
            IOUtils.rm(path);
        }
    }
    if (FileSystemUtils.files(targetPath.resolveIndex()).length == 0) {
        throw new IllegalStateException("index folder [" + targetPath.resolveIndex() + "] is empty");
    }
    if (FileSystemUtils.files(targetPath.resolveTranslog()).length == 0) {
        throw new IllegalStateException("translog folder [" + targetPath.resolveTranslog() + "] is empty");
    }
}
From source file: org.elasticsearch.common.util.MultiDataPathUpgrader.java
License: Apache License

private void upgradeFiles(ShardId shard, ShardPath targetPath, final Path targetDir, String folderName,
        Path[] paths) throws IOException {
    List<Path> movedFiles = new ArrayList<>();
    for (Path path : paths) {
        if (path.equals(targetPath.getDataPath()) == false) {
            final Path sourceDir = path.resolve(folderName);
            if (Files.exists(sourceDir)) {
                logger.info("{} upgrading [{}] from [{}] to [{}]", shard, folderName, sourceDir, targetDir);
                try (DirectoryStream<Path> stream = Files.newDirectoryStream(sourceDir)) {
                    Files.createDirectories(targetDir);
                    for (Path file : stream) {
                        if (IndexWriter.WRITE_LOCK_NAME.equals(file.getFileName().toString())
                                || Files.isDirectory(file)) {
                            continue; // skip write.lock
                        }
                        logger.info("{} move file [{}] size: [{}]", shard, file.getFileName(),
                                Files.size(file));
                        final Path targetFile = targetDir.resolve(file.getFileName());
                        /*
                         * We are pessimistic and do a copy first to the other path, and then an
                         * atomic move to rename it, such that in the worst case the file exists
                         * twice but is never lost or half written.
                         */
                        final Path targetTempFile = Files.createTempFile(targetDir, "upgrade_",
                                "_" + file.getFileName().toString());
                        Files.copy(file, targetTempFile, StandardCopyOption.COPY_ATTRIBUTES,
                                StandardCopyOption.REPLACE_EXISTING);
                        // we are on the same FS - this must work, otherwise all bets are off
                        Files.move(targetTempFile, targetFile, StandardCopyOption.ATOMIC_MOVE);
                        Files.delete(file);
                        movedFiles.add(targetFile);
                    }
                }
            }
        }
    }
    if (movedFiles.isEmpty() == false) {
        // fsync later - it might be on disk already
        logger.info("{} fsync files", shard);
        for (Path moved : movedFiles) {
            logger.info("{} syncing [{}]", shard, moved.getFileName());
            IOUtils.fsync(moved, false);
        }
        logger.info("{} syncing directory [{}]", shard, targetDir);
        IOUtils.fsync(targetDir, true);
    }
}
From source file: org.elasticsearch.index.store.StoreTest.java
License: Apache License

public static void assertConsistent(Store store, Store.MetadataSnapshot metadata) throws IOException {
    for (String file : store.directory().listAll()) {
        if (!IndexWriter.WRITE_LOCK_NAME.equals(file) && !IndexFileNames.SEGMENTS_GEN.equals(file)
                && !Store.isChecksum(file)) {
            assertTrue(file + " is not in the map: " + metadata.asMap().size() + " vs. "
                    + store.directory().listAll().length, metadata.asMap().containsKey(file));
        } else {
            assertFalse(file + " is not in the map: " + metadata.asMap().size() + " vs. "
                    + store.directory().listAll().length, metadata.asMap().containsKey(file));
        }
    }
}
From source file: org.elasticsearch.index.store.StoreTests.java
License: Apache License

public static void assertConsistent(Store store, Store.MetadataSnapshot metadata) throws IOException {
    for (String file : store.directory().listAll()) {
        if (!IndexWriter.WRITE_LOCK_NAME.equals(file) && !IndexFileNames.OLD_SEGMENTS_GEN.equals(file)
                && !Store.isChecksum(file) && file.startsWith("extra") == false) {
            assertTrue(file + " is not in the map: " + metadata.asMap().size() + " vs. "
                    + store.directory().listAll().length, metadata.asMap().containsKey(file));
        } else {
            assertFalse(file + " is not in the map: " + metadata.asMap().size() + " vs. "
                    + store.directory().listAll().length, metadata.asMap().containsKey(file));
        }
    }
}
From source file: org.elasticsearch.index.translog.TruncateTranslogCommand.java
License: Apache License

@Override
protected void execute(Terminal terminal, OptionSet options, Map<String, String> settings) throws Exception {
    boolean batch = options.has(batchMode);

    Path translogPath = getTranslogPath(options);
    Path idxLocation = translogPath.getParent().resolve("index");
    if (Files.exists(translogPath) == false || Files.isDirectory(translogPath) == false) {
        throw new ElasticsearchException(
                "translog directory [" + translogPath + "], must exist and be a directory");
    }
    if (Files.exists(idxLocation) == false || Files.isDirectory(idxLocation) == false) {
        throw new ElasticsearchException(
                "unable to find a shard at [" + idxLocation + "], which must exist and be a directory");
    }

    // Hold the lock open for the duration of the tool running
    try (Directory dir = FSDirectory.open(idxLocation, NativeFSLockFactory.INSTANCE);
            Lock writeLock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
        Set<Path> translogFiles;
        try {
            terminal.println("Checking existing translog files");
            translogFiles = filesInDirectory(translogPath);
        } catch (IOException e) {
            terminal.println("encountered IOException while listing directory, aborting...");
            throw new ElasticsearchException("failed to find existing translog files", e);
        }

        // Warn about ES being stopped and files being deleted
        warnAboutDeletingFiles(terminal, translogFiles, batch);

        List<IndexCommit> commits;
        try {
            terminal.println("Reading translog UUID information from Lucene commit from shard at ["
                    + idxLocation + "]");
            commits = DirectoryReader.listCommits(dir);
        } catch (IndexNotFoundException infe) {
            throw new ElasticsearchException("unable to find a valid shard at [" + idxLocation + "]", infe);
        }

        // Retrieve the generation and UUID from the existing data
        Map<String, String> commitData = commits.get(commits.size() - 1).getUserData();
        String translogGeneration = commitData.get(Translog.TRANSLOG_GENERATION_KEY);
        String translogUUID = commitData.get(Translog.TRANSLOG_UUID_KEY);
        if (translogGeneration == null || translogUUID == null) {
            throw new ElasticsearchException(
                    "shard must have a valid translog generation and UUID but got: [{}] and: [{}]",
                    translogGeneration, translogUUID);
        }
        terminal.println("Translog Generation: " + translogGeneration);
        terminal.println("Translog UUID : " + translogUUID);

        Path tempEmptyCheckpoint = translogPath.resolve("temp-" + Translog.CHECKPOINT_FILE_NAME);
        Path realEmptyCheckpoint = translogPath.resolve(Translog.CHECKPOINT_FILE_NAME);
        Path tempEmptyTranslog = translogPath.resolve("temp-" + Translog.TRANSLOG_FILE_PREFIX
                + translogGeneration + Translog.TRANSLOG_FILE_SUFFIX);
        Path realEmptyTranslog = translogPath.resolve(Translog.TRANSLOG_FILE_PREFIX + translogGeneration
                + Translog.TRANSLOG_FILE_SUFFIX);

        // Write empty checkpoint and translog to empty files
        long gen = Long.parseLong(translogGeneration);
        int translogLen = writeEmptyTranslog(tempEmptyTranslog, translogUUID);
        writeEmptyCheckpoint(tempEmptyCheckpoint, translogLen, gen);

        terminal.println("Removing existing translog files");
        IOUtils.rm(translogFiles.toArray(new Path[] {}));

        terminal.println("Creating new empty checkpoint at [" + realEmptyCheckpoint + "]");
        Files.move(tempEmptyCheckpoint, realEmptyCheckpoint, StandardCopyOption.ATOMIC_MOVE);
        terminal.println("Creating new empty translog at [" + realEmptyTranslog + "]");
        Files.move(tempEmptyTranslog, realEmptyTranslog, StandardCopyOption.ATOMIC_MOVE);

        // Fsync the translog directory after rename
        IOUtils.fsync(translogPath, true);
    } catch (LockObtainFailedException lofe) {
        throw new ElasticsearchException(
                "Failed to lock shard's directory at [" + idxLocation + "], is Elasticsearch still running?");
    }

    terminal.println("Done.");
}
From source file: org.elasticsearch.index.translog.TruncateTranslogIT.java
License: Apache License

public void testCorruptTranslogTruncation() throws Exception {
    internalCluster().startNodesAsync(1, Settings.EMPTY).get();
    assertAcked(prepareCreate("test").setSettings(Settings.builder()
            .put("index.number_of_shards", 1)
            .put("index.number_of_replicas", 0)
            .put("index.refresh_interval", "-1")
            .put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), true) // never flush - always recover from translog
    ));
    ensureYellow();

    // Index some documents
    int numDocs = scaledRandomIntBetween(100, 1000);
    IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
    for (int i = 0; i < builders.length; i++) {
        builders[i] = client().prepareIndex("test", "type").setSource("foo", "bar");
    }
    disableTranslogFlush("test");
    indexRandom(false, false, false, Arrays.asList(builders));
    Set<Path> translogDirs = getTranslogDirs("test");

    TruncateTranslogCommand ttc = new TruncateTranslogCommand();
    MockTerminal t = new MockTerminal();
    OptionParser parser = ttc.getParser();

    for (Path translogDir : translogDirs) {
        OptionSet options = parser.parse("-d", translogDir.toAbsolutePath().toString(), "-b");
        // Try running it before the shard is closed, it should flip out because it can't acquire the lock
        try {
            logger.info("--> running truncate while index is open on [{}]", translogDir.toAbsolutePath());
            ttc.execute(t, options, new HashMap<String, String>());
            fail("expected the truncate command to fail not being able to acquire the lock");
        } catch (Exception e) {
            assertThat(e.getMessage(), containsString("Failed to lock shard's directory"));
        }
    }

    // Corrupt the translog file(s)
    logger.info("--> corrupting translog");
    corruptRandomTranslogFiles("test");

    // Restart the single node
    logger.info("--> restarting node");
    internalCluster().fullRestart();
    client().admin().cluster().prepareHealth().setWaitForYellowStatus()
            .setTimeout(new TimeValue(1000, TimeUnit.MILLISECONDS)).setWaitForEvents(Priority.LANGUID).get();

    try {
        client().prepareSearch("test").setQuery(matchAllQuery()).get();
        fail("all shards should be failed due to a corrupted translog");
    } catch (SearchPhaseExecutionException e) {
        // Good, all shards should be failed because there is only a
        // single shard and its translog is corrupt
    }

    // Close the index so we can actually truncate the translog
    logger.info("--> closing 'test' index");
    client().admin().indices().prepareClose("test").get();

    for (Path translogDir : translogDirs) {
        final Path idxLocation = translogDir.getParent().resolve("index");
        assertBusy(() -> {
            logger.info("--> checking that lock has been released for {}", idxLocation);
            try (Directory dir = FSDirectory.open(idxLocation, NativeFSLockFactory.INSTANCE);
                    Lock writeLock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
                // Great, do nothing, we just wanted to obtain the lock
            } catch (LockObtainFailedException lofe) {
                logger.info("--> failed acquiring lock for {}", idxLocation);
                fail("still waiting for lock release at [" + idxLocation + "]");
            } catch (IOException ioe) {
                fail("Got an IOException: " + ioe);
            }
        });

        OptionSet options = parser.parse("-d", translogDir.toAbsolutePath().toString(), "-b");
        logger.info("--> running truncate translog command for [{}]", translogDir.toAbsolutePath());
        ttc.execute(t, options, new HashMap<String, String>());
        logger.info("--> output:\n{}", t.getOutput());
    }

    // Re-open index
    logger.info("--> opening 'test' index");
    client().admin().indices().prepareOpen("test").get();
    ensureYellow("test");

    // Run a search and make sure it succeeds
    SearchResponse resp = client().prepareSearch("test").setQuery(matchAllQuery()).get();
    ElasticsearchAssertions.assertNoFailures(resp);
}