Example usage for org.apache.lucene.store NativeFSLockFactory INSTANCE

List of usage examples for org.apache.lucene.store NativeFSLockFactory INSTANCE

Introduction

In this page you can find the example usage for org.apache.lucene.store NativeFSLockFactory INSTANCE.

Prototype

NativeFSLockFactory INSTANCE

To view the source code for org.apache.lucene.store NativeFSLockFactory INSTANCE, use the link below.

Click Source Link

Document

Singleton instance

Usage

From source file:com.b2international.index.lucene.Directories.java

License:Apache License

/**
 * Creates an FSDirectory instance, trying to pick the best implementation given the current environment. The directory returned uses the
 * {@link NativeFSLockFactory}.
 *
 * <p>
 * Currently this returns {@link MMapDirectory} for most Solaris, Mac OS X and Windows 64-bit JREs, {@link NIOFSDirectory} for other non-Windows
 * JREs, and {@link SimpleFSDirectory} for other JREs on Windows. It is highly recommended that you consult the implementation's documentation for
 * your platform before using this method.
 *
 * <p>
 * <b>NOTE</b>: this method may suddenly change which implementation is returned from release to release, in the event that higher performance
 * defaults become possible; if the precise implementation is important to your application, please instantiate it directly, instead. For optimal
 * performance you should consider using {@link MMapDirectory} on 64 bit JVMs.
 *
 * @param path the filesystem location of the index directory
 * @return the platform-appropriate {@link FSDirectory}, using OS-native file locking
 * @throws IOException if the directory cannot be opened
 */
public static FSDirectory openFile(final Path path) throws IOException {
    // Delegate to the two-argument overload, pinning the native (OS-level) lock factory.
    return openFile(path, NativeFSLockFactory.INSTANCE);
}

From source file:org.elasticsearch.index.store.FsDirectoryService.java

License:Apache License

/**
 * Builds the {@link LockFactory} configured for an index.
 *
 * <p>Reads {@code index.store.fs.lock}, falling back to the legacy
 * {@code index.store.fs.fs_lock} key, and finally defaulting to {@code "native"}.
 *
 * @param indexSettings the per-index settings to consult
 * @return {@link NativeFSLockFactory#INSTANCE} or {@link SimpleFSLockFactory#INSTANCE}
 * @throws IllegalArgumentException if the configured value is neither "native" nor "simple"
 */
public static LockFactory buildLockFactory(Settings indexSettings) {
    final String fsLock = indexSettings.get("index.store.fs.lock",
            indexSettings.get("index.store.fs.fs_lock", "native"));
    switch (fsLock) {
    case "native":
        return NativeFSLockFactory.INSTANCE;
    case "simple":
        return SimpleFSLockFactory.INSTANCE;
    default:
        throw new IllegalArgumentException("unrecognized fs_lock \"" + fsLock + "\": must be native or simple");
    }
}

From source file:org.elasticsearch.index.translog.TruncateTranslogCommand.java

License:Apache License

@Override
protected void execute(Terminal terminal, OptionSet options, Map<String, String> settings) throws Exception {
    // Replaces a shard's translog files with a single new empty generation so
    // the shard can recover after translog corruption. Destructive: all
    // existing translog contents are discarded. The shard must not be open
    // anywhere else (enforced via the IndexWriter write lock below).
    boolean batch = options.has(batchMode);

    Path translogPath = getTranslogPath(options);
    Path idxLocation = translogPath.getParent().resolve("index");

    if (Files.exists(translogPath) == false || Files.isDirectory(translogPath) == false) {
        throw new ElasticsearchException(
                "translog directory [" + translogPath + "], must exist and be a directory");
    }

    if (Files.exists(idxLocation) == false || Files.isDirectory(idxLocation) == false) {
        throw new ElasticsearchException(
                "unable to find a shard at [" + idxLocation + "], which must exist and be a directory");
    }

    // Hold the lock open for the duration of the tool running; failure to
    // obtain it means a node still owns this shard (handled below).
    try (Directory dir = FSDirectory.open(idxLocation, NativeFSLockFactory.INSTANCE);
            Lock writeLock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
        Set<Path> translogFiles;
        try {
            terminal.println("Checking existing translog files");
            translogFiles = filesInDirectory(translogPath);
        } catch (IOException e) {
            terminal.println("encountered IOException while listing directory, aborting...");
            throw new ElasticsearchException("failed to find existing translog files", e);
        }

        // Warn about ES being stopped and files being deleted
        warnAboutDeletingFiles(terminal, translogFiles, batch);

        List<IndexCommit> commits;
        try {
            terminal.println(
                    "Reading translog UUID information from Lucene commit from shard at [" + idxLocation + "]");
            commits = DirectoryReader.listCommits(dir);
        } catch (IndexNotFoundException infe) {
            throw new ElasticsearchException("unable to find a valid shard at [" + idxLocation + "]", infe);
        }

        // Retrieve the generation and UUID from the existing data: the
        // replacement translog must carry both for the shard to accept it.
        Map<String, String> commitData = commits.get(commits.size() - 1).getUserData();
        String translogGeneration = commitData.get(Translog.TRANSLOG_GENERATION_KEY);
        String translogUUID = commitData.get(Translog.TRANSLOG_UUID_KEY);
        if (translogGeneration == null || translogUUID == null) {
            throw new ElasticsearchException(
                    "shard must have a valid translog generation and UUID but got: [{}] and: [{}]",
                    translogGeneration, translogUUID);
        }
        terminal.println("Translog Generation: " + translogGeneration);
        terminal.println("Translog UUID      : " + translogUUID);

        // Stage the replacement files under "temp-" names so the final move
        // into place can be atomic.
        Path tempEmptyCheckpoint = translogPath.resolve("temp-" + Translog.CHECKPOINT_FILE_NAME);
        Path realEmptyCheckpoint = translogPath.resolve(Translog.CHECKPOINT_FILE_NAME);
        Path tempEmptyTranslog = translogPath.resolve(
                "temp-" + Translog.TRANSLOG_FILE_PREFIX + translogGeneration + Translog.TRANSLOG_FILE_SUFFIX);
        Path realEmptyTranslog = translogPath
                .resolve(Translog.TRANSLOG_FILE_PREFIX + translogGeneration + Translog.TRANSLOG_FILE_SUFFIX);

        // Write empty checkpoint and translog to empty files
        long gen = Long.parseLong(translogGeneration);
        int translogLen = writeEmptyTranslog(tempEmptyTranslog, translogUUID);
        writeEmptyCheckpoint(tempEmptyCheckpoint, translogLen, gen);

        terminal.println("Removing existing translog files");
        IOUtils.rm(translogFiles.toArray(new Path[] {}));

        terminal.println("Creating new empty checkpoint at [" + realEmptyCheckpoint + "]");
        Files.move(tempEmptyCheckpoint, realEmptyCheckpoint, StandardCopyOption.ATOMIC_MOVE);
        terminal.println("Creating new empty translog at [" + realEmptyTranslog + "]");
        Files.move(tempEmptyTranslog, realEmptyTranslog, StandardCopyOption.ATOMIC_MOVE);

        // Fsync the translog directory after rename so the moves are durable.
        IOUtils.fsync(translogPath, true);

    } catch (LockObtainFailedException lofe) {
        throw new ElasticsearchException(
                "Failed to lock shard's directory at [" + idxLocation + "], is Elasticsearch still running?");
    }

    terminal.println("Done.");
}

From source file:org.elasticsearch.index.translog.TruncateTranslogIT.java

License:Apache License

// End-to-end test of the truncate-translog tool: it must refuse to run while
// the shard is open (lock held), and after the translog is corrupted and then
// truncated, the index must open and be searchable again.
public void testCorruptTranslogTruncation() throws Exception {
    internalCluster().startNodesAsync(1, Settings.EMPTY).get();

    assertAcked(prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1)
            .put("index.number_of_replicas", 0).put("index.refresh_interval", "-1")
            .put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), true) // never flush - always recover from translog
    ));
    ensureYellow();

    // Index some documents (unflushed, so they live only in the translog)
    int numDocs = scaledRandomIntBetween(100, 1000);
    IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
    for (int i = 0; i < builders.length; i++) {
        builders[i] = client().prepareIndex("test", "type").setSource("foo", "bar");
    }
    disableTranslogFlush("test");
    indexRandom(false, false, false, Arrays.asList(builders));
    Set<Path> translogDirs = getTranslogDirs("test");

    TruncateTranslogCommand ttc = new TruncateTranslogCommand();
    MockTerminal t = new MockTerminal();
    OptionParser parser = ttc.getParser();

    for (Path translogDir : translogDirs) {
        OptionSet options = parser.parse("-d", translogDir.toAbsolutePath().toString(), "-b");
        // Try running it before the shard is closed, it should flip out because it can't acquire the lock
        try {
            logger.info("--> running truncate while index is open on [{}]", translogDir.toAbsolutePath());
            ttc.execute(t, options, new HashMap<String, String>());
            fail("expected the truncate command to fail not being able to acquire the lock");
        } catch (Exception e) {
            assertThat(e.getMessage(), containsString("Failed to lock shard's directory"));
        }
    }

    // Corrupt the translog file(s)
    logger.info("--> corrupting translog");
    corruptRandomTranslogFiles("test");

    // Restart the single node so recovery hits the corrupted translog
    logger.info("--> restarting node");
    internalCluster().fullRestart();
    client().admin().cluster().prepareHealth().setWaitForYellowStatus()
            .setTimeout(new TimeValue(1000, TimeUnit.MILLISECONDS)).setWaitForEvents(Priority.LANGUID).get();

    try {
        client().prepareSearch("test").setQuery(matchAllQuery()).get();
        fail("all shards should be failed due to a corrupted translog");
    } catch (SearchPhaseExecutionException e) {
        // Good, all shards should be failed because there is only a
        // single shard and its translog is corrupt
    }

    // Close the index so we can actually truncate the translog
    logger.info("--> closing 'test' index");
    client().admin().indices().prepareClose("test").get();

    for (Path translogDir : translogDirs) {
        final Path idxLocation = translogDir.getParent().resolve("index");
        // The node may release the shard lock asynchronously after close;
        // poll until we can take it ourselves.
        assertBusy(() -> {
            logger.info("--> checking that lock has been released for {}", idxLocation);
            try (Directory dir = FSDirectory.open(idxLocation, NativeFSLockFactory.INSTANCE);
                    Lock writeLock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
                // Great, do nothing, we just wanted to obtain the lock
            } catch (LockObtainFailedException lofe) {
                logger.info("--> failed acquiring lock for {}", idxLocation);
                fail("still waiting for lock release at [" + idxLocation + "]");
            } catch (IOException ioe) {
                fail("Got an IOException: " + ioe);
            }
        });

        OptionSet options = parser.parse("-d", translogDir.toAbsolutePath().toString(), "-b");
        logger.info("--> running truncate translog command for [{}]", translogDir.toAbsolutePath());
        ttc.execute(t, options, new HashMap<String, String>());
        logger.info("--> output:\n{}", t.getOutput());
    }

    // Re-open index
    logger.info("--> opening 'test' index");
    client().admin().indices().prepareOpen("test").get();
    ensureYellow("test");

    // Run a search and make sure it succeeds
    SearchResponse resp = client().prepareSearch("test").setQuery(matchAllQuery()).get();
    ElasticsearchAssertions.assertNoFailures(resp);
}

From source file:org.hibernate.search.store.impl.DefaultLockFactoryCreator.java

License:LGPL

/**
 * Resolves the {@link LockFactory} for an index from its configuration.
 *
 * <p>Known strategy names are "simple", "native", "single" and "none"; any
 * other value is treated as a fully-qualified {@link LockFactoryProvider}
 * class name to be instantiated reflectively.
 *
 * @param indexDir the index directory, or {@code null} for non-FS indexes
 * @param dirConfiguration the directory configuration properties
 * @return the configured lock factory
 */
@Override
public LockFactory createLockFactory(File indexDir, Properties dirConfiguration) {
    //For FS-based indexes default to "native", default to "single" otherwise.
    final String defaultStrategy = indexDir == null ? "single" : "native";
    final String strategy = dirConfiguration.getProperty(Environment.LOCKING_STRATEGY, defaultStrategy);
    // strategy is never null here: getProperty falls back to defaultStrategy.
    switch (strategy) {
    case "simple":
        if (indexDir == null) {
            throw LOG.indexBasePathRequiredForLockingStrategy("simple");
        }
        return SimpleFSLockFactory.INSTANCE;
    case "native":
        if (indexDir == null) {
            throw LOG.indexBasePathRequiredForLockingStrategy("native");
        }
        return NativeFSLockFactory.INSTANCE;
    case "single":
        return new SingleInstanceLockFactory();
    case "none":
        return NoLockFactory.INSTANCE;
    default:
        final LockFactoryProvider provider = ClassLoaderHelper.instanceFromName(LockFactoryProvider.class,
                strategy, Environment.LOCKING_STRATEGY, serviceManager);
        return provider.createLockFactory(indexDir, dirConfiguration);
    }
}

From source file:org.modeshape.jcr.index.lucene.LuceneConfig.java

License:Apache License

/**
 * Maps a fully-qualified Lucene lock-factory class name to its singleton.
 *
 * @param lockFactoryClass the configured class name; may be blank
 * @return the matching {@link LockFactory} singleton, or {@code null} when no
 *         class name was configured
 * @throws IllegalArgumentException for an unrecognized class name
 */
private LockFactory lockFactory(String lockFactoryClass) {
    // Blank configuration means the caller should use Lucene's default.
    if (StringUtil.isBlank(lockFactoryClass)) {
        return null;
    }
    if ("org.apache.lucene.store.NativeFSLockFactory".equals(lockFactoryClass)) {
        return NativeFSLockFactory.INSTANCE;
    }
    if ("org.apache.lucene.store.NoLockFactory".equals(lockFactoryClass)) {
        return NoLockFactory.INSTANCE;
    }
    if ("org.apache.lucene.store.SimpleFSLockFactory".equals(lockFactoryClass)) {
        return SimpleFSLockFactory.INSTANCE;
    }
    throw new IllegalArgumentException("Unknown lock factory implementation: " + lockFactoryClass);
}

From source file:org.opengrok.indexer.index.IndexDatabase.java

License:Open Source License

/**
 * Chooses the Lucene {@link LockFactory} matching the environment's locking mode.
 *
 * @param env the runtime environment supplying the locking mode
 * @return the corresponding lock-factory singleton
 */
LockFactory pickLockFactory(RuntimeEnvironment env) {
    // ON is an alias for SIMPLE; OFF and any unrecognized mode disable locking.
    switch (env.getLuceneLocking()) {
    case NATIVE:
        return NativeFSLockFactory.INSTANCE;
    case ON:
    case SIMPLE:
        return SimpleFSLockFactory.INSTANCE;
    default:
        return NoLockFactory.INSTANCE;
    }
}

From source file:org.opengrok.indexer.index.IndexVersion.java

License:Open Source License

/**
 * Check index version in given directory. It assumes that that all commits
 * in the Lucene segment file were done with the same version.
 *
 * @param dir directory with index/* ww w  .ja v  a2 s .co  m*/
 * @thows IOException if the directory cannot be opened
 */
private static void checkDir(File dir) throws Exception {
    LockFactory lockfact = NativeFSLockFactory.INSTANCE;
    int segVersion;

    try (Directory indexDirectory = FSDirectory.open(dir.toPath(), lockfact)) {
        SegmentInfos segInfos = null;

        try {
            segInfos = SegmentInfos.readLatestCommit(indexDirectory);
            segVersion = segInfos.getIndexCreatedVersionMajor();
        } catch (IndexNotFoundException e) {
            return;
        }
    }

    if (segVersion != Version.LATEST.major) {
        throw new IndexVersionException(String.format("Directory %s has index of version %d and Lucene has %d",
                dir.toString(), segVersion, Version.LATEST.major));
    }
}