Example usage for org.apache.lucene.util IOUtils fsync

List of usage examples for org.apache.lucene.util IOUtils fsync

Introduction

In this page you can find the example usage for org.apache.lucene.util IOUtils fsync.

Prototype

public static void fsync(Path fileToSync, boolean isDir) throws IOException 

Source Link

Document

Ensure that any writes to the given file are written to the storage device that contains it.

Usage

From source file:FlexLucene.Store.FlexFSDirectory.java

@Override
public void renameFile(String source, String dest) throws IOException {
    ensureOpen();
    // Resolve both names against this directory before the rename.
    final Path from = directory.resolve(source);
    final Path to = directory.resolve(dest);
    Files.move(from, to, StandardCopyOption.REPLACE_EXISTING);
    // TODO: should we move directory fsync to a separate 'syncMetadata' method?
    // for example, to improve listCommits(), IndexFileDeleter could also call that after deleting segments_Ns
    IOUtils.fsync(directory, true);
}

From source file:org.elasticsearch.common.blobstore.fs.FsBlobContainer.java

License:Apache License

@Override
public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {
    // Stream the blob to its file using the store's configured copy buffer.
    final Path blobPath = path.resolve(blobName);
    final byte[] buffer = new byte[blobStore.bufferSizeInBytes()];
    try (OutputStream out = Files.newOutputStream(blobPath)) {
        Streams.copy(inputStream, out, buffer);
    }
    // Flush the file contents first, then the directory entry, so the blob
    // survives a crash once this method returns.
    IOUtils.fsync(blobPath, false);
    IOUtils.fsync(path, true);
}

From source file:org.elasticsearch.common.blobstore.fs.FsBlobContainer.java

License:Apache License

@Override
public void writeBlob(String blobName, BytesReference data) throws IOException {
    // Write the blob's bytes to its file within this container.
    final Path blobPath = path.resolve(blobName);
    try (OutputStream out = Files.newOutputStream(blobPath)) {
        data.writeTo(out);
    }
    // Flush the file contents first, then the directory entry, so the blob
    // survives a crash once this method returns.
    IOUtils.fsync(blobPath, false);
    IOUtils.fsync(path, true);
}

From source file:org.elasticsearch.common.blobstore.fs.FsBlobContainer.java

License:Apache License

@Override
public void move(String source, String target) throws IOException {
    final Path from = path.resolve(source);
    final Path to = path.resolve(target);
    // Files.move() with an existing target is implementation specific: it may
    // replace the file or fail with an IOException, so the target must not exist.
    assert !Files.exists(to);
    Files.move(from, to, StandardCopyOption.ATOMIC_MOVE);
    // Sync the containing directory so the rename is durable.
    IOUtils.fsync(path, true);
}

From source file:org.elasticsearch.common.util.IndexFolderUpgrader.java

License:Apache License

/**
 * Moves the index folder found in <code>source</code> to <code>target</code>,
 * fsyncing the target directory once the move has succeeded.
 */
void upgrade(final Index index, final Path source, final Path target) throws IOException {
    boolean moved = false;
    try {
        Files.move(source, target, StandardCopyOption.ATOMIC_MOVE);
        moved = true;
    } catch (NoSuchFileException | FileNotFoundException exception) {
        // thrown when the source is non-existent because the folder was renamed
        // by another node (shared FS) after we checked if the target exists
        logger.error("multiple nodes trying to upgrade [{}] in parallel, retry upgrading with single node",
                exception, target);
        throw exception;
    } finally {
        // sync in the finally block so it also runs when the move itself succeeded
        // but a later step throws
        if (moved) {
            logger.info("{} moved from [{}] to [{}]", index, source, target);
            logger.trace("{} syncing directory [{}]", index, target);
            IOUtils.fsync(target, true);
        }
    }
}

From source file:org.elasticsearch.common.util.MultiDataPathUpgrader.java

License:Apache License

/**
 * Consolidates the contents of the per-path shard folder {@code folderName} from every
 * non-target data path into {@code targetDir}.
 *
 * Each file is first copied to a temp file on the target path and then atomically
 * renamed into place, so in the worst case a file exists twice but is never lost or
 * half written. Once all files are moved, every moved file and the target directory
 * are fsynced for durability.
 */
private void upgradeFiles(ShardId shard, ShardPath targetPath, final Path targetDir, String folderName,
        Path[] paths) throws IOException {
    List<Path> movedFiles = new ArrayList<>();
    for (Path path : paths) {
        // never migrate files out of the path chosen as the target
        if (path.equals(targetPath.getDataPath()) == false) {
            final Path sourceDir = path.resolve(folderName);
            if (Files.exists(sourceDir)) {
                logger.info("{} upgrading [{}] from [{}] to [{}]", shard, folderName, sourceDir, targetDir);
                try (DirectoryStream<Path> stream = Files.newDirectoryStream(sourceDir)) {
                    Files.createDirectories(targetDir);
                    for (Path file : stream) {
                        if (IndexWriter.WRITE_LOCK_NAME.equals(file.getFileName().toString())
                                || Files.isDirectory(file)) {
                            continue; // skip write.lock and sub-directories
                        }
                        logger.info("{} move file [{}] size: [{}]", shard, file.getFileName(),
                                Files.size(file));
                        final Path targetFile = targetDir.resolve(file.getFileName());
                        /* We are pessimistic and do a copy first to the other path and then and atomic move to rename it such that
                           in the worst case the file exists twice but is never lost or half written.*/
                        final Path targetTempFile = Files.createTempFile(targetDir, "upgrade_",
                                "_" + file.getFileName().toString());
                        Files.copy(file, targetTempFile, StandardCopyOption.COPY_ATTRIBUTES,
                                StandardCopyOption.REPLACE_EXISTING);
                        Files.move(targetTempFile, targetFile, StandardCopyOption.ATOMIC_MOVE); // we are on the same FS - this must work otherwise all bets are off
                        Files.delete(file);
                        movedFiles.add(targetFile);
                    }
                }
            }
        }
    }
    if (movedFiles.isEmpty() == false) {
        // fsync later it might be on disk already
        logger.info("{} fsync files", shard);
        for (Path moved : movedFiles) {
            logger.info("{} syncing [{}]", shard, moved.getFileName());
            IOUtils.fsync(moved, false);
        }
        logger.info("{} syncing directory [{}]", shard, targetDir);
        IOUtils.fsync(targetDir, true);
    }
}

From source file:org.elasticsearch.gateway.local.state.meta.MetaDataStateFormat.java

License:Apache License

/**
 * Writes the given state to the given directories. The state is written to a
 * state directory ({@value #STATE_DIR_NAME}) underneath each of the given file locations and is created if it
 * doesn't exist. The state is serialized to a temporary file in that directory and is then atomically moved to
 * its target filename of the pattern <tt>{prefix}{version}.st</tt>.
 *
 * @param state the state object to write
 * @param version the version of the state
 * @param locations the locations where the state should be written to.
 * @throws IOException if an IOException occurs
 */
public final void write(final T state, final long version, final File... locations) throws IOException {
    Preconditions.checkArgument(locations != null, "Locations must not be null");
    Preconditions.checkArgument(locations.length > 0, "One or more locations required");
    // the new state file gets the next id after the highest one found across all locations
    final long maxStateId = findMaxStateId(prefix, locations) + 1;
    assert maxStateId >= 0 : "maxStateId must be positive but was: [" + maxStateId + "]";
    final String fileName = prefix + maxStateId + STATE_FILE_EXTENSION;
    Path stateLocation = Paths.get(locations[0].getPath(), STATE_DIR_NAME);
    Files.createDirectories(stateLocation);
    final Path tmpStatePath = stateLocation.resolve(fileName + ".tmp");
    final Path finalStatePath = stateLocation.resolve(fileName);
    try {
        // serialize header, format, version, state body and footer into the temp file
        try (OutputStreamIndexOutput out = new OutputStreamIndexOutput(Files.newOutputStream(tmpStatePath),
                BUFFER_SIZE)) {
            CodecUtil.writeHeader(out, STATE_FILE_CODEC, STATE_FILE_VERSION);
            out.writeInt(format.index());
            out.writeLong(version);
            try (XContentBuilder builder = newXContentBuilder(format,
                    new org.elasticsearch.common.lucene.store.OutputStreamIndexOutput(out) {
                        @Override
                        public void close() throws IOException {
                            // this is important since some of the XContentBuilders write bytes on close.
                            // in order to write the footer we need to prevent closing the actual index input.
                        }
                    })) {

                builder.startObject();
                {
                    toXContent(builder, state);
                }
                builder.endObject();
            }
            CodecUtil.writeFooter(out);
        }
        IOUtils.fsync(tmpStatePath.toFile(), false); // fsync the state file
        Files.move(tmpStatePath, finalStatePath, StandardCopyOption.ATOMIC_MOVE);
        IOUtils.fsync(stateLocation.toFile(), true);
        // the remaining locations receive a copy of the fully written first state file
        for (int i = 1; i < locations.length; i++) {
            stateLocation = Paths.get(locations[i].getPath(), STATE_DIR_NAME);
            Files.createDirectories(stateLocation);
            Path tmpPath = stateLocation.resolve(fileName + ".tmp");
            Path finalPath = stateLocation.resolve(fileName);
            try {
                Files.copy(finalStatePath, tmpPath);
                Files.move(tmpPath, finalPath, StandardCopyOption.ATOMIC_MOVE); // we are on the same FileSystem / Partition here we can do an atomic move
                IOUtils.fsync(stateLocation.toFile(), true); // we just fsync the dir here..
            } finally {
                Files.deleteIfExists(tmpPath);
            }
        }
    } finally {
        Files.deleteIfExists(tmpStatePath);
    }
    cleanupOldFiles(prefix, fileName, locations);
}

From source file:org.elasticsearch.gateway.MetaDataStateFormat.java

License:Apache License

/**
 * Writes the given state to the given directories. The state is written to a
 * state directory ({@value #STATE_DIR_NAME}) underneath each of the given file locations and is created if it
 * doesn't exist. The state is serialized to a temporary file in that directory and is then atomically moved to
 * its target filename of the pattern <tt>{prefix}{version}.st</tt>.
 *
 * @param state the state object to write
 * @param version the version of the state
 * @param locations the locations where the state should be written to.
 * @throws IOException if an IOException occurs
 */
public final void write(final T state, final long version, final Path... locations) throws IOException {
    Preconditions.checkArgument(locations != null, "Locations must not be null");
    Preconditions.checkArgument(locations.length > 0, "One or more locations required");
    // the new state file gets the next id after the highest one found across all locations
    final long maxStateId = findMaxStateId(prefix, locations) + 1;
    assert maxStateId >= 0 : "maxStateId must be positive but was: [" + maxStateId + "]";
    final String fileName = prefix + maxStateId + STATE_FILE_EXTENSION;
    Path stateLocation = locations[0].resolve(STATE_DIR_NAME);
    Files.createDirectories(stateLocation);
    final Path tmpStatePath = stateLocation.resolve(fileName + ".tmp");
    final Path finalStatePath = stateLocation.resolve(fileName);
    try {
        // serialize header, format, version, state body and footer into the temp file
        final String resourceDesc = "MetaDataStateFormat.write(path=\"" + tmpStatePath + "\")";
        try (OutputStreamIndexOutput out = new OutputStreamIndexOutput(resourceDesc,
                Files.newOutputStream(tmpStatePath), BUFFER_SIZE)) {
            CodecUtil.writeHeader(out, STATE_FILE_CODEC, STATE_FILE_VERSION);
            out.writeInt(format.index());
            out.writeLong(version);
            try (XContentBuilder builder = newXContentBuilder(format, new IndexOutputOutputStream(out) {
                @Override
                public void close() throws IOException {
                    // this is important since some of the XContentBuilders write bytes on close.
                    // in order to write the footer we need to prevent closing the actual index input.
                }
            })) {

                builder.startObject();
                {
                    toXContent(builder, state);
                }
                builder.endObject();
            }
            CodecUtil.writeFooter(out);
        }
        IOUtils.fsync(tmpStatePath, false); // fsync the state file
        Files.move(tmpStatePath, finalStatePath, StandardCopyOption.ATOMIC_MOVE);
        IOUtils.fsync(stateLocation, true);
        // the remaining locations receive a copy of the fully written first state file
        for (int i = 1; i < locations.length; i++) {
            stateLocation = locations[i].resolve(STATE_DIR_NAME);
            Files.createDirectories(stateLocation);
            Path tmpPath = stateLocation.resolve(fileName + ".tmp");
            Path finalPath = stateLocation.resolve(fileName);
            try {
                Files.copy(finalStatePath, tmpPath);
                Files.move(tmpPath, finalPath, StandardCopyOption.ATOMIC_MOVE); // we are on the same FileSystem / Partition here we can do an atomic move
                IOUtils.fsync(stateLocation, true); // we just fsync the dir here..
            } finally {
                Files.deleteIfExists(tmpPath);
            }
        }
    } finally {
        Files.deleteIfExists(tmpStatePath);
    }
    cleanupOldFiles(prefix, fileName, locations);
}

From source file:org.elasticsearch.index.translog.TruncateTranslogCommand.java

License:Apache License

/**
 * Truncates the shard's translog by replacing all existing translog files with a new
 * empty translog/checkpoint pair, carrying over the translog UUID and generation that
 * were recorded in the shard's last Lucene commit.
 *
 * The shard's write lock is held for the entire operation so a running Elasticsearch
 * node cannot modify the shard concurrently.
 */
@Override
protected void execute(Terminal terminal, OptionSet options, Map<String, String> settings) throws Exception {
    boolean batch = options.has(batchMode);

    Path translogPath = getTranslogPath(options);
    Path idxLocation = translogPath.getParent().resolve("index");

    if (Files.exists(translogPath) == false || Files.isDirectory(translogPath) == false) {
        throw new ElasticsearchException(
                "translog directory [" + translogPath + "], must exist and be a directory");
    }

    if (Files.exists(idxLocation) == false || Files.isDirectory(idxLocation) == false) {
        throw new ElasticsearchException(
                "unable to find a shard at [" + idxLocation + "], which must exist and be a directory");
    }

    // Hold the lock open for the duration of the tool running
    try (Directory dir = FSDirectory.open(idxLocation, NativeFSLockFactory.INSTANCE);
            Lock writeLock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
        Set<Path> translogFiles;
        try {
            terminal.println("Checking existing translog files");
            translogFiles = filesInDirectory(translogPath);
        } catch (IOException e) {
            terminal.println("encountered IOException while listing directory, aborting...");
            throw new ElasticsearchException("failed to find existing translog files", e);
        }

        // Warn about ES being stopped and files being deleted
        warnAboutDeletingFiles(terminal, translogFiles, batch);

        List<IndexCommit> commits;
        try {
            terminal.println(
                    "Reading translog UUID information from Lucene commit from shard at [" + idxLocation + "]");
            commits = DirectoryReader.listCommits(dir);
        } catch (IndexNotFoundException infe) {
            throw new ElasticsearchException("unable to find a valid shard at [" + idxLocation + "]", infe);
        }

        // Retrieve the generation and UUID from the existing data (the last commit)
        Map<String, String> commitData = commits.get(commits.size() - 1).getUserData();
        String translogGeneration = commitData.get(Translog.TRANSLOG_GENERATION_KEY);
        String translogUUID = commitData.get(Translog.TRANSLOG_UUID_KEY);
        if (translogGeneration == null || translogUUID == null) {
            throw new ElasticsearchException(
                    "shard must have a valid translog generation and UUID but got: [{}] and: [{}]",
                    translogGeneration, translogUUID);
        }
        terminal.println("Translog Generation: " + translogGeneration);
        terminal.println("Translog UUID      : " + translogUUID);

        // New files are written to temp names first so a crash mid-way never leaves
        // a half-written checkpoint/translog under the real name
        Path tempEmptyCheckpoint = translogPath.resolve("temp-" + Translog.CHECKPOINT_FILE_NAME);
        Path realEmptyCheckpoint = translogPath.resolve(Translog.CHECKPOINT_FILE_NAME);
        Path tempEmptyTranslog = translogPath.resolve(
                "temp-" + Translog.TRANSLOG_FILE_PREFIX + translogGeneration + Translog.TRANSLOG_FILE_SUFFIX);
        Path realEmptyTranslog = translogPath
                .resolve(Translog.TRANSLOG_FILE_PREFIX + translogGeneration + Translog.TRANSLOG_FILE_SUFFIX);

        // Write empty checkpoint and translog to empty files
        long gen = Long.parseLong(translogGeneration);
        int translogLen = writeEmptyTranslog(tempEmptyTranslog, translogUUID);
        writeEmptyCheckpoint(tempEmptyCheckpoint, translogLen, gen);

        terminal.println("Removing existing translog files");
        IOUtils.rm(translogFiles.toArray(new Path[] {}));

        terminal.println("Creating new empty checkpoint at [" + realEmptyCheckpoint + "]");
        Files.move(tempEmptyCheckpoint, realEmptyCheckpoint, StandardCopyOption.ATOMIC_MOVE);
        terminal.println("Creating new empty translog at [" + realEmptyTranslog + "]");
        Files.move(tempEmptyTranslog, realEmptyTranslog, StandardCopyOption.ATOMIC_MOVE);

        // Fsync the translog directory after rename
        IOUtils.fsync(translogPath, true);

    } catch (LockObtainFailedException lofe) {
        throw new ElasticsearchException(
                "Failed to lock shard's directory at [" + idxLocation + "], is Elasticsearch still running?");
    }

    terminal.println("Done.");
}

From source file:org.elasticsearch.index.translog.TruncateTranslogCommand.java

License:Apache License

/** Write a checkpoint file to the given location with the given generation */
public static void writeEmptyCheckpoint(Path filename, int translogLength, long translogGeneration)
        throws IOException {
    final Checkpoint checkpoint = new Checkpoint(translogLength, 0, translogGeneration);
    Checkpoint.write(FileChannel::open, filename, checkpoint, StandardOpenOption.WRITE,
            StandardOpenOption.READ, StandardOpenOption.CREATE_NEW);
    // Flush the freshly written checkpoint to stable storage before returning.
    IOUtils.fsync(filename, false);
}