Example usage for java.nio.file StandardCopyOption ATOMIC_MOVE

List of usage examples for java.nio.file StandardCopyOption ATOMIC_MOVE

Introduction

On this page you can find example usages of java.nio.file.StandardCopyOption.ATOMIC_MOVE, collected from real source files.

Prototype

StandardCopyOption ATOMIC_MOVE

Document

Move the file as an atomic file system operation.
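
As a quick orientation before the project examples below, here is a minimal sketch of the usual pattern; the class name and file paths are placeholders, not taken from any of the sources on this page. It attempts the atomic move first and falls back to a non-atomic move when the file store cannot satisfy it, which is typically the case when source and target live on different file stores.

import java.nio.file.AtomicMoveNotSupportedException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;

public class AtomicMoveSketch {
    public static void main(String[] args) throws Exception {
        Path source = Paths.get("data.tmp"); // placeholder paths
        Path target = Paths.get("data.txt");
        try {
            // Either the file is moved completely or not at all;
            // observers never see a partially moved file.
            Files.move(source, target, StandardCopyOption.ATOMIC_MOVE);
        } catch (AtomicMoveNotSupportedException e) {
            // Usually thrown when source and target are on different file stores.
            Files.move(source, target, StandardCopyOption.REPLACE_EXISTING);
        }
    }
}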

Usage

From source file:Main.java

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;

public static void main(String[] args) throws Exception {
    Path sourceFile = Paths.get("C:/home/docs/users.txt");
    Path destinationFile = Paths.get("C:/home/music/users.txt");
    Files.move(sourceFile, destinationFile, StandardCopyOption.ATOMIC_MOVE);
}
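
Note that this call only succeeds if the target directory (C:/home/music here) already exists; Files.move does not create missing parent directories. Also, if the source and target are on different file stores, ATOMIC_MOVE makes the call fail with AtomicMoveNotSupportedException rather than falling back to the copy-and-delete that a plain Files.move would perform.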

From source file:Main.java

import java.io.IOException;
import java.nio.file.AtomicMoveNotSupportedException;
import java.nio.file.DirectoryNotEmptyException;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;

public static void main(String[] args) throws Exception {
    Path source = Paths.get("C:\\Java_Dev\\test1.txt");
    Path target = Paths.get("C:\\Java_Dev\\dir2\\test1.txt");

    try {
        Path p = Files.move(source, target, StandardCopyOption.ATOMIC_MOVE);
        System.out.println(source + " has been moved to " + p);
    } catch (NoSuchFileException e) {
        System.out.println("Source/target does not exist.");
    } catch (FileAlreadyExistsException e) {
        System.out.println(target + " already exists. Move failed.");
    } catch (DirectoryNotEmptyException e) {
        System.out.println(target + " is not empty. Move failed.");
    } catch (AtomicMoveNotSupportedException e) {
        System.out.println("Atomic move is not supported. Move failed.");
    } catch (IOException e) {
        e.printStackTrace();
    }
}

From source file:uk.co.modularaudio.componentdesigner.generators.ComponentDesignerSupportFileGenerator.java

public static void main(final String[] args) throws Exception {
    if (args.length != 1) {
        throw new IOException("Expecting only output directory: outputDir");
    }
    if (log.isInfoEnabled()) {
        log.info("Creating output in '" + args[0] + "'");
    }
    final File outputDir = new File(args[0]);
    if (!outputDir.exists()) {
        if (!outputDir.mkdirs()) {
            throw new IOException("Unable to create output directory");
        }
    }

    JTransformsConfigurator.setThreadsToOne();

    final LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
    final Configuration config = ctx.getConfiguration();
    final LoggerConfig loggerConfig = config.getLoggerConfig(LogManager.ROOT_LOGGER_NAME);
    loggerConfig.setLevel(Level.INFO);
    ctx.updateLoggers();

    final ComponentDesignerSupportFileGenerator sfg = new ComponentDesignerSupportFileGenerator();

    sfg.init();

    sfg.generateFiles();

    sfg.initialiseThingsNeedingComponentGraph();

    final String[] dbFilesToMove = sfg.getDatabaseFiles();
    sfg.destroy();

    // Finally move the (now closed) database files into the output directory
    for (final String dbFileToMove : dbFilesToMove) {
        final File source = new File(dbFileToMove);
        final String fileName = source.getName();
        final File target = new File(args[0] + File.separatorChar + fileName);
        Files.move(source.toPath(), target.toPath(), StandardCopyOption.ATOMIC_MOVE);
    }

}

From source file:hdfs.MiniHDFS.java

public static void main(String[] args) throws Exception {
    if (args.length != 1 && args.length != 3) {
        throw new IllegalArgumentException(
                "Expected: MiniHDFS <baseDirectory> [<kerberosPrincipal> <kerberosKeytab>], " + "got: "
                        + Arrays.toString(args));
    }
    boolean secure = args.length == 3;

    // configure Paths
    Path baseDir = Paths.get(args[0]);
    // hadoop-home/, so logs will not complain
    if (System.getenv("HADOOP_HOME") == null) {
        Path hadoopHome = baseDir.resolve("hadoop-home");
        Files.createDirectories(hadoopHome);
        System.setProperty("hadoop.home.dir", hadoopHome.toAbsolutePath().toString());
    }
    // hdfs-data/, where any data is going
    Path hdfsHome = baseDir.resolve("hdfs-data");

    // configure cluster
    Configuration cfg = new Configuration();
    cfg.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsHome.toAbsolutePath().toString());
    // lower default permission: TODO: needed?
    cfg.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY, "766");

    // optionally configure security
    if (secure) {
        String kerberosPrincipal = args[1];
        String keytabFile = args[2];

        cfg.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        cfg.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, "true");
        cfg.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal);
        cfg.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal);
        cfg.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal);
        cfg.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, keytabFile);
        cfg.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, keytabFile);
        cfg.set(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, "true");
        cfg.set(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, "true");
        cfg.set(DFSConfigKeys.IGNORE_SECURE_PORTS_FOR_TESTING_KEY, "true");
    }

    UserGroupInformation.setConfiguration(cfg);

    // TODO: remove hardcoded port!
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(cfg);
    if (secure) {
        builder.nameNodePort(9998);
    } else {
        builder.nameNodePort(9999);
    }
    MiniDFSCluster dfs = builder.build();

    // Configure contents of the filesystem
    org.apache.hadoop.fs.Path esUserPath = new org.apache.hadoop.fs.Path("/user/elasticsearch");
    try (FileSystem fs = dfs.getFileSystem()) {

        // Set the elasticsearch user directory up
        fs.mkdirs(esUserPath);
        if (UserGroupInformation.isSecurityEnabled()) {
            List<AclEntry> acls = new ArrayList<>();
            acls.add(new AclEntry.Builder().setType(AclEntryType.USER).setName("elasticsearch")
                    .setPermission(FsAction.ALL).build());
            fs.modifyAclEntries(esUserPath, acls);
        }

        // Install a pre-existing repository into HDFS
        String directoryName = "readonly-repository";
        String archiveName = directoryName + ".tar.gz";
        URL readOnlyRepositoryArchiveURL = MiniHDFS.class.getClassLoader().getResource(archiveName);
        if (readOnlyRepositoryArchiveURL != null) {
            Path tempDirectory = Files.createTempDirectory(MiniHDFS.class.getName());
            File readOnlyRepositoryArchive = tempDirectory.resolve(archiveName).toFile();
            FileUtils.copyURLToFile(readOnlyRepositoryArchiveURL, readOnlyRepositoryArchive);
            FileUtil.unTar(readOnlyRepositoryArchive, tempDirectory.toFile());

            fs.copyFromLocalFile(true, true,
                    new org.apache.hadoop.fs.Path(
                            tempDirectory.resolve(directoryName).toAbsolutePath().toUri()),
                    esUserPath.suffix("/existing/" + directoryName));

            FileUtils.deleteDirectory(tempDirectory.toFile());
        }
    }

    // write our PID file
    Path tmp = Files.createTempFile(baseDir, null, null);
    String pid = ManagementFactory.getRuntimeMXBean().getName().split("@")[0];
    Files.write(tmp, pid.getBytes(StandardCharsets.UTF_8));
    Files.move(tmp, baseDir.resolve(PID_FILE_NAME), StandardCopyOption.ATOMIC_MOVE);

    // write our port file
    tmp = Files.createTempFile(baseDir, null, null);
    Files.write(tmp, Integer.toString(dfs.getNameNodePort()).getBytes(StandardCharsets.UTF_8));
    Files.move(tmp, baseDir.resolve(PORT_FILE_NAME), StandardCopyOption.ATOMIC_MOVE);
}
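
Both the PID file and the port file at the end are published with the same idiom: write the content to a temporary file in the same directory, then atomically move it over the final name. An external process polling for PID_FILE_NAME or PORT_FILE_NAME therefore never reads a partially written file.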

From source file:org.ulyssis.ipp.publisher.FileOutput.java

@Override
public void outputScore(Score score) {
    Path tmpFile = null;
    try {
        if (tmpDir.isPresent()) {
            tmpFile = Files.createTempFile(tmpDir.get(), "score-", ".json",
                    PosixFilePermissions.asFileAttribute(defaultPerms));
        } else {
            tmpFile = Files.createTempFile("score-", ".json",
                    PosixFilePermissions.asFileAttribute(defaultPerms));
        }
        BufferedWriter writer = Files.newBufferedWriter(tmpFile, StandardCharsets.UTF_8);
        Serialization.getJsonMapper().writer(new DefaultPrettyPrinter()).writeValue(writer, score);
        Files.move(tmpFile, filePath, StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
    } catch (IOException e) {
        LOG.error("Error writing score to file!", e);
    } finally {
        try {
            if (tmpFile != null)
                Files.deleteIfExists(tmpFile);
        } catch (IOException ignored) {
        }
    }
}
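
One detail worth knowing about this combination of options: the Files.move javadoc states that when ATOMIC_MOVE is present all other options are ignored, and whether an existing target is replaced during an atomic move is implementation specific (on typical POSIX file systems the underlying rename does replace it). The REPLACE_EXISTING flag here documents the intent but does not change the behavior.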

From source file:com.liferay.sync.engine.documentlibrary.handler.DownloadFileHandler.java

@Override
protected void doHandleResponse(HttpResponse httpResponse) throws Exception {

    InputStream inputStream = null;

    try {
        SyncFile syncFile = (SyncFile) getParameterValue("syncFile");

        Path filePath = Paths.get(syncFile.getFilePathName());

        HttpEntity httpEntity = httpResponse.getEntity();

        inputStream = httpEntity.getContent();

        Path tempFilePath = Files.createTempFile(String.valueOf(filePath.getFileName()), ".tmp");

        if (Files.exists(filePath)) {
            Files.copy(filePath, tempFilePath);
        }

        if ((Boolean) getParameterValue("patch")) {
            IODeltaUtil.patch(tempFilePath, inputStream);
        } else {
            Files.copy(inputStream, tempFilePath, StandardCopyOption.REPLACE_EXISTING);
        }

        syncFile.setFileKey(FileUtil.getFileKey(tempFilePath));
        syncFile.setState(SyncFile.STATE_SYNCED);
        syncFile.setUiEvent(SyncFile.UI_EVENT_DOWNLOADED);

        SyncFileService.update(syncFile);

        Files.move(tempFilePath, filePath, StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
    } finally {
        StreamUtil.cleanUp(inputStream);
    }
}
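
The handler stages the download in a temporary file, optionally applying a delta patch to a copy of the existing file, and only moves the result over the live file once it is complete. An interrupted download therefore never corrupts the previously synced version.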

From source file:org.metaservice.frontend.rest.cache.FileSystemCacheResourceService.java

private InputStream cacheResource(InputStream inputStream, String resource, String mimetype)
        throws IOException {
    byte[] content = org.apache.commons.io.IOUtils.toByteArray(inputStream);

    Path source = Files.createTempFile(DigestUtils.md5Hex(resource), getExtension(mimetype));
    try (OutputStream outputStream = new FileOutputStream(source.toFile())) {
        IOUtils.write(content, outputStream);
    }

    Path target = getCacheFile(resource, mimetype).toPath();
    if (!target.getParent().toFile().isDirectory()) {
        Files.createDirectories(target.getParent());
    }
    Files.move(source, target, StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
    return new ByteArrayInputStream(content);
}

From source file:org.craftercms.studio.impl.v1.asset.processing.AbstractAssetProcessor.java

private Path moveToTmpFile(String repoPath, Path filePath) throws IOException {
    Path tmpFilePath = createTmpFile(repoPath);

    return Files.move(filePath, tmpFilePath, StandardCopyOption.ATOMIC_MOVE,
            StandardCopyOption.REPLACE_EXISTING);
}

From source file:org.roda.core.storage.fs.FSUtils.java

/**
 * Method that safely updates a file, given an input stream, by copying the
 * content of the stream to a temporary file which then gets moved into the
 * final location (doing an atomic move). <br>
 * <br>
 * Because atomicity depends on the file system implementation, this method
 * helps ensure thread safety in theory, but cannot guarantee it on every
 * file system. <br>
 * <br>
 * NOTE: the stream is closed in the end.
 *
 * @param stream
 *          stream with the content to be updated
 * @param toPath
 *          location of the file being updated
 *
 * @throws IOException
 *           if an error occurs while copying/moving
 *
 */
public static void safeUpdate(InputStream stream, Path toPath) throws IOException {
    try {
        Path tempToPath = toPath.getParent()
                .resolve(toPath.getFileName().toString() + ".temp" + System.nanoTime());
        Files.copy(stream, tempToPath);
        Files.move(tempToPath, toPath, StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.ATOMIC_MOVE);
    } finally {
        IOUtils.closeQuietly(stream);
    }
}
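
A hypothetical caller of this utility might look like the following sketch; the path and payload are invented for illustration and are not part of the original source.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Paths;

import org.roda.core.storage.fs.FSUtils;

public class SafeUpdateExample {
    public static void main(String[] args) throws IOException {
        InputStream newContent = new ByteArrayInputStream(
                "<metadata/>".getBytes(StandardCharsets.UTF_8));
        // Copies to a temp file next to the target, then atomically renames it;
        // safeUpdate closes the stream when it is done.
        FSUtils.safeUpdate(newContent, Paths.get("/data/storage/metadata.xml"));
    }
}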

From source file:org.artifactory.storage.db.binstore.service.FileBinaryProviderImpl.java

@Override
@Nonnull
public BinaryInfo addStream(InputStream in) throws IOException {
    check();
    File preFileStoreFile = null;
    Sha1Md5ChecksumInputStream checksumStream = null;
    try {
        // first save to a temp file and calculate checksums while saving
        if (in instanceof Sha1Md5ChecksumInputStream) {
            checksumStream = (Sha1Md5ChecksumInputStream) in;
        } else {
            checksumStream = new Sha1Md5ChecksumInputStream(in);
        }
        preFileStoreFile = writeToTempFile(checksumStream);
        BinaryInfo bd = new BinaryInfoImpl(checksumStream);
        log.trace("Inserting {} in file binary provider", bd);

        String sha1 = bd.getSha1();
        long fileLength = preFileStoreFile.length();
        if (fileLength != checksumStream.getTotalBytesRead()) {
            throw new IOException("File length is " + fileLength + " while total bytes read on stream is "
                    + checksumStream.getTotalBytesRead());
        }

        Path target = getFile(sha1).toPath();
        if (!java.nio.file.Files.exists(target)) {
            // move the file from the pre-filestore to the filestore
            java.nio.file.Files.createDirectories(target.getParent());
            try {
                log.trace("Moving {} to {}", preFileStoreFile.getAbsolutePath(), target);
                java.nio.file.Files.move(preFileStoreFile.toPath(), target, StandardCopyOption.ATOMIC_MOVE);
                log.trace("Moved  {} to {}", preFileStoreFile.getAbsolutePath(), target);
            } catch (FileAlreadyExistsException ignore) {
                // May happen in heavy concurrency cases
                log.trace("Failed moving {} to {}. File already exist", preFileStoreFile.getAbsolutePath(),
                        target);
            }
            preFileStoreFile = null;
        } else {
            log.trace("File {} already exist in the file store. Deleting temp file: {}", target,
                    preFileStoreFile.getAbsolutePath());
        }
        return bd;
    } finally {
        IOUtils.closeQuietly(checksumStream);
        if (preFileStoreFile != null && preFileStoreFile.exists()) {
            if (!preFileStoreFile.delete()) {
                log.error("Could not delete temp file {}", preFileStoreFile.getAbsolutePath());
            }
        }
    }
}
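
Here the atomic move doubles as a concurrency guard: if two threads add identical content at once, one rename wins and the loser's FileAlreadyExistsException is deliberately swallowed. This is safe because the target file is addressed by its SHA-1, so the file that already exists necessarily has the same content.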