Example usage for org.apache.hadoop.fs FileSystem rename

List of usage examples for org.apache.hadoop.fs FileSystem rename

Introduction

On this page you can find usage examples for org.apache.hadoop.fs FileSystem rename.

Prototype

public abstract boolean rename(Path src, Path dst) throws IOException;

Document

Renames Path src to Path dst.
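
FileSystem.rename returns false on most failures rather than throwing, so callers should check the result. A minimal sketch of typical usage (the paths below are illustrative, not taken from the examples that follow):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RenameExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        Path src = new Path("/tmp/data.tmp");
        Path dst = new Path("/tmp/data");
        // Resolve the FileSystem instance that owns the source path.
        FileSystem fs = src.getFileSystem(conf);
        // rename returns false on failure (e.g. a missing src or, on HDFS,
        // an existing dst) instead of throwing in those cases.
        if (!fs.rename(src, dst)) {
            throw new IOException("Failed to rename " + src + " to " + dst);
        }
    }
}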

Usage

From source file: org.apache.accumulo.tserver.DirectoryDecommissioner.java

License: Apache License

public static Path checkTabletDirectory(TabletServer tserver, VolumeManager vm, KeyExtent extent, Path dir)
        throws IOException {
    if (isActiveVolume(dir))
        return dir;

    if (!dir.getParent().getParent().getName().equals(ServerConstants.TABLE_DIR)) {
        throw new IllegalArgumentException("Unexpected table dir " + dir);
    }

    Path newDir = new Path(
            vm.choose(ServerConstants.getTablesDirs()) + "/" + dir.getParent().getName() + "/" + dir.getName());

    log.info("Updating directory for " + extent + " from " + dir + " to " + newDir);
    if (extent.isRootTablet()) {
        // the root tablet is a special case; its files need to be copied if its dir is changed

        // this code needs to be idempotent

        FileSystem fs1 = vm.getFileSystemByPath(dir);
        FileSystem fs2 = vm.getFileSystemByPath(newDir);

        if (!same(fs1, dir, fs2, newDir)) {
            if (fs2.exists(newDir)) {
                Path newDirBackup = getBackupName(fs2, newDir);
                // never delete anything because we're dealing with the root tablet
                // one reason this dir may exist is because this method failed previously
                log.info("renaming " + newDir + " to " + newDirBackup);
                if (!fs2.rename(newDir, newDirBackup)) {
                    throw new IOException("Failed to rename " + newDir + " to " + newDirBackup);
                }
            }

            // do a lot of logging since this is the root tablet
            log.info("copying " + dir + " to " + newDir);
            if (!FileUtil.copy(fs1, dir, fs2, newDir, false, CachedConfiguration.getInstance())) {
                throw new IOException("Failed to copy " + dir + " to " + newDir);
            }

            // only set the new location in zookeeper after a successful copy
            log.info("setting root tablet location to " + newDir);
            MetadataTableUtil.setRootTabletDir(newDir.toString());

            // rename the old dir to avoid confusion when someone looks at the filesystem;
            // it's ok if this rename fails, because the location in zookeeper is the authority
            Path dirBackup = getBackupName(fs1, dir);
            log.info("renaming " + dir + " to " + dirBackup);
            fs1.rename(dir, dirBackup);
        } else {
            log.info("setting root tablet location to " + newDir);
            MetadataTableUtil.setRootTabletDir(newDir.toString());
        }

        return dir;
    } else {
        MetadataTableUtil.updateTabletDir(extent, newDir.toString(), SystemCredentials.get(),
                tserver.getLock());
        return newDir;
    }
}

From source file: org.apache.accumulo.tserver.log.LocalWALRecovery.java

License: Apache License

public void recoverLocalWriteAheadLogs(FileSystem fs) throws IOException {
    for (String directory : options.directories) {
        File localDirectory = new File(directory);
        if (!localDirectory.isAbsolute()) {
            localDirectory = new File(System.getenv("ACCUMULO_HOME"), directory);
        }

        if (!localDirectory.isDirectory()) {
            log.warn("Local walog dir " + localDirectory.getAbsolutePath()
                    + " does not exist or is not a directory.");
            continue;
        }

        if (options.destination == null) {
            // Defer loading the default value until now because it might require talking to zookeeper.
            options.destination = ServerConstants.getWalDirs()[0];
        }
        log.info("Copying WALs to " + options.destination);

        for (File file : localDirectory.listFiles()) {
            String name = file.getName();
            try {
                UUID.fromString(name);
            } catch (IllegalArgumentException ex) {
                log.info("Ignoring non-log file " + file.getAbsolutePath());
                continue;
            }

            LogFileKey key = new LogFileKey();
            LogFileValue value = new LogFileValue();

            log.info("Openning local log " + file.getAbsolutePath());

            Path localWal = new Path(file.toURI());
            FileSystem localFs = FileSystem.getLocal(fs.getConf());

            Reader reader = new SequenceFile.Reader(localFs, localWal, localFs.getConf());
            // Reader reader = new SequenceFile.Reader(localFs.getConf(), SequenceFile.Reader.file(localWal));
            Path tmp = new Path(options.destination + "/" + name + ".copy");
            FSDataOutputStream writer = fs.create(tmp);
            while (reader.next(key, value)) {
                try {
                    key.write(writer);
                    value.write(writer);
                } catch (EOFException ex) {
                    break;
                }
            }
            writer.close();
            reader.close();
            fs.rename(tmp, new Path(tmp.getParent(), name));

            if (options.deleteLocal) {
                if (file.delete()) {
                    log.info("Copied and deleted: " + name);
                } else {
                    log.info("Failed to delete: " + name + " (but it is safe for you to delete it manually).");
                }
            } else {
                log.info("Safe to delete: " + name);
            }
        }
    }
}

From source file: org.apache.beam.runners.spark.translation.streaming.Checkpoint.java

License: Apache License

private static void write(FileSystem fileSystem, Path checkpointFilePath, byte[] value) throws IOException {
    Path tmpPath = checkpointFilePath.suffix(TEMP_FILE_SUFFIX);
    Path backupPath = checkpointFilePath.suffix(BACKUP_FILE_SUFFIX);
    if (fileSystem.exists(checkpointFilePath)) {
        if (fileSystem.exists(backupPath)) {
            fileSystem.delete(backupPath, false);
        }
        fileSystem.rename(checkpointFilePath, backupPath);
    }
    FSDataOutputStream os = fileSystem.create(tmpPath, true);
    os.write(value);
    os.close();
    fileSystem.rename(tmpPath, checkpointFilePath);
}
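
The backup shuffle above is needed because on HDFS, FileSystem.rename will not replace an existing destination; it simply returns false (a result this snippet does not check). Where the newer FileContext API is available, its rename accepts an explicit overwrite option and throws on failure instead of returning false. A minimal sketch under that assumption (the helper name is illustrative):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;

public class OverwriteRename {
    // Replace dst with src in one step; Options.Rename.OVERWRITE allows
    // dst to exist, so no separate backup/delete dance is needed.
    static void replace(Configuration conf, Path src, Path dst) throws IOException {
        FileContext fc = FileContext.getFileContext(conf);
        fc.rename(src, dst, Options.Rename.OVERWRITE);
    }
}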

From source file: org.apache.blur.hive.BlurHiveMRLoaderOutputCommitter.java

License: Apache License

private void finishBulkJob(JobContext context, final boolean apply) throws IOException {
    final Configuration configuration = context.getConfiguration();
    PrivilegedExceptionAction<Void> action = new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            String workingPathStr = configuration.get(BlurConstants.BLUR_BULK_UPDATE_WORKING_PATH);
            Path workingPath = new Path(workingPathStr);
            Path tmpDir = new Path(workingPath, "tmp");
            FileSystem fileSystem = tmpDir.getFileSystem(configuration);
            String loadId = configuration.get(BlurSerDe.BLUR_MR_LOAD_ID);
            Path loadPath = new Path(tmpDir, loadId);

            if (apply) {
                Path newDataPath = new Path(workingPath, "new");
                Path dst = new Path(newDataPath, loadId);
                if (!fileSystem.rename(loadPath, dst)) {
                    LOG.error("Could not move data from src [" + loadPath + "] to dst [" + dst + "]");
                    throw new IOException(
                            "Could not move data from src [" + loadPath + "] to dst [" + dst + "]");
                }

                TableDescriptor tableDescriptor = BlurOutputFormat.getTableDescriptor(configuration);
                String connectionStr = configuration.get(BlurSerDe.BLUR_CONTROLLER_CONNECTION_STR);
                BulkTableUpdateCommand bulkTableUpdateCommand = new BulkTableUpdateCommand();
                bulkTableUpdateCommand.setAutoLoad(true);
                bulkTableUpdateCommand.setTable(tableDescriptor.getName());
                bulkTableUpdateCommand.setWaitForDataBeVisible(true);

                Configuration config = new Configuration(false);
                config.addResource(HDFS_SITE_XML);
                config.addResource(YARN_SITE_XML);
                config.addResource(MAPRED_SITE_XML);

                bulkTableUpdateCommand.addExtraConfig(config);
                if (bulkTableUpdateCommand.run(BlurClient.getClient(connectionStr)) != 0) {
                    throw new IOException("Unknown error occured duing load.");
                }
            } else {
                fileSystem.delete(loadPath, true);
            }
            return null;
        }
    };
    UserGroupInformation userGroupInformation = BlurHiveOutputFormat.getUGI(configuration);
    try {
        userGroupInformation.doAs(action);
    } catch (InterruptedException e) {
        throw new IOException(e);
    }
}

From source file: org.apache.blur.manager.writer.IndexImporter.java

License: Apache License

@Override
public void run() {
    // Only allow one import to occur in the process at a time.
    _globalLock.lock();
    try {
        if (_lastCleanup + _cleanupDelay < System.currentTimeMillis()) {
            try {
                cleanupOldDirs();
            } catch (IOException e) {
                LOG.error("Unknown error while trying to clean old directories on [{1}/{2}].", e, _shard,
                        _table);
            }
            _lastCleanup = System.currentTimeMillis();
        }
        Path path = _shardContext.getHdfsDirPath();
        Configuration configuration = _shardContext.getTableContext().getConfiguration();
        try {
            FileSystem fileSystem = path.getFileSystem(configuration);
            SortedSet<FileStatus> listStatus;
            while (true) {
                try {
                    listStatus = sort(fileSystem.listStatus(path, new PathFilter() {
                        @Override
                        public boolean accept(Path path) {
                            if (path != null && path.getName().endsWith(COMMIT)) {
                                return true;
                            }
                            return false;
                        }
                    }));
                    break;
                } catch (FileNotFoundException e) {
                    LOG.warn("File not found error, retrying.");
                }
                try {
                    Thread.sleep(100);
                } catch (InterruptedException e) {
                    return;
                }
            }
            for (FileStatus fileStatus : listStatus) {
                Path file = fileStatus.getPath();
                if (fileStatus.isDir() && file.getName().endsWith(COMMIT)) {
                    // rename to inuse; if the index is good continue, else rename to badindex
                    Path inuse = new Path(file.getParent(), rename(file.getName(), INUSE));
                    touch(fileSystem, new Path(file, INPROGRESS));
                    if (fileSystem.rename(file, inuse)) {
                        if (_testError != null) {
                            _testError.run();
                        }
                        HdfsDirectory hdfsDirectory = new HdfsDirectory(configuration, inuse);
                        try {
                            if (DirectoryReader.indexExists(hdfsDirectory)) {
                                IndexAction indexAction = getIndexAction(hdfsDirectory, fileSystem);
                                _blurIndex.process(indexAction);
                                return;
                            } else {
                                Path badindex = new Path(file.getParent(), rename(file.getName(), BADINDEX));
                                if (fileSystem.rename(inuse, badindex)) {
                                    LOG.error(
                                            "Directory found at [{0}] is not a vaild index, renaming to [{1}].",
                                            inuse, badindex);
                                } else {
                                    LOG.fatal(
                                            "Directory found at [{0}] is not a vaild index, could not rename to [{1}].",
                                            inuse, badindex);
                                }
                            }
                        } finally {
                            hdfsDirectory.close();
                        }
                    } else {
                        LOG.fatal("Could not rename [{0}] to inuse dir.", file);
                    }
                }
            }
        } catch (IOException e) {
            LOG.error("Unknown error while trying to refresh imports on [{1}/{2}].", e, _shard, _table);
        }
    } finally {
        _globalLock.unlock();
    }
}
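
The rename(String, String) call above is a private helper not shown in this snippet; it swaps the suffix of a directory name so the same base name can move through the .commit, .inuse, and .badindex states. A plausible sketch of such a helper (purely illustrative, not the actual Blur implementation):

// Hypothetical suffix-swapping helper, e.g.
// rename("12345.commit", ".inuse") -> "12345.inuse", assuming the state
// constants (COMMIT, INUSE, BADINDEX, ...) carry their leading dot.
private static String rename(String name, String newSuffix) {
    int lastDot = name.lastIndexOf('.');
    String base = lastDot < 0 ? name : name.substring(0, lastDot);
    return base + newSuffix;
}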

From source file: org.apache.blur.manager.writer.IndexImporter.java

License: Apache License

private IndexAction getIndexAction(final HdfsDirectory directory, final FileSystem fileSystem) {
    return new IndexAction() {

        @Override
        public void performMutate(IndexSearcherCloseable searcher, IndexWriter writer) throws IOException {
            LOG.info("About to import [{0}] into [{1}/{2}]", directory, _shard, _table);
            boolean emitDeletes = searcher.getIndexReader().numDocs() != 0;
            applyDeletes(directory, writer, _shard, emitDeletes);
            LOG.info("Add index [{0}] [{1}/{2}]", directory, _shard, _table);
            writer.addIndexes(directory);
            LOG.info("Removing delete markers [{0}] on [{1}/{2}]", directory, _shard, _table);
            writer.deleteDocuments(new Term(BlurConstants.DELETE_MARKER, BlurConstants.DELETE_MARKER_VALUE));
            LOG.info("Finishing import [{0}], commiting on [{1}/{2}]", directory, _shard, _table);
        }

        @Override
        public void doPreCommit(IndexSearcherCloseable indexSearcher, IndexWriter writer) throws IOException {

        }

        @Override
        public void doPostCommit(IndexWriter writer) throws IOException {
            Path path = directory.getPath();
            fileSystem.delete(new Path(path, INPROGRESS), false);
            LOG.info("Import complete on [{0}/{1}]", _shard, _table);
            writer.maybeMerge();
        }

        @Override
        public void doPreRollback(IndexWriter writer) throws IOException {
            LOG.info("Starting rollback on [{0}/{1}]", _shard, _table);
        }

        @Override
        public void doPostRollback(IndexWriter writer) throws IOException {
            LOG.info("Finished rollback on [{0}/{1}]", _shard, _table);
            Path path = directory.getPath();
            String name = path.getName();
            fileSystem.rename(path, new Path(path.getParent(), rename(name, BADROWIDS)));
        }
    };
}

From source file: org.apache.blur.manager.writer.IndexImporter.java

License: Apache License

public void cleanupOldDirs() throws IOException {
    Path hdfsDirPath = _shardContext.getHdfsDirPath();
    TableContext tableContext = _shardContext.getTableContext();
    Configuration configuration = tableContext.getConfiguration();
    FileSystem fileSystem = hdfsDirPath.getFileSystem(configuration);
    FileStatus[] inuseSubDirs = fileSystem.listStatus(hdfsDirPath, new PathFilter() {
        @Override
        public boolean accept(Path path) {
            return path.getName().endsWith(INUSE);
        }
    });
    Set<Path> inuseDirs = toSet(inuseSubDirs);
    Map<Path, Path> inuseFileToDir = toMap(fileSystem, inuseDirs);
    FileStatus[] listStatus = fileSystem.listStatus(hdfsDirPath, new PathFilter() {
        @Override
        public boolean accept(Path path) {
            return path.getName().endsWith(HdfsDirectory.LNK);
        }
    });

    for (FileStatus status : listStatus) {
        Path realPath = HdfsDirectory.readRealPathDataFromSymlinkPath(fileSystem, status.getPath());
        Path inuseDir = inuseFileToDir.get(realPath);
        inuseDirs.remove(inuseDir);
        // if the inuse dir has an inprogress file then remove it, because there
        // are files that reference this dir, so it must have been committed.
        Path path = new Path(inuseDir, INPROGRESS);
        if (fileSystem.exists(path)) {
            fileSystem.delete(path, false);
            if (_thriftCache != null) {
                _thriftCache.clearTable(_table);
            }
        }
    }

    // Check if any inuse dirs have inprogress files.
    // If they do, rename inuse to commit to retry import.
    for (Path inuse : new HashSet<Path>(inuseDirs)) {
        Path path = new Path(inuse, INPROGRESS);
        if (fileSystem.exists(path)) {
            LOG.info("Path [{0}] is not imported but has inprogress file, retrying import.", path);
            inuseDirs.remove(inuse);
            Path commit = new Path(inuse.getParent(), rename(inuse.getName(), COMMIT));
            fileSystem.rename(inuse, commit);
        }
    }

    for (Path p : inuseDirs) {
        LOG.info("Deleting path [{0}] no longer in use.", p);
        fileSystem.delete(p, true);
    }
}

From source file: org.apache.blur.mapreduce.lib.BlurOutputCommitter.java

License: Apache License

private void commitOrAbortJob(JobContext jobContext, Path shardPath, boolean commit) throws IOException {
    LOG.info("CommitOrAbort [{0}] path [{1}]", commit, shardPath);
    FileSystem fileSystem = shardPath.getFileSystem(jobContext.getConfiguration());
    FileStatus[] listStatus = fileSystem.listStatus(shardPath, new PathFilter() {
        @Override
        public boolean accept(Path path) {
            LOG.info("Checking path [{0}]", path);
            if (path.getName().endsWith(".task_complete")) {
                return true;
            }
            return false;
        }
    });
    for (FileStatus fileStatus : listStatus) {
        Path path = fileStatus.getPath();
        LOG.info("Trying to commitOrAbort [{0}]", path);
        String name = path.getName();
        boolean taskComplete = name.endsWith(".task_complete");
        if (fileStatus.isDir()) {
            String taskAttemptName = getTaskAttemptName(name);
            if (taskAttemptName == null) {
                LOG.info("Dir name [{0}] not task attempt", name);
                continue;
            }
            TaskAttemptID taskAttemptID = TaskAttemptID.forName(taskAttemptName);
            if (taskAttemptID.getJobID().equals(jobContext.getJobID())) {
                if (commit) {
                    if (taskComplete) {
                        fileSystem.rename(path, new Path(shardPath, taskAttemptName + ".commit"));
                        LOG.info("Committing [{0}] in path [{1}]", taskAttemptID, path);
                    } else {
                        fileSystem.delete(path, true);
                        LOG.info("Deleting tmp dir [{0}] in path [{1}]", taskAttemptID, path);
                    }
                } else {
                    fileSystem.delete(path, true);
                    LOG.info("Deleting aborted job dir [{0}] in path [{1}]", taskAttemptID, path);
                }
            } else {
                LOG.warn("TaskAttempt JobID [{0}] does not match JobContext JobId [{1}]",
                        taskAttemptID.getJobID(), jobContext.getJobID());
            }
        }
    }
}

From source file: org.apache.blur.mapreduce.lib.BlurOutputCommitter.java

License: Apache License

@Override
public void commitTask(TaskAttemptContext context) throws IOException {
    LOG.info("Running commit task.");
    Conf conf = setup(context);
    FileSystem fileSystem = conf._newIndex.getFileSystem(conf._configuration);
    if (fileSystem.exists(conf._newIndex) && !fileSystem.isFile(conf._newIndex)) {
        Path dst = new Path(conf._indexPath, conf._taskAttemptID.toString() + ".task_complete");
        LOG.info("Committing [{0}] to [{1}]", conf._newIndex, dst);
        fileSystem.rename(conf._newIndex, dst);
    } else {
        throw new IOException("Path [" + conf._newIndex + "] does not exist, can not commit.");
    }
}

From source file: org.apache.blur.mapreduce.lib.update.Driver.java

License: Apache License

private List<Path> movePathList(FileSystem fileSystem, Path dstDir, List<Path> lst) throws IOException {
    List<Path> result = new ArrayList<Path>();
    for (Path src : lst) {
        Path dst = new Path(dstDir, src.getName());
        if (fileSystem.rename(src, dst)) {
            LOG.info("Moving [{0}] to [{1}]", src, dst);
            result.add(dst);
        } else {
            LOG.error("Could not move [{0}] to [{1}]", src, dst);
        }
    }
    return result;
}