Example usage for org.apache.hadoop.fs FileSystem rename

List of usage examples for org.apache.hadoop.fs FileSystem rename

Introduction

On this page you can find example usage of org.apache.hadoop.fs FileSystem rename.

Prototype

public abstract boolean rename(Path src, Path dst) throws IOException;

Document

Renames Path src to Path dst, returning true if the rename is successful.
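
The usage examples that follow are taken from real projects. As a quick orientation, here is a minimal, self-contained sketch of the basic calling pattern; it is an illustration only, with hypothetical paths, not code from any of the projects below. The explicit check of the return value matters because rename typically reports failure by returning false rather than by throwing, and on HDFS it does not create missing parent directories for the destination.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RenameExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // Hypothetical source and destination paths.
        Path src = new Path("/tmp/staging/part-00000");
        Path dst = new Path("/data/final/part-00000");

        // rename does not create missing parent directories, so create them first.
        fs.mkdirs(dst.getParent());

        // Failure is usually signalled by a false return value rather than an exception,
        // so always check the result.
        if (!fs.rename(src, dst)) {
            throw new IOException("Could not rename " + src + " to " + dst);
        }
    }
}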

Usage

From source file:com.rim.logdriver.LockedFs.java

License:Apache License

public void move(Configuration conf, String[] from, String to) throws IOException {
    FileSystem fs = FileSystem.get(conf);

    List<FileStatus> fromList = new ArrayList<FileStatus>();
    for (String s : from) {
        FileStatus[] statuses = fs.globStatus(new Path(s));
        if (statuses == null) {
            continue;
        }
        for (FileStatus status : statuses) {
            fromList.add(status);
        }
    }

    Path toPath = new Path(to);
    Boolean toExists = fs.exists(toPath);
    FileStatus toFileStatus = null;
    if (toExists) {
        toFileStatus = fs.getFileStatus(toPath);
    }

    // If there is no from, that's a problem.
    if (fromList.isEmpty()) {
        throw new IOException("No input files found");
    }

    // If the to exists, and is a file, that's a problem too.
    if (toExists && !toFileStatus.isDir()) {
        throw new IOException("Destination file exists:" + to);
    }

    // If the destination exists, and is a directory, then ensure that none of
    // the from list names will clash with existing contents of the directory.
    if (toExists && toFileStatus.isDir()) {
        for (FileStatus fromStatus : fromList) {
            String name = fromStatus.getPath().getName();
            if (fs.exists(new Path(toPath, name))) {
                throw new IOException("Destination file exists:" + to + "/" + name);
            }
        }
    }

    // If the destination doesn't exist, but it ends with a slash, then create
    // it as a directory.
    if (!toExists && to.endsWith("/")) {
        fs.mkdirs(toPath);
        toFileStatus = fs.getFileStatus(toPath);
        toExists = true;
    }

    // If the destination doesn't exist, and there is more than one 'from', then
    // create a directory.
    if (!toExists && fromList.size() > 1) {
        fs.mkdirs(toPath);
        toFileStatus = fs.getFileStatus(toPath);
    }

    // If there was only one from, then just rename it to to
    if (fromList.size() == 1) {
        fs.mkdirs(toPath.getParent());
        fs.rename(fromList.get(0).getPath(), toPath);
    }

    // If there was more than one from, then for each file in the from list,
    // move it to the to directory.
    if (fromList.size() > 1) {
        for (FileStatus fromStatus : fromList) {
            String name = fromStatus.getPath().getName();
            fs.rename(fromStatus.getPath(), new Path(toPath, name));
        }
    }
}

From source file:com.rockstor.compact.Compactor.java

License:Apache License

public void compactData(String taskIdName) throws IOException, NoSuchAlgorithmException {
    Path dstDir = new Path(pathUtil.getSpecTaskDir(taskIdName));
    FileSystem dfs = RockAccessor.getFileSystem();
    if (!dfs.exists(dstDir)) {
        LOG.error("[COMPACTOR]: Directory " + dstDir + " is not exist");
        return;
    }

    String metaFileName = pathUtil.getTaskMetaPath(taskIdName);
    if (!dfs.exists(new Path(metaFileName))) {
        LOG.error("[COMPACTOR]: meta file " + metaFileName + " is not existed");
        return;
    }

    // compact data
    // 1. create rock data file
    String rockIdStr = null;

    // 2. create rock index file

    // 3. load meta file
    TaskMetaReader rocksMeta = new TaskMetaReader();
    rocksMeta.open(metaFileName);
    Map<String, byte[]> rocks = rocksMeta.getRocks();
    rocksMeta.close();
    // 4. compact rock files one by one
    /*
     * for (rock : rocks) {
     *     load the rock's garbage (gb) entries from the DB and from the delete file;
     *     sort the gb entries by offset;
     *     copy chunks to the new data file and write a new index;
     *     if a chunk's offset is in the gb set, drop it and continue;
     * }
     */
    Map<Long, Long> gbIndexes = null;
    RockIndexReader rockIndexReader = null;
    RockReader rockReader = null;
    Chunk chunk = null;

    // create rock writer
    RockCompactWriter rockWriter = new RockCompactWriter();
    rockWriter.create(taskIdName);

    rockIdStr = rockWriter.getRockID();

    String dataFileName = pathUtil.getTaskDataPath(taskIdName, rockIdStr);

    String gbIndexPath = null;

    long pos = 0;
    Long size = null;

    for (Entry<String, byte[]> entry : rocks.entrySet()) {
        LOG.info("compacting rock :" + entry.getKey());
    }

    for (Entry<String, byte[]> entry : rocks.entrySet()) {
        gbIndexes = RockDB.getGarbages(entry.getValue());
        rockIndexReader = new RockIndexReader();
        LOG.debug("get " + gbIndexes.size() + " invalid chunks of rock " + entry.getKey() + " from chunk DB");
        gbIndexPath = pathUtil.getGbMetaPath(entry.getKey());
        if (dfs.exists(new Path(gbIndexPath))) {
            rockIndexReader.open(gbIndexPath);

            // merge gb data index
            while (rockIndexReader.hasNext()) {
                chunk = rockIndexReader.next();
                LOG.debug("ignore list append chunk: " + chunk);
                gbIndexes.put(chunk.getOffset(), chunk.getSize() + Chunk.HEADER_LEN);
            }

            rockIndexReader.close();
        }

        // copy chunks and write new index
        rockReader = RockReaderPool.getInstance().get(entry.getKey());
        FSDataInputStream input = rockReader.getFSDataInputStream();
        int pedding_bytes = 0;
        while (rockReader.hasNext()) {
            pos = rockReader.getPos();
            pedding_bytes = (int) (pos & 7);
            if (pedding_bytes != 0) {
                pos = pos + 8 - pedding_bytes;
            }

            // LOG.info("pos now: "+pos);

            size = gbIndexes.get(pos);

            // ignore deleted chunk
            if (size != null) {
                LOG.debug("ignore chunk at " + pos + ", size: " + size);

                rockReader.seekg(pos + size);
                continue;
            }

            chunk = rockReader.nextChunk();
            if (chunk == null) {
                LOG.error("[Compactor] read source chunk from " + entry.getKey() + ":" + pos + " Failed");
                throw new IOException(
                        "[Compactor] read source chunk from " + entry.getKey() + ":" + pos + " Failed");
            }

            rockWriter.addChunk(chunk, input);
        }
    }

    rockWriter.close();

    // 5. rename ${compactorDir}/rockId.dat ==> $(rock_data_dir)/rockId
    dfs.rename(new Path(dataFileName), new Path(Rock.HADOOP_DATA_HOME + "/" + rockIdStr));

    // 6. remove invalid chunks
    removeInvalidChunks(taskIdName);

    // 7. sync left chunks
    syncLeftChunks(taskIdName);

    // 8. remove task dir
    dfs.delete(dstDir, true);
}

From source file:com.savy3.nonequijoin.MapOutputSampler.java

License:Apache License

/**
 * Driver for InputSampler MapReduce Job
 */
public static void runMap(Job job, Path sampleInputPath)
        throws IOException, IllegalStateException, ClassNotFoundException, InterruptedException {
    LOG.info("Running a MapReduce Job on Sample Input File" + sampleInputPath.toString());

    Configuration conf = new Configuration();
    conf.setBoolean("mapreduce.job.ubertask.enable", true);
    conf.set("numSamples", "" + (job.getNumReduceTasks() - 1));
    Job sampleJob = new Job(conf);
    sampleJob.setMapperClass(job.getMapperClass());
    sampleJob.setReducerClass(SampleKeyReducer.class);
    sampleJob.setJarByClass(job.getMapperClass());
    sampleJob.setMapOutputKeyClass(job.getMapOutputKeyClass());
    sampleJob.setMapOutputValueClass(job.getMapOutputValueClass());
    sampleJob.setOutputKeyClass(job.getMapOutputKeyClass());
    sampleJob.setOutputValueClass(NullWritable.class);
    sampleJob.setInputFormatClass(SequenceFileInputFormat.class);
    sampleJob.setOutputFormatClass(SequenceFileOutputFormat.class);

    SequenceFileInputFormat.addInputPath(sampleJob, sampleInputPath);
    FileSystem fs = FileSystem.get(conf);

    Path out = new Path(sampleInputPath.getParent(), "mapOut");
    fs.delete(out, true);

    SequenceFileOutputFormat.setOutputPath(sampleJob, out);

    sampleJob.waitForCompletion(true);

    LOG.info("Sample MapReduce Job Output File" + out.toString());

    Path partFile = new Path(out, "part-r-00000");
    Path tmpFile = new Path("/_tmp");
    fs.delete(tmpFile, true);
    fs.rename(partFile, tmpFile);
    fs.delete(sampleInputPath.getParent(), true);
    fs.rename(new Path("/_tmp"), sampleInputPath.getParent());

    LOG.info("Sample partitioning file cpied to location " + sampleInputPath.getParent().toString());
}

From source file:com.splicemachine.derby.impl.io.HdfsDirFile.java

License:Apache License

@Override
public boolean renameTo(StorageFile newName) {
    try {
        FileSystem fs = getFileSystem();
        boolean renameResult = fs.rename(new Path(path), new Path(newName.getPath()));
        if (renameResult) {
            this.path = newName.getPath();
        }
        return renameResult;
    } catch (IOException e) {
        LOG.error(String.format("An exception occurred while renaming the path '%s' to '%s'.", path, newName.getPath()), e);
        return false;
    }
}

From source file:com.streamsets.pipeline.stage.destination.hdfs.metadataexecutor.HdfsMetadataExecutor.java

License:Apache License

@Override
public void write(Batch batch) throws StageException {
    final ELVars variables = getContext().createELVars();
    final FileSystem fs = hdfsConnection.getFs();

    Iterator<Record> it = batch.getRecords();
    while (it.hasNext()) {
        Record record = it.next();
        RecordEL.setRecordInContext(variables, record);

        // Execute all configured HDFS metadata operations as target user
        try {
            hdfsConnection.getUGI().doAs((PrivilegedExceptionAction<Void>) () -> {
                Path workingFile = new Path(evaluate(variables, "filePath", actions.filePath));
                LOG.info("Working on file: " + workingFile);

                // Create empty file if configured
                if (actions.taskType == TaskType.CREATE_EMPTY_FILE) {
                    ensureDirectoryExists(fs, workingFile.getParent());
                    if (!fs.createNewFile(workingFile)) {
                        throw new IOException("Can't create file (probably already exists): " + workingFile);
                    }
                }

                if (actions.taskType == TaskType.CHANGE_EXISTING_FILE
                        && (actions.shouldMoveFile || actions.shouldRename)) {
                    Path newPath = workingFile.getParent();
                    String newName = workingFile.getName();
                    if (actions.shouldMoveFile) {
                        newPath = new Path(evaluate(variables, "newLocation", actions.newLocation));
                    }
                    if (actions.shouldRename) {
                        newName = evaluate(variables, "newName", actions.newName);
                    }

                    Path destinationFile = new Path(newPath, newName);
                    ensureDirectoryExists(fs, newPath);

                    LOG.debug("Renaming to: {}", destinationFile);
                    if (!fs.rename(workingFile, destinationFile)) {
                        throw new IOException(
                                Utils.format("Can't rename '{}' to '{}''", workingFile, destinationFile));
                    }
                    workingFile = destinationFile;
                }

                if (actions.taskType.isOneOf(TaskType.CHANGE_EXISTING_FILE, TaskType.CREATE_EMPTY_FILE)) {
                    if (actions.shouldChangeOwnership) {
                        String newOwner = evaluate(variables, "newOwner", actions.newOwner);
                        String newGroup = evaluate(variables, "newGroup", actions.newGroup);
                        LOG.debug("Applying ownership: user={} and group={}", newOwner, newGroup);
                        fs.setOwner(workingFile, newOwner, newGroup);
                    }

                    if (actions.shouldSetPermissions) {
                        String stringPerms = evaluate(variables, "newPermissions", actions.newPermissions);
                        FsPermission fsPerms = HdfsUtils.parseFsPermission(stringPerms);
                        LOG.debug("Applying permissions: {} loaded from value '{}'", fsPerms, stringPerms);
                        fs.setPermission(workingFile, fsPerms);
                    }

                    if (actions.shouldSetAcls) {
                        String stringAcls = evaluate(variables, "newAcls", actions.newAcls);
                        List<AclEntry> acls = AclEntry.parseAclSpec(stringAcls, true);
                        LOG.debug("Applying ACLs: {}", stringAcls);
                        fs.setAcl(workingFile, acls);
                    }
                }

                if (actions.taskType == TaskType.REMOVE_FILE) {
                    fs.delete(workingFile, true);
                }

                // Issue event with the final file name (e.g. the renamed one if applicable)
                actions.taskType.getEventCreator().create(getContext()).with("filepath", workingFile.toString())
                        .with("filename", workingFile.getName()).createAndSend();

                LOG.debug("Done changing metadata on file: {}", workingFile);
                return null;
            });
        } catch (Throwable e) {
            // Hadoop libraries will wrap any non InterruptedException, RuntimeException, Error or IOException to UndeclaredThrowableException,
            // so we manually unwrap it here and properly propagate it to user.
            if (e instanceof UndeclaredThrowableException) {
                e = e.getCause();
            }
            LOG.error("Failure when applying metadata changes to HDFS", e);
            errorRecordHandler.onError(
                    new OnRecordErrorException(record, HdfsMetadataErrors.HDFS_METADATA_000, e.getMessage()));
        }
    }
}

From source file:com.streamsets.pipeline.stage.destination.hdfs.metadataxecutor.HdfsMetadataExecutor.java

License:Apache License

@Override
public void write(Batch batch) throws StageException {
    final ELVars variables = getContext().createELVars();
    final FileSystem fs = hdfsConnection.getFs();

    Iterator<Record> it = batch.getRecords();
    while (it.hasNext()) {
        Record record = it.next();
        RecordEL.setRecordInContext(variables, record);

        // Execute all configured HDFS metadata operations as target user
        try {
            hdfsConnection.getUGI().doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    Path workingFile = new Path(evaluate(variables, "filePath", actions.filePath));
                    LOG.info("Working on file: " + workingFile);

                    if (actions.shouldMoveFile) {
                        Path destinationFile = new Path(
                                evaluate(variables, "newLocation", actions.newLocation));

                        Path destinationParent = destinationFile.getParent();
                        if (!fs.exists(destinationParent)) {
                            LOG.debug("Creating parent directory for destination file: {}", destinationParent);
                            if (!fs.mkdirs(destinationParent)) {
                                throw new IOException("Can't create directory: " + destinationParent);
                            }
                        }

                        LOG.debug("Renaming to: {}", destinationFile);
                        if (!fs.rename(workingFile, destinationFile)) {
                            throw new IOException("Can't rename file to: " + destinationFile);
                        }
                        workingFile = destinationFile;
                    }

                    if (actions.shouldChangeOwnership) {
                        String newOwner = evaluate(variables, "newOwner", actions.newOwner);
                        String newGroup = evaluate(variables, "newGroup", actions.newGroup);
                        LOG.debug("Applying ownership: user={} and group={}", newOwner, newGroup);
                        fs.setOwner(workingFile, newOwner, newGroup);
                    }

                    if (actions.shouldSetPermissions) {
                        String stringPerms = evaluate(variables, "newPermissions", actions.newPermissions);
                        FsPermission fsPerms = new FsPermission(stringPerms);
                        LOG.debug("Applying permissions: {} loaded from value '{}'", fsPerms, stringPerms);
                        fs.setPermission(workingFile, fsPerms);
                    }

                    if (actions.shouldSetAcls) {
                        String stringAcls = evaluate(variables, "newAcls", actions.newAcls);
                        List<AclEntry> acls = AclEntry.parseAclSpec(stringAcls, true);
                        LOG.debug("Applying ACLs: {}", stringAcls);
                        fs.setAcl(workingFile, acls);
                    }

                    // Issue event with the final file name (e.g. the renamed one if applicable)
                    EventRecord event = getContext().createEventRecord("file-changed", 1);
                    event.set(Field.create(Field.Type.MAP, new ImmutableMap.Builder<String, Field>()
                            .put("filepath", Field.create(Field.Type.STRING, workingFile.toString())).build()));
                    getContext().toEvent(event);

                    LOG.debug("Done changing metadata on file: {}", workingFile);
                    return null;
                }
            });
        } catch (Exception e) {
            LOG.error("Failure when applying metadata changes to HDFS", e);
            errorRecordHandler.onError(
                    new OnRecordErrorException(record, HdfsMetadataErrors.HDFS_METADATA_000, e.getMessage()));
        }
    }
}

From source file:com.streamsets.pipeline.stage.destination.hdfs.writer.DefaultFsHelper.java

License:Apache License

@Override
public Path renameAndGetPath(FileSystem fs, Path tempPath) throws IOException, StageException {
    Path finalPath = new Path(tempPath.getParent(),
            (StringUtils.isEmpty(uniquePrefix) ? "" : (uniquePrefix + "_")) + UUID.randomUUID().toString()
                    + recordWriterManager.getExtension());
    if (!fs.rename(tempPath, finalPath)) {
        throw new IOException(Utils.format("Could not rename '{}' to '{}'", tempPath, finalPath));
    }
    // Store closed path so that we can generate event for it later
    closedPaths.add(finalPath);
    return finalPath;
}

From source file:com.streamsets.pipeline.stage.destination.hdfs.writer.RecordWriterManager.java

License:Apache License

Path renameToFinalName(FileSystem fs, Path tempPath) throws IOException {
    Path parent = tempPath.getParent();
    Path finalPath = new Path(parent, uniquePrefix + "_" + UUID.randomUUID().toString() + getExtension());
    if (!fs.rename(tempPath, finalPath)) {
        throw new IOException(Utils.format("Could not rename '{}' to '{}'", tempPath, finalPath));
    }
    return finalPath;
}

From source file:com.streamsets.pipeline.stage.destination.hdfs.writer.WholeFileFormatFsHelper.java

License:Apache License

@Override
public Path renameAndGetPath(FileSystem fs, Path tempPath) throws IOException, StageException {
    Path finalPath = getRenamablePath(fs, tempPath);
    if (!fs.rename(tempPath, finalPath)) {
        throw new IOException(Utils.format("Could not rename '{}' to '{}'", tempPath, finalPath));
    }

    //updatePermissions
    if (fsPermissions != null) {
        fs.setPermission(finalPath, fsPermissions);
    }

    fsPermissions = null;

    //Throw file copied event here.
    context.toEvent(wholeFileEventRecord);

    return finalPath;
}

From source file:com.streamsets.pipeline.stage.destination.mapreduce.jobtype.avroconvert.AvroConversionBaseMapper.java

License:Apache License

@Override
protected void map(String input, String output, Context context) throws IOException, InterruptedException {
    FileSystem fs = FileSystem.get(context.getConfiguration());
    Configuration conf = context.getConfiguration();

    LOG.info("Converting input file: {}", input);
    LOG.info("Output directory: {}", output);
    Path inputPath = new Path(input);
    Path outputDir = new Path(output);
    fs.mkdirs(outputDir);

    Path tempFile = new Path(outputDir, getTempFilePrefix() + inputPath.getName());
    if (fs.exists(tempFile)) {
        if (conf.getBoolean(AvroConversionCommonConstants.OVERWRITE_TMP_FILE, false)) {
            fs.delete(tempFile, true);
        } else {
            throw new IOException("Temporary file " + tempFile + " already exists.");
        }
    }
    LOG.info("Using temp file: {}", tempFile);

    // The output file name is the same as the input's, except that any .avro extension is dropped and .parquet or .orc is appended
    String outputFileName = inputPath.getName().replaceAll("\\.avro$", "") + getOutputFileSuffix();
    Path finalFile = new Path(outputDir, outputFileName);
    LOG.info("Final path will be: {}", finalFile);

    // Avro reader
    SeekableInput seekableInput = new FsInput(inputPath, conf);
    DatumReader<GenericRecord> reader = new GenericDatumReader<>();
    FileReader<GenericRecord> fileReader = DataFileReader.openReader(seekableInput, reader);
    Schema avroSchema = fileReader.getSchema();

    initializeWriter(tempFile, avroSchema, conf, context);

    LOG.info("Started reading input file");
    long recordCount = 0;
    try {
        while (fileReader.hasNext()) {
            GenericRecord record = fileReader.next();
            handleAvroRecord(record);

            context.getCounter(Counters.PROCESSED_RECORDS).increment(1);
            recordCount++;
        }
    } catch (Exception e) {
        // Various random stuff can happen while converting, so we wrap the underlying exception with more details
        String message = String.format("Exception at offset %d (record %d): %s", fileReader.tell(), recordCount,
                e.toString());
        throw new IOException(message, e);
    }
    LOG.info("Done reading input file");
    closeWriter();

    LOG.info("Moving temporary file {} to final destination {}", tempFile, finalFile);
    fs.rename(tempFile, finalFile);

    if (!context.getConfiguration().getBoolean(AvroConversionCommonConstants.KEEP_INPUT_FILE, false)) {
        LOG.info("Removing input file", inputPath);
        fs.delete(inputPath, true);
    }

    LOG.info("Done converting input file into output directory {}", output);
}