Example usage for org.apache.hadoop.fs Path getParent

List of usage examples for org.apache.hadoop.fs Path getParent

Introduction

On this page you can find example usage for org.apache.hadoop.fs Path getParent.

Prototype

public Path getParent() 

Document

Returns the parent of a path or null if at root.
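
For orientation only, here is a minimal, self-contained sketch of that contract (the path string and class name are hypothetical): it walks a path upward until getParent() returns null at the root.

import org.apache.hadoop.fs.Path;

public class PathParentDemo {
    public static void main(String[] args) {
        // Hypothetical path used only for illustration.
        Path p = new Path("/user/demo/output/part-00000");
        // getParent() returns null once the root ("/") is reached.
        for (Path current = p; current != null; current = current.getParent()) {
            System.out.println(current);
        }
    }
}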

Usage

From source file:com.aliyun.fs.oss.nat.NativeOssFileSystem.java

License:Apache License

private void createParent(Path path) throws IOException {
    Path parent = path.getParent();
    if (parent != null) {
        String key = pathToKey(makeAbsolute(parent));
        if (key.length() > 0) {
            store.storeEmptyFile(key + PATH_DELIMITER);
        }
    }
}

From source file:com.aliyun.fs.oss.nat.NativeOssFileSystem.java

License:Apache License

@Override
public boolean rename(Path src, Path dst) throws IOException {

    String srcKey = pathToKey(makeAbsolute(src));

    if (srcKey.length() == 0) {
        // Cannot rename root of file system
        return false;
    }

    final String debugPreamble = "Renaming '" + src + "' to '" + dst + "' - ";

    // Figure out the final destination
    String dstKey;
    try {
        boolean dstIsFile = !getFileStatus(dst).isDir();
        if (dstIsFile) {
            LOG.debug(debugPreamble + "returning false as dst is an already " + "existing file");
            // If dst is not a directory
            throw new FileAlreadyExistsException(
                    String.format("Failed to rename %s to %s, file already exists!", src, dst));
        } else {
            LOG.debug(debugPreamble + "using dst as output directory");
            dstKey = pathToKey(makeAbsolute(new Path(dst, src.getName())));
        }
    } catch (FileNotFoundException e) {
        LOG.debug(debugPreamble + "using dst as output destination");
        dstKey = pathToKey(makeAbsolute(dst));
        try {
            if (!getFileStatus(dst.getParent()).isDir()) {
                LOG.debug(debugPreamble + "returning false as dst parent exists and " + "is a file");
                return false;
            }
        } catch (FileNotFoundException ex) {
            LOG.debug(debugPreamble + "returning false as dst parent does not exist");
            throw ex;
        }
    }

    boolean srcIsFile;
    try {
        srcIsFile = !getFileStatus(src).isDir();
    } catch (FileNotFoundException e) {
        LOG.debug(debugPreamble + "returning false as src does not exist");
        throw e;
    }
    if (srcIsFile) {
        LOG.debug(debugPreamble + "src is file, so doing copy then delete in Oss");
        store.copy(srcKey, dstKey);
        store.delete(srcKey);
    } else {
        LOG.debug(debugPreamble + "src is directory, so copying contents");
        store.storeEmptyFile(dstKey + PATH_DELIMITER);

        List<String> keysToDelete = new ArrayList<String>();
        String priorLastKey = null;
        do {
            PartialListing listing = store.list(srcKey, OSS_MAX_LISTING_LENGTH, priorLastKey, true);
            for (FileMetadata file : listing.getFiles()) {
                keysToDelete.add(file.getKey());
                store.copy(file.getKey(), dstKey + file.getKey().substring(srcKey.length()));
            }
            priorLastKey = listing.getPriorLastKey();
        } while (priorLastKey != null);

        LOG.debug(debugPreamble + "all files in src copied, now removing " + "src files");
        for (String key : keysToDelete) {
            store.delete(key);
        }

        try {
            store.delete(srcKey + FOLDER_SUFFIX);
        } catch (FileNotFoundException e) {
            //this is fine, we don't require a marker
        }
        LOG.debug(debugPreamble + "done");
    }

    return true;
}

From source file:com.aliyun.odps.fs.VolumeFileSystem.java

License:Apache License

@Override
public boolean rename(Path src, Path dst) throws IOException {
    statistics.incrementWriteOps(1);
    Path absSrc = fixRelativePart(src);
    Path absDst = fixRelativePart(dst);
    if (!exists(absSrc)) {
        throw new FileNotFoundException("Source path " + src + " does not exist");
    }
    if (isDirectory(absDst)) {
        // destination is a directory: rename goes underneath it with the
        // source name
        absDst = new Path(absDst, absSrc.getName());
    }
    if (exists(absDst)) {
        throw new FileAlreadyExistsException("Destination path " + dst + " already exists");
    }
    if (absDst.getParent() != null && !exists(absDst.getParent())) {
        throw new FileNotFoundException(
                VolumeFSErrorMessageGenerator.noSuchFileOrDirectory(absDst.getParent().toString()));
    }

    if (VolumeFSUtil.isParentOf(absSrc, absDst)) {
        throw new IOException("Cannot rename " + absSrc + " under itself" + " : " + absDst);
    }
    String srcPath = getPathName(absSrc);
    String dstPath = getPathName(absDst);
    try {
        return volumeClient.rename(srcPath, dstPath);
    } catch (VolumeException e) {
        logException(e);
        throw wrapExceptions(srcPath, e);
    }
}

From source file:com.architecting.ch07.MapReduceIndexerTool.java

License:Apache License

/** API for Java clients; visible for testing; may become a public API eventually. */
int run(Options options) throws Exception {
    if (getConf().getBoolean("isMR1", false) && "local".equals(getConf().get("mapred.job.tracker"))) {
        throw new IllegalStateException(
                "Running with LocalJobRunner (i.e. all of Hadoop inside a single JVM) is not supported "
                        + "because LocalJobRunner does not (yet) implement the Hadoop Distributed Cache feature, "
                        + "which is required for passing files via --files and --libjars");
    }

    long programStartTime = System.nanoTime();
    getConf().setInt(SolrOutputFormat.SOLR_RECORD_WRITER_MAX_SEGMENTS, options.maxSegments);

    // switch off a false warning about allegedly not implementing Tool
    // also see http://hadoop.6.n7.nabble.com/GenericOptionsParser-warning-td8103.html
    // also see https://issues.apache.org/jira/browse/HADOOP-8183
    getConf().setBoolean("mapred.used.genericoptionsparser", true);

    if (options.log4jConfigFile != null) {
        Utils.setLogConfigFile(options.log4jConfigFile, getConf());
        addDistributedCacheFile(options.log4jConfigFile, getConf());
    }

    Configuration config = HBaseConfiguration.create();
    Job job = Job.getInstance(config);
    job.setJarByClass(getClass());

    // To be able to run this example from eclipse, we need to make sure 
    // the built jar is distributed to the map-reduce tasks from the
    // local file system.
    job.addCacheArchive(new URI("file:///home/cloudera/ahae/target/ahae.jar"));

    FileSystem fs = options.outputDir.getFileSystem(job.getConfiguration());
    if (fs.exists(options.outputDir) && !delete(options.outputDir, true, fs)) {
        return -1;
    }
    Path outputResultsDir = new Path(options.outputDir, RESULTS_DIR);
    Path outputReduceDir = new Path(options.outputDir, "reducers");

    int reducers = 1;

    Scan scan = new Scan();
    scan.addFamily(CF);
    // tag::SETUP[]
    scan.setCaching(500); // <1>
    scan.setCacheBlocks(false); // <2>

    TableMapReduceUtil.initTableMapperJob( // <3>
            options.inputTable, // Input HBase table name
            scan, // Scan instance to control what to index
            HBaseAvroToSOLRMapper.class, // Mapper to parse cells content.
            Text.class, // Mapper output key
            SolrInputDocumentWritable.class, // Mapper output value
            job);

    FileOutputFormat.setOutputPath(job, outputReduceDir);

    job.setJobName(getClass().getName() + "/" + Utils.getShortClassName(HBaseAvroToSOLRMapper.class));
    job.setReducerClass(SolrReducer.class); // <4>
    job.setPartitionerClass(SolrCloudPartitioner.class); // <5>
    job.getConfiguration().set(SolrCloudPartitioner.ZKHOST, options.zkHost);
    job.getConfiguration().set(SolrCloudPartitioner.COLLECTION, options.collection);
    job.getConfiguration().setInt(SolrCloudPartitioner.SHARDS, options.shards);

    job.setOutputFormatClass(SolrOutputFormat.class);
    SolrOutputFormat.setupSolrHomeCache(options.solrHomeDir, job);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(SolrInputDocumentWritable.class);
    job.setSpeculativeExecution(false);
    // end::SETUP[]
    job.setNumReduceTasks(reducers); // Set the number of reducers based on the number of shards we have.
    if (!waitForCompletion(job, true)) {
        return -1;// job failed
    }

    // -------------------------------------------------------------------------------------------------------------------------------------

    assert reducers == options.shards;

    // normalize output shard dir prefix, i.e.
    // rename part-r-00000 to part-00000 (stems from zero tree merge iterations)
    // rename part-m-00000 to part-00000 (stems from > 0 tree merge iterations)
    for (FileStatus stats : fs.listStatus(outputReduceDir)) {
        String dirPrefix = SolrOutputFormat.getOutputName(job);
        Path srcPath = stats.getPath();
        if (stats.isDirectory() && srcPath.getName().startsWith(dirPrefix)) {
            String dstName = dirPrefix + srcPath.getName().substring(dirPrefix.length() + "-m".length());
            Path dstPath = new Path(srcPath.getParent(), dstName);
            if (!rename(srcPath, dstPath, fs)) {
                return -1;
            }
        }
    }

    // publish results dir
    if (!rename(outputReduceDir, outputResultsDir, fs)) {
        return -1;
    }

    if (options.goLive && !new GoLive().goLive(options, listSortedOutputShardDirs(job, outputResultsDir, fs))) {
        return -1;
    }

    goodbye(job, programStartTime);
    return 0;
}

From source file:com.asakusafw.lang.compiler.extension.testdriver.InternalExporterRetriever.java

License:Apache License

@Override
public void truncate(InternalExporterDescription description, TestContext context) throws IOException {
    LOG.debug("deleting output directory: {}", description); //$NON-NLS-1$
    VariableTable variables = createVariables(context);
    Configuration config = configurations.newInstance();
    FileSystem fs = FileSystem.get(config);
    String resolved = variables.parse(description.getPathPrefix(), false);
    Path path = new Path(resolved);
    Path output = path.getParent();
    Path target;
    if (output == null) {
        LOG.warn(MessageFormat.format("skipped deleting output directory because it is a base directory: {0}",
                path));
        target = fs.makeQualified(path);
    } else {
        LOG.debug("output directory will be deleted: {}", output); //$NON-NLS-1$
        target = fs.makeQualified(output);
    }
    LOG.debug("deleting output target: {}", target); //$NON-NLS-1$
    try {
        FileStatus[] stats = fs.globStatus(path);
        for (FileStatus s : stats) {
            Path f = s.getPath();
            boolean deleted = fs.delete(f, true);
            LOG.debug("deleted output target (succeed={}): {}", deleted, f); //$NON-NLS-1$
        }
    } catch (IOException e) {
        LOG.debug("exception in truncate", e);
    }
}

From source file:com.asakusafw.operation.tools.directio.file.AbstractFileCopyCommand.java

License:Apache License

@Override
public void run() {
    LOG.debug("starting {}", getClass().getSimpleName());

    if (paths.size() < 2) {
        throw new CommandConfigurationException("source and destination files must be specified");
    }
    List<DirectIoPath> sources = getSources();
    LOG.debug("source: {}", sources);

    Path destination = getDestination();
    LOG.debug("destination: {}", destination);

    List<ResourceInfo> files = sources.stream().flatMap(it -> {
        List<ResourceInfo> list = FileListCommand.list(it);
        if (list.isEmpty()) {
            throw new CommandConfigurationException(
                    MessageFormat.format("there are no files to copy: {0}", it));
        }
        return list.stream();
    }).collect(Collectors.toList());

    validate(files, destination);
    Optional<FileStatus> stat = stat(destination);

    if (stat.filter(it -> it.isDirectory()).isPresent()) {
        copyOnto(files, destination);
    } else if (stat.filter(it -> it.isDirectory() == false).isPresent()
            && overwriteParameter.isEnabled() == false) {
        throw new CommandConfigurationException(
                MessageFormat.format("destination file already exists: {0}", destination));
    } else {
        Path parent = Optional.ofNullable(destination.getParent())
                .orElseThrow(() -> new IllegalStateException(destination.toString()));
        if (stat(parent).filter(it -> it.isDirectory()).isPresent()) {
            if (sources.size() >= 2) {
                throw new CommandConfigurationException(MessageFormat.format("copy source is ambiguous: {0}",
                        sources.stream().map(String::valueOf).collect(Collectors.joining(", "))));
            }
            copyTo(files.get(0), destination);
        } else {
            throw new CommandConfigurationException(
                    MessageFormat.format("destination directory does not exist: {0}", parent));
        }
    }
}

From source file:com.asakusafw.operation.tools.directio.file.FilePutCommand.java

License:Apache License

@Override
public void run() {
    LOG.debug("starting {}", getClass().getSimpleName());

    if (paths.size() < 2) {
        throw new CommandConfigurationException("source and destination files must be specified");
    }
    List<java.nio.file.Path> sources = getSources();
    LOG.debug("source: {}", sources);

    org.apache.hadoop.fs.Path destination = getDestination();
    LOG.debug("destination: {}", destination);

    Optional<org.apache.hadoop.fs.FileStatus> stat = stat(destination);
    if (stat.filter(it -> it.isDirectory()).isPresent()) {
        copyOnto(sources, destination);
    } else if (stat.filter(it -> it.isDirectory() == false).isPresent()
            && overwriteParameter.isEnabled() == false) {
        throw new CommandConfigurationException(
                MessageFormat.format("destination file already exists: {0}", destination));
    } else {
        Path parent = Optional.ofNullable(destination.getParent())
                .orElseThrow(() -> new IllegalStateException(destination.toString()));
        if (stat(parent).filter(it -> it.isDirectory()).isPresent()) {
            if (sources.size() >= 2) {
                throw new CommandConfigurationException(MessageFormat.format("copy source is ambiguous: {0}",
                        sources.stream().map(String::valueOf).collect(Collectors.joining(", "))));
            }
            copyTo(sources.get(0), destination);
        } else {
            throw new CommandConfigurationException(
                    MessageFormat.format("destination directory does not exist: {0}", parent));
        }
    }
}

From source file:com.asakusafw.operation.tools.directio.file.Util.java

License:Apache License

private static org.apache.hadoop.fs.Path normalize(org.apache.hadoop.fs.Path path) {
    if (path.getName().isEmpty()) {
        return Optional.ofNullable(path.getParent()).orElse(path);
    }
    return path;
}

From source file:com.asakusafw.runtime.directio.hadoop.HadoopDataSourceUtil.java

License:Apache License

private static void move(Counter counter, FileSystem fromFs, Path from, FileSystem toFs, Path to,
        boolean fromLocal) throws IOException {
    if (counter == null) {
        throw new IllegalArgumentException("counter must not be null"); //$NON-NLS-1$
    }
    if (fromFs == null) {
        throw new IllegalArgumentException("fromFs must not be null"); //$NON-NLS-1$
    }
    if (from == null) {
        throw new IllegalArgumentException("from must not be null"); //$NON-NLS-1$
    }
    if (toFs == null) {
        throw new IllegalArgumentException("toFs must not be null"); //$NON-NLS-1$
    }
    if (to == null) {
        throw new IllegalArgumentException("to must not be null"); //$NON-NLS-1$
    }
    if (fromLocal && isLocalPath(from) == false) {
        throw new IllegalArgumentException("from must be on local file system"); //$NON-NLS-1$
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(MessageFormat.format("Start moving files (from={0}, to={1})", //$NON-NLS-1$
                from, to));
    }
    Path source = fromFs.makeQualified(from);
    Path target = toFs.makeQualified(to);
    List<Path> list = createFileListRelative(counter, fromFs, source);
    if (list.isEmpty()) {
        return;
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(MessageFormat.format("Process moving files (from={0}, to={1}, count={2})", //$NON-NLS-1$
                from, to, list.size()));
    }
    Set<Path> directoryCreated = new HashSet<>();
    for (Path path : list) {
        Path sourceFile = new Path(source, path);
        Path targetFile = new Path(target, path);
        if (LOG.isTraceEnabled()) {
            FileStatus stat = fromFs.getFileStatus(sourceFile);
            LOG.trace(MessageFormat.format("Moving file (from={0}, to={1}, size={2})", //$NON-NLS-1$
                    sourceFile, targetFile, stat.getLen()));
        }
        try {
            FileStatus stat = toFs.getFileStatus(targetFile);
            if (LOG.isDebugEnabled()) {
                LOG.debug(MessageFormat.format("Deleting file: {0}", //$NON-NLS-1$
                        targetFile));
            }
            if (FileSystemCompatibility.isDirectory(stat)) {
                toFs.delete(targetFile, true);
            } else {
                toFs.delete(targetFile, false);
            }
        } catch (FileNotFoundException e) {
            Path targetParent = targetFile.getParent();
            if (directoryCreated.contains(targetParent) == false) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug(MessageFormat.format("Creating directory: {0}", //$NON-NLS-1$
                            targetParent));
                }
                toFs.mkdirs(targetParent);
                directoryCreated.add(targetParent);
            }
        }
        counter.add(1);
        if (fromLocal) {
            toFs.moveFromLocalFile(sourceFile, targetFile);
        } else {
            boolean succeed = toFs.rename(sourceFile, targetFile);
            if (succeed == false) {
                throw new IOException(
                        MessageFormat.format("Failed to move file (from={0}, to={1})", sourceFile, targetFile));
            }
        }
        counter.add(1);
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(MessageFormat.format("Finish moving files (from={0}, to={1}, count={2})", //$NON-NLS-1$
                from, to, list.size()));
    }
}

From source file:com.asakusafw.runtime.util.cache.HadoopFileCacheRepository.java

License:Apache License

private Path computeCachePath(Path file) {
    assert repository != null;
    String directoryName;
    Path parent = file.getParent();
    if (parent == null) {
        directoryName = String.format("%08x", 0); //$NON-NLS-1$
    } else {
        directoryName = String.format("%08x", parent.toString().hashCode()); //$NON-NLS-1$
    }
    Path directory = new Path(repository, directoryName);
    Path target = new Path(directory, file.getName());
    return target;
}