Example usage for org.apache.hadoop.fs Path equals

List of usage examples for org.apache.hadoop.fs Path equals

Introduction

This page collects example usages of org.apache.hadoop.fs.Path#equals from open-source projects.

Prototype

@Override
public boolean equals(Object o)
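
For orientation before the project examples below, here is a small self-contained sketch of how Path.equals behaves. Path compares its normalized internal URI, so the results in the comments follow from that; the paths themselves are only illustrative.

import org.apache.hadoop.fs.Path;

public class PathEqualsDemo {
    public static void main(String[] args) {
        // Path normalizes trailing slashes, so these two compare equal.
        System.out.println(new Path("/data/logs").equals(new Path("/data/logs/"))); // true

        // Scheme and authority are part of the underlying URI, so a fully
        // qualified path does not equal its scheme-less form.
        System.out.println(new Path("hdfs://nn:8020/data").equals(new Path("/data"))); // false

        // The comparison is case-sensitive on the path component.
        System.out.println(new Path("/Data").equals(new Path("/data"))); // false
    }
}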

Usage

From source file: org.kitesdk.data.spi.filesystem.FileSystemDataset.java

License: Apache License

@SuppressWarnings("unchecked")
public PartitionKey keyFromDirectory(Path dir) {

    Path relDir = null;
    URI relUri = directory.toUri().relativize(dir.toUri());

    if (!relUri.toString().isEmpty()) {
        relDir = new Path(relUri);
        Preconditions.checkState(!relDir.equals(dir),
                "Partition directory %s is not " + "relative to dataset directory %s", dir, directory);
    }

    List<String> pathComponents = Lists.newArrayList();
    while (relDir != null && !relDir.getName().equals("")) {
        pathComponents.add(0, relDir.getName());
        relDir = relDir.getParent();
    }

    List<FieldPartitioner> fps = Accessor.getDefault().getFieldPartitioners(partitionStrategy);
    Preconditions.checkState(pathComponents.size() <= fps.size(),
            "Number of components in partition directory %s (%s) exceeds number of field " + "partitioners %s",
            dir, pathComponents, partitionStrategy);

    List<Object> values = Lists.newArrayList();
    for (int i = 0; i < pathComponents.size(); i++) {
        values.add(convert.valueForDirname(fps.get(i), pathComponents.get(i)));
    }

    if (partitionKey != null) {
        values.addAll(0, partitionKey.getValues());
    }

    return new PartitionKey(values.toArray());
}
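
The method above hinges on URI relativization: when dir lives under the dataset directory, relativize returns only the trailing partition components, and otherwise it returns dir's URI unchanged, which is exactly what the equals check guards against. A standalone sketch of that mechanism, with hypothetical paths:

import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.Path;

public class RelativizeDemo {
    public static void main(String[] args) {
        Path dataset = new Path("hdfs://nn/datasets/events");
        Path partition = new Path("hdfs://nn/datasets/events/year=2024/month=06");

        // Yields the relative URI "year=2024/month=06"; if partition did not
        // live under dataset, relativize would return partition's URI as-is.
        URI rel = dataset.toUri().relativize(partition.toUri());

        // Walk the relative path upward, collecting one name per level,
        // just as keyFromDirectory does above.
        List<String> components = new ArrayList<>();
        Path p = new Path(rel);
        while (p != null && !p.getName().isEmpty()) {
            components.add(0, p.getName());
            p = p.getParent();
        }
        System.out.println(components); // [year=2024, month=06]
    }
}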

From source file: org.kitesdk.data.spi.filesystem.FileSystemUtil.java

License: Apache License

/**
 * Replace {@code destination} with {@code replacement}.
 * <p>
 * If this method fails in any step, recover using these steps:
 * <ol>
 * <li>If {@code .name.replacement} exists, but {@code name} does not, move
 * it to {@code name}</li>
 * <li>If {@code .name.replacement} and {@code name} exist, run this method
 * again with the same list of additional removals</li>
 * </ol>
 *
 * @param fs the FileSystem
 * @param destination a Path
 * @param replacement a Path that replaces the destination
 * @param removals a List of paths that should also be removed
 */
static void replace(FileSystem fs, Path root, Path destination, Path replacement, List<Path> removals) {
    try {
        // Ensure the destination exists because it acts as a recovery signal. If
        // the directory exists, then recovery must go through the entire
        // replacement process again. If it does not, then the dir can be moved.
        if (!fs.exists(destination)) {
            fs.mkdirs(destination);
        }

        Path staged = new Path(destination.getParent(), "." + destination.getName() + ".replacement");

        // First move into the destination folder to ensure moves work. It is
        // okay to run this method on the staged path
        if (!staged.equals(replacement) && !fs.rename(replacement, staged)) {
            throw new IOException("Failed to rename " + replacement + " to " + staged);
        }

        // Remove any additional directories included in the replacement. This
        // handles the case where there are multiple directories for the same
        // logical partition. For example, dataset/a=2/ and dataset/2/
        for (Path toRemove : removals) {
            if (toRemove.equals(destination)) {
                // destination is deleted last
                continue;
            }
            FileSystemUtil.cleanlyDelete(fs, root, toRemove);
        }

        // remove the directory that will be replaced with a move
        fs.delete(destination, true /* recursively */ );

        // move the replacement to the final location
        if (!fs.rename(staged, destination)) {
            throw new IOException("Failed to rename " + staged + " to " + destination);
        }

    } catch (IOException e) {
        throw new DatasetIOException("Could not replace " + destination + " with " + replacement, e);
    }
}
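
The two recovery steps in the javadoc translate directly into code. The sketch below is an illustration, not part of the project; the recover name is hypothetical and it assumes access to the replace method above.

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

static void recover(FileSystem fs, Path root, Path name, List<Path> removals) throws IOException {
    Path staged = new Path(name.getParent(), "." + name.getName() + ".replacement");
    if (fs.exists(staged) && !fs.exists(name)) {
        // Step 1: the replacement was staged and the destination already
        // deleted, so finishing the rename completes the swap.
        if (!fs.rename(staged, name)) {
            throw new IOException("Failed to rename " + staged + " to " + name);
        }
    } else if (fs.exists(staged) && fs.exists(name)) {
        // Step 2: the swap was interrupted earlier; rerun the full
        // replacement with the staged path as the replacement.
        replace(fs, root, name, staged, removals);
    }
}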

From source file: org.openflamingo.uploader.util.FileSystemUtils.java

License: Open Source License

/**
 * Validates that the two given paths reside on the same file system.
 *
 * @param path1 the first path
 * @param path2 the second path
 */
public static void validateSameFileSystem(String path1, String path2) {
    Path p1 = new Path(correctPath(path1));
    Path p2 = new Path(correctPath(path2));
    FileSystem fs1 = null;
    FileSystem fs2 = null;
    try {
        fs1 = p1.getFileSystem(new Configuration());
        fs2 = p2.getFileSystem(new Configuration());
    } catch (Exception ex) {
        throw new SystemException(ExceptionUtils
                .getMessage("Cannot access '{}' or '{}'.", p1, p2), ex);
    }

    if (!compareFs(fs1, fs2)) {
        throw new SystemException(ExceptionUtils.getMessage(
                "File system is not same : {}, {}", p1, p2));
    }

    if (p1.equals(p2)) {
        throw new SystemException(
                ExceptionUtils.getMessage("Same path : {}, {}", p1, p2));
    }
}

From source file: org.openflamingo.util.FileSystemUtils.java

License: Apache License

/**
 * Validates that the two given paths reside on the same file system.
 *
 * @param path1 the first path
 * @param path2 the second path
 */
public static void validateSameFileSystem(String path1, String path2) {
    Path p1 = new Path(correctPath(path1));
    Path p2 = new Path(correctPath(path2));
    FileSystem fs1 = null;
    FileSystem fs2 = null;
    try {
        fs1 = p1.getFileSystem(new Configuration());
        fs2 = p2.getFileSystem(new Configuration());
    } catch (Exception ex) {
        throw new FileSystemException(ExceptionUtils.getMessage("Cannot access '{}' or '{}'.", p1, p2), ex);
    }

    if (!compareFs(fs1, fs2)) {
        throw new FileSystemException(ExceptionUtils.getMessage("File system is not same : {}, {}", p1, p2));
    }

    if (p1.equals(p2)) {
        throw new FileSystemException(ExceptionUtils.getMessage("Same path : {}, {}", p1, p2));
    }
}
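
Both openflamingo variants delegate the actual file-system comparison to compareFs, which is not included in these snippets. A common way to implement such a check, shown here purely as an assumption rather than the project's code, is to compare the scheme and authority of each FileSystem's URI:

import java.net.URI;
import java.util.Objects;
import org.apache.hadoop.fs.FileSystem;

// Assumed implementation for illustration: two FileSystem instances are
// treated as the same when their URIs share a scheme and an authority.
private static boolean compareFs(FileSystem fs1, FileSystem fs2) {
    URI uri1 = fs1.getUri();
    URI uri2 = fs2.getUri();
    if (uri1.getScheme() == null || uri2.getScheme() == null) {
        return false;
    }
    if (!uri1.getScheme().equalsIgnoreCase(uri2.getScheme())) {
        return false;
    }
    // The local file system has a null authority, so compare null-safely.
    return Objects.equals(uri1.getAuthority(), uri2.getAuthority());
}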

From source file: org.pentaho.hadoop.shim.common.format.ReadFileFilter.java

License: Apache License

@Override
public boolean accept(Path path) {
    String requiredDir = conf.get(FILTER_DIR);
    if (requiredDir == null) {
        throw new RuntimeException("Required dir not defined");
    }
    String requiredFile = conf.get(FILTER_FILE);
    if (requiredFile == null) {
        throw new RuntimeException("Required file not defined");
    }
    return path.equals(new Path(requiredDir)) || path.equals(new Path(requiredFile));
}
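
Because accept compares with Path.equals, the configured strings must match the URI form of the paths the framework hands to the filter. A hypothetical driver-side setup (the constructor signature and the visibility of FILTER_DIR and FILTER_FILE outside the class are assumptions):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

Configuration conf = new Configuration();
// FILTER_DIR and FILTER_FILE are the keys read by accept() above.
conf.set(ReadFileFilter.FILTER_DIR, "/data/in");
conf.set(ReadFileFilter.FILTER_FILE, "/data/in/part-00000.parquet");

ReadFileFilter filter = new ReadFileFilter(conf); // hypothetical constructor
filter.accept(new Path("/data/in"));                    // true: equals FILTER_DIR
filter.accept(new Path("/data/in/part-00000.parquet")); // true: equals FILTER_FILE
filter.accept(new Path("/data/in/other.parquet"));      // false: matches neither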

From source file: org.seqdoop.hadoop_bam.BAMInputFormat.java

License: Open Source License

private int addIndexedSplits(List<InputSplit> splits, int i, List<InputSplit> newSplits, Configuration cfg)
        throws IOException {
    final Path file = ((FileSplit) splits.get(i)).getPath();
    List<InputSplit> potentialSplits = new ArrayList<InputSplit>();

    final SplittingBAMIndex idx = new SplittingBAMIndex(file.getFileSystem(cfg).open(getIdxPath(file)));

    int splitsEnd = splits.size();
    for (int j = i; j < splitsEnd; ++j)
        if (!file.equals(((FileSplit) splits.get(j)).getPath()))
            splitsEnd = j;

    for (int j = i; j < splitsEnd; ++j) {
        final FileSplit fileSplit = (FileSplit) splits.get(j);

        final long start = fileSplit.getStart();
        final long end = start + fileSplit.getLength();

        final Long blockStart = idx.nextAlignment(start);

        // The last split needs to end where the last alignment ends, but the
        // index doesn't store that data (whoops); we only know where the last
        // alignment begins. Fortunately there's no need to change the index
        // format for this: we can just set the end to the maximal length of
        // the final BGZF block (0xffff), and then read until BAMRecordCodec
        // hits EOF.
        Long blockEnd;
        if (j == splitsEnd - 1) {
            blockEnd = idx.prevAlignment(end) | 0xffff;
        } else {
            blockEnd = idx.nextAlignment(end);
        }

        if (blockStart == null || blockEnd == null) {
            System.err.println("Warning: index for " + file.toString()
                    + " was not good. Generating probabilistic splits.");

            return addProbabilisticSplits(splits, i, newSplits, cfg);
        }

        potentialSplits.add(new FileVirtualSplit(file, blockStart, blockEnd, fileSplit.getLocations()));
    }

    for (InputSplit s : potentialSplits) {
        newSplits.add(s);
    }
    return splitsEnd;
}
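
The | 0xffff trick relies on the layout of BAM virtual file offsets: the high 48 bits address a BGZF block in the compressed file and the low 16 bits are an offset into that block's uncompressed data, so OR-ing with 0xffff pushes the end pointer to the largest possible in-block position. The helpers below only illustrate that layout; they are not part of BAMInputFormat.

// Illustrative helpers for the BGZF virtual offset layout.
static long makeVirtualOffset(long blockAddress, int withinBlock) {
    return (blockAddress << 16) | (withinBlock & 0xffffL);
}

static long blockAddress(long virtualOffset) {
    return virtualOffset >>> 16; // high 48 bits: compressed-file block offset
}

static int withinBlockOffset(long virtualOffset) {
    return (int) (virtualOffset & 0xffff); // low 16 bits: offset in the block
}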

From source file: uk.bl.wa.hadoop.mapreduce.hash.MessageDigestMapper.java

License: Open Source License

@Override
protected void map(Path key, BytesWritable value, Mapper<Path, BytesWritable, Text, Text>.Context context)
        throws IOException, InterruptedException {
    if (!key.equals(current)) {
        // Extract and emit:
        this.emit(context);
        // Set up a new one:
        current = key;
        bytes_seen = 0;
        md.reset();
        log.info("Hashing " + current);
    }
    md.update(value.getBytes(), 0, value.getLength());
    bytes_seen += value.getLength();
}
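
The emit method invoked above is not part of the snippet. A plausible sketch, reusing the mapper's current, md, and bytes_seen fields (the output format is an assumption for illustration):

private void emit(Context context) throws IOException, InterruptedException {
    if (current == null) {
        return; // first map() call: no file has been hashed yet
    }
    byte[] digest = md.digest();
    StringBuilder hex = new StringBuilder(digest.length * 2);
    for (byte b : digest) {
        hex.append(String.format("%02x", b));
    }
    // Key on the file path; value is "<hex digest> <byte count>".
    context.write(new Text(current.toString()), new Text(hex + " " + bytes_seen));
}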