Example usage for org.apache.hadoop.fs FileSystem rename

List of usage examples for org.apache.hadoop.fs FileSystem rename

Introduction

On this page you can find example usages of org.apache.hadoop.fs.FileSystem rename, collected from open source projects.

Prototype

public abstract boolean rename(Path src, Path dst) throws IOException;

Document

Renames Path src to Path dst.
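
The sketch below is a minimal, self-contained illustration of calling rename directly; the configuration and paths are placeholders rather than values taken from the examples on this page. As in the snippets that follow, the boolean return value is checked, since rename typically reports failure by returning false instead of throwing.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RenameExample {
    public static void main(String[] args) throws IOException {
        // Illustrative paths; adjust to your own layout.
        Path src = new Path("/tmp/staging/part-00000");
        Path dst = new Path("/tmp/output/part-00000");

        FileSystem fs = FileSystem.get(new Configuration());
        if (!fs.rename(src, dst)) {
            // rename returns false on many failure cases (for example, a missing source),
            // so callers should always check the result.
            throw new IOException("Failed to rename " + src + " to " + dst);
        }
    }
}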

Usage

From source file:org.dutir.lucene.io.HadoopPlugin.java

License:Mozilla Public License

public void initialise() throws Exception {
    config = getGlobalConfiguration();

    final org.apache.hadoop.fs.FileSystem DFS = hadoopFS = org.apache.hadoop.fs.FileSystem.get(config);

    FileSystem terrierDFS = new FileSystem() {
        public String name() {
            return "hdfs";
        }

        /** capabilities of the filesystem */
        public byte capabilities() {
            return FSCapability.READ | FSCapability.WRITE | FSCapability.RANDOM_READ | FSCapability.STAT
                    | FSCapability.DEL_ON_EXIT | FSCapability.LS_DIR;
        }

        public String[] schemes() {
            return new String[] { "dfs", "hdfs" };
        }

        /** returns true if the path exists */
        public boolean exists(String filename) throws IOException {
            if (logger.isDebugEnabled())
                logger.debug("Checking that " + filename + " exists answer=" + DFS.exists(new Path(filename)));
            return DFS.exists(new Path(filename));
        }

        /** open a file of given filename for reading */
        public InputStream openFileStream(String filename) throws IOException {
            if (logger.isDebugEnabled())
                logger.debug("Opening " + filename);
            return DFS.open(new Path(filename));
        }

        /** open a file of given filename for writing */
        public OutputStream writeFileStream(String filename) throws IOException {
            if (logger.isDebugEnabled())
                logger.debug("Creating " + filename);
            return DFS.create(new Path(filename));
        }

        public boolean mkdir(String filename) throws IOException {
            return DFS.mkdirs(new Path(filename));
        }

        public RandomDataOutput writeFileRandom(String filename) throws IOException {
            throw new IOException("HDFS does not support random writing");
        }

        public RandomDataInput openFileRandom(String filename) throws IOException {
            return new HadoopFSRandomAccessFile(DFS, filename);
        }

        public boolean delete(String filename) throws IOException {
            return DFS.delete(new Path(filename), true);
        }

        public boolean deleteOnExit(String filename) throws IOException {
            return DFS.deleteOnExit(new Path(filename));
        }

        public String[] list(String path) throws IOException {
            final FileStatus[] contents = DFS.listStatus(new Path(path));
            final String[] names = new String[contents.length];
            for (int i = 0; i < contents.length; i++) {
                names[i] = contents[i].getPath().getName();
            }
            return names;
        }

        public String getParent(String path) throws IOException {
            return new Path(path).getParent().getName();
        }

        public boolean rename(String source, String destination) throws IOException {
            return DFS.rename(new Path(source), new Path(destination));
        }

        public boolean isDirectory(String path) throws IOException {
            return DFS.getFileStatus(new Path(path)).isDir();
        }

        public long length(String path) throws IOException {
            return DFS.getFileStatus(new Path(path)).getLen();
        }

        public boolean canWrite(String path) throws IOException {
            return DFS.getFileStatus(new Path(path)).getPermission().getUserAction().implies(FsAction.WRITE);
        }

        public boolean canRead(String path) throws IOException {
            return DFS.getFileStatus(new Path(path)).getPermission().getUserAction().implies(FsAction.READ);
        }
    };
    Files.addFileSystemCapability(terrierDFS);
}

From source file:org.exem.flamingo.shared.util.HdfsUtils.java

License:Apache License

/**
 * Moves files matching the source path to the target path.
 *
 * @param source source path (glob patterns are allowed)
 * @param target target path
 * @param fs     Hadoop FileSystem
 */
public static void move(String source, String target, FileSystem fs) throws Exception {
    Path srcPath = new Path(source);
    Path[] srcs = FileUtil.stat2Paths(fs.globStatus(srcPath), srcPath);
    Path dst = new Path(target);
    if (srcs.length > 1 && !fs.getFileStatus(dst).isDirectory()) {
        throw new ServiceException("When moving multiple files, destination should be a directory.");
    }
    for (int i = 0; i < srcs.length; i++) {
        if (!fs.rename(srcs[i], dst)) {
            FileStatus srcFstatus = null;
            FileStatus dstFstatus = null;
            try {
                srcFstatus = fs.getFileStatus(srcs[i]);
            } catch (FileNotFoundException e) {
                throw new FileNotFoundException(srcs[i] + ": No such file or directory");
            }
            try {
                dstFstatus = fs.getFileStatus(dst);
            } catch (IOException e) {
                // Nothing
            }
            if ((srcFstatus != null) && (dstFstatus != null)) {
                if (srcFstatus.isDirectory() && !dstFstatus.isDirectory()) {
                    throw new ServiceException(
                            "cannot overwrite non directory " + dst + " with directory " + srcs[i]);
                }
            }
            throw new ServiceException("Failed to rename " + srcs[i] + " to " + dst);
        }
    }
}
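
A hypothetical call to the helper above, with illustrative paths:

    HdfsUtils.move("/data/incoming/part-*", "/data/archive", FileSystem.get(new Configuration()));

Because the source is expanded with globStatus, a pattern that matches more than one file requires the target to be an existing directory.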

From source file:org.godhuli.rhipe.FileUtils.java

License:Apache License

public void hdfsrename(REXP rexp0) throws Exception {
    REXP spaths = rexp0.getRexpValue(0);
    REXP tpaths = rexp0.getRexpValue(1);
    int np = spaths.getRexpValueCount();
    FileSystem fs = FileSystem.get(getConf());
    for (int i = 0; i < np; i++) {
        String s = spaths.getStringValue(i).getStrval();
        String t = tpaths.getStringValue(i).getStrval();
        Path dstPath = new Path(t);
        Path srcPath = new Path(s);
        if (!fs.rename(srcPath, dstPath))
            throw new Exception("Error renaming " + s);
    }
}

From source file:org.kitesdk.data.spi.filesystem.FileSystemUtil.java

License:Apache License

static List<Pair<Path, Path>> stageMove(FileSystem fs, Path src, Path dest, String ext) {
    List<Pair<Path, Path>> staged;

    try {
        // make sure the destination exists
        if (!fs.exists(dest)) {
            fs.mkdirs(dest);
        }

        FileStatus[] stats = fs.listStatus(src);
        staged = Lists.newArrayList();

        for (FileStatus stat : stats) {
            if (stat.isDir()) {
                continue;
            }

            Path srcFile = stat.getPath();
            Path dotFile = new Path(dest, "." + srcFile.getName() + "." + ext);
            Path destFile = new Path(dest, srcFile.getName());

            if (fs.rename(srcFile, dotFile)) {
                staged.add(Pair.of(dotFile, destFile));
            } else {
                throw new IOException("Failed to rename " + srcFile + " to " + dotFile);
            }
        }

    } catch (IOException e) {
        throw new DatasetIOException("Could not move contents of " + src + " to " + dest, e);
    }

    return staged;
}

From source file:org.kitesdk.data.spi.filesystem.FileSystemUtil.java

License:Apache License

static void finishMove(FileSystem fs, List<Pair<Path, Path>> staged) {
    try {
        for (Pair<Path, Path> pair : staged) {
            if (!fs.rename(pair.first(), pair.second())) {
                throw new IOException("Failed to rename " + pair.first() + " to " + pair.second());
            }
        }
    } catch (IOException e) {
        throw new DatasetIOException("Could not finish replacement", e);
    }
}
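
stageMove and finishMove form a two-phase move: files are first renamed to hidden dot-file names inside the destination directory, then renamed to their final names. A hypothetical caller would chain the two (the "tmp" extension is illustrative):

    finishMove(fs, stageMove(fs, src, dest, "tmp"));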

From source file:org.kitesdk.data.spi.filesystem.FileSystemUtil.java

License:Apache License

/**
 * Replace {@code destination} with {@code replacement}.
 * <p>
 * If this method fails in any step, recover using these steps:
 * <ol>
 * <li>If {@code .name.replacement} exists, but {@code name} does not, move
 * it to {@code name}</li>
 * <li>If {@code .name.replacement} and {@code name} exist, run this method
 * again with the same list of additional removals</li>
 * </ol>
 *
 * @param fs the FileSystem
 * @param destination a Path
 * @param replacement a Path that replaces the destination
 * @param removals a List of paths that should also be removed
 */
static void replace(FileSystem fs, Path root, Path destination, Path replacement, List<Path> removals) {
    try {
        // Ensure the destination exists because it acts as a recovery signal. If
        // the directory exists, then recovery must go through the entire
        // replacement process again. If it does not, then the dir can be moved.
        if (!fs.exists(destination)) {
            fs.mkdirs(destination);
        }

        Path staged = new Path(destination.getParent(), "." + destination.getName() + ".replacement");

        // First move into the destination folder to ensure moves work. It is
        // okay to run this method on the staged path
        if (!staged.equals(replacement) && !fs.rename(replacement, staged)) {
            throw new IOException("Failed to rename " + replacement + " to " + staged);
        }

        // Remove any additional directories included in the replacement. This
        // handles the case where there are multiple directories for the same
        // logical partition. For example, dataset/a=2/ and dataset/2/
        for (Path toRemove : removals) {
            if (toRemove.equals(destination)) {
                // destination is deleted last
                continue;
            }
            FileSystemUtil.cleanlyDelete(fs, root, toRemove);
        }

        // remove the directory that will be replaced with a move
        fs.delete(destination, true /* recursively */ );

        // move the replacement to the final location
        if (!fs.rename(staged, destination)) {
            throw new IOException("Failed to rename " + staged + " to " + destination);
        }

    } catch (IOException e) {
        throw new DatasetIOException("Could not replace " + destination + " with " + replacement, e);
    }
}
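
The recovery steps described in the javadoc above can be sketched with plain FileSystem calls. The helper below is illustrative only; it is not part of the Kite SDK, and it assumes it lives in the same class so that it can call replace.

// Hypothetical recovery helper following the javadoc steps of replace().
static void recover(FileSystem fs, Path root, Path destination, List<Path> removals) throws IOException {
    Path staged = new Path(destination.getParent(), "." + destination.getName() + ".replacement");
    if (fs.exists(staged)) {
        if (!fs.exists(destination)) {
            // Step 1: the replacement was staged but never moved into place.
            if (!fs.rename(staged, destination)) {
                throw new IOException("Failed to rename " + staged + " to " + destination);
            }
        } else {
            // Step 2: both exist, so run the full replacement again, this time
            // using the already-staged path as the replacement.
            replace(fs, root, destination, staged, removals);
        }
    }
}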

From source file:org.kitesdk.data.spi.filesystem.TestFileSystemPartitionView.java

License:Apache License

@Test
public void testRestrictedRead() throws IOException {
    FileSystemPartitionView<TestRecord> partition0 = partitioned.getPartitionView(URI.create("id_hash=0"));
    FileSystemPartitionView<TestRecord> partition1 = partitioned.getPartitionView(URI.create("id_hash=1"));
    FileSystemPartitionView<TestRecord> partition2 = partitioned.getPartitionView(URI.create("id_hash=2"));
    FileSystemPartitionView<TestRecord> partition3 = partitioned.getPartitionView(URI.create("id_hash=3"));

    int count0 = DatasetTestUtilities.materialize(partition0).size();
    int total = DatasetTestUtilities.materialize(partitioned).size();
    Assert.assertTrue("Should read some records", count0 > 0);
    Assert.assertTrue("Should not read the entire dataset", count0 < total);

    // move other partitions so they match the partition0 constraint
    FileSystem local = LocalFileSystem.getInstance();
    local.rename(new Path(partition1.getLocation()), new Path(partitioned.getDirectory(), "0"));
    local.rename(new Path(partition2.getLocation()), new Path(partitioned.getDirectory(), "hash=0"));
    local.rename(new Path(partition3.getLocation()), new Path(partitioned.getDirectory(), "id_hash=00"));

    int newCount0 = DatasetTestUtilities.materialize(partition0).size();
    Assert.assertEquals("Should match original count", count0, newCount0);

    int countByConstraints = DatasetTestUtilities.materialize(partition0.toConstraintsView()).size();
    Assert.assertEquals("Should match total count", total, countByConstraints);
}

From source file:org.kitesdk.data.spi.filesystem.TestFileSystemPartitionView.java

License:Apache License

@Test
public void testRestrictedDelete() throws IOException {
    FileSystemPartitionView<TestRecord> partition0 = partitioned.getPartitionView(URI.create("id_hash=0"));
    FileSystemPartitionView<TestRecord> partition1 = partitioned.getPartitionView(URI.create("id_hash=1"));
    FileSystemPartitionView<TestRecord> partition2 = partitioned.getPartitionView(URI.create("id_hash=2"));
    FileSystemPartitionView<TestRecord> partition3 = partitioned.getPartitionView(URI.create("id_hash=3"));

    int count0 = DatasetTestUtilities.materialize(partition0).size();
    int total = DatasetTestUtilities.materialize(partitioned).size();
    Assert.assertTrue("Should read some records", count0 > 0);
    Assert.assertTrue("Should not read the entire dataset", count0 < total);

    // move other partitions so they match the partition0 constraint
    FileSystem local = LocalFileSystem.getInstance();
    local.rename(new Path(partition1.getLocation()), new Path(partitioned.getDirectory(), "0"));
    local.rename(new Path(partition2.getLocation()), new Path(partitioned.getDirectory(), "hash=0"));
    local.rename(new Path(partition3.getLocation()), new Path(partitioned.getDirectory(), "id_hash=00"));

    Assert.assertEquals("Constraints should match all 4 directories", total,
            DatasetTestUtilities.materialize(partition0.toConstraintsView()).size());

    partition0.deleteAll();

    int newCount0 = DatasetTestUtilities.materialize(partition0).size();
    Assert.assertEquals("Should have removed all records in id_hash=0", 0, newCount0);

    Assert.assertTrue("Should not have deleted other directories",
            local.exists(new Path(partitioned.getDirectory(), "0")));
    Assert.assertTrue("Should not have deleted other directories",
            local.exists(new Path(partitioned.getDirectory(), "hash=0")));
    Assert.assertTrue("Should not have deleted other directories",
            local.exists(new Path(partitioned.getDirectory(), "id_hash=00")));

    Assert.assertEquals("Should match total without deleted data", total - count0,
            DatasetTestUtilities.materialize(partition0.toConstraintsView()).size());

    partitioned.unbounded.deleteAll();

    Assert.assertFalse("Should have deleted all other directories",
            local.exists(new Path(partitioned.getDirectory(), "0")));
    Assert.assertFalse("Should have deleted all other directories",
            local.exists(new Path(partitioned.getDirectory(), "hash=0")));
    Assert.assertFalse("Should have deleted all other directories",
            local.exists(new Path(partitioned.getDirectory(), "id_hash=00")));
}

From source file:org.kitesdk.data.spi.filesystem.TestPartitionReplacement.java

License:Apache License

@Test
public void testReplacePartitionsByConstraints() throws IOException {
    // like testReplaceSinglePartition, this will replace partition0 with temp0
    // but, this will also remove partitions that have equivalent constraints
    // to simulate the case where directories 0, hash_0, id_hash=0, and
    // id_hash=00 are compacted to a single replacement folder

    FileSystemPartitionView<TestRecord> partition0 = partitioned.getPartitionView(new Path("id_hash=0"));
    FileSystemPartitionView<TestRecord> temp0 = temporary.getPartitionView(new Path("id_hash=0"));

    Set<String> replacementFiles = Sets
            .newHashSet(Iterators.transform(temp0.pathIterator(), new GetFilename()));

    // move other partitions so they match the partition0 constraint
    FileSystem local = LocalFileSystem.getInstance();
    local.rename(new Path(partitioned.getDirectory(), "id_hash=1"), new Path(partitioned.getDirectory(), "0"));
    local.rename(new Path(partitioned.getDirectory(), "id_hash=2"),
            new Path(partitioned.getDirectory(), "hash=0"));
    local.rename(new Path(partitioned.getDirectory(), "id_hash=3"),
            new Path(partitioned.getDirectory(), "id_hash=00"));

    Assert.assertTrue("Should allow replacing a single partition", partitioned.canReplace(partition0));
    Assert.assertFalse("Should not allow replacement test with a different dataset",
            partitioned.canReplace(temp0));

    partitioned.replace(partition0, temp0);

    Set<String> replacedFiles = Sets
            .newHashSet(Iterators.transform(partitioned.pathIterator(), new GetFilename()));
    Assert.assertEquals("Should contain the replacement files", replacementFiles, replacedFiles);

    Iterator<Path> dirIterator = partitioned.dirIterator();
    Path onlyDirectory = dirIterator.next();
    Assert.assertFalse("Should contain only one directory", dirIterator.hasNext());
    Assert.assertEquals("Should have the correct directory name", "id_hash=0", onlyDirectory.getName());
}

From source file:org.kitesdk.data.spi.filesystem.TestPartitionReplacement.java

License:Apache License

@Test
public void testReplacePartitionsByConstraintsWithoutOriginal() throws IOException {
    // like testReplacePartitionsByConstraints, but the target partition does
    // not exist

    FileSystemPartitionView<TestRecord> temp0 = temporary.getPartitionView(new Path("id_hash=0"));

    Set<String> replacementFiles = Sets
            .newHashSet(Iterators.transform(temp0.pathIterator(), new GetFilename()));

    // move other partitions so they match the partition0 constraint
    FileSystem local = LocalFileSystem.getInstance();
    local.rename(new Path(partitioned.getDirectory(), "id_hash=0"),
            new Path(partitioned.getDirectory(), "id-hash=0"));
    local.rename(new Path(partitioned.getDirectory(), "id_hash=1"), new Path(partitioned.getDirectory(), "0"));
    local.rename(new Path(partitioned.getDirectory(), "id_hash=2"),
            new Path(partitioned.getDirectory(), "hash=0"));
    local.rename(new Path(partitioned.getDirectory(), "id_hash=3"),
            new Path(partitioned.getDirectory(), "id_hash=00"));

    FileSystemPartitionView<TestRecord> partition0 = partitioned.getPartitionView(new Path("id-hash=0"));

    Assert.assertTrue("Should allow replacing a single partition", partitioned.canReplace(partition0));
    Assert.assertFalse("Should not allow replacement test with a different dataset",
            partitioned.canReplace(temp0));

    partitioned.replace(partition0, temp0);

    Set<String> replacedFiles = Sets
            .newHashSet(Iterators.transform(partitioned.pathIterator(), new GetFilename()));
    Assert.assertEquals("Should contain the replacement files", replacementFiles, replacedFiles);

    Iterator<Path> dirIterator = partitioned.dirIterator();
    Path onlyDirectory = dirIterator.next();
    Assert.assertFalse("Should contain only one directory", dirIterator.hasNext());
    Assert.assertEquals("Should have the correct directory name", "id_hash=0", onlyDirectory.getName());
}