Example usage for org.apache.hadoop.fs Trash moveToTrash

Introduction

On this page you can find example usages of org.apache.hadoop.fs.Trash#moveToTrash drawn from open-source projects.

Prototype

public boolean moveToTrash(Path path) throws IOException 

Document

Move a file or directory to the current trash directory.
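
Before the project examples below, here is a minimal self-contained sketch of the typical call pattern (the class name and path are illustrative, not taken from any of the projects):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;

public class MoveToTrashSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path target = new Path("/tmp/obsolete-data"); // hypothetical path

        Trash trash = new Trash(fs, conf);
        // moveToTrash returns false when trash is disabled
        // (fs.trash.interval = 0) or the path is already in the trash.
        if (trash.moveToTrash(target)) {
            System.out.println("Moved to trash: " + target);
        } else if (fs.delete(target, true)) {
            System.out.println("Deleted permanently: " + target);
        }
    }
}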

Usage

From source file:com.revolutionanalytics.hadoop.hdfs.FileUtils.java

License:Apache License

private static void delete(Configuration cfg, FileSystem srcFS, Path src, boolean recursive)
        throws IOException {
    Trash trashTmp = new Trash(srcFS, cfg);
    if (trashTmp.moveToTrash(src)) {
        System.out.println("Moved to trash: " + src);
        return;
    }
    if (srcFS.delete(src, true)) {
        System.out.println("Deleted " + src);
    } else {
        if (!srcFS.exists(src)) {
            throw new FileNotFoundException("cannot remove " + src + ": No such file or directory.");
        }
        throw new IOException("Delete failed " + src);
    }
}

From source file:com.sensei.indexing.hadoop.reduce.ShardWriter.java

License:Apache License

public static void moveToTrash(Configuration conf, Path path) throws IOException {
    Trash t = new Trash(conf);
    boolean isMoved = t.moveToTrash(path);
    t.expunge();
    if (!isMoved) {
        logger.error("Trash is not enabled or file is already in the trash.");
    }
}
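
The error branch above fires because moveToTrash returns false, rather than throwing, when the move cannot happen. The feature is governed by fs.trash.interval (retention period in minutes; 0 disables trash). A minimal sketch of enabling it client-side; note that on HDFS the value configured on the NameNode may take precedence depending on the Hadoop version:

Configuration conf = new Configuration();
// Keep trashed files for one day (value in minutes; 0 disables trash).
conf.setLong("fs.trash.interval", 1440);
Trash trash = new Trash(FileSystem.get(conf), conf);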

From source file:gobblin.data.management.trash.MockTrashTest.java

License:Apache License

@Test
public void MockTrashTest() throws IOException {

    FileSystem fs = mock(FileSystem.class);

    Path homeDirectory = new Path("/home/directory");
    when(fs.getHomeDirectory()).thenReturn(homeDirectory);
    when(fs.makeQualified(any(Path.class))).thenAnswer(new Answer<Path>() {
        @Override
        public Path answer(InvocationOnMock invocation) throws Throwable {
            return (Path) invocation.getArguments()[0];
        }
    });

    Trash trash = new MockTrash(fs, new Properties(), "user");

    Assert.assertTrue(trash.moveToTrash(new Path("/some/path")));

    verify(fs).getHomeDirectory();
    verify(fs).makeQualified(any(Path.class));
    verifyNoMoreInteractions(fs);

}

From source file:org.apache.accumulo.server.fs.VolumeManagerImpl.java

License:Apache License

@Override
public boolean moveToTrash(Path path) throws IOException {
    FileSystem fs = getVolumeByPath(path).getFileSystem();
    Trash trash = new Trash(fs, fs.getConf());
    return trash.moveToTrash(path);
}
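
Because Accumulo can spread files across multiple volumes, this override first resolves the filesystem that owns the path (getVolumeByPath) and builds the Trash against it, so each file lands in the trash directory of its own volume.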

From source file:org.apache.gobblin.util.HadoopUtils.java

License:Apache License

/**
 * Moves the object to the filesystem trash according to the file system policy.
 * @param fs FileSystem object
 * @param path Path to the object to be moved to trash.
 * @throws IOException
 */
public static void moveToTrash(FileSystem fs, Path path) throws IOException {
    Trash trash = new Trash(fs, new Configuration());
    trash.moveToTrash(path);
}
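
Note that this helper constructs the Trash from a fresh Configuration rather than from fs.getConf(), so the trash interval actually applied can differ from the one the FileSystem was created with.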

From source file:org.apache.gobblin.util.HadoopUtilsTest.java

License:Apache License

@Test
public void testMoveToTrash() throws IOException {
    Path hadoopUtilsTestDir = new Path(Files.createTempDir().getAbsolutePath(), "HadoopUtilsTestDir");
    Configuration conf = new Configuration();
    // Keep trashed objects for 10 minutes; an interval of 0 would delete them instantly.
    conf.set("fs.trash.interval", "10");
    FileSystem fs = FileSystem.getLocal(conf);
    Trash trash = new Trash(fs, conf);
    TrashPolicy trashPolicy = TrashPolicy.getInstance(conf, fs, fs.getHomeDirectory());
    Path trashPath = trashPolicy.getCurrentTrashDir();

    fs.mkdirs(hadoopUtilsTestDir);
    Assert.assertTrue(fs.exists(hadoopUtilsTestDir));
    trash.moveToTrash(hadoopUtilsTestDir.getParent());
    Assert.assertFalse(fs.exists(hadoopUtilsTestDir));
    Assert.assertTrue(fs.exists(trashPath));
}

From source file:org.apache.oozie.action.hadoop.FsActionExecutor.java

License:Apache License

/**
 * Delete path
 *
 * @param context
 * @param fsConf
 * @param nameNodePath
 * @param path
 * @param skipTrash whether to bypass the trash and delete permanently
 * @throws ActionExecutorException
 */
public void delete(Context context, XConfiguration fsConf, Path nameNodePath, Path path, boolean skipTrash)
        throws ActionExecutorException {
    URI uri = path.toUri();
    URIHandler handler;
    try {
        handler = Services.get().get(URIHandlerService.class).getURIHandler(uri);
        if (handler instanceof FSURIHandler) {
            // Use legacy code to handle hdfs partition deletion
            path = resolveToFullPath(nameNodePath, path, true);
            final FileSystem fs = getFileSystemFor(path, context, fsConf);
            Path[] pathArr = FileUtil.stat2Paths(fs.globStatus(path));
            if (pathArr != null && pathArr.length > 0) {
                checkGlobMax(pathArr);
                for (final Path p : pathArr) {
                    if (fs.exists(p)) {
                        if (!skipTrash) {
                            // Moving directory/file to trash of user.
                            UserGroupInformationService ugiService = Services.get()
                                    .get(UserGroupInformationService.class);
                            UserGroupInformation ugi = ugiService
                                    .getProxyUser(fs.getConf().get(OozieClient.USER_NAME));
                            ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
                                @Override
                                public FileSystem run() throws Exception {
                                    Trash trash = new Trash(fs.getConf());
                                    if (!trash.moveToTrash(p)) {
                                        throw new ActionExecutorException(
                                                ActionExecutorException.ErrorType.ERROR, "FS005",
                                                "Could not move path [{0}] to trash on delete", p);
                                    }
                                    return null;
                                }
                            });
                        } else if (!fs.delete(p, true)) {
                            throw new ActionExecutorException(ActionExecutorException.ErrorType.ERROR, "FS005",
                                    "delete, path [{0}] could not delete path", p);
                        }
                    }
                }
            }
        } else {
            handler.delete(uri, handler.getContext(uri, fsConf, context.getWorkflow().getUser(), false));
        }
    } catch (Exception ex) {
        throw convertException(ex);
    }
}
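
The move runs inside ugi.doAs(...) so that it executes as the proxied workflow user; trashed files therefore land in that user's trash directory rather than in the Oozie server user's.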

From source file:org.godhuli.rhipe.FileUtils.java

License:Apache License

private void delete(Path src, FileSystem srcFs, boolean recursive) throws IOException {
    if (srcFs.isDirectory(src) && !recursive) {
        throw new IOException("Cannot remove directory \"" + src + "\", use -rmr instead");
    }
    Trash trashTmp = new Trash(srcFs, getConf());
    if (trashTmp.moveToTrash(src)) {
        System.out.println("Moved to trash: " + src);
        return;
    }
    if (srcFs.delete(src, true)) {
        System.out.println("Deleted " + src);
    } else {
        if (!srcFs.exists(src)) {
            throw new FileNotFoundException("cannot remove " + src + ": No such file or directory.");
        }
        throw new IOException("Delete failed " + src);
    }
}

From source file:org.springframework.data.hadoop.fs.FsShell.java

License:Apache License

public void rm(boolean recursive, boolean skipTrash, String... uris) {
    for (String uri : uris) {
        try {
            Path src = new Path(uri);
            FileSystem srcFs = getFS(src);

            for (Path p : FileUtil.stat2Paths(srcFs.globStatus(src), src)) {
                FileStatus status = srcFs.getFileStatus(p);
                if (status.isDir() && !recursive) {
                    throw new IllegalStateException(
                            "Cannot remove directory \"" + src + "\"; recursive deletion was not specified");
                }
                if (!skipTrash) {
                    try {
                        Trash trashTmp = new Trash(srcFs, configuration);
                        trashTmp.moveToTrash(p);
                    } catch (IOException ex) {
                        throw new HadoopException("Cannot move to Trash resource " + p, ex);
                    }
                }
                srcFs.delete(p, recursive);
            }
        } catch (IOException ex) {
            throw new HadoopException("Cannot delete (all) resources " + ex.getMessage(), ex);
        }
    }
}
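
When the move to trash succeeds, the following srcFs.delete(p, recursive) is effectively a no-op, since the path no longer exists; the delete only takes effect when the move failed or skipTrash was requested.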

From source file:org.springframework.data.hadoop.impala.hdfs.FsShellCommands.java

License:Apache License

@CliCommand(value = PREFIX + "rm", help = "Remove files from HDFS")
public void rm(@CliOption(key = {
        "" }, mandatory = false, specifiedDefaultValue = ".", unspecifiedDefaultValue = ".", help = "path to be removed") final String path,
        @CliOption(key = {
                "skipTrash" }, mandatory = false, specifiedDefaultValue = "true", unspecifiedDefaultValue = "false", help = "whether to skip the trash") final boolean skipTrash,
        @CliOption(key = {
                "recursive" }, mandatory = false, specifiedDefaultValue = "true", unspecifiedDefaultValue = "false", help = "whether to remove recursively") final boolean recursive) {
    try {
        Path file = new Path(path);
        FileSystem fs = file.getFileSystem(getHadoopConfiguration());
        for (Path p : FileUtil.stat2Paths(fs.globStatus(file), file)) {
            FileStatus status = fs.getFileStatus(p);
            if (status.isDir() && !recursive) {
                LOG.severe("To remove a directory, use fs rm --recursive instead");
                return;
            }
            if (!skipTrash) {
                Trash trash = new Trash(fs, getHadoopConfiguration());
                trash.moveToTrash(p);
            }
            fs.delete(p, recursive);
        }
    } catch (Throwable t) {
        LOG.severe("HDFS shell command failed. Message is: " + t.getMessage());
    }

}