Example usage for org.apache.commons.vfs2 FileObject delete


Introduction

This page lists example usages of the org.apache.commons.vfs2 FileObject.delete() method, drawn from open source projects.

Prototype

boolean delete() throws FileSystemException;

Document

Deletes this file. Does nothing if the file does not exist or if it is a folder that has children (descendants are not deleted); returns true if the file was deleted.
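
As a minimal, self-contained sketch of the call (the local path "file:///tmp/example.txt" is purely illustrative): the default VFS manager resolves a FileObject, and delete() returns true only if the file was actually removed.

import org.apache.commons.vfs2.FileObject;
import org.apache.commons.vfs2.FileSystemException;
import org.apache.commons.vfs2.FileSystemManager;
import org.apache.commons.vfs2.VFS;

public class DeleteExample {
    public static void main(String[] args) throws FileSystemException {
        // Obtain the default VFS manager and resolve a (hypothetical) local file.
        FileSystemManager manager = VFS.getManager();
        FileObject file = manager.resolveFile("file:///tmp/example.txt");

        // delete() returns true only if the file was actually removed.
        boolean deleted = file.delete();
        System.out.println("Deleted: " + deleted);

        // Release resources held by the file object.
        file.close();
    }
}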

Usage

From source file:org.obiba.opal.core.service.ProjectsServiceImpl.java

private void deleteFolder(FileObject folder) throws FileSystemException {
    if (!folder.isWriteable())
        return;

    for (FileObject file : folder.getChildren()) {
        if (file.getType() == FileType.FOLDER) {
            deleteFolder(file);
        } else if (file.isWriteable()) {
            file.delete();
        }
    }
    if (folder.getChildren().length == 0) {
        folder.delete();
    }
}
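
The manual recursion above deletes only writable children and then removes the folder once it is empty. When that writability check is not required, Commons VFS can remove a folder together with all of its descendants in a single call to delete(FileSelector), as in the following brief sketch (the folder path is purely illustrative):

import org.apache.commons.vfs2.FileObject;
import org.apache.commons.vfs2.FileSystemException;
import org.apache.commons.vfs2.Selectors;
import org.apache.commons.vfs2.VFS;

public class RecursiveDeleteExample {
    public static void main(String[] args) throws FileSystemException {
        // Resolve a (hypothetical) folder and delete it with all of its descendants.
        FileObject folder = VFS.getManager().resolveFile("file:///tmp/example-folder");
        int deleted = folder.delete(Selectors.SELECT_ALL);
        System.out.println("Deleted " + deleted + " file(s)/folder(s)");
    }
}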

From source file:org.obiba.opal.shell.commands.ReportCommand.java

private void deleteFileSilently(FileObject file) {
    try {
        if (file.exists()) {
            file.delete();
        }
    } catch (FileSystemException ex) {
        log.error("Could not delete file: {}", file.getName().getPath());
    }
}

From source file:org.obiba.opal.web.FilesResource.java

@DELETE
@Path("/{path:.*}")
public Response deleteFile(@PathParam("path") String path) throws FileSystemException {
    FileObject file = resolveFileInFileSystem(path);

    // File or folder does not exist.
    if (!file.exists()) {
        return getPathNotExistResponse(path);
    }

    // Read-only file or folder.
    if (!file.isWriteable()) {
        return Response.status(Status.FORBIDDEN).entity("cannotDeleteReadOnlyFile").build();
    }

    try {
        if (file.getType() == FileType.FOLDER) {
            deleteFolder(file);
        } else {
            file.delete();
        }
        subjectAclService.deleteNodePermissions("/files/" + path);
        return Response.ok("The following file or folder has been deleted : " + path).build();
    } catch (FileSystemException couldNotDeleteFile) {
        return Response.status(Status.INTERNAL_SERVER_ERROR).entity("couldNotDeleteFileError").build();
    }
}

From source file:org.obiba.opal.web.FilesResource.java

/**
 * Delete writable folder and sub-folders.
 *
 * @param folder
 * @throws FileSystemException
 */
private void deleteFolder(FileObject folder) throws FileSystemException {
    if (!folder.isWriteable())
        return;

    FileObject[] files = folder.getChildren();
    for (FileObject file : files) {
        if (file.getType() == FileType.FOLDER) {
            deleteFolder(file);
        } else if (file.isWriteable()) {
            file.delete();
        }
    }
    if (folder.getChildren().length == 0) {
        folder.delete();
    }
}

From source file:org.obiba.opal.web.FilesResourceTest.java

@After
public void tearDown() throws FileSystemException {
    // Delete any files created by the test.
    for (String filePath : filesCreatedByTest) {
        FileObject file = fileSystem.getRoot().resolveFile(filePath);
        if (file.exists()) {
            file.delete();
        }
    }
}

From source file:org.ow2.proactive_grid_cloud_portal.dataspace.RestDataspaceImpl.java

/**
 * Delete file(s) from the specified location in the <i>dataspace</i>. The
 * format of the DELETE URI is:
 * <p>
 * {@code http://<rest-server-path>/data/<dataspace>/<path-name>}
 * <p>
 * Example:
 * {@code http://localhost:8080/rest/rest/data/user/my-files/my-text-file.txt}
 * <ul>
 * <li>dataspace: can have two possible values, 'user' or 'global',
 * depending on the target <i>DATASPACE</i></li>
 * <li>path-name: location of the file(s) to be deleted.</li>
 * </ul>
 * <b>Notes:</b>
 * <ul>
 * <li>Only empty directories can be deleted.</li>
 * <li>File names or regular expressions can be used as 'includes' and
 * 'excludes' query parameters to select which files should be deleted
 * inside the specified directory (path-name).</li>
 * </ul>
 *
 */
@DELETE
@Path("/{dataspace}/{path-name:.*}")
public Response delete(@HeaderParam("sessionid") String sessionId, @PathParam("dataspace") String dataspace,
        @PathParam("path-name") String pathname, @QueryParam("includes") List<String> includes,
        @QueryParam("excludes") List<String> excludes)
        throws NotConnectedRestException, PermissionRestException {
    checkPathParams(dataspace, pathname);
    Session session = checkSessionValidity(sessionId);

    try {
        FileObject fo = resolveFile(session, dataspace, pathname);

        if (!fo.exists()) {
            return Response.status(Response.Status.NO_CONTENT).build();
        }
        if (fo.getType() == FileType.FOLDER) {
            logger.debug(String.format("Deleting directory %s in %s", pathname, dataspace));
            return deleteDir(fo, includes, excludes);
        } else {
            logger.debug(String.format("Deleting file %s in %s", pathname, dataspace));
            fo.close();
            return fo.delete() ? noContentRes() : serverErrorRes("Cannot delete the file: %s", pathname);
        }
    } catch (Throwable error) {
        logger.error(String.format("Cannot delete %s in %s.", pathname, dataspace), error);
        throw rethrow(error);
    }
}

From source file:org.ow2.proactive_grid_cloud_portal.scheduler.SchedulerStateRest.java

/**
 * Pushes a file from the local file system into the given DataSpace
 *
 * @param sessionId
 *            a valid session id
 * @param spaceName
 *            the name of the DataSpace
 * @param filePath
 *            the path inside the DataSpace where to put the file e.g.
 *            "/myfolder"
 * @param multipart
 *            the form data containing : - fileName the name of the file
 *            that will be created on the DataSpace - fileContent the
 *            content of the file
 * @return true if the transfer succeeded
 * @see org.ow2.proactive.scheduler.common.SchedulerConstants for spaces
 *      names
 **/
@Override
public boolean pushFile(@HeaderParam("sessionid") String sessionId, @PathParam("spaceName") String spaceName,
        @PathParam("filePath") String filePath, MultipartFormDataInput multipart)
        throws IOException, NotConnectedRestException, PermissionRestException {
    checkAccess(sessionId, "pushFile");

    Session session = dataspaceRestApi.checkSessionValidity(sessionId);

    Map<String, List<InputPart>> formDataMap = multipart.getFormDataMap();

    List<InputPart> fNL = formDataMap.get("fileName");
    if ((fNL == null) || (fNL.size() == 0)) {
        throw new IllegalArgumentException("Illegal multipart argument definition (fileName), received " + fNL);
    }
    String fileName = fNL.get(0).getBody(String.class, null);

    List<InputPart> fCL = formDataMap.get("fileContent");
    if ((fCL == null) || (fCL.size() == 0)) {
        throw new IllegalArgumentException(
                "Illegal multipart argument definition (fileContent), received " + fCL);
    }
    InputStream fileContent = fCL.get(0).getBody(InputStream.class, null);

    if (fileName == null) {
        throw new IllegalArgumentException("Wrong file name : " + fileName);
    }

    filePath = normalizeFilePath(filePath, fileName);

    FileObject destfo = dataspaceRestApi.resolveFile(session, spaceName, filePath);

    URL targetUrl = destfo.getURL();
    logger.info("[pushFile] pushing file to " + targetUrl);

    if (!destfo.isWriteable()) {
        RuntimeException ex = new IllegalArgumentException(
                "File " + filePath + " is not writable in space " + spaceName);
        logger.error(ex);
        throw ex;
    }
    if (destfo.exists()) {
        destfo.delete();
    }
    // used to create the necessary directories if needed
    destfo.createFile();

    dataspaceRestApi.writeFile(fileContent, destfo, null);

    return true;
}

From source file:org.ow2.proactive_grid_cloud_portal.scheduler.SchedulerStateRest.java

/**
 * Deletes a file or recursively deletes a directory from the given DataSpace.
 *
 * @param sessionId
 *            a valid session id
 * @param spaceName
 *            the name of the data space involved (GLOBAL or USER)
 * @param filePath
 *            the path to the file or directory which must be deleted
 **/
@Override
public boolean deleteFile(@HeaderParam("sessionid") String sessionId, @PathParam("spaceName") String spaceName,
        @PathParam("filePath") String filePath)
        throws IOException, NotConnectedRestException, PermissionRestException {
    checkAccess(sessionId, "deleteFile");

    Session session = dataspaceRestApi.checkSessionValidity(sessionId);

    filePath = normalizeFilePath(filePath, null);

    FileObject sourcefo = dataspaceRestApi.resolveFile(session, spaceName, filePath);

    if (!sourcefo.exists() || !sourcefo.isWriteable()) {
        RuntimeException ex = new IllegalArgumentException(
                "File or Folder " + filePath + " does not exist or is not writable in space " + spaceName);
        logger.error(ex);
        throw ex;
    }
    if (sourcefo.getType().equals(FileType.FILE)) {
        logger.info("[deleteFile] deleting file " + sourcefo.getURL());
        sourcefo.delete();
    } else if (sourcefo.getType().equals(FileType.FOLDER)) {
        logger.info("[deleteFile] deleting folder (and all its descendants) " + sourcefo.getURL());
        sourcefo.delete(Selectors.SELECT_ALL);
    } else {
        RuntimeException ex = new IllegalArgumentException(
                "File " + filePath + " has an unsupported type " + sourcefo.getType());
        logger.error(ex);
        throw ex;
    }
    return true;
}

From source file:org.pentaho.di.cluster.PartitioningIT.java

/**
 * This test reads a CSV file in parallel on the cluster, one copy per slave.<br>
 * It then partitions the data on id in 12 partitions (4 per slave) and keeps the data partitioned until written to
 * file.<br>
 * As such we expect 12 files on disk.<br>
 * File: "partitioning-swimming-lanes-on-cluster.ktr"<br>
 */
public void testPartitioningSwimmingLanesOnCluster() throws Exception {
    init();

    ClusterGenerator clusterGenerator = new ClusterGenerator();
    try {
        clusterGenerator.launchSlaveServers();

        TransMeta transMeta = loadAndModifyTestTransformation(clusterGenerator,
                "src/it/resources/org/pentaho/di/cluster/partitioning-swimming-lanes-on-cluster.ktr");
        TransExecutionConfiguration config = createClusteredTransExecutionConfiguration();
        TransSplitter transSplitter = Trans.executeClustered(transMeta, config);
        long nrErrors = Trans.monitorClusteredTransformation(
                new LogChannel("cluster unit test <testParallelFileReadOnMaster>"), transSplitter, null, 1);
        assertEquals(0L, nrErrors);

        String[] results = new String[] { "8", "9", "9", "9", "9", "8", "8", "8", "8", "8", "8", "8", };
        String[] files = new String[] { "000", "001", "002", "003", "004", "005", "006", "007", "008", "009",
                "010", "011", };
        for (int i = 0; i < results.length; i++) {
            String filename = "${java.io.tmpdir}/partitioning-swimming-lanes-on-cluster-" + files[i] + ".txt";
            String result = loadFileContent(transMeta, filename);
            assertEqualsIgnoreWhitespacesAndCase(results[i], result);

            // Remove the output file : we don't want to leave too much clutter around
            //
            FileObject file = KettleVFS.getFileObject(transMeta.environmentSubstitute(filename));
            file.delete();
        }

    } catch (Exception e) {
        e.printStackTrace();
        fail(e.toString());
    } finally {
        try {
            clusterGenerator.stopSlaveServers();
        } catch (Exception e) {
            e.printStackTrace();
            fail(e.toString());
        }
    }
}

From source file:org.pentaho.di.cluster.PartitioningIT.java

/**
 * This test reads a CSV file in parallel on the cluster, one copy per slave.<br>
 * It then partitions the data on id in 12 partitions (4 per slave).<br>
 * After that it re-partitions the data in 9 partitions (3 per slave).<br>
 * As such we expect 9 result files on disk.<br>
 * File: "partitioning-repartitioning-on-cluster.ktr"<br>
 */
public void testPartitioningRepartitioningOnCluster() throws Exception {
    init();

    ClusterGenerator clusterGenerator = new ClusterGenerator();
    try {
        clusterGenerator.launchSlaveServers();

        TransMeta transMeta = loadAndModifyTestTransformation(clusterGenerator,
                "src/it/resources/org/pentaho/di/cluster/partitioning-repartitioning-on-cluster.ktr");
        TransExecutionConfiguration config = createClusteredTransExecutionConfiguration();
        TransSplitter transSplitter = Trans.executeClustered(transMeta, config);
        long nrErrors = Trans.monitorClusteredTransformation(
                new LogChannel("cluster unit test <testParallelFileReadOnMaster>"), transSplitter, null, 1);
        assertEquals(0L, nrErrors);

        String[] results = new String[] { "8", "9", "9", "9", "9", "8", "8", "8", "8", "8", "8", "8", };
        String[] files = new String[] { "000", "001", "002", "003", "004", "005", "006", "007", "008", "009",
                "010", "011", };
        for (int i = 0; i < results.length; i++) {
            String filename = "${java.io.tmpdir}/partitioning-repartitioning-on-cluster-" + files[i] + ".txt";
            String result = loadFileContent(transMeta, filename);
            assertEqualsIgnoreWhitespacesAndCase(results[i], result);

            // Remove the output file : we don't want to leave too much clutter around
            //
            FileObject file = KettleVFS.getFileObject(transMeta.environmentSubstitute(filename));
            file.delete();
        }

    } catch (Exception e) {
        e.printStackTrace();
        fail(e.toString());
    } finally {
        try {
            clusterGenerator.stopSlaveServers();
        } catch (Exception e) {
            e.printStackTrace();
            fail(e.toString());
        }
    }
}