Example usage for org.apache.commons.vfs2 FileSystemException FileSystemException

List of usage examples for org.apache.commons.vfs2 FileSystemException FileSystemException

Introduction

On this page you can find an example usage of the org.apache.commons.vfs2 FileSystemException constructor.

Prototype

public FileSystemException(final Throwable throwable) 

Source Link

Document

Constructs wrapper exception.

Usage

From source file:org.pentaho.amazon.s3.S3NVfsFileChooserDialog.java

/**
 * Resolves the given file URI into a {@link FileObject} through Kettle's VFS
 * layer, honoring the supplied file system options.
 *
 * @param fileUri URI of the file to resolve
 * @param opts    file system options applied during resolution
 * @return the resolved file object
 * @throws FileSystemException if the Kettle VFS lookup fails
 */
public FileObject resolveFile(String fileUri, FileSystemOptions opts) throws FileSystemException {
    try {
        return KettleVFS.getFileObject(fileUri, getVariableSpace(), opts);
    } catch (KettleFileException kettleFailure) {
        // Translate the Kettle-specific failure into the VFS exception type this API declares.
        throw new FileSystemException(kettleFailure);
    }
}

From source file:org.pentaho.amazon.s3.S3NVfsFileChooserDialog.java

/**
 * Builds the {@link FileSystemOptions} used to reach S3, attaching a static
 * user authenticator when AWS credentials are available.
 *
 * @return options, with credentials configured when the provider supplied them
 * @throws FileSystemException if AWS credential lookup fails
 */
private FileSystemOptions getFileSystemOptions() throws FileSystemException {
    final FileSystemOptions options = new FileSystemOptions();
    try {
        final AWSCredentials awsCredentials = S3CredentialsProvider.getAWSCredentials();
        if (awsCredentials == null) {
            // No credentials available: hand back bare options unchanged.
            return options;
        }
        final StaticUserAuthenticator authenticator = new StaticUserAuthenticator(null,
                awsCredentials.getAWSAccessKeyId(), awsCredentials.getAWSSecretKey());
        DefaultFileSystemConfigBuilder.getInstance().setUserAuthenticator(options, authenticator);
    } catch (SdkClientException sdkFailure) {
        // Surface the AWS SDK failure under the VFS exception contract.
        throw new FileSystemException(sdkFailure);
    }
    return options;
}

From source file:org.pentaho.big.data.impl.vfs.hdfs.HDFSFileProvider.java

/**
 * Creates an {@link HDFSFileSystem} for the given root name, deriving the HDFS
 * host and port from the URI and resolving the cluster through the Hadoop
 * file system locator.
 *
 * @param name              root file name of the file system to create
 * @param fileSystemOptions options for the new file system
 * @return the created HDFS-backed file system
 * @throws FileSystemException if cluster initialization fails
 */
@Override
protected FileSystem doCreateFileSystem(final FileName name, final FileSystemOptions fileSystemOptions)
        throws FileSystemException {
    final GenericFileName rootName = (GenericFileName) name.getRoot();
    // TODO: load from metastore
    final NamedCluster cluster = namedClusterService.getClusterTemplate();
    cluster.setHdfsHost(rootName.getHostName());
    final int port = rootName.getPort();
    // A non-positive port means "unspecified"; the template expects an empty string then.
    cluster.setHdfsPort(port > 0 ? String.valueOf(port) : "");
    cluster.setMapr(MAPRFS.equals(name.getScheme()));
    final String uriText = name.getURI() == null ? "" : name.getURI();
    try {
        return new HDFSFileSystem(name, fileSystemOptions,
                hadoopFileSystemLocator.getHadoopFilesystem(cluster, URI.create(uriText)));
    } catch (ClusterInitializationException initFailure) {
        // Cluster bootstrap failed; rethrow under the VFS exception contract.
        throw new FileSystemException(initFailure);
    }
}

From source file:org.pentaho.big.data.impl.vfs.hdfs.nc.NamedClusterProvider.java

/**
 * Creates a {@link NamedClusterFileSystem} for the given root name. The host
 * portion of the URI is interpreted as a named-cluster name and resolved from
 * the metastore, falling back to the service's cluster template when unknown.
 *
 * @param name              root file name of the file system to create
 * @param fileSystemOptions options for the new file system
 * @return the created named-cluster-backed file system
 * @throws FileSystemException if cluster lookup or initialization fails
 */
@Override
protected FileSystem doCreateFileSystem(FileName name, FileSystemOptions fileSystemOptions)
        throws FileSystemException {
    final GenericFileName rootName = (GenericFileName) name.getRoot();
    final String clusterName = rootName.getHostName();
    final String path = rootName.getPath();
    NamedCluster cluster = getNamedClusterByName(clusterName, fileSystemOptions);
    try {
        if (cluster == null) {
            // Unknown cluster name: fall back to the service's template definition.
            cluster = namedClusterService.getClusterTemplate();
        }
        final String substitutedUrl = cluster.processURLsubstitution(path == null ? "" : path,
                getMetastore(clusterName, fileSystemOptions), new Variables());
        final URI resolvedUri = URI.create(substitutedUrl);

        return new NamedClusterFileSystem(name, resolvedUri, fileSystemOptions,
                hadoopFileSystemLocator.getHadoopFilesystem(cluster, resolvedUri));
    } catch (ClusterInitializationException initFailure) {
        throw new FileSystemException(initFailure);
    }
}

From source file:org.pentaho.big.data.impl.vfs.hdfs.nc.NamedClusterProvider.java

/**
 * package visibility for test purpose only
 * @param clusterNameToResolve - name of namedcluster for resolve namedcluster
 * @param filesSystemOptions - The fileSystemOptions for the file system in play
 * @return named cluster from metastore or null
 * @throws FileSystemException/*from   www. j ava 2  s  . c  o m*/
 */
NamedCluster getNamedClusterByName(String clusterNameToResolve, FileSystemOptions fileSystemOptions)
        throws FileSystemException {
    IMetaStore metaStore = getMetastore(clusterNameToResolve, fileSystemOptions);
    NamedCluster namedCluster = null;
    try {
        if (metaStore != null) {
            namedCluster = namedClusterService.read(clusterNameToResolve, metaStore);
        }
    } catch (MetaStoreException e) {
        throw new FileSystemException(e);
    }
    return namedCluster;
}

From source file:org.pentaho.big.data.kettle.plugins.hdfs.vfs.NamedClusterVfsFileChooserDialog.java

/**
 * Resolves a file URI using a <b>new</b> set of {@link FileSystemOptions}.
 *
 * @param fileUri URI of the file to resolve
 * @return the resolved file object
 * @throws FileSystemException if building options or the Kettle VFS lookup fails
 */
@Override
public FileObject resolveFile(String fileUri) throws FileSystemException {
    // NOTE(review): should this reuse an existing FileSystemOptions instance,
    // or be deprecated in favor of the two-argument overload?
    try {
        return KettleVFS.getFileObject(fileUri, getVariableSpace(), getFileSystemOptions());
    } catch (KettleFileException kettleFailure) {
        throw new FileSystemException(kettleFailure);
    }
}

From source file:org.pentaho.big.data.kettle.plugins.hdfs.vfs.NamedClusterVfsFileChooserDialog.java

/**
 * Resolves the given file URI into a {@link FileObject} via Kettle's VFS
 * layer, using the caller-supplied file system options.
 *
 * @param fileUri URI of the file to resolve
 * @param opts    file system options applied during resolution
 * @return the resolved file object
 * @throws FileSystemException if the Kettle VFS lookup fails
 */
@Override
public FileObject resolveFile(String fileUri, FileSystemOptions opts) throws FileSystemException {
    try {
        return KettleVFS.getFileObject(fileUri, getVariableSpace(), opts);
    } catch (KettleFileException kettleFailure) {
        // Re-wrap under the VFS exception type this override must declare.
        throw new FileSystemException(kettleFailure);
    }
}

From source file:org.pentaho.googledrive.vfs.GoogleDriveFileObject.java

/**
 * Creates a Google Drive backed file object: initializes the trusted HTTP
 * transport and the credential data store, obtains the Drive service, and
 * loads this file's metadata.
 *
 * NOTE(review): HTTP_TRANSPORT and DATA_STORE_FACTORY appear to be fields
 * assigned from an instance constructor — if they are static, concurrent
 * construction may race; confirm against the enclosing class.
 *
 * @param fileName   abstract name of the file within the Drive file system
 * @param fileSystem owning Google Drive file system
 * @throws FileSystemException if any initialization step fails (the original
 *                             exception is wrapped as the cause)
 */
protected GoogleDriveFileObject(final AbstractFileName fileName, final GoogleDriveFileSystem fileSystem)
        throws FileSystemException {
    super(fileName, fileSystem);
    try {
        HTTP_TRANSPORT = GoogleNetHttpTransport.newTrustedTransport();
        DATA_STORE_FACTORY = new CustomDataStoreFactory(DATA_STORE_DIR);
        driveService = getDriveService();
        resolveFileMetadata();
    } catch (Exception e) {
        // Broad catch is deliberate: transport, data store, and Drive API setup
        // throw several unrelated checked exception types.
        throw new FileSystemException(e);
    }
}

From source file:org.pentaho.hadoop.shim.common.DistributedCacheUtilImpl.java

/**
 * Recursively searches for all files starting at the directory provided with the extension provided. If no
 * extension is provided all files will be returned.
 *
 * @param root      Directory to start the search for files in
 * @param extension File extension to search for. If null all files will be returned.
 * @return List of absolute path names to all files found in {@code root} and its subdirectories.
 * @throws FileSystemException if the traversal fails or a found file has a URL that is not a valid URI
 */
public List<String> findFiles(FileObject root, final String extension) throws FileSystemException {
    FileObject[] files = root.findFiles(new FileSelector() {
        @Override
        public boolean includeFile(FileSelectInfo fileSelectInfo) throws Exception {
            // Accept every file when no extension filter was given.
            return extension == null || extension.equals(fileSelectInfo.getFile().getName().getExtension());
        }

        @Override
        public boolean traverseDescendents(FileSelectInfo fileSelectInfo) throws Exception {
            // Only descend into folders.
            return FileType.FOLDER.equals(fileSelectInfo.getFile().getType());
        }
    });

    if (files == null) {
        // Type-safe empty list; the raw Collections.EMPTY_LIST needed an unchecked suppression.
        return Collections.emptyList();
    }

    List<String> paths = new ArrayList<String>();
    for (FileObject file : files) {
        try {
            paths.add(file.getURL().toURI().getPath());
        } catch (URISyntaxException ex) {
            // Preserve the original exception as the cause instead of discarding it.
            throw new FileSystemException("Error getting URI of file: " + file.getURL().getPath(), ex);
        }
    }
    return paths;
}

From source file:org.pentaho.hadoop.shim.HadoopConfigurationFileSystemManager.java

/**
 * Register a file provider for a given scheme as well as an alias for it. The alias will be used to register the
 * provider directly under the scheme: "scheme-alias://..", so it may be directly referenced.
 *
 * @param scheme Scheme to register the provider under (this will be proxied and referenced via the "active hadoop
 *               configuration")/*from   www  . j av  a2 s.c o m*/
 * @param alias  Alias for the provider so a direct reference can be made if desired, "scheme-alias://..."
 * @param p      File provider to register
 * @throws FileSystemException Error registering file provider
 */
public synchronized void addProvider(HadoopConfiguration config, String scheme, String alias, FileProvider p)
        throws FileSystemException {
    ActiveHadoopShimFileProvider provider = providerProxies.get(scheme);
    if (provider == null) {
        provider = new ActiveHadoopShimFileProvider(this, scheme);
        providerProxies.put(scheme, provider);
        // Register a proxying provider
        delegate.addProvider(scheme, provider);
    }

    Map<String, FileProvider> providersForConfig = providersByConfiguration.get(config);
    if (providersForConfig == null) {
        providersForConfig = new HashMap<String, FileProvider>();
        providersByConfiguration.put(config, providersForConfig);
    }
    if (providersForConfig.containsKey(scheme)) {
        throw new FileSystemException(BaseMessages.getString(PKG, "Error.SchemeAlreadyRegistered", scheme));
    }
    providersForConfig.put(scheme, p);

    // Register the real provider under the scheme-alias so we can support talking to more than one provider
    // for the same scheme at the same time: scheme-alias://my/file/path
    delegate.addProvider(scheme + "-" + alias, p);
}