Example usage for org.apache.hadoop.fs FileSystem newInstance

Introduction

On this page you can find example usages of org.apache.hadoop.fs.FileSystem.newInstance.

Prototype

public static FileSystem newInstance(Configuration conf) throws IOException 

Document

Returns a unique configured FileSystem implementation for the default filesystem of the supplied configuration.
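
The key property of newInstance is that it always returns a new FileSystem object, rather than a shared instance from the internal cache that FileSystem.get uses, so the caller owns the instance and is responsible for closing it. A minimal sketch of direct usage (the hdfs://namenode:8020 address is a placeholder):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class NewInstanceExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Placeholder NameNode address; adjust for your cluster.
        conf.set("fs.defaultFS", "hdfs://namenode:8020");

        // newInstance creates a fresh FileSystem instead of returning the
        // cached instance that FileSystem.get would share, so closing it
        // here cannot affect other users of the same configuration.
        try (FileSystem fs = FileSystem.newInstance(conf)) {
            for (FileStatus status : fs.listStatus(new Path("/"))) {
                System.out.println(status.getPath());
            }
        }
    }
}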

Usage

From source file: org.datacleaner.spark.utils.HadoopUtils.java

License: Open Source License

public static FileSystem getFileSystem() throws IOException {
    return FileSystem.newInstance(HadoopUtils.getHadoopConfiguration(getHadoopConfigurationDirectoryToUse()));
}

From source file: org.datacleaner.util.HdfsUtils.java

License: Open Source License

public static FileSystem getFileSystemFromUri(final URI uri) {
    try {
        final URI baseUri = UriBuilder.fromUri(uri).replacePath("/").build();
        return FileSystem.newInstance(getHadoopConfiguration(baseUri));
    } catch (final IOException e) {
        throw new RuntimeException(e);
    }
}
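
A possible call site, assuming a NameNode at namenode:8020 (a placeholder host):

final FileSystem fs = HdfsUtils.getFileSystemFromUri(URI.create("hdfs://namenode:8020/user/data/input.csv"));

The UriBuilder.fromUri(uri).replacePath("/") step strips the file path, so only the scheme and authority of the supplied URI are used when building the Hadoop configuration.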

From source file: org.datacleaner.windows.HdfsUrlChooser.java

License: Open Source License

/**
 * Scans the Hadoop environment for a directory with configuration files.
 *
 * @param serverInformationCatalog the catalog of known server configurations
 * @param selectedServer the name of the server to use, or null to fall back
 *        to the default cluster reference
 * @return true if a configuration was found and yielded files.
 */
private boolean scanHadoopConfigFiles(final ServerInformationCatalog serverInformationCatalog,
        final String selectedServer) {

    final HadoopClusterInformation clusterInformation;
    if (selectedServer != null) {
        clusterInformation = (HadoopClusterInformation) serverInformationCatalog.getServer(selectedServer);
    } else {
        clusterInformation = (HadoopClusterInformation) serverInformationCatalog
                .getServer(HadoopResource.DEFAULT_CLUSTERREFERENCE);
    }

    if (clusterInformation == null) {
        return false;
    }

    final Configuration configuration = HdfsUtils.getHadoopConfigurationWithTimeout(clusterInformation);

    _currentDirectory = new Path("/");

    try {
        _fileSystem = FileSystem.newInstance(configuration);
    } catch (final IOException e) {
        throw new IllegalArgumentException("Illegal URI when showing HDFS chooser", e);
    }
    final HdfsDirectoryModel model = (HdfsDirectoryModel) _fileList.getModel();
    model.updateFileList();
    return model._files.length > 0;
}
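
Using newInstance here, rather than FileSystem.get, gives the chooser dialog its own FileSystem object instead of the process-wide cached one, so the dialog can close its instance without invalidating a FileSystem that other parts of the application may still hold.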

From source file: org.jd.copier.CopyClient.java

License: Apache License

/** Normal copy using the shell copy command */

public static void runNormalCopy(String[] args) {
    if (args[1] != null) {
        try {
            File fileListCopy = new File(new URI(args[1]).getPath());
            if (!fileListCopy.isFile()) {
                throw new IllegalArgumentException("Please Provide File Path For the File List To Copy");
            } else {
                FileStatus status;
                status = FileSystem.newInstance(new Configuration()).getFileStatus(new Path(args[2]));
                if (!status.isDirectory()) {
                    throw new IllegalArgumentException("Please Provide a Valid HDFS Directory to Copy");
                } else {
                    if (args[3] == null) {
                        throw new IllegalArgumentException("Please Provide the Chunk Count");
                    } else {
                        if (args[4] == null) {
                            throw new IllegalArgumentException("Please Provide Number Of Threads");
                        } else {
                            String fileListPath = fileListCopy.getAbsolutePath();
                            Path pathOutDir = new Path(args[2]);
                            int chunkCount = Integer.parseInt(args[3]);
                            int threadCount = Integer.parseInt(args[4]);
                            new HDPCopy().copyToHdfs(fileListPath, pathOutDir, chunkCount, threadCount);

                        }
                    }
                }
            }

        } catch (URISyntaxException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
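
Based on the argument checks above, the method expects args[1] to be a file: URI pointing at a local list of files to copy, args[2] to be an existing HDFS directory, args[3] a chunk count, and args[4] a thread count; args[0] is not read by this method. A hypothetical invocation (every value, including the first argument, is a placeholder):

java org.jd.copier.CopyClient copy file:///tmp/filelist.txt /data/output 4 8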

From source file: org.jd.copier.shell.CopyRunnable.java

License: Apache License

/** Run method: copies the assigned files to HDFS. */

public void run() {
    try {
        setFileSystem(FileSystem.newInstance(configuration));
        Path[] p = new Path[subList.get().size()];
        Path[] path = subList.get().toArray(p);
        getFileSystem().copyFromLocalFile(false, true, path, getOutDirectory());

    } catch (IOException e) {
        e.printStackTrace();
    }

}
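
Creating the FileSystem with newInstance inside run() is the significant choice here: each worker thread gets its own FileSystem object instead of the single cached instance that FileSystem.get(configuration) would return to every thread, so one thread closing or failing its filesystem cannot disturb the copies running in the others.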

From source file: org.shaf.core.util.IOUtils.java

License: Apache License

/**
 * Returns the {@link FileSystem}.
 * 
 * @return the file system.
 * @throws IOException
 *             if an I/O error occurs.
 */
public static final FileSystem getFileSystem() throws IOException {
    if (isHadoopAvailable()) {
        return FileSystem.newInstance(getHadoopConfig());
    } else {
        return FileSystem.newInstanceLocal(new Configuration()).getRaw();
    }
}
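
When Hadoop is not available, the fallback uses newInstanceLocal, which returns a LocalFileSystem backed by the local disk; the getRaw() call then unwraps it to the underlying RawLocalFileSystem, bypassing the client-side CRC checksum layer that LocalFileSystem adds.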

From source file: tamriel.cyrodiil.champion.thor.service.hadoop.HdfsConnector.java

public HdfsConnector(String host, Integer port) {
    if (host == null) {
        throw new IllegalArgumentException("Hadoop server must be specified");
    }
    StringBuilder defaultName = new StringBuilder("hdfs://");
    defaultName.append(host);
    if (port != null) {
        defaultName.append(':').append(port);
    }

    Configuration conf = new Configuration();
    conf.set("fs.default.name", defaultName.toString());

    try {
        fileSystem = FileSystem.newInstance(conf);
    } catch (IOException err) {
        err.printStackTrace();
    }
}
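
fs.default.name is the legacy key for the default filesystem URI; on current Hadoop versions the non-deprecated equivalent, using the same placeholder host, would be:

conf.set("fs.defaultFS", defaultName.toString());

Both keys still work through Hadoop's configuration deprecation mapping. Note also that the catch block only prints the stack trace, leaving fileSystem null on failure; rethrowing as an unchecked exception, as the HdfsUtils example above does, would surface the problem earlier.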