Example usage for org.apache.hadoop.fs FileSystem getUri

Introduction

This page lists usage examples for org.apache.hadoop.fs.FileSystem#getUri.

Prototype

public abstract URI getUri();

Document

Returns a URI which identifies this FileSystem.
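
For orientation, here is a minimal, self-contained sketch of getUri in use (the class name GetUriExample and the printed label are illustrative, not taken from the examples below): it obtains a FileSystem from a Hadoop Configuration and prints the URI that identifies it.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class GetUriExample {
    public static void main(String[] args) throws Exception {
        // Load the default configuration (core-site.xml etc., if present on the classpath)
        Configuration conf = new Configuration();
        // Obtain the default FileSystem for that configuration
        FileSystem fs = FileSystem.get(conf);
        // getUri() identifies the filesystem, e.g. hdfs://namenode:8020 or file:///
        URI uri = fs.getUri();
        System.out.println("FileSystem URI: " + uri);
        fs.close();
    }
}

As the examples below show, getUri() is typically used to label error messages with the filesystem they refer to, or to check whether two FileSystem instances point at the same filesystem.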

Usage

From source file:org.smartfrog.services.hadoop.operations.utils.DfsUtils.java

License:Open Source License

/**
 * This is the non-quiet close operation
 *
 * @param dfs filesystem
 * @throws SmartFrogRuntimeException if the filesystem does not close
 */
public static void closeDfs(FileSystem dfs) throws SmartFrogRuntimeException {
    try {
        dfs.close();
    } catch (IOException e) {
        if (isFilesystemClosedException(e)) {
            LogFactory.getLog(DfsUtils.class).info("DFS has already been closed", e);
        } else {
            throw (SmartFrogRuntimeException) SmartFrogRuntimeException
                    .forward(ERROR_FAILED_TO_CLOSE + dfs.getUri(), e);
        }
    }
}

From source file:org.smartfrog.services.hadoop.operations.utils.DfsUtils.java

License:Open Source License

/**
 * Delete a DFS directory. Cleans up afterwards
 *
 * @param dfs       filesystem to work with
 * @param dir       directory to delete
 * @param recursive recursive delete?
 * @throws SmartFrogRuntimeException if anything goes wrong
 */
public static void deleteDFSDirectory(FileSystem dfs, String dir, boolean recursive)
        throws SmartFrogRuntimeException {
    URI dfsURI = dfs.getUri();
    Path path = new Path(dir);
    try {
        dfs.delete(path, recursive);
    } catch (IOException e) {
        closeQuietly(dfs);
        throw (SmartFrogRuntimeException) SmartFrogRuntimeException
                .forward(ERROR_FAILED_TO_DELETE_PATH + path + " on " + dfsURI, e);
    }
}

From source file:org.smartfrog.services.hadoop.operations.utils.DfsUtils.java

License:Open Source License

/**
 * Check that the destination is not under the source. Credit: Apache Hadoop team.
 *
 * @param srcFS source filesystem
 * @param src   source path
 * @param dstFS dest filesystem
 * @param dst   dest path
 * @throws SmartFrogRuntimeException if there is a match.
 */

public static void assertNotDependent(FileSystem srcFS, Path src, FileSystem dstFS, Path dst)
        throws SmartFrogRuntimeException {
    if (srcFS.getUri().equals(dstFS.getUri())) {
        String srcq = src.makeQualified(srcFS).toString() + Path.SEPARATOR;
        String dstq = dst.makeQualified(dstFS).toString() + Path.SEPARATOR;
        if (dstq.startsWith(srcq)) {
            if (srcq.length() == dstq.length()) {
                throw new SmartFrogRuntimeException(ERROR_CANNOT_COPY + src + " to itself.");
            } else {
                throw new SmartFrogRuntimeException(ERROR_CANNOT_COPY + src + " to its subdirectory " + dst);
            }
        }
    }
}

From source file:org.smartfrog.services.hadoop.operations.utils.DfsUtils.java

License:Open Source License

/**
 * Create the parent directories of a given path
 *
 * @param fileSystem filesystem to work with
 * @param dest       file
 * @throws SmartFrogRuntimeException failure to create the directories
 */
public static void mkdirs(FileSystem fileSystem, Path dest) throws SmartFrogRuntimeException {
    try {
        if (!fileSystem.mkdirs(dest)) {
            throw new SmartFrogRuntimeException(ERROR_MKDIR_FAILED + dest);
        }
    } catch (IOException e) {
        throw new SmartFrogRuntimeException(
                ERROR_MKDIR_FAILED + dest + " in " + fileSystem.getUri() + " : " + e, e);
    }
}

From source file:org.smartfrog.services.hadoop.operations.utils.DfsUtils.java

License:Open Source License

/**
 * Copy a file
 *
 * @param srcFS     source filesystem
 * @param src       source path
 * @param dstFS     destination filesystem
 * @param dst       destination path
 * @param overwrite overwrite
 * @param blocksize block size
 * @throws SmartFrogRuntimeException for any failure
 */
public static void copyFile(FileSystem srcFS, Path src, FileSystem dstFS, Path dst, boolean overwrite,
        int blocksize) throws SmartFrogRuntimeException {
    assertNotDependent(srcFS, src, dstFS, dst);
    FileStatus status;
    URI fsuri = srcFS.getUri();
    try {
        status = srcFS.getFileStatus(src);
    } catch (FileNotFoundException fe) {
        throw new SmartFrogRuntimeException(ERROR_MISSING_SOURCE_FILE + src + " in " + fsuri, fe);
    } catch (IOException e) {
        throw new SmartFrogRuntimeException(ERROR_NO_STAT + src + " in " + fsuri + " : " + e, e);
    }
    if (status.isDir()) {
        throw new SmartFrogRuntimeException(ERROR_NO_DIRECTORY_COPY + src + " in " + fsuri);
    }
    InputStream in = null;
    OutputStream out = null;
    try {
        in = srcFS.open(src);
        out = dstFS.create(dst, overwrite);
    } catch (IOException e) {
        //close whichever stream was opened before the failure, then fail
        //rather than fall through to copyBytes with a null stream
        org.smartfrog.services.filesystem.FileSystem.close(in);
        org.smartfrog.services.filesystem.FileSystem.close(out);
        throw new SmartFrogRuntimeException(
                ERROR_COPY_FAILED + src + " in " + fsuri + " to " + dst + " : " + e, e);
    }
    try {
        IOUtils.copyBytes(in, out, blocksize, true);
    } catch (IOException e) {
        throw new SmartFrogRuntimeException(
                ERROR_COPY_FAILED + src + " in " + fsuri + " to " + dst + " in " + dstFS.getUri() + " : " + e,
                e);
    }

}

From source file:org.smartfrog.services.hadoop.operations.utils.DfsUtils.java

License:Open Source License

/**
 * Copy a local file into HDFS
 *
 * @param fileSystem filesystem for the destination
 * @param source     source file
 * @param dest       dest path
 * @param overwrite  should there be an overwrite?
 * @throws SmartFrogRuntimeException if the copy failed
 */
public static void copyLocalFileIn(FileSystem fileSystem, File source, Path dest, boolean overwrite)
        throws SmartFrogRuntimeException {
    if (!source.exists()) {
        throw new SmartFrogRuntimeException(ERROR_MISSING_SOURCE_FILE + source);
    }
    Path localSource = new Path(source.toURI().toString());
    try {
        fileSystem.copyFromLocalFile(false, overwrite, localSource, dest);
    } catch (IOException e) {
        throw new SmartFrogRuntimeException(
                FAILED_TO_COPY + source + " to " + dest + " on " + fileSystem.getUri(), e);
    }
}

From source file:org.smartfrog.services.hadoop.operations.utils.DfsUtils.java

License:Open Source License

/**
 * Move files that match the file pattern <i>srcPath</i>
 * to a destination file.
 * When moving multiple files, the destination must be a directory.
 * Otherwise, an IOException is thrown.
 * Based on {@link org.apache.hadoop.fs.FsShell#rename(String, String)}
 *
 * @param fileSystem filesystem to work with
 * @param srcPath    a file pattern specifying source files
 * @param dstPath    a destination file/directory
 * @throws IOException for any problem
 * @see org.apache.hadoop.fs.FileSystem#globStatus(Path)
 */
public static void rename(FileSystem fileSystem, Path srcPath, Path dstPath) throws IOException {
    Path[] srcs = FileUtil.stat2Paths(fileSystem.globStatus(srcPath), srcPath);
    FileStatus destStatus = fileSystem.getFileStatus(dstPath);
    if (srcs.length > 1 && !destStatus.isDir()) {
        throw new IOException("When moving multiple files, " + "destination should be a directory.");
    }
    for (Path src : srcs) {
        if (!fileSystem.rename(src, dstPath)) {
            FileStatus srcFstatus;
            FileStatus dstFstatus;
            try {
                srcFstatus = fileSystem.getFileStatus(src);
            } catch (FileNotFoundException e) {
                FileNotFoundException fnf = new FileNotFoundException(
                        src + ": No such file or directory in " + fileSystem.getUri());
                fnf.initCause(e);
                throw fnf;
            }
            try {
                dstFstatus = fileSystem.getFileStatus(dstPath);
            } catch (IOException ignored) {
                dstFstatus = null;
            }
            if ((srcFstatus != null) && (dstFstatus != null)) {
                if (srcFstatus.isDir() && !dstFstatus.isDir()) {
                    throw new IOException("cannot overwrite non directory " + dstPath + " with directory "
                            + srcPath + " in " + fileSystem.getUri());
                }
            }
            throw new IOException(
                    "Failed to rename '" + srcPath + "' to '" + dstPath + "'" + " in " + fileSystem.getUri());
        }
    }
}

From source file:org.springframework.data.hadoop.fs.FsShell.java

License:Apache License

public void mv(String src, String src2, String... dst) {
    Object[] va = parseVarargs(src, src2, dst);
    @SuppressWarnings({ "unchecked" })
    List<Path> sources = (List<Path>) va[0];
    Path dstPath = (Path) va[1];

    try {
        FileSystem dstFs = getFS(dstPath);
        boolean isDstDir = !dstFs.isFile(dstPath);

        if (sources.size() > 1 && !isDstDir) {
            throw new IllegalArgumentException("Destination must be a dir when moving multiple files");
        }

        for (Path srcPath : sources) {
            FileSystem srcFs = getFS(srcPath);
            URI srcURI = srcFs.getUri();
            URI dstURI = dstFs.getUri();
            if (srcURI.compareTo(dstURI) != 0) {
                throw new IllegalArgumentException("src and destination filesystems do not match.");
            }
            Path[] srcs = FileUtil.stat2Paths(srcFs.globStatus(srcPath), srcPath);
            if (srcs.length > 1 && !isDstDir) {
                throw new IllegalArgumentException(
                        "When moving multiple files, destination should be a directory.");
            }
            for (Path s : srcs) {
                if (!srcFs.rename(s, dstPath)) {
                    FileStatus srcFstatus = null;
                    FileStatus dstFstatus = null;
                    try {
                        srcFstatus = srcFs.getFileStatus(s);
                    } catch (FileNotFoundException e) {
                        // ignore
                    }
                    try {
                        dstFstatus = dstFs.getFileStatus(dstPath);
                    } catch (IOException e) {
                        // ignore; dstFstatus stays null
                    }
                    if ((srcFstatus != null) && (dstFstatus != null)) {
                        if (srcFstatus.isDir() && !dstFstatus.isDir()) {
                            throw new IllegalArgumentException(
                                    "cannot overwrite non directory " + dstPath + " with directory " + s);
                        }
                    }
                    throw new HadoopException("Failed to rename " + s + " to " + dstPath);
                }
            }
        }
    } catch (IOException ex) {
        throw new HadoopException("Cannot rename resources " + ex.getMessage(), ex);
    }
}

From source file:org.springframework.data.hadoop.test.support.compat.MiniMRClusterCompat.java

License:Apache License

/**
 * Instantiates a MiniMRCluster.
 * 
 * @param caller the one who called this method
 * @param nodes number of nodes
 * @param configuration passed configuration
 * @param fileSystem hdfs filesystem
 * @param classLoader the class loader
 * @return the cluster object, or null if no cluster class could be resolved
 */
public static Object instantiateCluster(Class<?> caller, int nodes, Configuration configuration,
        FileSystem fileSystem, ClassLoader classLoader) {

    log.info("Starting minirmcluster via compat");

    Assert.notNull(caller, "Caller class must be set");
    Assert.notNull(fileSystem, "FileSystem must not be null");

    Object cluster = null;

    Class<?> factoryClass = resolveClass(CLASS_FACTORY, classLoader);
    Class<?> legacyClass = resolveClass(CLASS_LEGACY, classLoader);
    log.info("Cluster classes resolved, factory=" + factoryClass + " legacy=" + legacyClass);

    if (factoryClass != null) {
        Method method = ReflectionUtils.findMethod(factoryClass, "create", Class.class, int.class,
                Configuration.class);
        cluster = ReflectionUtils.invokeMethod(method, null, caller, nodes, configuration);
    } else if (legacyClass != null) {
        Constructor<?> constructor = ClassUtils.getConstructorIfAvailable(legacyClass, int.class, String.class,
                int.class);
        cluster = BeanUtils.instantiateClass(constructor, nodes, fileSystem.getUri().toString(), 1);
    } else {
        log.error("Failed to find or instantiate cluster class");
    }

    if (cluster != null) {
        log.info("Cluster instantiated: " + cluster);
    }

    return cluster;
}

From source file:org.springframework.data.hadoop.test.support.StandaloneHadoopCluster.java

License:Apache License

@SuppressWarnings("deprecation")
@Override
public void start() throws IOException {
    log.info("Checking if cluster=" + clusterName + " needs to be started");
    synchronized (this.startupShutdownMonitor) {
        if (started) {
            return;
        }

        // TODO: fix for MAPREDUCE-2785
        // Setting a system property like this is ugly, but
        // until we find a better way for the fix it is the
        // workaround we have. Who knows what kind of problems
        // this will cause!!! Keeping this here as a reminder
        // for the next person who cleans up the mess.
        String tmpDir = getTmpDir();
        System.setProperty("hadoop.log.dir", tmpDir);

        // need to get unique dir per cluster
        System.setProperty("test.build.data", "build/test/data/" + clusterName);

        log.info("Starting cluster=" + clusterName);

        Configuration config = new JobConf();

        // umask trick
        String umask = getCurrentUmask(tmpDir, config);
        if (umask != null) {
            log.info("Setting expected umask to " + umask);
            config.set("dfs.datanode.data.dir.perm", umask);
        }

        // the dfs cluster updates the config.
        // newer dfs clusters use a builder pattern, but we need
        // to support older versions in the same code, so there
        // are some ramifications if the deprecated methods are
        // removed in a future release
        dfsCluster = new MiniDFSCluster(config, nodes, true, null);

        // we need to ask the mr cluster for its config after init
        // returns; it is not guaranteed that the passed config has
        // been updated. we do all of this via a compatibility class
        // that uses reflection, because at this point we don't know
        // the exact runtime implementation

        FileSystem fs = dfsCluster.getFileSystem();
        log.info("Dfs cluster uri= " + fs.getUri());

        mrClusterObject = MiniMRClusterCompat.instantiateCluster(this.getClass(), nodes, config, fs,
                this.getClass().getClassLoader());

        configuration = MiniMRClusterCompat.getConfiguration(mrClusterObject);

        // set default uri again in case it wasn't updated
        FileSystem.setDefaultUri(configuration, fs.getUri());

        log.info("Started cluster=" + clusterName);
        started = true;
    }
}