Example usage for org.apache.hadoop.fs Path SEPARATOR

List of usage examples for org.apache.hadoop.fs Path SEPARATOR

Introduction

On this page you can find example usages of org.apache.hadoop.fs Path SEPARATOR.

Prototype

String SEPARATOR

To view the source code for org.apache.hadoop.fs Path SEPARATOR, click the Source Link.

Document

The directory separator, a slash.
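
For orientation, a minimal standalone sketch of the constant itself (the class name SeparatorDemo and the path segments are illustrative):

import org.apache.hadoop.fs.Path;

public class SeparatorDemo {
    public static void main(String[] args) {
        // Path.SEPARATOR is the String "/"; Path.SEPARATOR_CHAR is the char '/'.
        Path p = new Path("/user" + Path.SEPARATOR + "alice");
        System.out.println(Path.SEPARATOR); // prints: /
        System.out.println(p);              // prints: /user/alice
    }
}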

Usage

From source file:org.mitre.mapred.fs.FileUtils.java

License:Open Source License

/**
 * Returns a tmp path on the remote FileSystem.
 *
 * @param fs       the remote file system
 * @param basePath the base path under which the temporary directory is created
 * @return the qualified temporary path
 * @throws java.io.IOException if an I/O error occurs
 */
public static final Path createRemoteTempPath(FileSystem fs, Path basePath) throws IOException {

    long now = System.currentTimeMillis();
    // @TODO: add constant and look up tmp dir name
    Path tmpDirPath = new Path(basePath.toString() + Path.SEPARATOR + "tmp_" + Long.toHexString(now));
    // check to see if unique?
    return fs.makeQualified(tmpDirPath);
}
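
A hedged invocation sketch for the method above (the configuration, base path, and namenode address are illustrative):

FileSystem fs = FileSystem.get(new Configuration());
Path tmp = FileUtils.createRemoteTempPath(fs, new Path("/data"));
// yields a qualified path such as hdfs://namenode:8020/data/tmp_<hex-timestamp>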

From source file:org.notmysock.tez.BroadcastTest.java

License:Apache License

public boolean run(Configuration conf, boolean doLocalityCheck) throws Exception {
    System.out.println("Running BroadcastTest");
    // conf and UGI
    TezConfiguration tezConf;
    if (conf != null) {
        tezConf = new TezConfiguration(conf);
    } else {
        tezConf = new TezConfiguration();
    }
    tezConf.setBoolean(TezConfiguration.TEZ_AM_CONTAINER_REUSE_ENABLED, true);
    UserGroupInformation.setConfiguration(tezConf);
    String user = UserGroupInformation.getCurrentUser().getShortUserName();

    // staging dir
    FileSystem fs = FileSystem.get(tezConf);
    String stagingDirStr = Path.SEPARATOR + "user" + Path.SEPARATOR + user + Path.SEPARATOR + ".staging"
            + Path.SEPARATOR + Long.toString(System.currentTimeMillis());
    Path stagingDir = new Path(stagingDirStr);
    tezConf.set(TezConfiguration.TEZ_AM_STAGING_DIR, stagingDirStr);
    stagingDir = fs.makeQualified(stagingDir);

    Path jobJar = new Path(stagingDir, "job.jar");
    fs.copyFromLocalFile(getCurrentJarURL(), jobJar);

    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    localResources.put("job.jar", createLocalResource(fs, jobJar));

    TezClient tezSession = null;
    // needs session or else TaskScheduler does not hold onto containers
    tezSession = TezClient.create("BroadcastTest", tezConf);
    tezSession.addAppMasterLocalFiles(localResources);
    tezSession.start();

    DAGClient dagClient = null;

    try {
        DAG dag = createDAG(fs, tezConf, stagingDir, localResources);

        dag.addTaskLocalFiles(localResources);

        tezSession.waitTillReady();
        dagClient = tezSession.submitDAG(dag);

        // monitoring
        DAGStatus dagStatus = dagClient.waitForCompletionWithStatusUpdates(null);
        if (dagStatus.getState() != DAGStatus.State.SUCCEEDED) {
            System.out.println("DAG diagnostics: " + dagStatus.getDiagnostics());
            return false;
        }
        return true;
    } finally {
        fs.delete(stagingDir, true);
        tezSession.stop();
    }
}

From source file:org.shaf.core.util.IOUtils.java

License:Apache License

/**
 * Merges paths.
 * 
 * @param paths
 *            the paths to merge.
 * @return the merged path.
 * @throws IOException
 *             if an I/O error occurs.
 */
public static final Path mergePath(final Path... paths) throws IOException {
    try {
        if (paths.length == 0) {
            throw new IllegalArgumentException("paths.length=0");
        }

        if (paths.length == 1) {
            return paths[0];
        }

        Path buf = null;
        for (int i = 0; i < paths.length; i++) {
            if (paths[i] == null) {
                throw new NullPointerException("paths[" + i + "]");
            }

            Path norm = normalizePath(paths[i]);
            if (buf == null) {
                buf = norm;
            } else {
                if (norm.isAbsolute()) {
                    norm = new Path(StringUtils.trim(norm.toString(), Path.SEPARATOR));
                }
                buf = new Path(buf, norm);
            }
        }

        return buf;
    } catch (NullPointerException | IllegalArgumentException exc) {
        throw new IOException("Invalid input.", exc);
    }
}
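
A hedged usage sketch for the overload above (paths are illustrative, assuming normalizePath leaves these already-normal paths unchanged); note that absolute arguments after the first lose their leading separator before being appended:

Path merged = IOUtils.mergePath(new Path("/data"), new Path("/logs/2024"));
// merged -> /data/logs/2024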

From source file:org.shaf.core.util.IOUtils.java

License:Apache License

/**
 * Merges paths.
 * 
 * @param paths
 *            the paths to merge.
 * @return the merged path.
 * @throws IOException
 *             if an I/O error occurs.
 */
public static final Path mergePath(final String... paths) throws IOException {
    try {
        if (paths.length == 0) {
            throw new IllegalArgumentException("paths.length=0");
        }

        Path buf = null;
        for (int i = 0; i < paths.length; i++) {
            if (paths[i] == null) {
                throw new NullPointerException("paths[" + i + "]");
            }

            Path norm = normalizePath(paths[i]);
            if (buf == null) {
                buf = norm;
            } else {
                if (norm.isAbsolute()) {
                    norm = new Path(StringUtils.trim(norm.toString(), Path.SEPARATOR));
                }
                buf = new Path(buf, norm);
            }
        }
        return buf;
    } catch (NullPointerException | IllegalArgumentException exc) {
        throw new IOException("Invalid input.", exc);
    }
}

From source file:org.shaf.lib.io.Zip.java

License:Apache License

/**
 * Performs data compression.
 */
@Override
protected Object logic() throws IOException {
    Location src = new Location(super.getFileSystem(), super.getBasePath(), this.source,
            this.wildcard.equals("NULL") ? null : this.wildcard).setQualifier("compressing data")
                    .shouldBeFound();
    Location trg = new Location(super.getFileSystem(), super.getBasePath(), this.target)
            .setQualifier("zip-archive");

    try (ZipOutputStream out = new ZipOutputStream(trg.getDataOutputStream())) {

        if (src.isDirectory()) {
            for (Location loc : src.getContent()) {
                if (loc.isFile()) {
                    out.putNextEntry(new ZipEntry((src.isFilterDefined() ? "" : src.getName() + Path.SEPARATOR)
                            + loc.getPathAsString()));
                    try (FSDataInputStream in = loc.getDataInputStream()) {
                        super.copyBlock(in, out, loc.getSize());
                    }
                }
            }
        } else {
            out.putNextEntry(new ZipEntry(src.getName()));
            try (FSDataInputStream in = src.getDataInputStream()) {
                super.copyBlock(in, out, src.getSize());
            }
        }

    } catch (Exception exc) {
        throw new IOException("Failed to compress location " + this.source + " to archive " + this.target, exc);
    }

    return null;
}

From source file:org.smartfrog.services.hadoop.operations.utils.DfsUtils.java

License:Open Source License

/**
 * Checks that the destination is not under the source. Credit: Apache Hadoop team.
 *
 * @param srcFS source filesystem
 * @param src   source path
 * @param dstFS dest filesystem
 * @param dst   dest path
 * @throws SmartFrogRuntimeException if there is a match.
 */

public static void assertNotDependent(FileSystem srcFS, Path src, FileSystem dstFS, Path dst)
        throws SmartFrogRuntimeException {
    if (srcFS.getUri().equals(dstFS.getUri())) {
        String srcq = src.makeQualified(srcFS).toString() + Path.SEPARATOR;
        String dstq = dst.makeQualified(dstFS).toString() + Path.SEPARATOR;
        if (dstq.startsWith(srcq)) {
            if (srcq.length() == dstq.length()) {
                throw new SmartFrogRuntimeException(ERROR_CANNOT_COPY + src + " to itself.");
            } else {
                throw new SmartFrogRuntimeException(ERROR_CANNOT_COPY + src + " to its subdirectory " + dst);
            }
        }
    }
}
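
For illustration, a hedged sketch of the failure case this guard catches (the filesystem and paths are illustrative):

// throws SmartFrogRuntimeException: cannot copy /data/in to its subdirectory /data/in/part
DfsUtils.assertNotDependent(fs, new Path("/data/in"), fs, new Path("/data/in/part"));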

From source file:org.springframework.data.hadoop.impala.mapreduce.MapReduceCommands.java

License:Apache License

/**
 * Runs the main class of a jar file with the given arguments.
 *
 * @param jarFileName   the jar file to run
 * @param mainClassName the fully qualified name of the main class
 * @param args          space-separated program arguments
 * @throws Throwable 
 */
public void runJar(final String jarFileName, final String mainClassName, final String args) throws Throwable {
    File file = new File(jarFileName);
    File tmpDir = new File(new Configuration().get("hadoop.tmp.dir"));
    String os = System.getProperty("os.name").toLowerCase();
    if (os.contains("win")) {
        tmpDir = new File(System.getProperty("java.io.tmpdir"), "impala");
    }
    tmpDir.mkdirs();
    if (!tmpDir.isDirectory()) {
        LOG.severe("Mkdirs failed to create " + tmpDir);
    }

    try {
        final File workDir = File.createTempFile("hadoop-unjar", "", tmpDir);
        workDir.delete();
        workDir.mkdirs();
        if (!workDir.isDirectory()) {
            LOG.severe("Mkdirs failed to create " + workDir);
            return;
        }

        Runtime.getRuntime().addShutdownHook(new Thread() {
            public void run() {
                try {
                    FileUtil.fullyDelete(workDir);
                } catch (IOException e) {
                }
            }
        });

        unJar(file, workDir);

        ArrayList<URL> classPath = new ArrayList<URL>();

        //Add the Hadoop configuration dir to the classpath so that the
        //user's configuration can be accessed when running the jar
        File hadoopConfigurationDir = new File(workDir + Path.SEPARATOR + "impala-hadoop-configuration");
        writeHadoopConfiguration(hadoopConfigurationDir, this.getHadoopConfiguration());
        classPath.add(hadoopConfigurationDir.toURI().toURL());
        //classPath.add(new File(System.getenv("HADOOP_CONF_DIR")).toURL());

        classPath.add(new File(workDir + Path.SEPARATOR).toURI().toURL());
        classPath.add(file.toURI().toURL());
        classPath.add(new File(workDir, "classes" + Path.SEPARATOR).toURI().toURL());
        File[] libs = new File(workDir, "lib").listFiles();
        if (libs != null) {
            for (int i = 0; i < libs.length; i++) {
                classPath.add(libs[i].toURI().toURL());
            }
        }
        ClassLoader loader = new URLClassLoader(classPath.toArray(new URL[0]),
                this.getClass().getClassLoader());
        Thread.currentThread().setContextClassLoader(loader);
        Class<?> mainClass = Class.forName(mainClassName, true, loader);
        Method main = mainClass.getMethod("main",
                new Class[] { Array.newInstance(String.class, 0).getClass() });
        String[] newArgs = args.split(" ");
        main.invoke(null, new Object[] { newArgs });
    } catch (Exception e) {
        if (e instanceof InvocationTargetException) {
            if (e.getCause() instanceof ExitTrappedException) {
                throw (ExitTrappedException) e.getCause();
            }
        } else {
            throw e;
        }
    }
}
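
A hedged invocation sketch (the jar path, class name, and arguments are illustrative, and acquiring the commands instance from the shell context is assumed):

// from within the Impala shell component:
mapReduceCommands.runJar("/tmp/wordcount.jar", "com.example.WordCount", "input output");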

From source file:org.springframework.data.hadoop.impala.mapreduce.MapReduceCommands.java

License:Apache License

/**
 * Writes the Hadoop configuration to a directory as the files
 * "core-site.xml", "hdfs-site.xml" and "mapred-site.xml".
 *
 * @param configDir the directory to which the files are written
 * @param config    the Hadoop configuration
 */
public void writeHadoopConfiguration(File configDir, Configuration config) {
    configDir.mkdirs();
    try (FileOutputStream core = new FileOutputStream(new File(configDir + Path.SEPARATOR + "core-site.xml"));
            FileOutputStream hdfs = new FileOutputStream(new File(configDir + Path.SEPARATOR + "hdfs-site.xml"));
            FileOutputStream mapred = new FileOutputStream(new File(configDir + Path.SEPARATOR + "mapred-site.xml"))) {
        // try-with-resources ensures the streams are closed
        config.writeXml(core);
        config.writeXml(hdfs);
        config.writeXml(mapred);
    } catch (Exception e) {
        LOG.severe("Saving user's configuration failed. Message: " + e.getMessage());
    }

}

From source file:org.trustedanalytics.auth.gateway.hdfs.utils.PathCreator.java

License:Apache License

private Path createPath(String... args) {
    return getPath(Path.SEPARATOR.concat(String.join(Path.SEPARATOR, args)));
}
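
Since createPath is private and getPath is project-internal, a hedged standalone sketch of the same joining idiom (segment names are illustrative):

String joined = Path.SEPARATOR.concat(String.join(Path.SEPARATOR, "org", "tech-users"));
// joined -> /org/tech-users
Path p = new Path(joined);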

From source file:se.sics.nstream.hops.hdfs.HDFSHelper.java

License:Open Source License

public static Result<Long> length(UserGroupInformation ugi, final HDFSEndpoint hdfsEndpoint,
        HDFSResource resource) {
    final String filePath = resource.dirPath + Path.SEPARATOR + resource.fileName;
    LOG.debug("{}getting length of file:{}", new Object[] { logPrefix, filePath });

    try {
        Result<Long> result = ugi.doAs(new PrivilegedExceptionAction<Result<Long>>() {
            @Override
            public Result<Long> run() {
                try (FileSystem fs = FileSystem.get(hdfsEndpoint.hdfsConfig)) {
                    long length = -1;
                    if (fs.isFile(new Path(filePath))) {
                        length = fs.getLength(new Path(filePath));
                    }
                    return Result.success(length);
                } catch (IOException ex) {
                    LOG.warn("{}could not get size of file:{}", logPrefix, ex.getMessage());
                    return Result.externalSafeFailure(new HDFSException("hdfs file length", ex));
                }
            }
        });
        LOG.trace("{}op completed", new Object[] { logPrefix });
        return result;
    } catch (IOException | InterruptedException ex) {
        LOG.error("{}unexpected exception:{}", logPrefix, ex);
        return Result.externalSafeFailure(new HDFSException("hdfs file length", ex));
    }
}