Example usage for org.apache.hadoop.fs FileSystem getUri

Introduction

This page collects usage examples for org.apache.hadoop.fs FileSystem getUri.

Prototype

public abstract URI getUri();

Document

Returns a URI which identifies this FileSystem.
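
Before the project-specific examples, here is a minimal sketch of calling getUri() against the default filesystem. It assumes only a plain Configuration, so the printed scheme and authority depend on whatever fs.defaultFS resolves to (for example hdfs://namenode:8020, or file:/// locally); the GetUriExample class name and the /tmp/data path are purely illustrative.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetUriExample {
    public static void main(String[] args) throws Exception {
        // Obtain the default FileSystem for this configuration.
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // getUri() returns the scheme and authority identifying this FileSystem instance.
        URI uri = fs.getUri();
        System.out.println("FileSystem URI: " + uri);

        // A pattern that recurs in the examples below: qualify a path against that URI.
        Path qualified = new Path("/tmp/data").makeQualified(uri, fs.getWorkingDirectory());
        System.out.println("Qualified path: " + qualified);

        fs.close();
    }
}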

Usage

From source file:org.apache.reef.runtime.mesos.evaluator.REEFExecutor.java

License:Apache License

public final void onEvaluatorLaunch(final EvaluatorLaunch evaluatorLaunch) {
    LOG.log(Level.INFO, "Launch!!!! {0}", evaluatorLaunch.toString());
    assert (evaluatorLaunch.getIdentifier().toString().equals(this.mesosExecutorId));
    final ExecutorService evaluatorLaunchExecutorService = Executors.newSingleThreadExecutor();
    evaluatorLaunchExecutorService.submit(new Thread() {
        public void run() {
            try {
                final List<String> command = Arrays.asList(evaluatorLaunch.getCommand().toString().split(" "));
                LOG.log(Level.INFO, "Command!!!! {0}", command);
                final FileSystem fileSystem = FileSystem.get(new Configuration());
                final Path hdfsFolder = new Path(fileSystem.getUri() + "/" + mesosExecutorId);
                final File localFolder = new File(fileNames.getREEFFolderName(),
                        fileNames.getLocalFolderName());

                FileUtil.copy(fileSystem, hdfsFolder, localFolder, true, new Configuration());

                evaluatorProcess = new ProcessBuilder().command(command)
                        .redirectError(new File(fileNames.getEvaluatorStderrFileName()))
                        .redirectOutput(new File(fileNames.getEvaluatorStdoutFileName())).start();

                evaluatorProcessExitValue = evaluatorProcess.waitFor();

                fileSystem.close();
            } catch (IOException | InterruptedException e) {
                throw new RuntimeException(e);
            }
        }
    });
    evaluatorLaunchExecutorService.shutdown();
}

From source file:org.apache.slider.test.ContractTestUtils.java

License:Apache License

/**
 * Cleanup at the end of a test run
 * @param action action triggering the operation (for use in logging)
 * @param fileSystem filesystem to work with. May be null
 * @param cleanupPath path to delete as a string
 */
public static void cleanup(String action, FileSystem fileSystem, String cleanupPath) {
    if (fileSystem == null) {
        return;
    }
    Path path = new Path(cleanupPath).makeQualified(fileSystem.getUri(), fileSystem.getWorkingDirectory());
    cleanup(action, fileSystem, path);
}

From source file:org.apache.solr.hadoop.MorphlineBasicMiniMRTest.java

License:Apache License

@BeforeClass
public static void setupClass() throws Exception {
    if (System.getProperty("hadoop.log.dir") == null) {
        System.setProperty("hadoop.log.dir", "target");
    }
    int taskTrackers = 2;
    int dataNodes = 2;
    //    String proxyUser = System.getProperty("user.name");
    //    String proxyGroup = "g";
    //    StringBuilder sb = new StringBuilder();
    //    sb.append("127.0.0.1,localhost");
    //    for (InetAddress i : InetAddress.getAllByName(InetAddress.getLocalHost().getHostName())) {
    //      sb.append(",").append(i.getCanonicalHostName());
    //    }

    System.setProperty("solr.hdfs.blockcache.enabled", "false");

    JobConf conf = new JobConf();
    conf.set("dfs.block.access.token.enable", "false");
    conf.set("dfs.permissions", "true");
    conf.set("hadoop.security.authentication", "simple");

    dfsCluster = new MiniDFSCluster(conf, dataNodes, true, null);
    FileSystem fileSystem = dfsCluster.getFileSystem();
    fileSystem.mkdirs(new Path("/tmp"));
    fileSystem.mkdirs(new Path("/user"));
    fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
    fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
    fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
    fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
    String nnURI = fileSystem.getUri().toString();
    int numDirs = 1;
    String[] racks = null;
    String[] hosts = null;

    mrCluster = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks, hosts, null, conf);
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}

From source file:org.apache.solr.hadoop.MorphlineGoLiveMiniMRTest.java

License:Apache License

@BeforeClass
public static void setupClass() throws Exception {
    //    if (isYarn()) {
    //      org.junit.Assume.assumeTrue(false); // ignore test on Yarn until CDH-10420 is fixed
    //    }
    if (System.getProperty("hadoop.log.dir") == null) {
        System.setProperty("hadoop.log.dir", "target");
    }
    int taskTrackers = 2;
    int dataNodes = 2;

    System.setProperty("solr.hdfs.blockcache.enabled", "false");

    JobConf conf = new JobConf();
    conf.set("dfs.block.access.token.enable", "false");
    conf.set("dfs.permissions", "true");
    conf.set("hadoop.security.authentication", "simple");

    createTempDir();
    System.setProperty("test.build.data", dataDir + File.separator + "hdfs" + File.separator + "build");
    System.setProperty("test.cache.data", dataDir + File.separator + "hdfs" + File.separator + "cache");

    dfsCluster = new MiniDFSCluster(conf, dataNodes, true, null);
    FileSystem fileSystem = dfsCluster.getFileSystem();
    fileSystem.mkdirs(new Path("/tmp"));
    fileSystem.mkdirs(new Path("/user"));
    fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
    fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
    fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
    fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
    String nnURI = fileSystem.getUri().toString();
    int numDirs = 1;
    String[] racks = null;
    String[] hosts = null;

    mrCluster = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks, hosts, null, conf);
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}

From source file:org.apache.solr.hadoop.SolrOutputFormat.java

License:Apache License

public static void addSolrConfToDistributedCache(Job job, File solrHomeZip) throws IOException {
    // Make a reasonably unique name for the zip file in the distributed cache
    // to avoid collisions if multiple jobs are running.
    String hdfsZipName = UUID.randomUUID().toString() + '.' + ZIP_FILE_BASE_NAME;
    Configuration jobConf = job.getConfiguration();
    jobConf.set(ZIP_NAME, hdfsZipName);

    Path zipPath = new Path("/tmp", getZipName(jobConf));
    FileSystem fs = FileSystem.get(jobConf);
    fs.copyFromLocalFile(new Path(solrHomeZip.toString()), zipPath);
    final URI baseZipUrl = fs.getUri().resolve(zipPath.toString() + '#' + getZipName(jobConf));

    DistributedCache.addCacheArchive(baseZipUrl, jobConf);
    LOG.debug("Set Solr distributed cache: {}", Arrays.asList(job.getCacheArchives()));
    LOG.debug("Set zipPath: {}", zipPath);
    // Actually send the path for the configuration zip file
    jobConf.set(SETUP_OK, zipPath.toString());
}

From source file:org.apache.sqoop.util.DirectImportUtils.java

License:Apache License

/**
 * Open a file in HDFS for write to hold the data associated with a table.
 * Creates any necessary directories, and returns the OutputStream to write
 * to. The caller is responsible for calling the close() method on the
 * returned stream.
 */
public static SplittableBufferedWriter createHdfsSink(Configuration conf, SqoopOptions options,
        ImportJobContext context) throws IOException {

    Path destDir = context.getDestination();
    FileSystem fs = destDir.getFileSystem(conf);

    LOG.debug("Writing to filesystem: " + fs.getUri());
    LOG.debug("Creating destination directory " + destDir);
    fs.mkdirs(destDir);

    // This Writer will be closed by the caller.
    return new SplittableBufferedWriter(new SplittingOutputStream(conf, destDir, "part-m-",
            options.getDirectSplitSize(), getCodec(conf, options)));
}

From source file:org.apache.storm.hdfs.spout.TestHdfsSpout.java

License:Apache License

private List<String> getSeqFileContents(FileSystem fs, Path... seqFiles) throws IOException {
    ArrayList<String> result = new ArrayList<>();

    for (Path seqFile : seqFiles) {
        Path file = new Path(fs.getUri().toString() + seqFile.toString());
        SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(file));
        try {
            Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
            Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
            while (reader.next(key, value)) {
                String keyValStr = Arrays.asList(key, value).toString();
                result.add(keyValStr);
            }
        } finally {
            reader.close();
        }
    } // for
    return result;
}

From source file:org.apache.tajo.cli.TestTajoCli.java

License:Apache License

@Test
public void testDescTable() throws Exception {
    String tableName;
    if (cluster.isHCatalogStoreRunning()) {
        tableName = "TEST_DESC_TABLE".toLowerCase();
    } else {
        tableName = "TEST_DESC_TABLE";
    }

    String sql = "create table \"" + tableName + "\" (col1 int4, col2 int4);";

    setVar(tajoCli, SessionVars.CLI_FORMATTER_CLASS, TajoCliOutputTestFormatter.class.getName());
    tajoCli.executeScript(sql);

    tajoCli.executeMetaCommand("\\d " + tableName);
    tajoCli.executeMetaCommand("\\d \"" + tableName + "\"");

    String consoleResult = new String(out.toByteArray());

    FileSystem fs = FileSystem.get(testBase.getTestingCluster().getConfiguration());
    if (!cluster.isHCatalogStoreRunning()) {
        assertOutputResult("testDescTable.result", consoleResult, new String[] { "${table.path}" },
                new String[] { fs.getUri() + "/tajo/warehouse/default/" + tableName });
    }
}

From source file:org.apache.tajo.cli.tsql.TestTajoCli.java

License:Apache License

private void verifyDescTable(String sql, String tableName, String resultFileName) throws Exception {
    setVar(tajoCli, SessionVars.CLI_FORMATTER_CLASS, TajoCliOutputTestFormatter.class.getName());
    tajoCli.executeScript(sql);

    tajoCli.executeMetaCommand("\\d " + tableName);
    tajoCli.executeMetaCommand("\\d \"" + tableName + "\"");

    String consoleResult = new String(out.toByteArray());

    FileSystem fs = FileSystem.get(testBase.getTestingCluster().getConfiguration());
    if (!cluster.isHCatalogStoreRunning()) {
        assertOutputResult(resultFileName, consoleResult, new String[] { "${table.path}" },
                new String[] { fs.getUri() + "/tajo/warehouse/default/" + tableName });
    }
}

From source file:org.apache.tajo.conf.TajoConf.java

License:Apache License

/**
 * It returns the default root staging directory used by queries without a target table or
 * a specified output directory. An example query is <pre>SELECT a,b,c FROM XXX;</pre>.
 *
 * @param conf TajoConf
 * @return Path which points the default staging directory
 * @throws IOException
 */
public static Path getDefaultRootStagingDir(TajoConf conf) throws IOException {
    String stagingDirString = conf.getVar(ConfVars.STAGING_ROOT_DIR);
    if (!hasScheme(stagingDirString)) {
        Path warehousePath = getWarehouseDir(conf);
        FileSystem fs = warehousePath.getFileSystem(conf);
        Path path = new Path(fs.getUri().toString(), stagingDirString);
        conf.setVar(ConfVars.STAGING_ROOT_DIR, path.toString());
        return path;
    }
    return new Path(stagingDirString);
}