Example usage for org.apache.hadoop.fs.FileSystem.getUri()

Introduction

This page collects example usages of org.apache.hadoop.fs.FileSystem.getUri() from open-source projects.

Prototype

public abstract URI getUri();

Document

Returns a URI which identifies this FileSystem.
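
Before the project excerpts, here is a minimal sketch of the call (assuming a default Hadoop Configuration; the hdfs:// address in the comments is purely illustrative):

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetUriExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Resolves the FileSystem named by fs.defaultFS, e.g. hdfs://namenode:8020 or file:///
        FileSystem fs = FileSystem.get(conf);

        // getUri() returns the scheme and authority that identify this FileSystem instance
        URI uri = fs.getUri();
        System.out.println("FileSystem URI: " + uri);

        // A pattern that recurs in the examples below: qualify a relative path against this FileSystem
        Path qualified = new Path("data/input.txt").makeQualified(uri, fs.getWorkingDirectory());
        System.out.println("Qualified path: " + qualified);
    }
}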

Usage

From source file: org.apache.oozie.service.TestShareLibService.java

License: Apache License

@Test
public void testShareLibLoadFileMultipleFile() throws Exception {
    FileSystem fs = getFileSystem();
    services = new Services();
    createTestShareLibMetaFile_multipleFile(fs);
    setSystemProps();
    Configuration conf = services.get(ConfigurationService.class).getConf();
    conf.set(ShareLibService.SHARELIB_MAPPING_FILE, fs.getUri() + "/user/test/config.properties");
    conf.set(ShareLibService.SHIP_LAUNCHER_JAR, "true");
    try {
        services.init();
        ShareLibService shareLibService = Services.get().get(ShareLibService.class);
        assertNull(shareLibService.getShareLibJars("something_new"));
        assertEquals(shareLibService.getShareLibJars("pig").size(), 2);
        fs.delete(new Path("shareLibPath/"), true);
    } finally {
        services.destroy();
    }
}

From source file: org.apache.oozie.service.TestShareLibService.java

License: Apache License

@Test
public void testMetafileSymlink() throws ServiceException, IOException {
    // Assume.assumeTrue("Skipping for hadoop - 1.x",HadoopFileSystem.isSymlinkSupported());
    if (!HadoopShims.isSymlinkSupported()) {
        return;
    }

    services = new Services();
    setSystemProps();
    Configuration conf = services.get(ConfigurationService.class).getConf();
    conf.set(ShareLibService.SHIP_LAUNCHER_JAR, "true");
    services.init();
    FileSystem fs = getFileSystem();
    Properties prop = new Properties();
    try {

        String testPath = "shareLibPath/";

        Path basePath = new Path(testPath + Path.SEPARATOR + "testPath");
        Path basePath1 = new Path(testPath + Path.SEPARATOR + "testPath1");
        Path hive_site = new Path(
                basePath.toString() + Path.SEPARATOR + "hive_conf" + Path.SEPARATOR + "hive-site.xml");
        Path hive_site1 = new Path(
                basePath.toString() + Path.SEPARATOR + "hive_conf" + Path.SEPARATOR + "hive-site1.xml");
        Path symlink = new Path("symlink/");
        Path symlink_hive_site = new Path("symlink/hive_conf" + Path.SEPARATOR + "hive-site.xml");

        fs.mkdirs(basePath);

        createFile(basePath.toString() + Path.SEPARATOR + "pig" + Path.SEPARATOR + "pig.jar");
        createFile(basePath.toString() + Path.SEPARATOR + "pig" + Path.SEPARATOR + "pig_1.jar");

        createFile(basePath1.toString() + Path.SEPARATOR + "pig" + Path.SEPARATOR + "pig_2.jar");
        createFile(basePath1.toString() + Path.SEPARATOR + "pig" + Path.SEPARATOR + "pig_3.jar");
        createFile(basePath1.toString() + Path.SEPARATOR + "pig" + Path.SEPARATOR + "pig_4.jar");

        createFile(hive_site.toString());

        HadoopShims fileSystem = new HadoopShims(fs);
        fileSystem.createSymlink(basePath, symlink, true);
        fileSystem.createSymlink(hive_site, symlink_hive_site, true);

        prop.put(ShareLibService.SHARE_LIB_CONF_PREFIX + ".pig", "/user/test/" + symlink.toString());
        prop.put(ShareLibService.SHARE_LIB_CONF_PREFIX + ".hive_conf",
                "/user/test/" + symlink_hive_site.toString() + "#hive-site.xml");
        createTestShareLibMetaFile(fs, prop);
        assertEquals(fileSystem.isSymlink(symlink), true);

        conf.set(ShareLibService.SHARELIB_MAPPING_FILE, fs.getUri() + "/user/test/config.properties");
        conf.set(ShareLibService.SHIP_LAUNCHER_JAR, "true");
        try {
            ShareLibService shareLibService = Services.get().get(ShareLibService.class);
            assertEquals(shareLibService.getShareLibJars("pig").size(), 2);
            assertEquals(shareLibService.getShareLibJars("hive_conf").size(), 1);
            new HadoopShims(fs).createSymlink(basePath1, symlink, true);
            new HadoopShims(fs).createSymlink(hive_site1, symlink_hive_site, true);
            assertEquals(
                    new HadoopShims(fs).getSymLinkTarget(shareLibService.getShareLibJars("hive_conf").get(0)),
                    hive_site1);
            assertEquals(shareLibService.getShareLibJars("pig").size(), 3);
        } finally {
            fs.delete(new Path("shareLibPath/"), true);
            fs.delete(new Path(metaFile), true);
            fs.delete(new Path("/user/test/config.properties"), true);

            fs.delete(symlink, true);
            services.destroy();
        }
    } catch (IOException ex) {
        ex.printStackTrace();
    }
}

From source file: org.apache.oozie.test.hive.MiniHS2.java

License: Apache License

public MiniHS2(HiveConf hiveConf, FileSystem fs) throws IOException {
    // MiniHS2 normally only lets you do either "local" mode or normal mode.  We couldn't use "local" mode because it forks out
    // a process to run a shell script (that we don't have) to run Hadoop jobs.  And we didn't want to use normal mode because that
    // creates Mini MR and DFS clusters, which we already have setup for Oozie.  Our hacking here involved deleting the Hive Mini
    // MR/DFS cluster code and passing in our jobConf in the hiveConf so that HS2 would use our Mini MR/DFS cluster.
    super(hiveConf, "localhost", MetaStoreUtils.findFreePort(), MetaStoreUtils.findFreePort());
    baseDir = Files.createTempDir();
    baseDfsDir = new Path(new Path(fs.getUri()), "/base");
    String metaStoreURL = "jdbc:derby:" + baseDir.getAbsolutePath() + File.separator + "test_metastore-"
            + hs2Counter.incrementAndGet() + ";create=true";

    fs.mkdirs(baseDfsDir);
    Path wareHouseDir = new Path(baseDfsDir, "warehouse");
    fs.mkdirs(wareHouseDir);
    setWareHouseDir(wareHouseDir.toString());
    System.setProperty(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, metaStoreURL);
    hiveConf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY, metaStoreURL);
    // reassign a new port, just in case if one of the MR services grabbed the last one
    setBinaryPort(MetaStoreUtils.findFreePort());
    hiveConf.setVar(ConfVars.HIVE_SERVER2_TRANSPORT_MODE, HS2_BINARY_MODE);
    hiveConf.setVar(ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST, getHost());
    hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_PORT, getBinaryPort());
    hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_PORT, getHttpPort());

    Path scratchDir = new Path(baseDfsDir, "scratch");
    fs.mkdirs(scratchDir);
    System.setProperty(HiveConf.ConfVars.SCRATCHDIR.varname, scratchDir.toString());
    System.setProperty(HiveConf.ConfVars.LOCALSCRATCHDIR.varname,
            baseDir.getPath() + File.separator + "scratch");
}

From source file: org.apache.oozie.test.TestXFsTestCase.java

License: Apache License

public void testFsDir() throws Exception {
    assertNotNull(getFsTestCaseDir());
    assertNotNull(getFileSystem());
    assertNotNull(getTestCaseFileUri("file"));

    String testDir = getTestCaseDir();
    String testFile = getTestCaseFileUri("file");
    String nameNode = getNameNodeUri();
    String user = getTestUser();
    Path fsTestDir = getFsTestCaseDir();

    assertTrue(fsTestDir.toString().startsWith(nameNode));
    assertTrue(fsTestDir.toString().contains(user));
    assertEquals(fsTestDir, getFsTestCaseDir());

    FileSystem fs = getFileSystem();
    assertTrue(fs.getUri().toString().startsWith(getNameNodeUri()));

    assertTrue(fs.exists(fsTestDir));
    assertTrue(fs.listStatus(fsTestDir).length == 0);

    assertTrue(new File(new URI(testFile)).createNewFile());
}

From source file: org.apache.oozie.test.XTestCase.java

License: Apache License

private void setUpEmbeddedHadoop(String testCaseDir) throws Exception {
    if (dfsCluster == null && mrCluster == null) {
        if (System.getProperty("hadoop.log.dir") == null) {
            System.setProperty("hadoop.log.dir", testCaseDir);
        }
        int taskTrackers = 2;
        int dataNodes = 2;
        String oozieUser = getOozieUser();
        JobConf conf = createDFSConfig();
        String[] userGroups = new String[] { getTestGroup(), getTestGroup2() };
        UserGroupInformation.createUserForTesting(oozieUser, userGroups);
        UserGroupInformation.createUserForTesting(getTestUser(), userGroups);
        UserGroupInformation.createUserForTesting(getTestUser2(), userGroups);
        UserGroupInformation.createUserForTesting(getTestUser3(), new String[] { "users" });

        try {
            dfsCluster = new MiniDFSCluster(conf, dataNodes, true, null);
            FileSystem fileSystem = dfsCluster.getFileSystem();
            fileSystem.mkdirs(new Path("target/test-data"));
            fileSystem.mkdirs(new Path("target/test-data" + "/minicluster/mapred"));
            fileSystem.mkdirs(new Path("/user"));
            fileSystem.mkdirs(new Path("/tmp"));
            fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
            fileSystem.setPermission(new Path("target/test-data"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("target/test-data" + "/minicluster"),
                    FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("target/test-data" + "/minicluster/mapred"),
                    FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
            String nnURI = fileSystem.getUri().toString();
            int numDirs = 1;
            String[] racks = null;
            String[] hosts = null;
            mrCluster = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks, hosts, null, conf);
            JobConf jobConf = mrCluster.createJobConf();
            System.setProperty(OOZIE_TEST_JOB_TRACKER, jobConf.get("mapred.job.tracker"));
            String rmAddress = jobConf.get("yarn.resourcemanager.address");
            if (rmAddress != null) {
                System.setProperty(OOZIE_TEST_JOB_TRACKER, rmAddress);
            }
            System.setProperty(OOZIE_TEST_NAME_NODE, jobConf.get("fs.default.name"));
            ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
        } catch (Exception ex) {
            shutdownMiniCluster();
            throw ex;
        }
        new MiniClusterShutdownMonitor().start();
    }
}

From source file: org.apache.oozie.util.ClasspathUtils.java

License: Apache License

private static void addToClasspathIfNotJar(Path[] paths, URI[] withLinks, Configuration conf,
        Map<String, String> environment, String classpathEnvVar) throws IOException {
    if (paths != null) {
        HashMap<Path, String> linkLookup = new HashMap<Path, String>();
        if (withLinks != null) {
            for (URI u : withLinks) {
                Path p = new Path(u);
                FileSystem remoteFS = p.getFileSystem(conf);
                p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(), remoteFS.getWorkingDirectory()));
                String name = (null == u.getFragment()) ? p.getName() : u.getFragment();
                if (!name.toLowerCase(Locale.ENGLISH).endsWith(".jar")) {
                    linkLookup.put(p, name);
                }
            }
        }

        for (Path p : paths) {
            FileSystem remoteFS = p.getFileSystem(conf);
            p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(), remoteFS.getWorkingDirectory()));
            String name = linkLookup.get(p);
            if (name == null) {
                name = p.getName();
            }
            if (!name.toLowerCase(Locale.ENGLISH).endsWith(".jar")) {
                MRApps.addToEnvironment(environment, classpathEnvVar,
                        ApplicationConstants.Environment.PWD.$() + Path.SEPARATOR + name, conf);
            }
        }
    }
}

From source file: org.apache.phoenix.hive.HiveTestUtil.java

License: Apache License

public HiveTestUtil(String outDir, String logDir, MiniClusterType clusterType, String confDir, String hadoopVer)
        throws Exception {
    this.outDir = outDir;
    this.logDir = logDir;
    if (confDir != null && !confDir.isEmpty()) {
        HiveConf.setHiveSiteLocation(
                new URL("file://" + new File(confDir).toURI().getPath() + "/hive-site.xml"));
        LOG.info("Setting hive-site: " + HiveConf.getHiveSiteLocation());
    }
    conf = new HiveConf();
    String tmpBaseDir = System.getProperty("test.tmp.dir");
    if (tmpBaseDir == null || tmpBaseDir.isEmpty()) {
        tmpBaseDir = System.getProperty("java.io.tmpdir");
    }
    String metaStoreURL = "jdbc:derby:" + tmpBaseDir + File.separator + "metastore_dbtest;" + "create=true";
    conf.set(ConfVars.METASTORECONNECTURLKEY.varname, metaStoreURL);
    System.setProperty(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, metaStoreURL);

    //set where derby logs
    File derbyLogFile = new File(tmpBaseDir + "/derby.log");
    derbyLogFile.createNewFile();
    System.setProperty("derby.stream.error.file", derbyLogFile.getPath());

    this.hadoopVer = getHadoopMainVersion(hadoopVer);
    qMap = new TreeMap<String, String>();
    qSkipSet = new HashSet<String>();
    qSortSet = new HashSet<String>();
    qSortQuerySet = new HashSet<String>();
    qHashQuerySet = new HashSet<String>();
    qSortNHashQuerySet = new HashSet<String>();
    qJavaVersionSpecificOutput = new HashSet<String>();
    this.clusterType = clusterType;

    // Using randomUUID for dfs cluster
    System.setProperty("test.build.data", "target/test-data/hive-" + UUID.randomUUID().toString());

    HadoopShims shims = ShimLoader.getHadoopShims();
    int numberOfDataNodes = 1;

    if (clusterType != MiniClusterType.none) {
        dfs = shims.getMiniDfs(conf, numberOfDataNodes, true, null);
        FileSystem fs = dfs.getFileSystem();
        String uriString = WindowsPathUtil.getHdfsUriString(fs.getUri().toString());
        if (clusterType == MiniClusterType.tez) {
            conf.set("hive.execution.engine", "tez");
            mr = shims.getMiniTezCluster(conf, 1, uriString, 1);
        } else {
            conf.set("hive.execution.engine", "mr");
            mr = shims.getMiniMrCluster(conf, 1, uriString, 1);

        }
    }

    initConf();

    // Use the current directory if it is not specified
    String dataDir = conf.get("test.data.files");
    if (dataDir == null) {
        dataDir = new File(".").getAbsolutePath() + "/data/files";
    }

    testFiles = dataDir;

    // Use the current directory if it is not specified
    String scriptsDir = conf.get("test.data.scripts");
    if (scriptsDir == null) {
        scriptsDir = new File(".").getAbsolutePath() + "/data/scripts";
    }
    if (!initScript.isEmpty()) {
        this.initScript = scriptsDir + "/" + initScript;
    }
    if (!cleanupScript.isEmpty()) {
        this.cleanupScript = scriptsDir + "/" + cleanupScript;
    }

    overWrite = "true".equalsIgnoreCase(System.getProperty("test.output.overwrite"));

    setup = new HiveTestSetup();
    setup.preTest(conf);
    init();
}

From source file: org.apache.pig.impl.util.JarManager.java

License: Apache License

/**
 * Add the qualified path name of jars containing the given classes 
 *
 * @param fs
 *            FileSystem object
 * @param jars
 *            the resolved path names to be added to this set
 * @param classes
 *            classes to find
 */
private static void addQualifiedJarsName(FileSystem fs, Set<String> jars, Class<?>... classes) {
    URI fsUri = fs.getUri();
    Path workingDir = fs.getWorkingDirectory();
    for (Class<?> clazz : classes) {
        String jarName = findContainingJar(clazz);
        if (jarName == null) {
            log.warn("Could not find jar for class " + clazz);
            continue;
        }
        jars.add(new Path(jarName).makeQualified(fsUri, workingDir).toString());
    }
}

From source file: org.apache.reef.runtime.mesos.driver.MesosResourceLaunchHandler.java

License: Apache License

@Override
public void onNext(final DriverRuntimeProtocol.ResourceLaunchProto resourceLaunchProto) {
    try {
        LOG.log(Level.INFO, "resourceLaunchProto. {0}", resourceLaunchProto.toString());

        final File localStagingFolder = Files.createTempDirectory(this.fileNames.getEvaluatorFolderPrefix())
                .toFile();

        final Configuration evaluatorConfiguration = Tang.Factory.getTang()
                .newConfigurationBuilder(
                        this.configurationSerializer.fromString(resourceLaunchProto.getEvaluatorConf()))
                .bindImplementation(TempFileCreator.class, WorkingDirectoryTempFileCreator.class).build();

        final File configurationFile = new File(localStagingFolder,
                this.fileNames.getEvaluatorConfigurationName());
        this.configurationSerializer.toFile(evaluatorConfiguration, configurationFile);

        JobJarMaker.copy(resourceLaunchProto.getFileList(), localStagingFolder);

        final FileSystem fileSystem = FileSystem.get(new org.apache.hadoop.conf.Configuration());
        final Path hdfsFolder = new Path(fileSystem.getUri() + "/" + resourceLaunchProto.getIdentifier() + "/");
        FileUtil.copy(localStagingFolder, fileSystem, hdfsFolder, false,
                new org.apache.hadoop.conf.Configuration());

        // TODO: Replace REEFExecutor with a simple launch command (we only need to launch REEFExecutor)
        final LaunchCommandBuilder commandBuilder;
        switch (resourceLaunchProto.getType()) {
        case JVM:
            commandBuilder = new JavaLaunchCommandBuilder()
                    .setClassPath(this.classpath.getEvaluatorClasspath());
            break;
        case CLR:
            commandBuilder = new CLRLaunchCommandBuilder();
            break;
        default:
            throw new IllegalArgumentException("Unsupported container type");
        }

        final List<String> command = commandBuilder.setErrorHandlerRID(this.remoteManager.getMyIdentifier())
                .setLaunchID(resourceLaunchProto.getIdentifier())
                .setConfigurationFileName(this.fileNames.getEvaluatorConfigurationPath())
                .setMemory((int) (this.jvmHeapFactor
                        * this.executors.getMemory(resourceLaunchProto.getIdentifier())))
                .build();

        this.executors.launchEvaluator(
                new EvaluatorLaunch(resourceLaunchProto.getIdentifier(), StringUtils.join(command, ' ')));
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}

From source file: org.apache.reef.runtime.mesos.driver.REEFScheduler.java

License: Apache License

private String getReefTarUri(final String jobIdentifier) {
    try {
        // Create REEF_TAR
        final FileOutputStream fileOutputStream = new FileOutputStream(REEF_TAR);
        final TarArchiveOutputStream tarArchiveOutputStream = new TarArchiveOutputStream(
                new GZIPOutputStream(fileOutputStream));
        final File globalFolder = new File(this.fileNames.getGlobalFolderPath());
        final DirectoryStream<Path> directoryStream = Files.newDirectoryStream(globalFolder.toPath());

        for (final Path path : directoryStream) {
            tarArchiveOutputStream.putArchiveEntry(
                    new TarArchiveEntry(path.toFile(), globalFolder + "/" + path.getFileName()));

            final BufferedInputStream bufferedInputStream = new BufferedInputStream(
                    new FileInputStream(path.toFile()));
            IOUtils.copy(bufferedInputStream, tarArchiveOutputStream);
            bufferedInputStream.close();

            tarArchiveOutputStream.closeArchiveEntry();
        }
        directoryStream.close();
        tarArchiveOutputStream.close();
        fileOutputStream.close();

        // Upload REEF_TAR to HDFS
        final FileSystem fileSystem = FileSystem.get(new Configuration());
        final org.apache.hadoop.fs.Path src = new org.apache.hadoop.fs.Path(REEF_TAR);
        final String reefTarUri = fileSystem.getUri().toString() + "/" + jobIdentifier + "/" + REEF_TAR;
        final org.apache.hadoop.fs.Path dst = new org.apache.hadoop.fs.Path(reefTarUri);
        fileSystem.copyFromLocalFile(src, dst);

        return reefTarUri;
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}