Example usage for org.apache.hadoop.fs FileSystem getUri

List of usage examples for org.apache.hadoop.fs FileSystem getUri

Introduction

On this page you can find usage examples for the getUri() method of org.apache.hadoop.fs.FileSystem.

Prototype

public abstract URI getUri();

Document

Returns a URI which identifies this FileSystem.
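
Before the project examples below, here is a minimal, self-contained sketch of the typical getUri() usage pattern. The class name, the resolved file name, and the URIs mentioned in the comments are illustrative assumptions only; the URI actually returned depends on the configured default file system (fs.defaultFS), for example hdfs://namenode:8020 or file:///.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetUriExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // The default FileSystem for this configuration; which scheme you get
        // (hdfs://, file://, s3a://, ...) depends on fs.defaultFS.
        FileSystem fs = FileSystem.get(conf);

        // getUri() returns the scheme and authority identifying this FileSystem.
        URI uri = fs.getUri();
        System.out.println("FileSystem URI: " + uri);
        System.out.println("Scheme:         " + uri.getScheme());
        System.out.println("Authority:      " + uri.getAuthority());

        // A pattern that recurs in the examples below: treat the URI as the
        // root Path and resolve other paths against it. The file name here is
        // purely hypothetical.
        Path root = new Path(uri);
        Path resolved = new Path(root, "tmp/example.txt");
        System.out.println("Resolved path:  " + resolved);
    }
}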

Usage

From source file: com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl.java

License: Apache License

/**
 * This method executes a query on namenode. If the query succeeds, FS
 * instance is healthy. If it fails, the old instance is closed and a new
 * instance is created.
 */
public void checkAndClearFileSystem() {
    FileSystem fileSystem = getCachedFileSystem();

    if (fileSystem != null) {
        if (logger.isDebugEnabled()) {
            logger.debug("{}Checking file system at " + fileSystem.getUri(), logPrefix);
        }
        try {
            checkFileSystemExists();
            if (logger.isDebugEnabled()) {
                logger.debug("{}FS client is ok: " + fileSystem.getUri() + " " + fileSystem.hashCode(),
                        logPrefix);
            }
            return;
        } catch (ConnectTimeoutException e) {
            if (logger.isDebugEnabled()) {
                logger.debug("{}Hdfs unreachable, FS client is ok: " + fileSystem.getUri() + " "
                        + fileSystem.hashCode(), logPrefix);
            }
            return;
        } catch (IOException e) {
            logger.debug("IOError in filesystem checkAndClear ", e);

            // The file system is closed or NN is not reachable. It is safest to
            // create a new FS instance. If the NN continues to remain unavailable,
            // all subsequent read/write request will cause HDFSIOException. This is
            // similar to the way hbase manages failures. This has a drawback
            // though. A network blip will result in all connections to be
            // recreated. However trying to preserve the connections and waiting for
            // FS to auto-recover is not deterministic.
            if (e instanceof RemoteException) {
                e = ((RemoteException) e).unwrapRemoteException();
            }

            logger.warn(LocalizedMessage.create(LocalizedStrings.HOPLOG_HDFS_UNREACHABLE, fileSystem.getUri()),
                    e);
        }

        // compare and clear FS container. The fs container needs to be reusable
        boolean result = fs.clear(fileSystem, true);
        if (!result) {
            // the FS instance changed after this call was initiated. Check again
            logger.debug("{}Failed to clear FS ! I am inconsistent so retrying ..", logPrefix);
            checkAndClearFileSystem();
        } else {
            closeFileSystemIgnoreError(fileSystem);
        }
    }
}

From source file: com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl.java

License: Apache License

private void closeFileSystemIgnoreError(FileSystem fileSystem) {
    if (fileSystem == null) {
        logger.debug("{}Trying to close null file system", logPrefix);
        return;
    }

    try {
        if (logger.isDebugEnabled()) {
            logger.debug("{}Closing file system at " + fileSystem.getUri() + " " + fileSystem.hashCode(),
                    logPrefix);
        }
        fileSystem.close();
    } catch (Exception e) {
        if (logger.isDebugEnabled()) {
            logger.debug("Failed to close file system at " + fileSystem.getUri() + " " + fileSystem.hashCode(),
                    e);
        }
    }
}

From source file: com.gemstone.gemfire.cache.hdfs.internal.hoplog.HFileSortedOplog.java

License: Apache License

private void handleReadIOError(HFileReader hfileReader, IOException e, boolean skipFailIfSafe) {
    if (logger.isDebugEnabled())
        logger.debug("Read IO error", e);
    boolean safeError = ShutdownHookManager.get().isShutdownInProgress();
    if (safeError) {
        // IOException because of closed file system. This happens when member is
        // shutting down
        if (logger.isDebugEnabled())
            logger.debug("IO error caused by filesystem shutdown", e);
        throw new CacheClosedException("IO error caused by filesystem shutdown", e);
    }

    // expose the error wrapped inside remote exception. Remote exceptions are
    // handled by file system client. So let the caller handle this error
    if (e instanceof RemoteException) {
        e = ((RemoteException) e).unwrapRemoteException();
        throw new HDFSIOException(LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE.toLocalizedString(path), e);
    }

    FileSystem currentFs = fsProvider.checkFileSystem();
    if (hfileReader != null && hfileReader.previousFS != currentFs) {
        if (logger.isDebugEnabled()) {
            logger.debug("{}Detected new FS client, closing old reader", logPrefix);
            if (currentFs != null) {
                if (logger.isDebugEnabled())
                    logger.debug("CurrentFs:" + currentFs.getUri() + "-" + currentFs.hashCode(), logPrefix);
            }
            if (hfileReader.previousFS != null) {
                if (logger.isDebugEnabled())
                    logger.debug("OldFs:" + hfileReader.previousFS.getUri() + "-"
                            + hfileReader.previousFS.hashCode() + ", closing old reader", logPrefix);
            }
        }
        try {
            HFileSortedOplog.this.compareAndClose(hfileReader, false);
        } catch (Exception ex) {
            if (logger.isDebugEnabled())
                logger.debug("Failed to close reader", ex);
        }
        if (skipFailIfSafe) {
            if (logger.isDebugEnabled())
                logger.debug("Not faling after io error since FS client changed");
            return;
        }
    }

    // it is not a safe error. let the caller handle it
    throw new HDFSIOException(LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE.toLocalizedString(path), e);
}

From source file: com.github.dongjinleekr.hadoop.examples.DistributedCacheExample.java

License: Apache License

public static void printCachePath(Configuration conf) throws IOException, URISyntaxException {
    FileSystem fs = FileSystem.get(conf);
    URI[] archives = DistributedCache.getCacheArchives(conf);

    for (URI archive : archives) {
        HarFileSystem hfs = new HarFileSystem();
        String cacheUri = String.format("har://hdfs-%s:%d%s", fs.getUri().getHost(), fs.getUri().getPort(),
                archive.toString());
        System.out.println(cacheUri);

        hfs.initialize(new URI(cacheUri), conf);

        FileStatus root = hfs.listStatus(new Path("."))[0];
        FileStatus[] children = hfs.listStatus(root.getPath());

        for (FileStatus child : children) {
            System.out.println(child.getPath());
        }

        IOUtils.closeStream(hfs);
    }
}

From source file: com.github.hdl.tensorflow.yarn.app.Client.java

License: Apache License

/**
 * Main run function for the client
 * @return true if application completed successfully
 * @throws IOException
 * @throws YarnException
 */
public boolean run() throws IOException, YarnException {

    yarnClient.start();

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());

    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    LOG.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress="
                + node.getHttpAddress() + ", nodeRackName=" + node.getRackName() + ", nodeNumContainers="
                + node.getNumContainers());
    }

    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity="
            + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
            + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount="
            + queueInfo.getChildQueues().size());

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                    + userAcl.name());
        }
    }

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    // TODO get min/max resource capabilities from RM and change memory ask if needed

    long maxMem = appResponse.getMaximumResourceCapability().getMemorySize();
    LOG.info("Max mem capability of resources in this cluster " + maxMem);

    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified="
                + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    int maxVCores = appResponse.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max virtual cores capability of resources in this cluster " + maxVCores);

    if (amVCores > maxVCores) {
        LOG.info("AM virtual cores specified above max threshold of cluster. " + "Using max value."
                + ", specified=" + amVCores + ", max=" + maxVCores);
        amVCores = maxVCores;
    }

    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();

    appContext.setApplicationName(appName);

    if (attemptFailuresValidityInterval >= 0) {
        appContext.setAttemptFailuresValidityInterval(attemptFailuresValidityInterval);
    }

    Set<String> tags = new HashSet<String>();
    appContext.setApplicationTags(tags);

    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    TFAmContainer tfAmContainer = new TFAmContainer(this);

    // Copy the application jar to the filesystem
    FileSystem fs = FileSystem.get(conf);
    String dstJarPath = copyLocalFileToDfs(fs, appId.toString(), appMasterJar, TFContainer.SERVER_JAR_PATH);
    tfAmContainer.addToLocalResources(fs, new Path(dstJarPath), TFAmContainer.APPMASTER_JAR_PATH,
            localResources);

    String jniSoDfsPath = "";
    if (jniSoFile != null && !jniSoFile.equals("")) {
        jniSoDfsPath = copyLocalFileToDfs(fs, appId.toString(), jniSoFile, "libbridge.so");
    }
    // Set the log4j properties if needed
    /*    if (!log4jPropFile.isEmpty()) {
          tfAmContainer.addToLocalResources(fs, log4jPropFile, log4jPath, appId.toString(),
              localResources, null);
        }*/

    // Set the necessary security tokens as needed
    //amContainer.setContainerTokens(containerToken);

    Map<String, String> env = tfAmContainer.setJavaEnv(conf);

    if (null != nodeLabelExpression) {
        appContext.setNodeLabelExpression(nodeLabelExpression);
    }

    StringBuilder command = tfAmContainer.makeCommands(amMemory, appMasterMainClass, containerMemory,
            containerVirtualCores, workerNum, psNum, dstJarPath, containerRetryOptions, jniSoDfsPath);

    LOG.info("AppMaster command: " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());

    ContainerLaunchContext amContainer = ContainerLaunchContext.newInstance(localResources, env, commands, null,
            null, null);

    Resource capability = Resource.newInstance(amMemory, amVCores);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application
    // Not needed in this scenario
    // amContainer.setServiceData(serviceData);

    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        // Note: Credentials class is marked as LimitedPrivate for HDFS and MapReduce
        Credentials credentials = new Credentials();
        String tokenRenewer = YarnClientUtils.getRmPrincipal(conf);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    // TODO - what is the range for priority? how to decide?
    Priority pri = Priority.newInstance(amPriority);
    appContext.setPriority(pri);

    appContext.setQueue(amQueue);

    LOG.info("Submitting application to ASM");

    yarnClient.submitApplication(appContext);
    handleSignal(appId);
    return monitorApplication(appId);

}

From source file: com.github.sakserv.minicluster.impl.KdcLocalClusterHdfsIntegrationTest.java

License: Apache License

@Test
public void testHdfs() throws Exception {
    FileSystem hdfsFsHandle = hdfsLocalCluster.getHdfsFileSystemHandle();

    UserGroupInformation.loginUserFromKeytab(kdcLocalCluster.getKrbPrincipalWithRealm("hdfs"),
            kdcLocalCluster.getKeytabForPrincipal("hdfs"));

    assertTrue(UserGroupInformation.isSecurityEnabled());
    assertTrue(UserGroupInformation.isLoginKeytabBased());

    // Write a file to HDFS containing the test string
    FSDataOutputStream writer = hdfsFsHandle
            .create(new Path(propertyParser.getProperty(ConfigVars.HDFS_TEST_FILE_KEY)));
    writer.writeUTF(propertyParser.getProperty(ConfigVars.HDFS_TEST_STRING_KEY));
    writer.close();

    // Read the file and compare to test string
    FSDataInputStream reader = hdfsFsHandle
            .open(new Path(propertyParser.getProperty(ConfigVars.HDFS_TEST_FILE_KEY)));
    assertEquals(reader.readUTF(), propertyParser.getProperty(ConfigVars.HDFS_TEST_STRING_KEY));
    reader.close();

    // Log out
    UserGroupInformation.getLoginUser().logoutUserFromKeytab();

    UserGroupInformation.reset();

    try {
        Configuration conf = new Configuration();
        UserGroupInformation.setConfiguration(conf);
        FileSystem.get(hdfsFsHandle.getUri(), conf)
                .open(new Path(propertyParser.getProperty(ConfigVars.HDFS_TEST_FILE_KEY)));
        fail();
    } catch (AccessControlException e) {
        LOG.info("Not authenticated!");
    }
}

From source file: com.google.mr4c.hadoop.HadoopTestUtils.java

License: Open Source License

public static URI toTestDFSURI(String path) throws IOException {
    FileSystem fs = getTestDFS();
    Path root = new Path(fs.getUri());
    return new Path(root, path).toUri();
}

From source file: com.google.mr4c.hadoop.mrv1.MRv1TestBinding.java

License: Open Source License

private void startMRCluster() throws IOException {

    FileSystem fs = HadoopTestUtils.getTestDFS();
    m_mrCluster = new MiniMRCluster(1, // # of task trackers
            fs.getUri().toString(), // name node
            1 // # of directories
    );

}

From source file: com.google.mr4c.sources.HDFSFileSource.java

License: Open Source License

public static HDFSFileSource create(FileSystem fs, Path dir, boolean flat) {
    Path root = new Path(fs.getUri());
    dir = new Path(root, dir);
    return new HDFSFileSource(fs, dir, flat);
}

From source file: com.google.mr4c.sources.MapFileSource.java

License: Open Source License

public MapFileSource(FileSystem fs, Path dir) throws IOException {
    m_fs = fs;
    m_config = m_fs.getConf();
    Path root = new Path(fs.getUri());
    m_dir = new Path(root, dir);
    m_dirStr = m_dir.toUri().getPath();
    m_dataPath = new Path(m_dir, MapFile.DATA_FILE_NAME);
    m_indexPath = new Path(m_dir, MapFile.INDEX_FILE_NAME);
    m_metaPath = new Path(m_dir, "metadata");
}