Example usage for org.apache.hadoop.yarn.api.records LocalResource setType

Introduction

On this page you can find example usages of org.apache.hadoop.yarn.api.records.LocalResource.setType, collected from open-source projects.

Prototype

@Public
@Stable
public abstract void setType(LocalResourceType type);

Document

Set the LocalResourceType of the resource to be localized.
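
Before the project examples, here is a minimal, self-contained sketch of how setType is typically combined with the other LocalResource setters. It is not taken from any of the projects below; the file path in main and the APPLICATION visibility are placeholder assumptions.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Records;

public class LocalResourceTypeExample {

    // Build a LocalResource for a file that already exists on the given FileSystem.
    static LocalResource forPath(FileSystem fs, Path path, boolean unpack) throws IOException {
        FileStatus stat = fs.getFileStatus(path);
        LocalResource resource = Records.newRecord(LocalResource.class);
        resource.setResource(ConverterUtils.getYarnUrlFromPath(stat.getPath()));
        resource.setSize(stat.getLen());
        resource.setTimestamp(stat.getModificationTime());
        // FILE is localized as-is; ARCHIVE (jar/tar/zip) is unpacked in the container's working directory.
        resource.setType(unpack ? LocalResourceType.ARCHIVE : LocalResourceType.FILE);
        // Placeholder assumption: visible only to containers of this application.
        resource.setVisibility(LocalResourceVisibility.APPLICATION);
        return resource;
    }

    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        // Placeholder path; point it at a file that exists on your cluster.
        LocalResource lr = forPath(fs, new Path("/tmp/app.jar"), false);
        System.out.println(lr.getType());
    }
}

The resulting LocalResource is then normally put into a Map<String, LocalResource> and handed to ContainerLaunchContext.setLocalResources(...), as the Client.java example further down illustrates.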

Usage

From source file:org.apache.tez.benchmark.SessionTest.java

License:Apache License

protected LocalResource createLocalResource(FileSystem fs, Path file) throws IOException {
    final LocalResourceType type = LocalResourceType.FILE;
    final LocalResourceVisibility visibility = LocalResourceVisibility.APPLICATION;
    FileStatus fstat = fs.getFileStatus(file);
    org.apache.hadoop.yarn.api.records.URL resourceURL = ConverterUtils.getYarnUrlFromPath(file);
    long resourceSize = fstat.getLen();
    long resourceModificationTime = fstat.getModificationTime();
    LocalResource lr = Records.newRecord(LocalResource.class);
    lr.setResource(resourceURL);
    lr.setType(type);
    lr.setSize(resourceSize);
    lr.setVisibility(visibility);
    lr.setTimestamp(resourceModificationTime);
    return lr;
}

From source file:org.apache.tez.client.TezClientUtils.java

License:Apache License

/**
 * Helper function to create a YARN LocalResource
 * @param fs FileSystem object
 * @param p Path of resource to localize
 * @param type LocalResource type
 * @param visibility LocalResource visibility
 * @return a YARN LocalResource for the given Path
 * @throws IOException
 */
static LocalResource createLocalResource(FileSystem fs, Path p, LocalResourceType type,
        LocalResourceVisibility visibility) throws IOException {
    LocalResource rsrc = Records.newRecord(LocalResource.class);
    FileStatus rsrcStat = fs.getFileStatus(p);
    rsrc.setResource(ConverterUtils.getYarnUrlFromPath(fs.resolvePath(rsrcStat.getPath())));
    rsrc.setSize(rsrcStat.getLen());
    rsrc.setTimestamp(rsrcStat.getModificationTime());
    rsrc.setType(type);
    rsrc.setVisibility(visibility);
    return rsrc;
}

From source file:org.apache.tez.dag.api.DagTypeConverters.java

License:Apache License

public static Map<String, LocalResource> createLocalResourceMapFromDAGPlan(
        List<PlanLocalResource> localResourcesList) {
    Map<String, LocalResource> map = new HashMap<String, LocalResource>();
    for (PlanLocalResource res : localResourcesList) {
        LocalResource r = new LocalResourcePBImpl();

        // NOTE: every optional field in the protobuf-generated classes has to be checked for existence
        // before accessing it, otherwise a default value (e.g. "") is returned
        if (res.hasPattern()) {
            r.setPattern(res.getPattern());
        }
        r.setResource(ConverterUtils.getYarnUrlFromPath(new Path(res.getUri()))); // see above notes on HDFS URL handling
        r.setSize(res.getSize());
        r.setTimestamp(res.getTimeStamp());
        r.setType(DagTypeConverters.convertFromDAGPlan(res.getType()));
        r.setVisibility(DagTypeConverters.convertFromDAGPlan(res.getVisibility()));
        map.put(res.getName(), r);
    }
    return map;
}

From source file:org.apache.tez.dag.app.launcher.TestTezLocalCacheManager.java

License:Apache License

private static LocalResource createFile(String content) throws IOException {
    FileContext fs = FileContext.getLocalFSFileContext();

    java.nio.file.Path tempFile = Files.createTempFile("test-cache-manager", ".txt");
    File temp = tempFile.toFile();
    temp.deleteOnExit();
    Path p = new Path("file:///" + tempFile.toAbsolutePath().toString());

    Files.write(tempFile, content.getBytes());

    RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
    LocalResource ret = recordFactory.newRecordInstance(LocalResource.class);
    URL yarnUrlFromPath = ConverterUtils.getYarnUrlFromPath(p);
    ret.setResource(yarnUrlFromPath);
    ret.setSize(content.getBytes().length);
    ret.setType(LocalResourceType.FILE);
    ret.setVisibility(LocalResourceVisibility.PRIVATE);
    ret.setTimestamp(fs.getFileStatus(p).getModificationTime());
    return ret;
}

From source file:org.apache.tez.mapreduce.client.YARNRunner.java

License:Apache License

private LocalResource createApplicationResource(FileContext fs, Path p, LocalResourceType type)
        throws IOException {
    LocalResource rsrc = Records.newRecord(LocalResource.class);
    FileStatus rsrcStat = fs.getFileStatus(p);
    rsrc.setResource(
            ConverterUtils.getYarnUrlFromPath(fs.getDefaultFileSystem().resolvePath(rsrcStat.getPath())));
    rsrc.setSize(rsrcStat.getLen());
    rsrc.setTimestamp(rsrcStat.getModificationTime());
    rsrc.setType(type);
    rsrc.setVisibility(LocalResourceVisibility.APPLICATION);
    return rsrc;
}

From source file:org.apache.tez.Tez_1494.java

License:Apache License

private LocalResource createLocalResource(FileSystem fs, Path file) throws IOException {
    final LocalResourceType type = LocalResourceType.FILE;
    final LocalResourceVisibility visibility = LocalResourceVisibility.APPLICATION;
    FileStatus fstat = fs.getFileStatus(file);
    org.apache.hadoop.yarn.api.records.URL resourceURL = ConverterUtils.getYarnUrlFromPath(file);
    long resourceSize = fstat.getLen();
    long resourceModificationTime = fstat.getModificationTime();
    LocalResource lr = Records.newRecord(LocalResource.class);
    lr.setResource(resourceURL);
    lr.setType(type);
    lr.setSize(resourceSize);
    lr.setVisibility(visibility);
    lr.setTimestamp(resourceModificationTime);
    return lr;
}

From source file:org.deeplearning4j.iterativereduce.runtime.Utils.java

License:Apache License

public static Map<String, LocalResource> getLocalResourcesForApplication(Configuration conf,
        ApplicationId appId, String appName, Properties props, LocalResourceVisibility visibility)
        throws IOException {

    List<String> resources = new ArrayList<String>();
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    resources.add(getPathForResource(props.getProperty(ConfigFields.JAR_PATH), appId, appName)); // Our app JAR
    resources.add(getPathForResource(props.getProperty(ConfigFields.APP_JAR_PATH), appId, appName)); // User app JAR
    resources.add(getPathForResource(ConfigFields.APP_CONFIG_FILE, appId, appName)); // Our application configuration
    resources.add(getPathForResource("log4j.properties", appId, appName));

    // Libs
    String libs = props.getProperty(ConfigFields.APP_LIB_PATH);
    if (libs != null && !libs.isEmpty()) {
        for (String s : libs.split(",")) {
            resources.add(getPathForResource(s, appId, appName));
        }
    }

    FileSystem fs = FileSystem.get(conf);
    Path fsPath;
    FileStatus fstat;

    // Convert to local resource list
    for (String resource : resources) {
        try {
            fsPath = new Path(resource);
            fstat = fs.getFileStatus(fsPath);
            LOG.debug("Processing local resource=" + fstat.getPath());

            //System.out.println("IR: Utils > Converting to local resource: " + fstat.getPath() );

            LocalResource localResource = Records.newRecord(LocalResource.class);
            localResource.setResource(ConverterUtils.getYarnUrlFromPath(fstat.getPath()));
            localResource.setSize(fstat.getLen());
            localResource.setTimestamp(fstat.getModificationTime());
            localResource.setVisibility(visibility);
            localResource.setType(LocalResourceType.FILE);

            localResources.put(fsPath.getName(), localResource);
        } catch (FileNotFoundException ex) {
            LOG.warn("Unable to copy file " + resource + ": File not found.");
        }
    }

    return localResources;
}

From source file:org.dknight.app.Client.java

License:Apache License

/**
 * Main run function for the client
 * @return true if application completed successfully
 * @throws IOException
 * @throws YarnException
 */
public boolean run() throws IOException, YarnException {

    LOG.info("Running Client");
    yarnClient.start();

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());

    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    LOG.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress"
                + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers"
                + node.getNumContainers());
    }

    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity="
            + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
            + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount="
            + queueInfo.getChildQueues().size());

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                    + userAcl.name());
        }
    }

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    // TODO get min/max resource capabilities from RM and change memory ask if needed
    // If we do not have min/max, we may not be able to correctly request 
    // the required resources from the RM for the app master
    // Memory ask has to be a multiple of min and less than max. 
    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    // A resource ask cannot exceed the max. 
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified="
                + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();
    appContext.setApplicationName(appName);

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of the local resources         
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem 
    // Create a local resource to point to the destination jar path 
    FileSystem fs = FileSystem.get(conf);
    Path src = new Path(appMasterJar);
    String pathSuffix = appName + "/" + appId.getId() + "/AppMaster.jar";
    Path dst = new Path(fs.getHomeDirectory(), pathSuffix);
    fs.copyFromLocalFile(false, true, src, dst);
    FileStatus destStatus = fs.getFileStatus(dst);
    LocalResource amJarRsrc = Records.newRecord(LocalResource.class);

    // Set the type of resource - file or archive
    // archives are untarred at destination
    // we don't need the jar file to be untarred for now
    amJarRsrc.setType(LocalResourceType.FILE);
    // Set visibility of the resource 
    // Setting to most private option
    amJarRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
    // Set the resource to be copied over
    amJarRsrc.setResource(ConverterUtils.getYarnUrlFromPath(dst));
    // Set timestamp and length of file so that the framework 
    // can do basic sanity checks for the local resource 
    // after it has been copied over to ensure it is the same 
    // resource the client intended to use with the application
    amJarRsrc.setTimestamp(destStatus.getModificationTime());
    amJarRsrc.setSize(destStatus.getLen());
    localResources.put("AppMaster.jar", amJarRsrc);

    String confXMLFSPath = "";
    {
        File clusterConfXML = new File("cluster-conf.xml");
        conf.writeXml(new FileOutputStream(clusterConfXML));
        Path confSrc = new Path(clusterConfXML.getAbsolutePath());
        String confPathSuffix = appName + "/" + appId.getId() + "/cluster-conf.xml";
        Path confDst = new Path(fs.getHomeDirectory(), confPathSuffix);
        fs.copyFromLocalFile(false, true, confSrc, confDst);
        FileStatus confFileStatus = fs.getFileStatus(confDst);
        LocalResource confRsrc = Records.newRecord(LocalResource.class);
        confRsrc.setType(LocalResourceType.FILE);
        confRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
        confRsrc.setResource(ConverterUtils.getYarnUrlFromURI(confDst.toUri()));
        confRsrc.setSize(confFileStatus.getLen());
        confRsrc.setTimestamp(confFileStatus.getModificationTime());
        localResources.put("cluster-conf.xml", confRsrc);
        confXMLFSPath = confDst.toUri().getPath();
    }

    // Set the log4j properties if needed 
    if (!log4jPropFile.isEmpty()) {
        Path log4jSrc = new Path(log4jPropFile);
        Path log4jDst = new Path(fs.getHomeDirectory(), "log4j.props");
        fs.copyFromLocalFile(false, true, log4jSrc, log4jDst);
        FileStatus log4jFileStatus = fs.getFileStatus(log4jDst);
        LocalResource log4jRsrc = Records.newRecord(LocalResource.class);
        log4jRsrc.setType(LocalResourceType.FILE);
        log4jRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
        log4jRsrc.setResource(ConverterUtils.getYarnUrlFromURI(log4jDst.toUri()));
        log4jRsrc.setTimestamp(log4jFileStatus.getModificationTime());
        log4jRsrc.setSize(log4jFileStatus.getLen());
        localResources.put("log4j.properties", log4jRsrc);
    }

    // The shell script has to be made available on the final container(s)
    // where it will be executed. 
    // To do this, we need to first copy into the filesystem that is visible 
    // to the yarn framework. 
    // We do not need to set this as a local resource for the application 
    // master as the application master does not need it.       
    String hdfsShellScriptLocation = "";
    long hdfsShellScriptLen = 0;
    long hdfsShellScriptTimestamp = 0;
    if (!shellScriptPath.isEmpty()) {
        Path shellSrc = new Path(shellScriptPath);
        String shellPathSuffix = appName + "/" + appId.getId() + "/ExecShellScript.sh";
        Path shellDst = new Path(fs.getHomeDirectory(), shellPathSuffix);
        fs.copyFromLocalFile(false, true, shellSrc, shellDst);
        hdfsShellScriptLocation = shellDst.toUri().toString();
        FileStatus shellFileStatus = fs.getFileStatus(shellDst);
        hdfsShellScriptLen = shellFileStatus.getLen();
        hdfsShellScriptTimestamp = shellFileStatus.getModificationTime();
    }

    // Set local resource info into app master container launch context
    amContainer.setLocalResources(localResources);

    // Set the necessary security tokens as needed
    //amContainer.setContainerTokens(containerToken);

    // Set the env variables to be setup in the env where the application master will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();

    // put location of shell script into env
    // using the env info, the application master will create the correct local resource for the 
    // eventual containers that will be launched to execute the shell scripts
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTLOCATION, hdfsShellScriptLocation);
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTTIMESTAMP, Long.toString(hdfsShellScriptTimestamp));
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTLEN, Long.toString(hdfsShellScriptLen));
    env.put(DSConstants.CLUSTER_CONF_XML_PATH, confXMLFSPath);

    // Add AppMaster.jar location to classpath       
    // At some point we should not be required to add 
    // the hadoop specific classpaths to the env. 
    // It should be provided out of the box. 
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$()).append(File.pathSeparatorChar)
            .append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");

    // add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }

    env.put("CLASSPATH", classPathEnv.toString());

    amContainer.setEnvironment(env);

    // Set the necessary command to execute the application master 
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);

    // Set java executable command 
    LOG.info("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    //      if (debugFlag) {
    //          vargs.add("-Xdebug -Xrunjdwp:transport=dt_socket,address=9998,server=y,suspend=y");
    //      }
    // Set class name 
    vargs.add(appMasterMainClass);
    // Set params for Application Master
    vargs.add("--container_memory " + String.valueOf(containerMemory));
    vargs.add("--num_containers " + String.valueOf(numContainers));
    vargs.add("--priority " + String.valueOf(shellCmdPriority));
    if (!shellCommand.isEmpty()) {
        vargs.add("--shell_command " + shellCommand + "");
    }
    if (!shellArgs.isEmpty()) {
        vargs.add("--shell_args " + shellArgs + "");
    }
    for (Map.Entry<String, String> entry : shellEnv.entrySet()) {
        vargs.add("--shell_env " + entry.getKey() + "=" + entry.getValue());
    }
    if (debugFlag) {
        vargs.add("--debug");
    }

    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    amContainer.setCommands(commands);

    // Set up resource type requirements
    // For now, only memory is supported so we set memory requirements
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMemory);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application
    // Not needed in this scenario
    // amContainer.setServiceData(serviceData);

    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    // TODO - what is the range for priority? how to decide? 
    pri.setPriority(amPriority);
    appContext.setPriority(pri);

    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);

    // Submit the application to the applications manager
    // SubmitApplicationResponse submitResp = applicationsManager.submitApplication(appRequest);
    // Ignore the response as either a valid response object is returned on success 
    // or an exception thrown to denote some form of a failure
    LOG.info("Submitting application to ASM");

    yarnClient.submitApplication(appContext);

    // TODO
    // Try submitting the same request again
    // app submission failure?

    // Monitor the application
    return monitorApplication(appId);

}

From source file:org.elasticsearch.hadoop.yarn.am.EsCluster.java

License:Apache License

private Map<String, LocalResource> setupEsZipResource(Config conf) {
    // elasticsearch.zip
    Map<String, LocalResource> resources = new LinkedHashMap<String, LocalResource>();

    LocalResource esZip = Records.newRecord(LocalResource.class);
    String esZipHdfsPath = conf.esZipHdfsPath();
    Path p = new Path(esZipHdfsPath);
    FileStatus fsStat;
    try {
        fsStat = FileSystem.get(cfg).getFileStatus(p);
    } catch (IOException ex) {
        throw new IllegalArgumentException(String.format(
                "Cannot find Elasticsearch zip at [%s]; make sure the artifacts have been properly provisioned and the correct permissions are in place.",
                esZipHdfsPath), ex);
    }
    // use the normalized path as otherwise YARN chokes down the line
    esZip.setResource(ConverterUtils.getYarnUrlFromPath(fsStat.getPath()));
    esZip.setSize(fsStat.getLen());
    esZip.setTimestamp(fsStat.getModificationTime());
    esZip.setType(LocalResourceType.ARCHIVE);
    esZip.setVisibility(LocalResourceVisibility.PUBLIC);

    resources.put(conf.esZipName(), esZip);
    return resources;
}

From source file:org.elasticsearch.hadoop.yarn.client.YarnLauncher.java

License:Apache License

private Map<String, LocalResource> setupEsYarnJar() {
    Map<String, LocalResource> resources = new LinkedHashMap<String, LocalResource>();
    LocalResource esYarnJar = Records.newRecord(LocalResource.class);
    Path p = new Path(clientCfg.jarHdfsPath());
    FileStatus fsStat;
    try {
        fsStat = FileSystem.get(client.getConfiguration()).getFileStatus(p);
    } catch (IOException ex) {
        throw new IllegalArgumentException(String.format(
                "Cannot find jar [%s]; make sure the artifacts have been properly provisioned and the correct permissions are in place.",
                clientCfg.jarHdfsPath()), ex);
    }
    // use the normalized path as otherwise YARN chokes down the line
    esYarnJar.setResource(ConverterUtils.getYarnUrlFromPath(fsStat.getPath()));
    esYarnJar.setSize(fsStat.getLen());
    esYarnJar.setTimestamp(fsStat.getModificationTime());
    esYarnJar.setType(LocalResourceType.FILE);
    esYarnJar.setVisibility(LocalResourceVisibility.PUBLIC);

    resources.put(clientCfg.jarName(), esYarnJar);
    return resources;
}