Example usage for org.apache.hadoop.yarn.api.records ContainerLaunchContext setEnvironment

List of usage examples for org.apache.hadoop.yarn.api.records ContainerLaunchContext setEnvironment

Introduction

On this page you can find example usage for org.apache.hadoop.yarn.api.records.ContainerLaunchContext.setEnvironment.

Prototype

@Public
@Stable
public abstract void setEnvironment(Map<String, String> environment);

Document

Add environment variables for the container.
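
Before the project examples below, here is a minimal, self-contained sketch of the call: it builds an environment map and hands it to a ContainerLaunchContext before the context is submitted or used to start a container. The CLASSPATH layout, the MY_APP_CONF_DIR variable, and the java -version command are illustrative assumptions, not taken from any of the projects below.

import java.io.File;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.util.Records;

public class SetEnvironmentExample {

    public static ContainerLaunchContext buildLaunchContext() {
        ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);

        // Build the environment that the container's process will inherit.
        Map<String, String> env = new HashMap<String, String>();
        // Start from YARN's own CLASSPATH and append the container's working directory.
        env.put("CLASSPATH",
                ApplicationConstants.Environment.CLASSPATH.$() + File.pathSeparator + "./*");
        // Hypothetical application-specific variable, resolved relative to the container's working dir.
        env.put("MY_APP_CONF_DIR", ApplicationConstants.Environment.PWD.$() + "/conf");

        ctx.setEnvironment(env);

        // A placeholder command that will run with this environment.
        ctx.setCommands(Collections.singletonList(
                ApplicationConstants.Environment.JAVA_HOME.$() + "/bin/java -version"
                        + " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
                        + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"));

        return ctx;
    }
}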

Usage

From source file:org.apache.gobblin.yarn.GobblinYarnAppLauncher.java

License:Apache License

/**
 * Setup and submit the Gobblin Yarn application.
 *
 * @throws IOException if there's anything wrong setting up and submitting the Yarn application
 * @throws YarnException if there's anything wrong setting up and submitting the Yarn application
 */
@VisibleForTesting
ApplicationId setupAndSubmitApplication() throws IOException, YarnException {
    YarnClientApplication gobblinYarnApp = this.yarnClient.createApplication();
    ApplicationSubmissionContext appSubmissionContext = gobblinYarnApp.getApplicationSubmissionContext();
    appSubmissionContext.setApplicationType(GOBBLIN_YARN_APPLICATION_TYPE);
    ApplicationId applicationId = appSubmissionContext.getApplicationId();

    GetNewApplicationResponse newApplicationResponse = gobblinYarnApp.getNewApplicationResponse();
    // Set up resource type requirements for ApplicationMaster
    Resource resource = prepareContainerResource(newApplicationResponse);

    // Add lib jars, and the jars and files that the ApplicationMaster needs, as LocalResources
    Map<String, LocalResource> appMasterLocalResources = addAppMasterLocalResources(applicationId);

    ContainerLaunchContext amContainerLaunchContext = Records.newRecord(ContainerLaunchContext.class);
    amContainerLaunchContext.setLocalResources(appMasterLocalResources);
    amContainerLaunchContext.setEnvironment(YarnHelixUtils.getEnvironmentVariables(this.yarnConfiguration));
    amContainerLaunchContext
            .setCommands(Lists.newArrayList(buildApplicationMasterCommand(resource.getMemory())));

    Map<ApplicationAccessType, String> acls = new HashMap<>(1);
    acls.put(ApplicationAccessType.VIEW_APP, this.appViewAcl);
    amContainerLaunchContext.setApplicationACLs(acls);

    if (UserGroupInformation.isSecurityEnabled()) {
        setupSecurityTokens(amContainerLaunchContext);
    }

    // Setup the application submission context
    appSubmissionContext.setApplicationName(this.applicationName);
    appSubmissionContext.setResource(resource);
    appSubmissionContext.setQueue(this.appQueueName);
    appSubmissionContext.setPriority(Priority.newInstance(0));
    appSubmissionContext.setAMContainerSpec(amContainerLaunchContext);
    // Also set up container local resources by copying the local jars and files the container needs to HDFS
    addContainerLocalResources(applicationId);

    // Submit the application
    LOGGER.info("Submitting application " + applicationId);
    this.yarnClient.submitApplication(appSubmissionContext);

    LOGGER.info("Application successfully submitted and accepted");
    ApplicationReport applicationReport = this.yarnClient.getApplicationReport(applicationId);
    LOGGER.info("Application Name: " + applicationReport.getName());
    LOGGER.info("Application Tracking URL: " + applicationReport.getTrackingUrl());
    LOGGER.info("Application User: " + applicationReport.getUser() + " Queue: " + applicationReport.getQueue());

    return applicationId;
}

From source file:org.apache.gobblin.yarn.YarnService.java

License:Apache License

protected ContainerLaunchContext newContainerLaunchContext(Container container, String helixInstanceName)
        throws IOException {
    Path appWorkDir = GobblinClusterUtils.getAppWorkDirPath(this.fs, this.applicationName, this.applicationId);
    Path containerWorkDir = new Path(appWorkDir, GobblinYarnConfigurationKeys.CONTAINER_WORK_DIR_NAME);

    Map<String, LocalResource> resourceMap = Maps.newHashMap();

    addContainerLocalResources(new Path(appWorkDir, GobblinYarnConfigurationKeys.LIB_JARS_DIR_NAME),
            resourceMap);
    addContainerLocalResources(new Path(containerWorkDir, GobblinYarnConfigurationKeys.APP_JARS_DIR_NAME),
            resourceMap);
    addContainerLocalResources(new Path(containerWorkDir, GobblinYarnConfigurationKeys.APP_FILES_DIR_NAME),
            resourceMap);

    if (this.config.hasPath(GobblinYarnConfigurationKeys.CONTAINER_FILES_REMOTE_KEY)) {
        addRemoteAppFiles(this.config.getString(GobblinYarnConfigurationKeys.CONTAINER_FILES_REMOTE_KEY),
                resourceMap);
    }

    ContainerLaunchContext containerLaunchContext = Records.newRecord(ContainerLaunchContext.class);
    containerLaunchContext.setLocalResources(resourceMap);
    containerLaunchContext.setEnvironment(YarnHelixUtils.getEnvironmentVariables(this.yarnConfiguration));
    containerLaunchContext.setCommands(Lists.newArrayList(buildContainerCommand(container, helixInstanceName)));

    Map<ApplicationAccessType, String> acls = new HashMap<>(1);
    acls.put(ApplicationAccessType.VIEW_APP, this.appViewAcl);
    containerLaunchContext.setApplicationACLs(acls);

    if (UserGroupInformation.isSecurityEnabled()) {
        containerLaunchContext.setTokens(this.tokens.duplicate());
    }

    return containerLaunchContext;
}

From source file:org.apache.hama.bsp.YARNBSPJobClient.java

License:Apache License

@Override
protected RunningJob launchJob(BSPJobID jobId, BSPJob normalJob, Path submitJobFile, FileSystem pFs)
        throws IOException {
    YARNBSPJob job = (YARNBSPJob) normalJob;

    LOG.info("Submitting job...");
    if (getConf().get("bsp.child.mem.in.mb") == null) {
        LOG.warn("BSP Child memory has not been set, YARN will guess your needs or use default values.");
    }

    FileSystem fs = pFs;
    if (fs == null) {
        fs = FileSystem.get(getConf());
    }

    if (getConf().get("bsp.user.name") == null) {
        String s = getUnixUserName();
        getConf().set("bsp.user.name", s);
        LOG.debug("Retrieved username: " + s);
    }

    yarnClient.start();
    try {
        YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
        LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers="
                + clusterMetrics.getNumNodeManagers());

        List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
        LOG.info("Got Cluster node info from ASM");
        for (NodeReport node : clusterNodeReports) {
            LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress"
                    + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers"
                    + node.getNumContainers());
        }

        QueueInfo queueInfo = yarnClient.getQueueInfo("default");
        LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity="
                + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
                + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount="
                + queueInfo.getChildQueues().size());

        List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
        for (QueueUserACLInfo aclInfo : listAclInfo) {
            for (QueueACL userAcl : aclInfo.getUserAcls()) {
                LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                        + userAcl.name());
            }
        }

        // Get a new application id
        YarnClientApplication app = yarnClient.createApplication();

        // Create a new ApplicationSubmissionContext
        //ApplicationSubmissionContext appContext = Records.newRecord(ApplicationSubmissionContext.class);
        ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();

        id = appContext.getApplicationId();

        // set the application name
        appContext.setApplicationName(job.getJobName());

        // Create a new container launch context for the AM's container
        ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

        // Define the local resources required
        Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
        // Let's assume the jar we need for our ApplicationMaster is available in
        // HDFS at a path known to us, and we want to make it available to
        // the ApplicationMaster in the launched container
        if (job.getJar() == null) {
            throw new IllegalArgumentException("Jar must be set in order to run the application!");
        }

        Path jarPath = new Path(job.getJar());
        jarPath = fs.makeQualified(jarPath);
        getConf().set("bsp.jar", jarPath.makeQualified(fs.getUri(), jarPath).toString());

        FileStatus jarStatus = fs.getFileStatus(jarPath);
        LocalResource amJarRsrc = Records.newRecord(LocalResource.class);
        amJarRsrc.setType(LocalResourceType.FILE);
        amJarRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
        amJarRsrc.setResource(ConverterUtils.getYarnUrlFromPath(jarPath));
        amJarRsrc.setTimestamp(jarStatus.getModificationTime());
        amJarRsrc.setSize(jarStatus.getLen());

        // this creates a symlink in the working directory
        localResources.put(YARNBSPConstants.APP_MASTER_JAR_PATH, amJarRsrc);

        // add hama related jar files to localresources for container
        List<File> hamaJars;
        if (System.getProperty("hama.home.dir") != null)
            hamaJars = localJarfromPath(System.getProperty("hama.home.dir"));
        else
            hamaJars = localJarfromPath(getConf().get("hama.home.dir"));
        String hamaPath = getSystemDir() + "/hama";
        for (File fileEntry : hamaJars) {
            addToLocalResources(fs, fileEntry.getCanonicalPath(), hamaPath, fileEntry.getName(),
                    localResources);
        }

        // Set the local resources into the launch context
        amContainer.setLocalResources(localResources);

        // Set up the environment needed for the launch context
        Map<String, String> env = new HashMap<String, String>();
        // Assuming our classes or jars are available as local resources in the
        // working directory from which the command will be run, we need to append
        // "." to the path.
        // By default, all the hadoop specific classpaths will already be available
        // in $CLASSPATH, so we should be careful not to overwrite it.
        StringBuilder classPathEnv = new StringBuilder(ApplicationConstants.Environment.CLASSPATH.$())
                .append(File.pathSeparatorChar).append("./*");
        for (String c : yarnConf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
                YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
            classPathEnv.append(File.pathSeparatorChar);
            classPathEnv.append(c.trim());
        }

        env.put(YARNBSPConstants.HAMA_YARN_LOCATION, jarPath.toUri().toString());
        env.put(YARNBSPConstants.HAMA_YARN_SIZE, Long.toString(jarStatus.getLen()));
        env.put(YARNBSPConstants.HAMA_YARN_TIMESTAMP, Long.toString(jarStatus.getModificationTime()));

        env.put(YARNBSPConstants.HAMA_LOCATION, hamaPath);
        env.put("CLASSPATH", classPathEnv.toString());
        amContainer.setEnvironment(env);

        // Set the necessary command to execute on the allocated container
        Vector<CharSequence> vargs = new Vector<CharSequence>(5);
        vargs.add("${JAVA_HOME}/bin/java");
        vargs.add("-cp " + classPathEnv + "");
        vargs.add(ApplicationMaster.class.getCanonicalName());
        vargs.add(submitJobFile.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString());

        vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/hama-appmaster.stdout");
        vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/hama-appmaster.stderr");

        // Get final command
        StringBuilder command = new StringBuilder();
        for (CharSequence str : vargs) {
            command.append(str).append(" ");
        }

        List<String> commands = new ArrayList<String>();
        commands.add(command.toString());
        amContainer.setCommands(commands);

        LOG.debug("Start command: " + command);

        Resource capability = Records.newRecord(Resource.class);
        // we have at least 3 threads per BSP task, each consuming 1 MB, plus
        // a base usage of 100 MB
        capability.setMemory(3 * job.getNumBspTask() + getConf().getInt("hama.appmaster.memory.mb", 100));
        LOG.info("Set memory for the application master to " + capability.getMemory() + "mb!");

        // Set the container launch content into the ApplicationSubmissionContext
        appContext.setResource(capability);

        // Setup security tokens
        if (UserGroupInformation.isSecurityEnabled()) {
            // Note: Credentials class is marked as LimitedPrivate for HDFS and MapReduce
            Credentials credentials = new Credentials();
            String tokenRenewer = yarnConf.get(YarnConfiguration.RM_PRINCIPAL);
            if (tokenRenewer == null || tokenRenewer.length() == 0) {
                throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
            }

            // For now, only getting tokens for the default file-system.
            final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
            if (tokens != null) {
                for (Token<?> token : tokens) {
                    LOG.info("Got dt for " + fs.getUri() + "; " + token);
                }
            }
            DataOutputBuffer dob = new DataOutputBuffer();
            credentials.writeTokenStorageToStream(dob);
            ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
            amContainer.setTokens(fsTokens);
        }

        appContext.setAMContainerSpec(amContainer);

        // Create the request to send to the ApplicationsManager
        ApplicationId appId = appContext.getApplicationId();
        yarnClient.submitApplication(appContext);

        return monitorApplication(appId) ? new NetworkedJob() : null;
    } catch (YarnException e) {
        e.printStackTrace();
        return null;
    }
}

From source file:org.apache.helix.provisioning.yarn.AppLauncher.java

License:Apache License

public boolean launch() throws Exception {
    LOG.info("Running Client");
    yarnClient.start();

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    // TODO get min/max resource capabilities from RM and change memory ask if needed
    // If we do not have min/max, we may not be able to correctly request
    // the required resources from the RM for the app master
    // Memory ask has to be a multiple of min and less than max.
    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    _appId = appContext.getApplicationId();
    _appMasterConfig.setAppId(_appId.getId());
    String appName = _applicationSpec.getAppName();
    _appMasterConfig.setAppName(appName);
    _appMasterConfig.setApplicationSpecFactory(_applicationSpecFactory.getClass().getCanonicalName());
    appContext.setApplicationName(appName);

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    LOG.info("Copy Application archive file from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem
    // Create a local resource to point to the destination jar path
    FileSystem fs = FileSystem.get(_conf);

    // get packages for each component packages
    Map<String, URI> packages = new HashMap<String, URI>();
    packages.put(AppMasterConfig.AppEnvironment.APP_MASTER_PKG.toString(), appMasterArchive.toURI());
    packages.put(AppMasterConfig.AppEnvironment.APP_SPEC_FILE.toString(), _yamlConfigFile.toURI());
    for (String serviceName : _applicationSpec.getServices()) {
        packages.put(serviceName, _applicationSpec.getServicePackage(serviceName));
    }
    Map<String, Path> hdfsDest = new HashMap<String, Path>();
    Map<String, String> classpathMap = new HashMap<String, String>();
    for (String name : packages.keySet()) {
        URI uri = packages.get(name);
        Path dst = copyToHDFS(fs, name, uri);
        hdfsDest.put(name, dst);
        String classpath = generateClasspathAfterExtraction(name, new File(uri));
        classpathMap.put(name, classpath);
        _appMasterConfig.setClasspath(name, classpath);
        String serviceMainClass = _applicationSpec.getServiceMainClass(name);
        if (serviceMainClass != null) {
            _appMasterConfig.setMainClass(name, serviceMainClass);
        }
    }

    // Get YAML files describing all workflows to immediately start
    Map<String, URI> workflowFiles = new HashMap<String, URI>();
    List<TaskConfig> taskConfigs = _applicationSpec.getTaskConfigs();
    if (taskConfigs != null) {
        for (TaskConfig taskConfig : taskConfigs) {
            URI configUri = taskConfig.getYamlURI();
            if (taskConfig.name != null && configUri != null) {
                workflowFiles.put(taskConfig.name, taskConfig.getYamlURI());
            }
        }
    }

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    LocalResource appMasterPkg = setupLocalResource(fs,
            hdfsDest.get(AppMasterConfig.AppEnvironment.APP_MASTER_PKG.toString()));
    LocalResource appSpecFile = setupLocalResource(fs,
            hdfsDest.get(AppMasterConfig.AppEnvironment.APP_SPEC_FILE.toString()));
    localResources.put(AppMasterConfig.AppEnvironment.APP_MASTER_PKG.toString(), appMasterPkg);
    localResources.put(AppMasterConfig.AppEnvironment.APP_SPEC_FILE.toString(), appSpecFile);
    for (String name : workflowFiles.keySet()) {
        URI uri = workflowFiles.get(name);
        Path dst = copyToHDFS(fs, name, uri);
        LocalResource taskLocalResource = setupLocalResource(fs, dst);
        localResources.put(AppMasterConfig.AppEnvironment.TASK_CONFIG_FILE.toString() + "_" + name,
                taskLocalResource);
    }

    // Set local resource info into app master container launch context
    amContainer.setLocalResources(localResources);

    // Set the necessary security tokens as needed
    // amContainer.setContainerTokens(containerToken);

    // Add AppMaster.jar location to classpath
    // At some point we should not be required to add
    // the hadoop specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$()).append(File.pathSeparatorChar)
            .append("./*").append(File.pathSeparatorChar);
    classPathEnv.append(classpathMap.get(AppMasterConfig.AppEnvironment.APP_MASTER_PKG.toString()));
    for (String c : _conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");

    // add the runtime classpath needed for tests to work
    if (_conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }
    LOG.info("\n\n Setting the classpath to launch AppMaster:\n\n");
    // Set the env variables to be setup in the env where the application master will be run
    Map<String, String> env = new HashMap<String, String>(_appMasterConfig.getEnv());
    env.put("CLASSPATH", classPathEnv.toString());

    amContainer.setEnvironment(env);

    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);

    // Set java executable command
    LOG.info("Setting up app master launch command");
    vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
    int amMemory = 4096;
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    // Set class name
    vargs.add(AppMasterLauncher.class.getCanonicalName());
    // Set params for Application Master
    // vargs.add("--num_containers " + String.valueOf(numContainers));

    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    amContainer.setCommands(commands);

    // Set up resource type requirements
    // For now, only memory is supported so we set memory requirements
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMemory);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application
    // Not needed in this scenario
    // amContainer.setServiceData(serviceData);

    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = _conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    int amPriority = 0;
    // TODO - what is the range for priority? how to decide?
    pri.setPriority(amPriority);
    appContext.setPriority(pri);

    String amQueue = "default";
    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);

    LOG.info("Submitting application to YARN Resource Manager");

    ApplicationId applicationId = yarnClient.submitApplication(appContext);

    LOG.info("Submitted application with applicationId:" + applicationId);

    return true;
}

From source file:org.apache.helix.provisioning.yarn.YarnProvisioner.java

License:Apache License

private ContainerLaunchContext createLaunchContext(ContainerId containerId, Container container,
        Participant participant) throws Exception {

    ContainerLaunchContext participantContainer = Records.newRecord(ContainerLaunchContext.class);

    // Map<String, String> envs = System.getenv();
    String appName = applicationMasterConfig.getAppName();
    int appId = applicationMasterConfig.getAppId();
    String serviceName = _resourceConfig.getId().stringify();
    String serviceClasspath = applicationMasterConfig.getClassPath(serviceName);
    String mainClass = applicationMasterConfig.getMainClass(serviceName);
    String zkAddress = applicationMasterConfig.getZKAddress();

    // set the localresources needed to launch container
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    LocalResource servicePackageResource = Records.newRecord(LocalResource.class);
    YarnConfiguration conf = new YarnConfiguration();
    FileSystem fs = FileSystem.get(conf);
    String pathSuffix = appName + "/" + appId + "/" + serviceName + ".tar";
    Path dst = new Path(fs.getHomeDirectory(), pathSuffix);
    FileStatus destStatus = fs.getFileStatus(dst);

    // Set the type of resource - file or archive
    // archives are untarred at destination
    // we don't need the jar file to be untarred for now
    servicePackageResource.setType(LocalResourceType.ARCHIVE);
    // Set visibility of the resource
    // Setting to most private option
    servicePackageResource.setVisibility(LocalResourceVisibility.APPLICATION);
    // Set the resource to be copied over
    servicePackageResource.setResource(ConverterUtils.getYarnUrlFromPath(dst));
    // Set timestamp and length of file so that the framework
    // can do basic sanity checks for the local resource
    // after it has been copied over to ensure it is the same
    // resource the client intended to use with the application
    servicePackageResource.setTimestamp(destStatus.getModificationTime());
    servicePackageResource.setSize(destStatus.getLen());
    LOG.info("Setting local resource:" + servicePackageResource + " for service" + serviceName);
    localResources.put(serviceName, servicePackageResource);

    // Set local resource info into app master container launch context
    participantContainer.setLocalResources(localResources);

    // Set the necessary security tokens as needed
    // amContainer.setContainerTokens(containerToken);

    // Set the env variables to be setup in the env where the application master will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();
    env.put(serviceName, dst.getName());
    // Add AppMaster.jar location to classpath
    // At some point we should not be required to add
    // the hadoop specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$()).append(File.pathSeparatorChar)
            .append("./*");
    classPathEnv.append(File.pathSeparatorChar);
    classPathEnv.append(serviceClasspath);
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");
    LOG.info("Setting classpath for service:\n" + classPathEnv.toString());
    env.put("CLASSPATH", classPathEnv.toString());

    participantContainer.setEnvironment(env);

    if (applicationMaster.allTokens != null) {
        LOG.info("Setting tokens: " + applicationMaster.allTokens);
        participantContainer.setTokens(applicationMaster.allTokens);
    }

    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);

    // Set java executable command
    LOG.info("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + 4096 + "m");
    // Set class name
    vargs.add(ParticipantLauncher.class.getCanonicalName());
    // Set params for container participant
    vargs.add("--zkAddress " + zkAddress);
    vargs.add("--cluster " + appName);
    vargs.add("--participantId " + participant.getId().stringify());
    vargs.add("--participantClass " + mainClass);

    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/ContainerParticipant.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/ContainerParticipant.stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up  container launch command " + command.toString() + " with arguments \n"
            + vargs);
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    participantContainer.setCommands(commands);
    return participantContainer;
}

From source file:org.apache.hoya.providers.accumulo.AccumuloProviderService.java

License:Apache License

@Override
public void buildContainerLaunchContext(ContainerLaunchContext ctx, AggregateConf instanceDefinition,
        Container container, String role, HoyaFileSystem hoyaFileSystem, Path generatedConfPath,
        MapOperations resourceComponent, MapOperations appComponent, Path containerTmpDirPath)
        throws IOException, HoyaException {

    this.hoyaFileSystem = hoyaFileSystem;
    this.instanceDefinition = instanceDefinition;

    // Set the environment
    Map<String, String> env = HoyaUtils.buildEnvMap(appComponent);
    env.put(ACCUMULO_LOG_DIR, ApplicationConstants.LOG_DIR_EXPANSION_VAR);
    ConfTreeOperations appConf = instanceDefinition.getAppConfOperations();
    String hadoop_home = ApplicationConstants.Environment.HADOOP_COMMON_HOME.$();
    MapOperations appConfGlobal = appConf.getGlobalOptions();
    hadoop_home = appConfGlobal.getOption(OPTION_HADOOP_HOME, hadoop_home);
    env.put(HADOOP_HOME, hadoop_home);
    env.put(HADOOP_PREFIX, hadoop_home);

    // By not setting ACCUMULO_HOME, this will cause the Accumulo script to
    // compute it on its own to an absolute path.

    env.put(ACCUMULO_CONF_DIR, ProviderUtils.convertToAppRelativePath(HoyaKeys.PROPAGATED_CONF_DIR_NAME));
    env.put(ZOOKEEPER_HOME, appConfGlobal.getMandatoryOption(OPTION_ZK_HOME));

    //local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    //add the configuration resources
    Map<String, LocalResource> confResources;
    confResources = hoyaFileSystem.submitDirectory(generatedConfPath, HoyaKeys.PROPAGATED_CONF_DIR_NAME);
    localResources.putAll(confResources);

    //Add binaries
    //now add the image if it was set
    String imageURI = instanceDefinition.getInternalOperations()
            .get(OptionKeys.INTERNAL_APPLICATION_IMAGE_PATH);
    hoyaFileSystem.maybeAddImagePath(localResources, imageURI);

    ctx.setLocalResources(localResources);

    List<String> commands = new ArrayList<String>();
    CommandLineBuilder commandLine = new CommandLineBuilder();

    String heap = "-Xmx" + appComponent.getOption(RoleKeys.JVM_HEAP, DEFAULT_JVM_HEAP);
    String opt = "ACCUMULO_OTHER_OPTS";
    if (HoyaUtils.isSet(heap)) {
        if (AccumuloKeys.ROLE_MASTER.equals(role)) {
            opt = "ACCUMULO_MASTER_OPTS";
        } else if (AccumuloKeys.ROLE_TABLET.equals(role)) {
            opt = "ACCUMULO_TSERVER_OPTS";
        } else if (AccumuloKeys.ROLE_MONITOR.equals(role)) {
            opt = "ACCUMULO_MONITOR_OPTS";
        } else if (AccumuloKeys.ROLE_GARBAGE_COLLECTOR.equals(role)) {
            opt = "ACCUMULO_GC_OPTS";
        }
        env.put(opt, heap);
    }

    //this must stay relative if it is an image
    commandLine.add(providerUtils.buildPathToScript(instanceDefinition, "bin", "accumulo"));

    //role is translated to the accumulo one
    commandLine.add(AccumuloRoles.serviceForRole(role));

    // Add any role specific arguments to the command line
    String additionalArgs = ProviderUtils.getAdditionalArgs(appComponent);
    if (!StringUtils.isBlank(additionalArgs)) {
        commandLine.add(additionalArgs);
    }

    commandLine.addOutAndErrFiles(role + "-out.txt", role + "-err.txt");

    commands.add(commandLine.build());
    ctx.setCommands(commands);
    ctx.setEnvironment(env);
}

From source file:org.apache.hoya.providers.agent.AgentProviderService.java

License:Apache License

@Override
public void buildContainerLaunchContext(ContainerLaunchContext ctx, AggregateConf instanceDefinition,
        Container container, String role, HoyaFileSystem hoyaFileSystem, Path generatedConfPath,
        MapOperations resourceComponent, MapOperations appComponent, Path containerTmpDirPath)
        throws IOException, HoyaException {

    this.instanceDefinition = instanceDefinition;
    log.info("Build launch context for Agent");
    log.debug(instanceDefinition.toString());

    // Set the environment
    Map<String, String> env = HoyaUtils.buildEnvMap(appComponent);

    String workDir = ApplicationConstants.Environment.PWD.$();
    env.put("AGENT_WORK_ROOT", workDir);
    log.info("AGENT_WORK_ROOT set to " + workDir);
    String logDir = ApplicationConstants.Environment.LOG_DIRS.$();
    env.put("AGENT_LOG_ROOT", logDir);
    log.info("AGENT_LOG_ROOT set to " + logDir);

    //local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    // TODO: Should agent need to support App Home
    String scriptPath = new File(AgentKeys.AGENT_MAIN_SCRIPT_ROOT, AgentKeys.AGENT_MAIN_SCRIPT).getPath();
    String appHome = instanceDefinition.getAppConfOperations().getGlobalOptions().get(AgentKeys.PACKAGE_PATH);
    if (appHome != null && !appHome.equals("")) {
        scriptPath = new File(appHome, AgentKeys.AGENT_MAIN_SCRIPT).getPath();
    }

    String agentImage = instanceDefinition.getInternalOperations()
            .get(OptionKeys.INTERNAL_APPLICATION_IMAGE_PATH);
    if (agentImage != null) {
        LocalResource agentImageRes = hoyaFileSystem.createAmResource(new Path(agentImage),
                LocalResourceType.ARCHIVE);
        localResources.put(AgentKeys.AGENT_INSTALL_DIR, agentImageRes);
    }

    log.info("Using " + scriptPath + " for agent.");
    String appDef = instanceDefinition.getAppConfOperations().getGlobalOptions()
            .getMandatoryOption(AgentKeys.APP_DEF);
    LocalResource appDefRes = hoyaFileSystem.createAmResource(new Path(appDef), LocalResourceType.ARCHIVE);
    localResources.put(AgentKeys.APP_DEFINITION_DIR, appDefRes);

    String agentConf = instanceDefinition.getAppConfOperations().getGlobalOptions()
            .getMandatoryOption(AgentKeys.AGENT_CONF);
    LocalResource agentConfRes = hoyaFileSystem.createAmResource(new Path(agentConf), LocalResourceType.FILE);
    localResources.put(AgentKeys.AGENT_CONFIG_FILE, agentConfRes);

    String agentVer = instanceDefinition.getAppConfOperations().getGlobalOptions()
            .getMandatoryOption(AgentKeys.AGENT_VERSION);
    LocalResource agentVerRes = hoyaFileSystem.createAmResource(new Path(agentVer), LocalResourceType.FILE);
    localResources.put(AgentKeys.AGENT_VERSION_FILE, agentVerRes);

    ctx.setLocalResources(localResources);

    List<String> commandList = new ArrayList<String>();
    String label = getContainerLabel(container, role);
    CommandLineBuilder operation = new CommandLineBuilder();

    operation.add(AgentKeys.PYTHON_EXE);

    operation.add(scriptPath);
    operation.add(ARG_LABEL);
    operation.add(label);
    operation.add(ARG_HOST);
    operation.add(getClusterInfoPropertyValue(StatusKeys.INFO_AM_HOSTNAME));
    operation.add(ARG_PORT);
    operation.add(getClusterInfoPropertyValue(StatusKeys.INFO_AM_WEB_PORT));

    commandList.add(operation.build());
    ctx.setCommands(commandList);
    ctx.setEnvironment(env);

    // initialize the component instance state
    componentStatuses.put(label, new ComponentInstanceState(role, container.getId().toString(),
            getClusterInfoPropertyValue(OptionKeys.APPLICATION_NAME)));
}

From source file:org.apache.hoya.providers.hbase.HBaseProviderService.java

License:Apache License

@Override
public void buildContainerLaunchContext(ContainerLaunchContext ctx, AggregateConf instanceDefinition,
        Container container, String role, HoyaFileSystem hoyaFileSystem, Path generatedConfPath,
        MapOperations resourceComponent, MapOperations appComponent, Path containerTmpDirPath)
        throws IOException, HoyaException {

    this.hoyaFileSystem = hoyaFileSystem;
    this.instanceDefinition = instanceDefinition;
    // Set the environment
    Map<String, String> env = HoyaUtils.buildEnvMap(appComponent);

    env.put(HBASE_LOG_DIR, providerUtils.getLogdir());

    env.put("PROPAGATED_CONFDIR",
            ApplicationConstants.Environment.PWD.$() + "/" + HoyaKeys.PROPAGATED_CONF_DIR_NAME);

    //local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    //add the configuration resources
    Map<String, LocalResource> confResources;
    confResources = hoyaFileSystem.submitDirectory(generatedConfPath, HoyaKeys.PROPAGATED_CONF_DIR_NAME);
    localResources.putAll(confResources);
    //Add binaries
    //now add the image if it was set
    String imageURI = instanceDefinition.getInternalOperations()
            .get(OptionKeys.INTERNAL_APPLICATION_IMAGE_PATH);
    hoyaFileSystem.maybeAddImagePath(localResources, imageURI);
    ctx.setLocalResources(localResources);
    List<String> commands = new ArrayList<String>();

    CommandLineBuilder command = new CommandLineBuilder();

    String heap = appComponent.getOption(RoleKeys.JVM_HEAP, DEFAULT_JVM_HEAP);
    if (HoyaUtils.isSet(heap)) {
        String adjustedHeap = HoyaUtils.translateTrailingHeapUnit(heap);
        env.put("HBASE_HEAPSIZE", adjustedHeap);
    }

    String gcOpts = appComponent.getOption(RoleKeys.GC_OPTS, DEFAULT_GC_OPTS);
    if (HoyaUtils.isSet(gcOpts)) {
        env.put("SERVER_GC_OPTS", gcOpts);
    }

    //this must stay relative if it is an image
    command.add(providerUtils.buildPathToScript(instanceDefinition, "bin", HBaseKeys.HBASE_SCRIPT));
    //config dir is relative to the generated file
    command.add(ARG_CONFIG);
    command.add("$PROPAGATED_CONFDIR");

    String roleCommand;
    String logfile;
    //now look at the role
    if (ROLE_WORKER.equals(role)) {
        //role is region server
        roleCommand = REGION_SERVER;
        logfile = "/region-server.txt";
    } else if (ROLE_MASTER.equals(role)) {
        roleCommand = MASTER;

        logfile = "/master.txt";
    } else {
        throw new HoyaInternalStateException("Cannot start role %s", role);
    }

    command.add(roleCommand);
    command.add(ACTION_START);
    //log details
    command.addOutAndErrFiles(logfile, null);

    String cmdStr = command.build();

    commands.add(cmdStr);
    ctx.setCommands(commands);
    ctx.setEnvironment(env);

}

From source file:org.apache.ignite.yarn.ApplicationMaster.java

License:Apache License

/** {@inheritDoc} */
public synchronized void onContainersAllocated(List<Container> conts) {
    for (Container c : conts) {
        if (checkContainer(c)) {
            try {
                ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);

                if (UserGroupInformation.isSecurityEnabled())
                    // Set the tokens to the newly allocated container:
                    ctx.setTokens(allTokens.duplicate());

                Map<String, String> env = new HashMap<>(System.getenv());

                env.put("IGNITE_TCP_DISCOVERY_ADDRESSES", getAddress(c.getNodeId().getHost()));

                if (props.jvmOpts() != null && !props.jvmOpts().isEmpty())
                    env.put("JVM_OPTS", props.jvmOpts());

                ctx.setEnvironment(env);

                Map<String, LocalResource> resources = new HashMap<>();

                resources.put("ignite", IgniteYarnUtils.setupFile(ignitePath, fs, LocalResourceType.ARCHIVE));
                resources.put("ignite-config.xml",
                        IgniteYarnUtils.setupFile(cfgPath, fs, LocalResourceType.FILE));

                if (props.licencePath() != null)
                    resources.put("gridgain-license.xml", IgniteYarnUtils
                            .setupFile(new Path(props.licencePath()), fs, LocalResourceType.FILE));

                if (props.userLibs() != null)
                    resources.put("libs",
                            IgniteYarnUtils.setupFile(new Path(props.userLibs()), fs, LocalResourceType.FILE));

                ctx.setLocalResources(resources);

                ctx.setCommands(Collections.singletonList(
                        (props.licencePath() != null ? "cp gridgain-license.xml ./ignite/*/ || true && " : "")
                                + "cp -r ./libs/* ./ignite/*/libs/ || true && " + "./ignite/*/bin/ignite.sh "
                                + "./ignite-config.xml" + " -J-Xmx" + ((int) props.memoryPerNode()) + "m"
                                + " -J-Xms" + ((int) props.memoryPerNode()) + "m"
                                + IgniteYarnUtils.YARN_LOG_OUT));

                log.log(Level.INFO, "Launching container: {0}.", c.getId());

                nmClient.startContainer(c, ctx);

                containers.put(c.getId(), new IgniteContainer(c.getId(), c.getNodeId(),
                        c.getResource().getVirtualCores(), c.getResource().getMemory()));
            } catch (Exception ex) {
                log.log(Level.WARNING, "Error launching container " + c.getId(), ex);
            }
        } else
            rmClient.releaseAssignedContainer(c.getId());
    }
}

From source file:org.apache.ignite.yarn.IgniteYarnClient.java

License:Apache License

/**
 * The main method takes one mandatory parameter and one optional parameter.
 *
 * @param args Path to the application master jar (mandatory) and a properties file (optional).
 */
public static void main(String[] args) throws Exception {
    checkArguments(args);

    // Set path to app master jar.
    String pathAppMasterJar = args[0];

    ClusterProperties props = ClusterProperties.from(args.length == 2 ? args[1] : null);

    YarnConfiguration conf = new YarnConfiguration();
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();

    // Create application via yarnClient
    YarnClientApplication app = yarnClient.createApplication();

    FileSystem fs = FileSystem.get(conf);

    Path ignite;

    // Load ignite and jar
    if (props.ignitePath() == null)
        ignite = getIgnite(props, fs);
    else
        ignite = new Path(props.ignitePath());

    // Upload the jar file to HDFS.
    Path appJar = IgniteYarnUtils.copyLocalToHdfs(fs, pathAppMasterJar,
            props.igniteWorkDir() + File.separator + IgniteYarnUtils.JAR_NAME);

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    amContainer.setCommands(Collections
            .singletonList(Environment.JAVA_HOME.$() + "/bin/java -Xmx512m " + ApplicationMaster.class.getName()
                    + IgniteYarnUtils.SPACE + ignite.toUri() + IgniteYarnUtils.YARN_LOG_OUT));

    // Setup jar for ApplicationMaster
    LocalResource appMasterJar = IgniteYarnUtils.setupFile(appJar, fs, LocalResourceType.FILE);

    amContainer.setLocalResources(Collections.singletonMap(IgniteYarnUtils.JAR_NAME, appMasterJar));

    // Setup CLASSPATH for ApplicationMaster
    Map<String, String> appMasterEnv = props.toEnvs();

    setupAppMasterEnv(appMasterEnv, conf);

    amContainer.setEnvironment(appMasterEnv);

    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials creds = new Credentials();

        String tokRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);

        if (tokRenewer == null || tokRenewer.length() == 0)
            throw new IOException("Master Kerberos principal for the RM is not set.");

        log.info("Found RM principal: " + tokRenewer);

        final Token<?> tokens[] = fs.addDelegationTokens(tokRenewer, creds);

        if (tokens != null)
            log.info("File system delegation tokens: " + Arrays.toString(tokens));

        amContainer.setTokens(IgniteYarnUtils.createTokenBuffer(creds));
    }

    // Set up resource type requirements for ApplicationMaster
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(512);
    capability.setVirtualCores(1);

    // Finally, set-up ApplicationSubmissionContext for the application
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    appContext.setApplicationName("ignition"); // application name
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    appContext.setQueue("default"); // queue

    // Submit application
    ApplicationId appId = appContext.getApplicationId();

    yarnClient.submitApplication(appContext);

    log.log(Level.INFO, "Submitted application. Application id: {0}", appId);

    ApplicationReport appReport = yarnClient.getApplicationReport(appId);
    YarnApplicationState appState = appReport.getYarnApplicationState();

    while (appState == YarnApplicationState.NEW || appState == YarnApplicationState.NEW_SAVING
            || appState == YarnApplicationState.SUBMITTED || appState == YarnApplicationState.ACCEPTED) {
        TimeUnit.SECONDS.sleep(1L);

        appReport = yarnClient.getApplicationReport(appId);

        if (appState != YarnApplicationState.ACCEPTED
                && appReport.getYarnApplicationState() == YarnApplicationState.ACCEPTED)
            log.log(Level.INFO, "Application {0} is ACCEPTED.", appId);

        appState = appReport.getYarnApplicationState();
    }

    log.log(Level.INFO, "Application {0} is {1}.", new Object[] { appId, appState });
}