Example usage for org.apache.hadoop.yarn.api.records Priority setPriority

Introduction

This page collects example usages of org.apache.hadoop.yarn.api.records.Priority.setPriority, drawn from open-source YARN applications.

Prototype

@Public
@Stable
public abstract void setPriority(int priority);

Document

Set the assigned priority
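
A minimal sketch of the call, assuming a Hadoop 2.x client on the classpath: a Priority record can be created empty and assigned a value with setPriority, or built in one step with the static Priority.newInstance factory.

import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.util.Records;

public class PrioritySketch {
    public static void main(String[] args) {
        // Create an empty record via the Records factory, then assign the value.
        Priority pri = Records.newRecord(Priority.class);
        pri.setPriority(10);

        // Equivalent one-step construction via the static factory.
        Priority same = Priority.newInstance(10);

        System.out.println(pri.getPriority() == same.getPriority()); // prints true
    }
}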

Usage

From source file: ml.shifu.guagua.yarn.GuaguaAppMaster.java

License: Apache License

/**
 * Set up the request that will be sent to the RM for the container ask.
 *
 * @return the configured ContainerRequest to be sent to the RM
 */
private ContainerRequest setupContainerAskForRM() {
    // Set up host requirements: request containers first, then match allocated
    // containers against the splits to get data locality.
    // TODO: better to request containers according to the hosts of the splits,
    // as sketched after this method.
    Priority pri = Records.newRecord(Priority.class);
    pri.setPriority(GuaguaYarnConstants.GUAGUA_YARN_DEFAULT_PRIORITY);

    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(getHeapPerContainer());
    capability.setVirtualCores(getYarnConf().getInt(GuaguaYarnConstants.GUAGUA_YARN_TASK_VCORES,
            GuaguaYarnConstants.GUAGUA_YARN_TASK_DEFAULT_VCORES));

    ContainerRequest request = new ContainerRequest(capability, null, null, pri);
    LOG.info("Requested container ask: {}", request.toString());
    return request;
}
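
The TODO above asks for locality-aware requests. A hedged sketch of what that could look like, assuming the hosts of the input splits are already known; the splitHosts parameter and the method itself are illustrative, not part of Guagua:

// Illustrative only: pass the split hosts as the preferred-nodes argument so
// the RM can try to honor data locality. With this 4-argument constructor,
// locality stays relaxed, so allocation falls back to other hosts if needed.
private ContainerRequest setupLocalityAwareAskForRM(String[] splitHosts, Resource capability, Priority pri) {
    return new ContainerRequest(capability, splitHosts, null /* racks */, pri);
}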

From source file: ml.shifu.guagua.yarn.GuaguaYarnClient.java

License: Apache License

/**
 * Submit an app to the YARN cluster and monitor its status.
 */
public int run(String[] args) throws Exception {
    LOG.info("Running Client");
    this.yarnClient.start();

    // Request a new application id from the RM.
    YarnClientApplication app = this.yarnClient.createApplication();
    GetNewApplicationResponse getNewAppResponse = app.getNewApplicationResponse();

    // check cluster status.
    checkPerNodeResourcesAvailable(getNewAppResponse);

    // configure our request for an exec container
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    this.setAppId(appContext.getApplicationId());
    LOG.info("Obtained new application ID: {}", this.getAppId());

    // set app id and app name
    appContext.setApplicationId(this.getAppId());
    this.setAppName(getConf().get(GuaguaYarnConstants.GUAGUA_YARN_APP_NAME));
    appContext.setApplicationName(this.getAppName());

    prepareInputSplits();
    // copy local resources to hdfs app folder
    copyResourcesToFS();

    appContext.setMaxAppAttempts(GuaguaYarnConstants.GUAGAU_APP_MASTER_DEFAULT_ATTMPTS);
    appContext.setQueue(getConf().get(GuaguaYarnConstants.GUAGUA_YARN_QUEUE_NAME,
            GuaguaYarnConstants.GUAGUA_YARN_DEFAULT_QUEUE_NAME));

    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(getConf().getInt(GuaguaYarnConstants.GUAGUA_YARN_MASTER_MEMORY,
            GuaguaYarnConstants.GUAGUA_YARN_DEFAULT_MASTER_MEMORY));
    capability.setVirtualCores(getConf().getInt(GuaguaYarnConstants.GUAGUA_YARN_MASTER_VCORES,
            GuaguaYarnConstants.GUAGUA_YARN_MASTER_DEFAULT_VCORES));
    appContext.setResource(capability);

    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    pri.setPriority(getConf().getInt(GuaguaYarnConstants.GUAGUA_YARN_MASTER_PRIORITY,
            GuaguaYarnConstants.GUAGUA_YARN_DEFAULT_PRIORITY));
    appContext.setPriority(pri);

    ContainerLaunchContext containerContext = buildContainerLaunchContext();
    appContext.setAMContainerSpec(containerContext);
    try {
        LOG.info("Submitting application to ASM");
        // obtain an "updated copy" of the appId for status checks/job kill later
        this.setAppId(this.yarnClient.submitApplication(appContext));
        LOG.info("Got new appId after submission : {}", getAppId());
    } catch (YarnException yre) {
        // try another time
        LOG.info("Submitting application again to ASM");
        // obtain an "updated copy" of the appId for status checks/job kill later
        this.setAppId(this.yarnClient.submitApplication(appContext));
        LOG.info("Got new appId after submission : {}", getAppId());
    }
    LOG.info("GuaguaAppMaster container request was submitted to ResourceManager for job: {}", getAppName());

    return awaitYarnJobCompletion();
}
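
The method ends by delegating to awaitYarnJobCompletion(), whose body is not shown here. A completion wait on YarnClient typically looks like the following sketch; the method name and exit-code convention are assumptions, not Guagua's actual code.

// Sketch of a typical YarnClient completion wait.
private int waitForCompletion() throws Exception {
    while (true) {
        ApplicationReport report = this.yarnClient.getApplicationReport(getAppId());
        YarnApplicationState state = report.getYarnApplicationState();
        if (state == YarnApplicationState.FINISHED || state == YarnApplicationState.KILLED
                || state == YarnApplicationState.FAILED) {
            // Map the final status to a process-style exit code.
            return report.getFinalApplicationStatus() == FinalApplicationStatus.SUCCEEDED ? 0 : 1;
        }
        Thread.sleep(1000L);
    }
}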

From source file: org.apache.drill.yarn.core.ContainerRequestSpec.java

License: Apache License

/**
 * Create a YARN ContainerRequest object from the information in this object.
 *
 * @return the configured ContainerRequest
 */
public ContainerRequest makeRequest() {
    assert memoryMb != 0;

    Priority priorityRec = Records.newRecord(Priority.class);
    priorityRec.setPriority(priority);

    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(memoryMb);
    capability.setVirtualCores(vCores);
    DoYUtil.callSetDiskIfExists(capability, disks);

    boolean relaxLocality = true;
    String nodeArr[] = null;
    if (!hosts.isEmpty()) {
        nodeArr = new String[hosts.size()];
        hosts.toArray(nodeArr);
        relaxLocality = false;
    }
    String rackArr[] = null;
    if (!racks.isEmpty()) {
        rackArr = new String[racks.size()];
        racks.toArray(rackArr);
        relaxLocality = false;
    }
    String nodeExpr = null;
    if (!DoYUtil.isBlank(nodeLabelExpr)) {
        nodeExpr = nodeLabelExpr;
        LOG.info("Requesting a container using node expression: " + nodeExpr);
    }

    // YARN is fragile. To (potentially) pass a node expression, we must use the
    // six-argument constructor. The relax-locality argument MUST be set to true
    // if we omit the rack and node specs, else we get a runtime error.

    return new ContainerRequest(capability, nodeArr, rackArr, priorityRec, relaxLocality, nodeExpr);
}
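
To make the constructor rule from the comment concrete, here is a small sketch (assuming Hadoop 2.6+; the values are illustrative) of the two legal shapes of the six-argument ContainerRequest:

// No node/rack preference: relaxLocality must stay true, or YARN fails at runtime.
ContainerRequest anyHost = new ContainerRequest(capability, null, null, priorityRec,
        true /* relaxLocality */, null /* nodeLabelsExpression */);

// Explicit hosts plus relaxLocality=false pin the ask to exactly those hosts.
ContainerRequest pinned = new ContainerRequest(capability, new String[] { "host1" }, null,
        priorityRec, false /* relaxLocality */, null);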

From source file: org.apache.flink.yarn.ApplicationMaster.java

License: Apache License

private void run() throws Exception {
    //Utils.logFilesInCurrentDirectory(LOG);
    // Initialize clients to ResourceManager and NodeManagers
    Configuration conf = Utils.initializeYarnConfiguration();
    FileSystem fs = FileSystem.get(conf);
    Map<String, String> envs = System.getenv();
    final String currDir = envs.get(Environment.PWD.key());
    final String logDirs = envs.get(Environment.LOG_DIRS.key());
    final String ownHostname = envs.get(Environment.NM_HOST.key());
    final String appId = envs.get(Client.ENV_APP_ID);
    final String clientHomeDir = envs.get(Client.ENV_CLIENT_HOME_DIR);
    final String applicationMasterHost = envs.get(Environment.NM_HOST.key());
    final String remoteFlinkJarPath = envs.get(Client.FLINK_JAR_PATH);
    final String shipListString = envs.get(Client.ENV_CLIENT_SHIP_FILES);
    final String yarnClientUsername = envs.get(Client.ENV_CLIENT_USERNAME);
    final int taskManagerCount = Integer.valueOf(envs.get(Client.ENV_TM_COUNT));
    final int memoryPerTaskManager = Integer.valueOf(envs.get(Client.ENV_TM_MEMORY));
    final int coresPerTaskManager = Integer.valueOf(envs.get(Client.ENV_TM_CORES));

    int heapLimit = Utils.calculateHeapSize(memoryPerTaskManager);

    if (currDir == null) {
        throw new RuntimeException("Current directory unknown");
    }
    if (ownHostname == null) {
        throw new RuntimeException("Own hostname (" + Environment.NM_HOST + ") not set.");
    }
    LOG.info("Working directory " + currDir);

    // load Flink configuration.
    Utils.getFlinkConfiguration(currDir);

    final String localWebInterfaceDir = currDir + "/resources/"
            + ConfigConstants.DEFAULT_JOB_MANAGER_WEB_PATH_NAME;

    // Update yaml conf -> set jobManager address to this machine's address.
    FileInputStream fis = new FileInputStream(currDir + "/flink-conf.yaml");
    BufferedReader br = new BufferedReader(new InputStreamReader(fis));
    Writer output = new BufferedWriter(new FileWriter(currDir + "/flink-conf-modified.yaml"));
    String line;
    while ((line = br.readLine()) != null) {
        if (line.contains(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY)) {
            output.append(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY + ": " + ownHostname + "\n");
        } else if (line.contains(ConfigConstants.JOB_MANAGER_WEB_ROOT_PATH_KEY)) {
            output.append(ConfigConstants.JOB_MANAGER_WEB_ROOT_PATH_KEY + ": " + "\n");
        } else {
            output.append(line + "\n");
        }
    }
    // just to make sure.
    output.append(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY + ": " + ownHostname + "\n");
    output.append(ConfigConstants.JOB_MANAGER_WEB_ROOT_PATH_KEY + ": " + localWebInterfaceDir + "\n");
    output.append(ConfigConstants.JOB_MANAGER_WEB_LOG_PATH_KEY + ": " + logDirs + "\n");
    output.close();
    br.close();
    File newConf = new File(currDir + "/flink-conf-modified.yaml");
    if (!newConf.exists()) {
        LOG.warn("modified yaml does not exist!");
    }

    Utils.copyJarContents("resources/" + ConfigConstants.DEFAULT_JOB_MANAGER_WEB_PATH_NAME,
            ApplicationMaster.class.getProtectionDomain().getCodeSource().getLocation().getPath());

    JobManager jm;
    {
        String pathToNepheleConfig = currDir + "/flink-conf-modified.yaml";
        String[] args = { "-executionMode", "cluster", "-configDir", pathToNepheleConfig };

        // start the job manager
        jm = JobManager.initialize(args);

        // Start info server for jobmanager
        jm.startInfoServer();
    }

    AMRMClient<ContainerRequest> rmClient = AMRMClient.createAMRMClient();
    rmClient.init(conf);
    rmClient.start();

    NMClient nmClient = NMClient.createNMClient();
    nmClient.init(conf);
    nmClient.start();

    // Register with ResourceManager
    LOG.info("registering ApplicationMaster");
    rmClient.registerApplicationMaster(applicationMasterHost, 0, "http://" + applicationMasterHost + ":"
            + GlobalConfiguration.getString(ConfigConstants.JOB_MANAGER_WEB_PORT_KEY, "undefined"));

    // Priority for worker containers - priorities are intra-application
    Priority priority = Records.newRecord(Priority.class);
    priority.setPriority(0);

    // Resource requirements for worker containers
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(memoryPerTaskManager);
    capability.setVirtualCores(coresPerTaskManager);

    // Make container requests to ResourceManager
    for (int i = 0; i < taskManagerCount; ++i) {
        ContainerRequest containerAsk = new ContainerRequest(capability, null, null, priority);
        LOG.info("Requesting TaskManager container " + i);
        rmClient.addContainerRequest(containerAsk);
    }

    LocalResource flinkJar = Records.newRecord(LocalResource.class);
    LocalResource flinkConf = Records.newRecord(LocalResource.class);

    // register Flink Jar with remote HDFS
    final Path remoteJarPath = new Path(remoteFlinkJarPath);
    Utils.registerLocalResource(fs, remoteJarPath, flinkJar);

    // register conf with local fs.
    Path remoteConfPath = Utils.setupLocalResource(conf, fs, appId,
            new Path("file://" + currDir + "/flink-conf-modified.yaml"), flinkConf, new Path(clientHomeDir));
    LOG.info("Prepared localresource for modified yaml: " + flinkConf);

    boolean hasLog4j = new File(currDir + "/log4j.properties").exists();
    // prepare the files to ship
    LocalResource[] remoteShipRsc = null;
    String[] remoteShipPaths = shipListString.split(",");
    if (!shipListString.isEmpty()) {
        remoteShipRsc = new LocalResource[remoteShipPaths.length];
        { // scope for i
            int i = 0;
            for (String remoteShipPathStr : remoteShipPaths) {
                if (remoteShipPathStr == null || remoteShipPathStr.isEmpty()) {
                    continue;
                }
                remoteShipRsc[i] = Records.newRecord(LocalResource.class);
                Path remoteShipPath = new Path(remoteShipPathStr);
                Utils.registerLocalResource(fs, remoteShipPath, remoteShipRsc[i]);
                i++;
            }
        }
    }

    // respect custom JVM options in the YAML file
    final String javaOpts = GlobalConfiguration.getString(ConfigConstants.FLINK_JVM_OPTIONS, "");

    // Obtain allocated containers and launch
    int allocatedContainers = 0;
    int completedContainers = 0;
    while (allocatedContainers < taskManagerCount) {
        AllocateResponse response = rmClient.allocate(0);
        for (Container container : response.getAllocatedContainers()) {
            LOG.info("Got new Container for TM " + container.getId() + " on host "
                    + container.getNodeId().getHost());
            ++allocatedContainers;

            // Launch the container by creating a ContainerLaunchContext
            ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);

            String tmCommand = "$JAVA_HOME/bin/java -Xmx" + heapLimit + "m " + javaOpts;
            if (hasLog4j) {
                tmCommand += " -Dlog.file=\"" + ApplicationConstants.LOG_DIR_EXPANSION_VAR
                        + "/taskmanager-log4j.log\" -Dlog4j.configuration=file:log4j.properties";
            }
            tmCommand += " org.apache.flink.yarn.YarnTaskManagerRunner -configDir . " + " 1>"
                    + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/taskmanager-stdout.log" + " 2>"
                    + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/taskmanager-stderr.log";
            ctx.setCommands(Collections.singletonList(tmCommand));

            LOG.info("Starting TM with command=" + tmCommand);

            // copy resources to the TaskManagers.
            Map<String, LocalResource> localResources = new HashMap<String, LocalResource>(2);
            localResources.put("flink.jar", flinkJar);
            localResources.put("flink-conf.yaml", flinkConf);

            // add ship resources
            if (!shipListString.isEmpty()) {
                Preconditions.checkNotNull(remoteShipRsc);
                for (int i = 0; i < remoteShipPaths.length; i++) {
                    localResources.put(new Path(remoteShipPaths[i]).getName(), remoteShipRsc[i]);
                }
            }

            ctx.setLocalResources(localResources);

            // Set up the CLASSPATH for the container (the TaskManager)
            Map<String, String> containerEnv = new HashMap<String, String>();
            Utils.setupEnv(conf, containerEnv); //add flink.jar to class path.
            containerEnv.put(Client.ENV_CLIENT_USERNAME, yarnClientUsername);

            ctx.setEnvironment(containerEnv);

            UserGroupInformation user = UserGroupInformation.getCurrentUser();
            try {
                Credentials credentials = user.getCredentials();
                DataOutputBuffer dob = new DataOutputBuffer();
                credentials.writeTokenStorageToStream(dob);
                ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
                ctx.setTokens(securityTokens);
            } catch (IOException e) {
                LOG.warn("Getting current user info failed when trying to launch the container"
                        + e.getMessage());
            }

            LOG.info("Launching container " + allocatedContainers);
            nmClient.startContainer(container, ctx);
        }
        for (ContainerStatus status : response.getCompletedContainersStatuses()) {
            ++completedContainers;
            LOG.info("Completed container (while allocating) " + status.getContainerId() + ". Total Completed:"
                    + completedContainers);
            LOG.info("Diagnostics " + status.getDiagnostics());
        }
        Thread.sleep(100);
    }

    // Now wait for containers to complete

    while (completedContainers < taskManagerCount) {
        // Cast to float: plain integer division would always report 0 progress.
        AllocateResponse response = rmClient.allocate((float) completedContainers / taskManagerCount);
        for (ContainerStatus status : response.getCompletedContainersStatuses()) {
            ++completedContainers;
            LOG.info("Completed container " + status.getContainerId() + ". Total Completed:"
                    + completedContainers);
            LOG.info("Diagnostics " + status.getDiagnostics());
        }
        Thread.sleep(5000);
    }
    LOG.info("Shutting down JobManager");
    jm.shutdown();

    // Un-register with ResourceManager
    rmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, "", "");

}
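
The allocation loop above polls rmClient.allocate() on a timer. For comparison, the same flow can be written callback-style with AMRMClientAsync; this is a sketch against the Hadoop 2.x API with placeholder handler bodies, not Flink's code.

// Sketch: callback-driven allocation instead of a polling loop.
AMRMClientAsync<ContainerRequest> asyncRmClient = AMRMClientAsync.createAMRMClientAsync(1000,
        new AMRMClientAsync.CallbackHandler() {
            public void onContainersAllocated(List<Container> containers) {
                // launch TaskManager containers here
            }
            public void onContainersCompleted(List<ContainerStatus> statuses) {
                // count completions and log diagnostics here
            }
            public void onShutdownRequest() {
            }
            public void onNodesUpdated(List<NodeReport> updatedNodes) {
            }
            public void onError(Throwable e) {
            }
            public float getProgress() {
                return 0.0f;
            }
        });
asyncRmClient.init(conf);
asyncRmClient.start();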

From source file: org.apache.flink.yarn.appMaster.ApplicationMaster.java

License: Apache License

private void run() throws Exception {
    heapLimit = Utils.calculateHeapSize(memoryPerTaskManager);

    nmClient = NMClient.createNMClient();
    nmClient.init(conf);
    nmClient.start();
    nmClient.cleanupRunningContainersOnStop(true);

    // Register with ResourceManager
    String url = "http://" + applicationMasterHost + ":" + jobManagerWebPort;
    LOG.info("Registering ApplicationMaster with tracking url " + url);
    rmClient.registerApplicationMaster(applicationMasterHost, 0, url);

    // Priority for worker containers - priorities are intra-application
    Priority priority = Records.newRecord(Priority.class);
    priority.setPriority(0);

    // Resource requirements for worker containers
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(memoryPerTaskManager);
    capability.setVirtualCores(coresPerTaskManager);

    // Make container requests to ResourceManager
    for (int i = 0; i < taskManagerCount; ++i) {
        ContainerRequest containerAsk = new ContainerRequest(capability, null, null, priority);
        LOG.info("Requesting TaskManager container " + i);
        rmClient.addContainerRequest(containerAsk);
    }

    LocalResource flinkJar = Records.newRecord(LocalResource.class);
    LocalResource flinkConf = Records.newRecord(LocalResource.class);

    // register Flink Jar with remote HDFS
    final Path remoteJarPath = new Path(remoteFlinkJarPath);
    Utils.registerLocalResource(fs, remoteJarPath, flinkJar);

    // register conf with local fs.
    Utils.setupLocalResource(conf, fs, appId, new Path("file://" + currDir + "/flink-conf-modified.yaml"),
            flinkConf, new Path(clientHomeDir));
    LOG.info("Prepared local resource for modified yaml: " + flinkConf);

    hasLogback = new File(currDir + "/logback.xml").exists();
    // prepare the files to ship
    LocalResource[] remoteShipRsc = null;
    String[] remoteShipPaths = shipListString.split(",");
    if (!shipListString.isEmpty()) {
        remoteShipRsc = new LocalResource[remoteShipPaths.length];
        { // scope for i
            int i = 0;
            for (String remoteShipPathStr : remoteShipPaths) {
                if (remoteShipPathStr == null || remoteShipPathStr.isEmpty()) {
                    continue;
                }
                remoteShipRsc[i] = Records.newRecord(LocalResource.class);
                Path remoteShipPath = new Path(remoteShipPathStr);
                Utils.registerLocalResource(fs, remoteShipPath, remoteShipRsc[i]);
                i++;
            }
        }
    }
    // copy resources to the TaskManagers.
    taskManagerLocalResources = new HashMap<String, LocalResource>(2);
    taskManagerLocalResources.put("flink.jar", flinkJar);
    taskManagerLocalResources.put("flink-conf.yaml", flinkConf);

    // add ship resources
    if (!shipListString.isEmpty()) {
        Preconditions.checkNotNull(remoteShipRsc);
        for (int i = 0; i < remoteShipPaths.length; i++) {
            taskManagerLocalResources.put(new Path(remoteShipPaths[i]).getName(), remoteShipRsc[i]);
        }
    }
    completedContainers = 0;

    // Obtain allocated containers and launch
    StringBuffer containerDiag = new StringBuffer(); // diagnostics log for the containers.
    allocateOutstandingContainer(containerDiag);
    LOG.info("Allocated all initial containers");

    // Now wait for containers to complete
    while (completedContainers < taskManagerCount) {
        // Cast to float: plain integer division would always report 0 progress.
        AllocateResponse response = rmClient.allocate((float) completedContainers / taskManagerCount);
        for (ContainerStatus status : response.getCompletedContainersStatuses()) {
            ++completedContainers;
            LOG.info("Completed container " + status.getContainerId() + ". Total Completed:"
                    + completedContainers);
            LOG.info("Diagnostics " + status.getDiagnostics());
            logDeadContainer(status, containerDiag);
        }
        Thread.sleep(5000);
    }
    if (isClosed) {
        return;
    }
    // Un-register with ResourceManager
    final String diagnosticsMessage = "Application Master shut down after all " + "containers finished\n"
            + containerDiag.toString();
    LOG.info("Diagnostics message: " + diagnosticsMessage);
    rmClient.unregisterApplicationMaster(FinalApplicationStatus.FAILED, diagnosticsMessage, "");
    this.close();
    amRpcServer.stop(); // we need to manually stop the RPC service. Usually, the Client stops the RPC,
    // but at this point, the AM has been shut down (for some reason).
    LOG.info("Application Master shutdown completed.");
}

From source file: org.apache.giraph.yarn.GiraphApplicationMaster.java

License: Apache License

/**
 * Set up the request that will be sent to the RM for the container ask.
 *
 * @return the configured ContainerRequest to be sent to the RM
 */
private ContainerRequest setupContainerAskForRM() {
    // setup requirements for hosts
    // using * as any host will do for the distributed shell app
    // set the priority for the request
    Priority pri = Records.newRecord(Priority.class);
    // TODO - what is the range for priority? how to decide?
    pri.setPriority(GiraphConstants.GIRAPH_YARN_PRIORITY);

    // Set up resource type requirements
    // For now, only memory is supported so we set memory requirements
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(heapPerContainer);

    ContainerRequest request = new ContainerRequest(capability, null, null, pri);
    LOG.info("Requested container ask: " + request.toString());
    return request;
}

From source file: org.apache.hama.bsp.JobImpl.java

License: Apache License

private List<ResourceRequest> createBSPTaskRequest(int numTasks, int memoryInMb, int priority) {

    List<ResourceRequest> reqList = new ArrayList<ResourceRequest>(numTasks);
    for (int i = 0; i < numTasks; i++) {
        // Resource Request
        ResourceRequest rsrcRequest = Records.newRecord(ResourceRequest.class);

        // setup requirements for hosts
        // whether a particular rack/host is needed
        // useful for applications that are sensitive
        // to data locality
        rsrcRequest.setResourceName("*");

        // set the priority for the request
        Priority pri = Records.newRecord(Priority.class);
        pri.setPriority(priority);
        rsrcRequest.setPriority(pri);

        // Set up resource type requirements
        // For now, only memory is supported so we set memory requirements
        Resource capability = Records.newRecord(Resource.class);
        capability.setMemory(memoryInMb);
        rsrcRequest.setCapability(capability);

        // set no. of containers needed
        // matching the specifications
        rsrcRequest.setNumContainers(numBSPTasks);
        reqList.add(rsrcRequest);
    }
    return reqList;
}
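
These raw ResourceRequest objects target the low-level ApplicationMasterProtocol rather than the AMRMClient wrapper used in the other examples. A hedged sketch of how such a list is typically shipped to the RM on Hadoop 2.x; the resourceManager proxy and responseId counter are assumptions:

// Illustrative: send the ask on the low-level protocol.
List<ResourceRequest> ask = createBSPTaskRequest(numTasks, 1024, 0);
AllocateRequest allocateRequest = AllocateRequest.newInstance(responseId, 0.0f, ask,
        Collections.<ContainerId>emptyList(), null /* blacklist */);
AllocateResponse allocateResponse = resourceManager.allocate(allocateRequest);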

From source file: org.apache.helix.provisioning.yarn.AppLauncher.java

License: Apache License

public boolean launch() throws Exception {
    LOG.info("Running Client");
    yarnClient.start();

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    // TODO get min/max resource capabilities from RM and change memory ask if needed
    // If we do not have min/max, we may not be able to correctly request
    // the required resources from the RM for the app master
    // Memory ask has to be a multiple of min and less than max.
    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    _appId = appContext.getApplicationId();
    _appMasterConfig.setAppId(_appId.getId());
    String appName = _applicationSpec.getAppName();
    _appMasterConfig.setAppName(appName);
    _appMasterConfig.setApplicationSpecFactory(_applicationSpecFactory.getClass().getCanonicalName());
    appContext.setApplicationName(appName);

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    LOG.info("Copy Application archive file from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem
    // Create a local resource to point to the destination jar path
    FileSystem fs = FileSystem.get(_conf);

    // get packages for each component packages
    Map<String, URI> packages = new HashMap<String, URI>();
    packages.put(AppMasterConfig.AppEnvironment.APP_MASTER_PKG.toString(), appMasterArchive.toURI());
    packages.put(AppMasterConfig.AppEnvironment.APP_SPEC_FILE.toString(), _yamlConfigFile.toURI());
    for (String serviceName : _applicationSpec.getServices()) {
        packages.put(serviceName, _applicationSpec.getServicePackage(serviceName));
    }
    Map<String, Path> hdfsDest = new HashMap<String, Path>();
    Map<String, String> classpathMap = new HashMap<String, String>();
    for (String name : packages.keySet()) {
        URI uri = packages.get(name);
        Path dst = copyToHDFS(fs, name, uri);
        hdfsDest.put(name, dst);
        String classpath = generateClasspathAfterExtraction(name, new File(uri));
        classpathMap.put(name, classpath);
        _appMasterConfig.setClasspath(name, classpath);
        String serviceMainClass = _applicationSpec.getServiceMainClass(name);
        if (serviceMainClass != null) {
            _appMasterConfig.setMainClass(name, serviceMainClass);
        }
    }

    // Get YAML files describing all workflows to immediately start
    Map<String, URI> workflowFiles = new HashMap<String, URI>();
    List<TaskConfig> taskConfigs = _applicationSpec.getTaskConfigs();
    if (taskConfigs != null) {
        for (TaskConfig taskConfig : taskConfigs) {
            URI configUri = taskConfig.getYamlURI();
            if (taskConfig.name != null && configUri != null) {
                workflowFiles.put(taskConfig.name, taskConfig.getYamlURI());
            }
        }
    }

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    LocalResource appMasterPkg = setupLocalResource(fs,
            hdfsDest.get(AppMasterConfig.AppEnvironment.APP_MASTER_PKG.toString()));
    LocalResource appSpecFile = setupLocalResource(fs,
            hdfsDest.get(AppMasterConfig.AppEnvironment.APP_SPEC_FILE.toString()));
    localResources.put(AppMasterConfig.AppEnvironment.APP_MASTER_PKG.toString(), appMasterPkg);
    localResources.put(AppMasterConfig.AppEnvironment.APP_SPEC_FILE.toString(), appSpecFile);
    for (String name : workflowFiles.keySet()) {
        URI uri = workflowFiles.get(name);
        Path dst = copyToHDFS(fs, name, uri);
        LocalResource taskLocalResource = setupLocalResource(fs, dst);
        localResources.put(AppMasterConfig.AppEnvironment.TASK_CONFIG_FILE.toString() + "_" + name,
                taskLocalResource);
    }

    // Set local resource info into app master container launch context
    amContainer.setLocalResources(localResources);

    // Set the necessary security tokens as needed
    // amContainer.setContainerTokens(containerToken);

    // Add AppMaster.jar location to classpath
    // At some point we should not be required to add
    // the hadoop specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$()).append(File.pathSeparatorChar)
            .append("./*").append(File.pathSeparatorChar);
    classPathEnv.append(classpathMap.get(AppMasterConfig.AppEnvironment.APP_MASTER_PKG.toString()));
    for (String c : _conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");

    // add the runtime classpath needed for tests to work
    if (_conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }
    LOG.info("\n\n Setting the classpath to launch AppMaster:\n\n");
    // Set the env variables to be setup in the env where the application master will be run
    Map<String, String> env = new HashMap<String, String>(_appMasterConfig.getEnv());
    env.put("CLASSPATH", classPathEnv.toString());

    amContainer.setEnvironment(env);

    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);

    // Set java executable command
    LOG.info("Setting up app master launch command");
    vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
    int amMemory = 4096;
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    // Set class name
    vargs.add(AppMasterLauncher.class.getCanonicalName());
    // Set params for Application Master
    // vargs.add("--num_containers " + String.valueOf(numContainers));

    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    amContainer.setCommands(commands);

    // Set up resource type requirements
    // For now, only memory is supported so we set memory requirements
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMemory);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application
    // Not needed in this scenario
    // amContainer.setServiceData(serviceData);

    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = _conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    int amPriority = 0;
    // TODO - what is the range for priority? how to decide?
    pri.setPriority(amPriority);
    appContext.setPriority(pri);

    String amQueue = "default";
    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);

    LOG.info("Submitting application to YARN Resource Manager");

    ApplicationId applicationId = yarnClient.submitApplication(appContext);

    LOG.info("Submitted application with applicationId:" + applicationId);

    return true;
}

From source file: org.apache.helix.provisioning.yarn.YarnProvisioner.java

License: Apache License

private ContainerRequest setupContainerAskForRM(ContainerSpec spec) {
    // setup requirements for hosts
    // using * as any host will do for the distributed shell app
    // set the priority for the request
    Priority pri = Records.newRecord(Priority.class);
    int requestPriority = 0;
    // TODO - what is the range for priority? how to decide?
    pri.setPriority(requestPriority);

    // Set up resource type requirements
    // For now, only memory is supported so we set memory requirements
    Resource capability = Records.newRecord(Resource.class);
    int memory = spec.getMemory();
    capability.setMemory(memory);

    ContainerRequest request = new ContainerRequest(capability, null, null, pri);
    LOG.info("Requested container ask: " + request.toString());
    return request;
}

From source file: org.apache.hoya.core.launch.AppMasterLauncher.java

License: Apache License

/**
 * Complete the launch context (copy in env vars, etc).
 * @return the container to launch
 */
public ApplicationSubmissionContext completeAppMasterLaunch() throws IOException {

    //queue priority
    Priority pri = Records.newRecord(Priority.class);
    pri.setPriority(priority);
    submissionContext.setPriority(pri);

    // Set the queue to which this application is to be submitted in the RM
    // Queue for App master

    submissionContext.setQueue(queue);

    //container requirements
    submissionContext.setResource(resource);

    if (keepContainersOverRestarts && AMRestartSupport.keepContainersAcrossSubmissions(submissionContext)) {
        log.debug("Requesting cluster stays running over AM failure");
    }

    submissionContext.setMaxAppAttempts(maxAppAttempts);

    if (secureCluster) {
        addSecurityTokens();
    } else {
        propagateUsernameInInsecureCluster();
    }
    completeContainerLaunch();
    submissionContext.setAMContainerSpec(containerLaunchContext);
    return submissionContext;

}
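
For completeness, the context returned here is what ultimately goes to YarnClient.submitApplication. A short sketch of a plausible call site; the launcher and yarnClient names are assumptions:

// Illustrative call site for the method above.
ApplicationSubmissionContext submissionContext = launcher.completeAppMasterLaunch();
ApplicationId applicationId = yarnClient.submitApplication(submissionContext);
LOG.info("Submitted AM with applicationId: " + applicationId);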