Example usage for org.apache.hadoop.yarn.api.records Resource newInstance

List of usage examples for org.apache.hadoop.yarn.api.records Resource newInstance

Introduction

On this page you can find example usage of org.apache.hadoop.yarn.api.records.Resource#newInstance, collected from open-source projects.

Prototype

@Public
@Stable
public static Resource newInstance(long memory, int vCores)
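
A minimal sketch of calling this factory method (assuming a Hadoop version where the long-based memory overload and Resource#getMemorySize() are available; the class name below is illustrative):

import org.apache.hadoop.yarn.api.records.Resource;

public class ResourceNewInstanceSketch {
    public static void main(String[] args) {
        // 1024 MB of memory and 2 virtual cores
        Resource capability = Resource.newInstance(1024, 2);

        // The accessors mirror the two arguments
        System.out.println("memory=" + capability.getMemorySize()
                + " MB, vCores=" + capability.getVirtualCores());
    }
}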

Usage

From source file:io.hops.util.DBUtility.java

License:Apache License

public static RMNode processHopRMNodeCompsForScheduler(RMNodeComps hopRMNodeComps, RMContext rmContext)
        throws InvalidProtocolBufferException {
    org.apache.hadoop.yarn.api.records.NodeId nodeId;
    RMNode rmNode = null;
    if (hopRMNodeComps != null) {
        nodeId = ConverterUtils.toNodeId(hopRMNodeComps.getRMNodeId());
        rmNode = rmContext.getRMNodes().get(nodeId);

        // The first time we are receiving the RMNode, this will happen when the node registers
        if (rmNode == null) {
            // Retrieve heartbeat
            boolean nextHeartbeat = true;

            // Create Resource
            Resource resource = null;
            if (hopRMNodeComps.getHopResource() != null) {
                resource = Resource.newInstance(hopRMNodeComps.getHopResource().getMemory(),
                        hopRMNodeComps.getHopResource().getVirtualCores());
            } else {
                LOG.error("ResourceOption should not be null");
                resource = Resource.newInstance(0, 0);
            }
            /*rmNode = new RMNodeImplDist(nodeId, rmContext, hopRMNodeComps.getHopRMNode().getHostName(),
                    hopRMNodeComps.getHopRMNode().getCommandPort(),
                    hopRMNodeComps.getHopRMNode().getHttpPort(),
                    ResourceTrackerService.resolve(hopRMNodeComps.getHopRMNode().getHostName()),
                    resourceOption,
                    hopRMNodeComps.getHopRMNode().getNodemanagerVersion(),
                    hopRMNodeComps.getHopRMNode().getHealthReport(),
                    hopRMNodeComps.getHopRMNode().getLastHealthReportTime(),
                    nextHeartbeat);*/

            rmNode = new RMNodeImplDist(nodeId, rmContext, hopRMNodeComps.getHopRMNode().getHostName(),
                    hopRMNodeComps.getHopRMNode().getCommandPort(), hopRMNodeComps.getHopRMNode().getHttpPort(),
                    ResourceTrackerService.resolve(hopRMNodeComps.getHopRMNode().getHostName()), resource,
                    hopRMNodeComps.getHopRMNode().getNodemanagerVersion());

            // Force Java to put the host in cache
            NetUtils.createSocketAddrForHost(nodeId.getHost(), nodeId.getPort());
        }

        // Update the RMNode
        if (hopRMNodeComps.getHopRMNode() != null) {
            ((RMNodeImplDist) rmNode).setState(hopRMNodeComps.getHopRMNode().getCurrentState());
        }
        if (hopRMNodeComps.getHopUpdatedContainerInfo() != null) {
            List<io.hops.metadata.yarn.entity.UpdatedContainerInfo> hopUpdatedContainerInfoList = hopRMNodeComps
                    .getHopUpdatedContainerInfo();

            if (hopUpdatedContainerInfoList != null && !hopUpdatedContainerInfoList.isEmpty()) {
                ConcurrentLinkedQueue<org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo> updatedContainerInfoQueue = new ConcurrentLinkedQueue<>();

                Map<Integer, org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo> ucis = new HashMap<>();
                LOG.debug(hopRMNodeComps.getRMNodeId() + " getting ucis " + hopUpdatedContainerInfoList.size()
                        + " pending event " + hopRMNodeComps.getPendingEvent().getId().getEventId());

                for (io.hops.metadata.yarn.entity.UpdatedContainerInfo hopUCI : hopUpdatedContainerInfoList) {
                    if (!ucis.containsKey(hopUCI.getUpdatedContainerInfoId())) {
                        ucis.put(hopUCI.getUpdatedContainerInfoId(),
                                new org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo(
                                        new ArrayList<org.apache.hadoop.yarn.api.records.ContainerStatus>(),
                                        new ArrayList<org.apache.hadoop.yarn.api.records.ContainerStatus>(),
                                        hopUCI.getUpdatedContainerInfoId()));
                    }

                    ContainerId cid = ConverterUtils.toContainerId(hopUCI.getContainerId());
                    io.hops.metadata.yarn.entity.ContainerStatus hopContainerStatus = hopRMNodeComps
                            .getHopContainersStatusMap().get(hopUCI.getContainerId());

                    org.apache.hadoop.yarn.api.records.ContainerStatus conStatus = org.apache.hadoop.yarn.api.records.ContainerStatus
                            .newInstance(cid, ContainerState.valueOf(hopContainerStatus.getState()),
                                    hopContainerStatus.getDiagnostics(), hopContainerStatus.getExitstatus());

                    // Check ContainerStatus state to add it in the appropriate list
                    if (conStatus != null) {
                        LOG.debug("add uci for container " + conStatus.getContainerId() + " status "
                                + conStatus.getState());
                        if (conStatus.getState().equals(ContainerState.RUNNING)) {
                            ucis.get(hopUCI.getUpdatedContainerInfoId()).getNewlyLaunchedContainers()
                                    .add(conStatus);
                        } else if (conStatus.getState().equals(ContainerState.COMPLETE)) {
                            ucis.get(hopUCI.getUpdatedContainerInfoId()).getCompletedContainers()
                                    .add(conStatus);
                        }
                    }
                }

                for (org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo uci : ucis
                        .values()) {
                    updatedContainerInfoQueue.add(uci);
                }

                ((RMNodeImplDist) rmNode).setUpdatedContainerInfo(updatedContainerInfoQueue);
            } else {
                LOG.debug(hopRMNodeComps.getRMNodeId()
                        + " hopUpdatedContainerInfoList = null || hopUpdatedContainerInfoList.isEmpty() "
                        + hopRMNodeComps.getPendingEvent().getId().getEventId());
            }
        } else {
            LOG.debug(hopRMNodeComps.getRMNodeId() + " hopRMNodeFull.getHopUpdatedContainerInfo()=null "
                    + hopRMNodeComps.getPendingEvent().getId().getEventId());
        }
    }

    return rmNode;
}

From source file:org.apache.flink.yarn.YarnFlinkResourceManager.java

License:Apache License

@Override
protected void requestNewWorkers(int numWorkers) {
    final long mem = taskManagerParameters.taskManagerTotalMemoryMB();
    final int containerMemorySizeMB;

    if (mem <= Integer.MAX_VALUE) {
        containerMemorySizeMB = (int) mem;
    } else {
        containerMemorySizeMB = Integer.MAX_VALUE;
        LOG.error("Decreasing container size from {} MB to {} MB (integer value overflow)", mem,
                containerMemorySizeMB);
    }

    for (int i = 0; i < numWorkers; i++) {
        numPendingContainerRequests++;
        LOG.info("Requesting new TaskManager container with {} megabytes memory. Pending requests: {}",
                containerMemorySizeMB, numPendingContainerRequests);

        // Priority for worker containers - priorities are intra-application
        Priority priority = Priority.newInstance(0);

        // Resource requirements for worker containers
        int taskManagerSlots = Integer.valueOf(System.getenv(YarnConfigKeys.ENV_SLOTS));
        int vcores = config.getInteger(ConfigConstants.YARN_VCORES, Math.max(taskManagerSlots, 1));
        Resource capability = Resource.newInstance(containerMemorySizeMB, vcores);

        resourceManagerClient
                .addContainerRequest(new AMRMClient.ContainerRequest(capability, null, null, priority));
    }

    // make sure we transmit the request fast and receive fast news of granted allocations
    resourceManagerClient.setHeartbeatInterval(FAST_YARN_HEARTBEAT_INTERVAL_MS);
}

From source file:org.apache.flink.yarn.YarnResourceManager.java

License:Apache License

@Override
public void startNewWorker(ResourceProfile resourceProfile) {
    // Priority for worker containers - priorities are intra-application
    //TODO: set priority according to the resource allocated
    Priority priority = Priority.newInstance(generatePriority(resourceProfile));
    int mem = resourceProfile.getMemoryInMB() < 0 ? DEFAULT_TSK_EXECUTOR_MEMORY_SIZE
            : (int) resourceProfile.getMemoryInMB();
    int vcore = resourceProfile.getCpuCores() < 1 ? 1 : (int) resourceProfile.getCpuCores();
    Resource capability = Resource.newInstance(mem, vcore);
    requestYarnContainer(capability, priority);
}

From source file:org.apache.flink.yarn.YarnResourceManagerTest.java

License:Apache License

private static Container mockContainer(String host, int port, int containerId) {
    Container mockContainer = mock(Container.class);

    NodeId mockNodeId = NodeId.newInstance(host, port);
    ContainerId mockContainerId = ContainerId.newInstance(
            ApplicationAttemptId.newInstance(ApplicationId.newInstance(System.currentTimeMillis(), 1), 1),
            containerId);

    when(mockContainer.getId()).thenReturn(mockContainerId);
    when(mockContainer.getNodeId()).thenReturn(mockNodeId);
    when(mockContainer.getResource()).thenReturn(Resource.newInstance(200, 1));
    when(mockContainer.getPriority()).thenReturn(Priority.UNDEFINED);

    return mockContainer;
}

From source file:org.apache.gobblin.yarn.GobblinYarnAppLauncher.java

License:Apache License

private Resource prepareContainerResource(GetNewApplicationResponse newApplicationResponse) {
    int memoryMbs = this.appMasterMemoryMbs;
    int maximumMemoryCapacity = newApplicationResponse.getMaximumResourceCapability().getMemory();
    if (memoryMbs > maximumMemoryCapacity) {
        LOGGER.info(String.format(
                "Specified AM memory [%d] is above the maximum memory capacity [%d] of the "
                        + "cluster, using the maximum memory capacity instead.",
                memoryMbs, maximumMemoryCapacity));
        memoryMbs = maximumMemoryCapacity;
    }

    int vCores = this.config.getInt(GobblinYarnConfigurationKeys.APP_MASTER_CORES_KEY);
    int maximumVirtualCoreCapacity = newApplicationResponse.getMaximumResourceCapability().getVirtualCores();
    if (vCores > maximumVirtualCoreCapacity) {
        LOGGER.info(String.format(
                "Specified AM vcores [%d] is above the maximum vcore capacity [%d] of the "
                        + "cluster, using the maximum vcore capacity instead.",
                vCores, maximumVirtualCoreCapacity));
        vCores = maximumVirtualCoreCapacity;
    }

    // Set up resource type requirements for ApplicationMaster
    return Resource.newInstance(memoryMbs, vCores);
}

From source file:org.apache.gobblin.yarn.YarnServiceTest.java

License:Apache License

private void startApp() throws Exception {
    // submit a dummy app
    ApplicationSubmissionContext appSubmissionContext = yarnClient.createApplication()
            .getApplicationSubmissionContext();
    this.applicationId = appSubmissionContext.getApplicationId();

    ContainerLaunchContext containerLaunchContext = BuilderUtils.newContainerLaunchContext(
            Collections.emptyMap(), Collections.emptyMap(), Arrays.asList("sleep", "100"),
            Collections.emptyMap(), null, Collections.emptyMap());

    // Setup the application submission context
    appSubmissionContext.setApplicationName("TestApp");
    appSubmissionContext.setResource(Resource.newInstance(128, 1));
    appSubmissionContext.setPriority(Priority.newInstance(0));
    appSubmissionContext.setAMContainerSpec(containerLaunchContext);

    this.yarnClient.submitApplication(appSubmissionContext);

    // wait for application to be accepted
    int i;
    RMAppAttempt attempt = null;
    for (i = 0; i < 120; i++) {
        ApplicationReport appReport = yarnClient.getApplicationReport(applicationId);

        if (appReport.getYarnApplicationState() == YarnApplicationState.ACCEPTED) {
            this.applicationAttemptId = appReport.getCurrentApplicationAttemptId();
            attempt = yarnCluster.getResourceManager().getRMContext().getRMApps()
                    .get(appReport.getCurrentApplicationAttemptId().getApplicationId()).getCurrentAppAttempt();
            break;
        }
        Thread.sleep(1000);
    }

    Assert.assertTrue(i < 120, "timed out waiting for ACCEPTED state");

    // Set the AM-RM token in the UGI for access during testing
    UserGroupInformation.setLoginUser(
            UserGroupInformation.createRemoteUser(UserGroupInformation.getCurrentUser().getUserName()));
    UserGroupInformation.getCurrentUser().addToken(attempt.getAMRMToken());
}

From source file:org.apache.gobblin.yarn.YarnServiceTest.java

License:Apache License

@Test(groups = { "gobblin.yarn", "disabledOnTravis" }, dependsOnMethods = "testReleasedContainerCache")
public void testBuildContainerCommand() throws Exception {
    Config modifiedConfig = this.config
            .withValue(GobblinYarnConfigurationKeys.CONTAINER_JVM_MEMORY_OVERHEAD_MBS_KEY,
                    ConfigValueFactory.fromAnyRef("10"))
            .withValue(GobblinYarnConfigurationKeys.CONTAINER_JVM_MEMORY_XMX_RATIO_KEY,
                    ConfigValueFactory.fromAnyRef("0.8"));

    TestYarnService yarnService = new TestYarnService(modifiedConfig, "testApp2", "appId2", this.clusterConf,
            FileSystem.getLocal(new Configuration()), this.eventBus);

    ContainerId containerId = ContainerId
            .newInstance(ApplicationAttemptId.newInstance(ApplicationId.newInstance(1, 0), 0), 0);
    Resource resource = Resource.newInstance(2048, 1);
    Container container = Container.newInstance(containerId, null, null, resource, null, null);

    String command = yarnService.buildContainerCommand(container, "helixInstance1");

    // 1628 is from 2048 * 0.8 - 10
    Assert.assertTrue(command.contains("-Xmx1628"));
}
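
For reference, the expected -Xmx value in the assertion above follows from the container memory, the Xmx ratio, and the JVM overhead configured in this test. A small arithmetic sketch (the class below is illustrative, not Gobblin's API; the exact rounding order inside Gobblin may differ, but both orderings give 1628 here):

public class XmxArithmeticSketch {
    public static void main(String[] args) {
        int containerMemoryMbs = 2048;   // from Resource.newInstance(2048, 1)
        double xmxRatio = 0.8;           // CONTAINER_JVM_MEMORY_XMX_RATIO_KEY
        int jvmOverheadMbs = 10;         // CONTAINER_JVM_MEMORY_OVERHEAD_MBS_KEY

        // (int)(2048 * 0.8) = 1638; 1638 - 10 = 1628
        int xmxMbs = (int) (containerMemoryMbs * xmxRatio) - jvmOverheadMbs;
        System.out.println("-Xmx" + xmxMbs); // -Xmx1628
    }
}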

From source file:org.apache.hama.bsp.ApplicationMaster.java

License:Apache License

/**
 * Setup the request that will be sent to the RM for the container ask.
 *
 * @return the setup ResourceRequest to be sent to RM
 */
private AMRMClient.ContainerRequest setupContainerAskForRM() {
    // setup requirements for hosts
    // using * as any host will do for the distributed shell app
    // set the priority for the request
    // TODO - what is the range for priority? how to decide?
    Priority pri = Priority.newInstance(requestPriority);

    // Set up resource type requirements
    // For now, memory and CPU are supported so we set memory and cpu
    // requirements
    Resource capability = Resource.newInstance(containerMemory, containerVirtualCores);

    AMRMClient.ContainerRequest request = new AMRMClient.ContainerRequest(capability, null, null, pri);
    LOG.info("Requested container ask: " + request.toString());
    return request;
}

From source file:org.apache.metron.maas.service.Client.java

License:Apache License

/**
 * Main run function for the client
 * @return true if application completed successfully
 * @throws IOException
 * @throws YarnException
 */
public boolean run() throws IOException, YarnException {

    LOG.info("Running Client");
    yarnClient.start();

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());

    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    LOG.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress"
                + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers"
                + node.getNumContainers());
    }

    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity="
            + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
            + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount="
            + queueInfo.getChildQueues().size());

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                    + userAcl.name());
        }
    }

    if (domainId != null && domainId.length() > 0 && toCreateDomain) {
        prepareTimelineDomain();
    }

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    // TODO get min/max resource capabilities from RM and change memory ask if needed
    // If we do not have min/max, we may not be able to correctly request
    // the required resources from the RM for the app master
    // Memory ask has to be a multiple of min and less than max.
    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    // A resource ask cannot exceed the max.
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified="
                + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    int maxVCores = appResponse.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max virtual cores capabililty of resources in this cluster " + maxVCores);

    if (amVCores > maxVCores) {
        LOG.info("AM virtual cores specified above max threshold of cluster. " + "Using max value."
                + ", specified=" + amVCores + ", max=" + maxVCores);
        amVCores = maxVCores;
    }

    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();

    appContext.setKeepContainersAcrossApplicationAttempts(keepContainers);
    appContext.setApplicationName(appName);

    if (attemptFailuresValidityInterval >= 0) {
        appContext.setAttemptFailuresValidityInterval(attemptFailuresValidityInterval);
    }

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem
    // Create a local resource to point to the destination jar path
    FileSystem fs = FileSystem.get(conf);
    Path ajPath = addToLocalResources(fs, appMasterJar, appMasterJarPath, appId.toString(), localResources,
            null);

    // Set the log4j properties if needed
    if (!log4jPropFile.isEmpty()) {
        addToLocalResources(fs, log4jPropFile, log4jPath, appId.toString(), localResources, null);
    }

    // Set the necessary security tokens as needed
    //amContainer.setContainerTokens(containerToken);

    // Set the env variables to be setup in the env where the application master will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();

    // put location of shell script into env
    // using the env info, the application master will create the correct local resource for the
    // eventual containers that will be launched to execute the shell scripts
    if (domainId != null && domainId.length() > 0) {
        env.put(Constants.TIMELINEDOMAIN, domainId);
    }

    // Add AppMaster.jar location to classpath
    // At some point we should not be required to add
    // the hadoop specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$$())
            .append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH)) {
        classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./log4j.properties");

    // add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }

    env.put("CLASSPATH", classPathEnv.toString());

    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);

    // Set java executable command
    LOG.info("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    // Set class name
    vargs.add(appMasterMainClass);
    // Set params for Application Master
    vargs.add(ApplicationMaster.AMOptions.toArgs(ApplicationMaster.AMOptions.ZK_QUORUM.of(zkQuorum),
            ApplicationMaster.AMOptions.ZK_ROOT.of(zkRoot),
            ApplicationMaster.AMOptions.APP_JAR_PATH.of(ajPath.toString())));
    if (null != nodeLabelExpression) {
        appContext.setNodeLabelExpression(nodeLabelExpression);
    }
    for (Map.Entry<String, String> entry : shellEnv.entrySet()) {
        vargs.add("--shell_env " + entry.getKey() + "=" + entry.getValue());
    }

    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = ContainerLaunchContext.newInstance(localResources, env, commands, null,
            null, null);

    // Set up resource type requirements
    // For now, both memory and vcores are supported, so we set memory and
    // vcores requirements
    Resource capability = Resource.newInstance(amMemory, amVCores);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application
    // Not needed in this scenario

    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        // Note: Credentials class is marked as LimitedPrivate for HDFS and MapReduce
        Credentials credentials = new Credentials();
        String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Priority.newInstance(amPriority);
    appContext.setPriority(pri);

    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);

    // Submit the application to the applications manager
    // SubmitApplicationResponse submitResp = applicationsManager.submitApplication(appRequest);
    // Ignore the response as either a valid response object is returned on success
    // or an exception thrown to denote some form of a failure
    LOG.info("Submitting application to ASM");

    yarnClient.submitApplication(appContext);

    // Monitor the application
    return monitorApplication(appId);

}

From source file:org.apache.metron.maas.service.yarn.Resources.java

License:Apache License

public static Resource toResource(EnumMap<Resources, Integer> resourceMap) {
    return Resource.newInstance(resourceMap.get(Resources.MEMORY), resourceMap.get(Resources.V_CORE));
}
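
A hedged usage sketch for the helper above (the Resources.MEMORY and Resources.V_CORE keys are taken from the method body; the values, wrapper class, and printed format are illustrative):

import java.util.EnumMap;

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.metron.maas.service.yarn.Resources;

public class ToResourceSketch {
    public static void main(String[] args) {
        // Populate the EnumMap that toResource expects
        EnumMap<Resources, Integer> resourceMap = new EnumMap<>(Resources.class);
        resourceMap.put(Resources.MEMORY, 512); // memory in MB
        resourceMap.put(Resources.V_CORE, 1);   // virtual cores

        Resource resource = Resources.toResource(resourceMap);
        System.out.println(resource); // e.g. <memory:512, vCores:1>
    }
}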