Example usage for org.apache.hadoop.yarn.api.records Resource getMemory

Introduction

This page collects example usages of org.apache.hadoop.yarn.api.records.Resource#getMemory.

Prototype

@Public
@Deprecated
public abstract int getMemory();

Document

Get the memory of the resource. This method is deprecated; use Resource#getMemorySize() instead.
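
A minimal sketch of the migration (the values are illustrative):

Resource resource = Resource.newInstance(4096, 2); // 4096 MB, 2 virtual cores
int memoryMb = resource.getMemory();           // deprecated, returns int
long memorySizeMb = resource.getMemorySize();  // preferred, returns long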

Usage

From source file:org.apache.twill.internal.yarn.Hadoop21YarnAMClient.java

License:Apache License

@Override
protected Resource adjustCapability(Resource resource) {
    int cores = resource.getVirtualCores();
    int updatedCores = Math.min(cores, maxCapability.getVirtualCores());

    if (cores != updatedCores) {
        resource.setVirtualCores(updatedCores);
        LOG.info("Adjust virtual cores requirement from {} to {}.", cores, updatedCores);
    }

    // Capture the original value before calling setMemory so the log shows the actual change.
    int memory = resource.getMemory();
    int updatedMemory = Math.min(memory, maxCapability.getMemory());
    if (memory != updatedMemory) {
        resource.setMemory(updatedMemory);
        LOG.info("Adjust memory requirement from {} to {} MB.", memory, updatedMemory);
    }

    return resource;
}
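
Note that the request is clamped against maxCapability because the ResourceManager rejects container requests that exceed the cluster's maximum allocation; capping up front avoids a failed submission.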

From source file:org.apache.twill.internal.yarn.Hadoop21YarnAppClient.java

License:Apache License

@Override
public ProcessLauncher<ApplicationMasterInfo> createLauncher(TwillSpecification twillSpec,
        @Nullable String schedulerQueue) throws Exception {
    // Request for new application
    YarnClientApplication application = yarnClient.createApplication();
    final GetNewApplicationResponse response = application.getNewApplicationResponse();
    final ApplicationId appId = response.getApplicationId();

    // Setup the context for application submission
    final ApplicationSubmissionContext appSubmissionContext = application.getApplicationSubmissionContext();
    appSubmissionContext.setApplicationId(appId);
    appSubmissionContext.setApplicationName(twillSpec.getName());

    if (schedulerQueue != null) {
        appSubmissionContext.setQueue(schedulerQueue);
    }

    // TODO: Make it adjustable through TwillSpec (TWILL-90)
    // Set the resource requirement for AM
    final Resource capability = adjustMemory(response, Resource.newInstance(Constants.APP_MASTER_MEMORY_MB, 1));
    ApplicationMasterInfo appMasterInfo = new ApplicationMasterInfo(appId, capability.getMemory(),
            capability.getVirtualCores());

    ApplicationSubmitter submitter = new ApplicationSubmitter() {
        @Override
        public ProcessController<YarnApplicationReport> submit(YarnLaunchContext context) {
            ContainerLaunchContext launchContext = context.getLaunchContext();

            appSubmissionContext.setAMContainerSpec(launchContext);
            appSubmissionContext.setResource(capability);
            appSubmissionContext.setMaxAppAttempts(2);

            try {
                yarnClient.submitApplication(appSubmissionContext);
                return new ProcessControllerImpl(yarnClient, appId);
            } catch (Exception e) {
                LOG.error("Failed to submit application {}", appId, e);
                throw Throwables.propagate(e);
            }
        }
    };

    return new ApplicationMasterProcessLauncher(appMasterInfo, submitter);
}

From source file:org.apache.twill.yarn.PlacementPolicyTestRun.java

License:Apache License

/**
 * Verify the cluster configuration (number and capability of node managers) required for the tests.
 */
@BeforeClass
public static void verifyClusterCapability() throws InterruptedException {
    // Skip the checks when running against older Hadoop versions that do not support blacklists.
    Assume.assumeTrue(YarnUtils.getHadoopVersion().equals(YarnUtils.HadoopVersions.HADOOP_22));

    // All runnables in this test class use the same resource specification for convenience.
    resource = ResourceSpecification.Builder.with().setVirtualCores(RUNNABLE_CORES)
            .setMemory(RUNNABLE_MEMORY, ResourceSpecification.SizeUnit.MEGA).build();
    twoInstancesResource = ResourceSpecification.Builder.with().setVirtualCores(RUNNABLE_CORES)
            .setMemory(RUNNABLE_MEMORY, ResourceSpecification.SizeUnit.MEGA).setInstances(2).build();

    // The tests need exactly three NodeManagers in the cluster.
    int trials = 0;
    while (trials++ < 20) {
        try {
            nodeReports = TWILL_TESTER.getNodeReports();
            if (nodeReports != null && nodeReports.size() == 3) {
                break;
            }
        } catch (Exception e) {
            LOG.error("Failed to get node reports", e);
        }
        LOG.warn("NodeManagers != 3. {}", nodeReports);
        TimeUnit.SECONDS.sleep(1);
    }

    // All NodeManagers should have enough capacity available to accommodate at least two runnables.
    for (NodeReport nodeReport : nodeReports) {
        Resource capability = nodeReport.getCapability();
        Resource used = nodeReport.getUsed();
        Assert.assertNotNull(capability);
        if (used != null) {
            Assert.assertTrue(2 * resource.getMemorySize() < capability.getMemory() - used.getMemory());
        } else {
            Assert.assertTrue(2 * resource.getMemorySize() < capability.getMemory());
        }
    }
}
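
Note that the capacity check compares the requested specification's getMemorySize() against the deprecated Resource#getMemory() on the node reports; both values are in MB, so the headroom comparison holds.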

From source file:org.apache.twill.yarn.PlacementPolicyTestRun.java

License:Apache License

/**
 * Helper function to verify DISTRIBUTED placement policies.
 * @return the number of NodeManagers on which runnables were provisioned.
 */
private int getProvisionedNodeManagerCount() throws Exception {
    int provisionedNodeManagerCount = 0;
    for (NodeReport nodeReport : getNodeReports()) {
        Resource used = nodeReport.getUsed();
        if (used != null && used.getMemory() > 0) {
            provisionedNodeManagerCount++;
        }
    }
    return provisionedNodeManagerCount;
}

From source file:org.springframework.yarn.boot.MockUtils.java

License:Apache License

public static Resource getMockResource(int mem, int cores) {
    Resource resource = mock(Resource.class);
    when(resource.getMemory()).thenReturn(mem);
    when(resource.getVirtualCores()).thenReturn(cores);
    return resource;
}
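
A short usage sketch for this helper (the assertions are illustrative and assume JUnit static imports):

Resource resource = MockUtils.getMockResource(2048, 4);
assertEquals(2048, resource.getMemory());
assertEquals(4, resource.getVirtualCores());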

From source file:probos.TestKittenUtils2.java

License:Open Source License

@Test
public void testLuaMemory() throws Exception {
    PBSJob job;
    int mem = 32000;
    job = UtilsForTest.getSimpleJob("testLuaMemory", "#PBS -l nodes=1:mem=" + mem);

    LuaApplicationMasterParameters lamp = testJobCompilesMaster(job);
    List<ContainerLaunchParameters> clpI = lamp.getContainerLaunchParameters();
    assertEquals(1, clpI.size());
    ContainerLaunchParameters clpTask = clpI.get(0);
    Resource MAX = Records.newRecord(Resource.class);
    Resource r;
    MAX.setMemory(Integer.MAX_VALUE);
    MAX.setVirtualCores(100);
    r = clpTask.getContainerResource(MAX);
    assertEquals(mem, r.getMemory());
    assertEquals(1, r.getVirtualCores());

    MAX.setMemory(8192);
    MAX.setVirtualCores(100);
    r = clpTask.getContainerResource(MAX);
    assertEquals(8192, r.getMemory());
    assertEquals(1, r.getVirtualCores());

}
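
The two getContainerResource calls demonstrate the clamping behaviour: with an effectively unbounded MAX the 32000 MB request is granted as-is, while lowering MAX to 8192 MB caps the same request at 8192 MB.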

From source file:tachyon.yarn.ApplicationMaster.java

License:Apache License

public void requestContainers() throws Exception {
    // Priority for Tachyon master and worker containers - priorities are intra-application
    Priority priority = Records.newRecord(Priority.class);
    priority.setPriority(0);

    // Resource requirements for master containers
    Resource masterResource = Records.newRecord(Resource.class);
    masterResource.setMemory(mMasterMemInMB);
    masterResource.setVirtualCores(mMasterCpu);

    String[] nodes = { mMasterAddress };

    // Make container request for Tachyon master to ResourceManager
    boolean relaxLocality = true;
    if (!mMasterAddress.equals("localhost")) {
        relaxLocality = false;
    }
    ContainerRequest masterContainerAsk = new ContainerRequest(masterResource, nodes, null /* any racks */,
            priority, relaxLocality);
    LOG.info("Making resource request for Tachyon master: cpu {} memory {} MB on node {}",
            masterResource.getVirtualCores(), masterResource.getMemory(), mMasterAddress);
    mRMClient.addContainerRequest(masterContainerAsk);

    // Wait until Tachyon master container has been allocated
    while (!mMasterContainerAllocated) {
        Thread.sleep(1000);
    }

    // Resource requirements for worker containers
    Resource workerResource = Records.newRecord(Resource.class);
    workerResource.setMemory(mWorkerMemInMB + mRamdiskMemInMB);
    workerResource.setVirtualCores(mWorkerCpu);

    // Make container requests for workers to ResourceManager
    for (int i = 0; i < mNumWorkers; i++) {
        ContainerRequest containerAsk = new ContainerRequest(workerResource, null /* any hosts */,
                null /* any racks */, priority);
        LOG.info("Making resource request for Tachyon worker {}: cpu {} memory {} MB on any nodes", i,
                workerResource.getVirtualCores(), workerResource.getMemory());
        mRMClient.addContainerRequest(containerAsk);
    }

    // Wait until all Tachyon worker containers have been allocated
    while (mNumAllocatedWorkerContainers < mNumWorkers) {
        Thread.sleep(1000);
    }

    LOG.info("Master and workers are launched");
    // Poll until the application is done, so the application is not unregistered
    // before all containers have fully launched.
    while (!mApplicationDone) {
        Thread.sleep(5000);
    }
}
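
Note the locality handling: when a concrete master address is configured, relaxLocality is false and the master container is pinned to that node, while the worker requests pass no hosts or racks and may be allocated anywhere in the cluster.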

From source file:uk.gov.gchq.gaffer.slider.util.AppConfigGenerator.java

License:Apache License

private AvailableResources getYarnResources() throws IOException, YarnException {
    final Configuration config = new Configuration();
    final YarnClient yarn = YarnClient.createYarnClient();
    yarn.init(config);
    yarn.start();

    // Query YARN to find out the largest container it is capable of scheduling
    final YarnClientApplication app = yarn.createApplication();
    final Resource resources = app.getNewApplicationResponse().getMaximumResourceCapability();

    // Also find out how many nodes there are in the cluster by asking for the number of registered Node Managers
    final YarnClusterMetrics metrics = yarn.getYarnClusterMetrics();

    yarn.close();

    return new AvailableResources(resources.getVirtualCores(), resources.getMemory(),
            metrics.getNumNodeManagers());
}

From source file:yarnkit.container.AbstractContainerLaunchParameters.java

License:Apache License

@Override
public Resource getContainerResource(@Nonnull Resource capability) {
    Resource resource = Records.newRecord(Resource.class);
    resource.setVirtualCores(Math.min(capability.getVirtualCores(), getVirtualCores()));
    resource.setMemory(Math.min(capability.getMemory(), getMemory()));
    return resource;
}
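
This clamp-to-capability pattern mirrors the adjustCapability logic in Hadoop21YarnAMClient above: both take the minimum of the requested cores and memory and the scheduler's maximum, so container requests never exceed what the cluster can allocate.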