Example usage for org.apache.hadoop.yarn.conf YarnConfiguration NM_PMEM_MB

List of usage examples for org.apache.hadoop.yarn.conf YarnConfiguration NM_PMEM_MB

Introduction

On this page you can find example usage for org.apache.hadoop.yarn.conf YarnConfiguration NM_PMEM_MB.

Prototype

String NM_PMEM_MB

To view the source code for org.apache.hadoop.yarn.conf YarnConfiguration NM_PMEM_MB, click the Source link.

Document

Amount of memory in MB that can be allocated for containers.
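Before the full examples below, here is a minimal sketch of the typical pattern: set the value on a YarnConfiguration and read it back, using DEFAULT_NM_PMEM_MB as the fallback when the key is unset. The class name NmPmemExample and the value of 4096 MB are illustrative, not taken from any of the sources below.

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class NmPmemExample {
    public static void main(String[] args) {
        YarnConfiguration conf = new YarnConfiguration();
        // Cap the memory this NodeManager may hand out to containers (illustrative: 4 GB).
        conf.setInt(YarnConfiguration.NM_PMEM_MB, 4096);
        // Read the value back; the shipped default applies when the key is unset.
        int memoryMb = conf.getInt(YarnConfiguration.NM_PMEM_MB, YarnConfiguration.DEFAULT_NM_PMEM_MB);
        System.out.println("NodeManager container memory: " + memoryMb + " MB");
    }
}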

Usage

From source file: com.cloudera.llama.am.yarn.TestLlamaAMWithYarn.java

License: Apache License

@Test
public void testResourceRejections() throws Exception {
    try {
        Configuration conf = createMiniYarnConfig(true);
        conf.setInt(YarnConfiguration.NM_VCORES, 1);
        conf.setInt(YarnConfiguration.NM_PMEM_MB, 4096);
        conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, 2);
        conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 5020);
        startYarn(conf, 1);
        Configuration llamaConf = getLlamaConfiguration();
        llamaConf.setBoolean(LlamaAM.NORMALIZING_ENABLED_KEY, false);
        LlamaAM llama = LlamaAM.create(llamaConf);
        try {
            llama.start();
            List<NodeInfo> nodes = llama.getNodes();

            //invalid node
            try {
                Resource r = TestUtils.createResource("xyz:-1", Resource.Locality.MUST, 1, 4096);
                llama.reserve(TestUtils.createReservation(UUID.randomUUID(), "u", "root.queue1",
                        Arrays.asList(r), true));
                Assert.fail();
            } catch (LlamaException ex) {
                //NOP
            }

            //over max cpus
            try {
                Resource r = TestUtils.createResource(nodes.get(0).getLocation(), Resource.Locality.MUST, 3,
                        4096);
                llama.reserve(TestUtils.createReservation(UUID.randomUUID(), "u", "root.queue1",
                        Arrays.asList(r), true));
                Assert.fail();
            } catch (LlamaException ex) {
                //NOP
            }

            //over max memory
            try {
                Resource r = TestUtils.createResource(nodes.get(0).getLocation(), Resource.Locality.MUST, 1,
                        4097);
                llama.reserve(TestUtils.createReservation(UUID.randomUUID(), "u", "root.queue1",
                        Arrays.asList(r), true));
                Assert.fail();
            } catch (LlamaException ex) {
                //NOP
            }

            //over node cpus
            try {
                Resource r = TestUtils.createResource(nodes.get(0).getLocation(), Resource.Locality.MUST, 2,
                        4096);
                llama.reserve(TestUtils.createReservation(UUID.randomUUID(), "u", "root.queue1",
                        Arrays.asList(r), true));
                Assert.fail();
            } catch (LlamaException ex) {
                //NOP
            }

            //over node memory
            try {
                Resource r = TestUtils.createResource(nodes.get(0).getLocation(), Resource.Locality.MUST, 1,
                        5021);
                llama.reserve(TestUtils.createReservation(UUID.randomUUID(), "u", "root.queue1",
                        Arrays.asList(r), true));
                Assert.fail();
            } catch (LlamaException ex) {
                //NOP
            }

        } finally {
            llama.stop();
        }
    } finally {
        stopYarn();
    }
}
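In the test above, NM_PMEM_MB (4096 MB) and NM_VCORES (1) bound what the single NodeManager can offer, while RM_SCHEDULER_MAXIMUM_ALLOCATION_MB (5020 MB) and RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES (2) bound what the scheduler will grant per request; each reservation deliberately exceeds one of these limits and is expected to fail with a LlamaException.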

From source file: com.cloudera.llama.nm.LlamaNMServer.java

License: Apache License

@Override
protected void startService() {
    int memoryMb = conf.getInt(YarnConfiguration.NM_PMEM_MB, YarnConfiguration.DEFAULT_NM_PMEM_MB);
    int virtualCores = conf.getInt(YarnConfiguration.NM_VCORES, YarnConfiguration.DEFAULT_NM_VCORES);
    totalCapacity = Resource.newInstance(memoryMb, virtualCores);

    try {
        clientNotificationService = new ClientNotificationService(getServerConf(), null, getMetricRegistry());
        clientNotificationService.start();
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
}
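Here NM_PMEM_MB is read back (with DEFAULT_NM_PMEM_MB as the fallback) alongside NM_VCORES to build the Resource object advertising the node's total capacity.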

From source file: de.huberlin.wbi.hiway.am.WorkflowDriver.java

License: Apache License

/**
 * Main run function for the application master. Does more initialization (sic!).
 * Calls the abstract {@link #parseWorkflow()}, then {@link #executeWorkflow()} and finally {@link #finish()}.
 * @return True if there were no errors
 */
protected boolean run() throws IOException {
    /* log */ Logger.writeToStdout("Starting ApplicationMaster");

    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();

    try (DataOutputBuffer dob = new DataOutputBuffer()) {

        credentials.writeTokenStorageToStream(dob);
        // remove the AM->RM token so that containers cannot access it.
        Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
        while (iter.hasNext()) {
            Token<?> token = iter.next();
            if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
                iter.remove();
            }
        }
        allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

        // Resource Manager communications setup
        RMCallbackHandler allocListener = new RMCallbackHandler(this);
        amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, allocListener);
        amRMClient.init(conf);
        amRMClient.start();

        // Node Managers communications setup
        containerListener = new NMCallbackHandler(this);
        nmClientAsync = new NMClientAsyncImpl(containerListener);
        nmClientAsync.init(conf);
        nmClientAsync.start();

        // get workflow file
        if (hdfs.exists(workflowPath)) {
            Path localPath = new Path(workflowPath.getName());
            hdfs.copyToLocalFile(false, workflowPath, localPath);
            workflowPath = localPath;
            workflowFile = new Data(workflowPath);
            workflowFile.stageOut();
        } else {
            // TODO this doesn't work; the path is triggered when running the application e.g., as hiway workflows/test.dax
            // but stageIn then fails, because in the HDFS, there is only test.dax and not workflows/test.dax
            workflowFile = new Data(workflowPath);
            workflowFile.stageIn();
        }

        // Register self with ResourceManager. This will start heartbeating to the RM.
        /* the hostname of the container running the Hi-WAY ApplicationMaster */
        String appMasterHostname = NetUtils.getHostname();
        /* the port on which the ApplicationMaster listens for status updates from clients */
        int appMasterRpcPort = -1;
        /* the tracking URL to which the ApplicationMaster publishes info for clients to monitor */
        String appMasterTrackingUrl = "";
        RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(appMasterHostname,
                appMasterRpcPort, appMasterTrackingUrl);

        // initialize scheduler
        switch (schedulerEnumValue) {
        case roundRobin:
        case heft:
            int workerMemory = conf.getInt(YarnConfiguration.NM_PMEM_MB, YarnConfiguration.DEFAULT_NM_PMEM_MB);
            scheduler = schedulerEnumValue.equals(HiWayConfiguration.HIWAY_SCHEDULERS.roundRobin)
                    ? new RoundRobin(getWorkflowName())
                    : new HEFT(getWorkflowName(), workerMemory / containerMemory);
            break;
        case greedy:
            scheduler = new GreedyQueue(getWorkflowName());
            break;
        case memoryAware:
            scheduler = new MemoryAware(getWorkflowName(), amRMClient);
            break;
        case perfectDaxGQ:
            scheduler = new PerfectDaxGreedyQueue(getWorkflowName());
            break;
        default:
            C3PO c3po = new C3PO(getWorkflowName());
            switch (schedulerEnumValue) {
            case dataAware:
                c3po.setConservatismWeight(0.01d);
                c3po.setnClones(0);
                c3po.setPlacementAwarenessWeight(12d);
                c3po.setOutlookWeight(0.01d);
                break;
            default:
                c3po.setConservatismWeight(3d);
                c3po.setnClones(2);
                c3po.setPlacementAwarenessWeight(1d);
                c3po.setOutlookWeight(2d);
            }
            scheduler = c3po;
        }
        scheduler.init(conf, hdfs, containerMemory, customMemoryMap, containerCores, requestPriority);
        scheduler.initializeProvenanceManager();

        /* log */ logger.writeEntryToLog(new JsonReportEntry(getRunId(), null, null, null, null, null,
                HiwayDBI.KEY_WF_NAME, getWorkflowName()));
        logger.federatedReport = new Data(appId + ".log");

        // parse workflow, obtain ready tasks
        Collection<TaskInstance> readyTasks = parseWorkflow();

        // scheduler updates runtime estimates for all tasks comprising the workflow
        scheduler.updateRuntimeEstimates(getRunId().toString());

        scheduler.addTasks(readyTasks);

        // Dump out information about cluster capability as seen by the resource manager
        maxMem = response.getMaximumResourceCapability().getMemory();
        maxCores = response.getMaximumResourceCapability().getVirtualCores();
        /* log */ Logger.writeToStdout("Max mem capabililty of resources in this cluster " + maxMem);

        // A resource ask cannot exceed the max.
        if (containerMemory > maxMem) {
            /* log */ Logger.writeToStdout("Container memory specified above max threshold of cluster."
                    + " Using max value." + ", specified=" + containerMemory + ", max=" + maxMem);
            containerMemory = maxMem;
        }
        if (containerCores > maxCores) {
            /* log */ Logger.writeToStdout("Container vcores specified above max threshold of cluster."
                    + " Using max value." + ", specified=" + containerCores + ", max=" + maxCores);
            containerCores = maxCores;
        }

        // this is the actual work loop:
        // ask for resources until the workflow is done.
        executeWorkflow();

        finish();

    } catch (Exception e) {
        e.printStackTrace(System.out);
        System.exit(-1);
    }
    return success;
}
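In this application master, NM_PMEM_MB feeds only the HEFT branch: workerMemory / containerMemory estimates how many containers of the configured size fit on one worker node, and that ratio is passed to the HEFT scheduler, presumably as its per-node slot count (our reading of the constructor argument, not documented in the snippet).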

From source file: org.apache.flink.yarn.YARNSessionFIFOITCase.java

License: Apache License

@BeforeClass
public static void setup() {
    yarnConfiguration.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    yarnConfiguration.setInt(YarnConfiguration.NM_PMEM_MB, 768);
    yarnConfiguration.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512);
    yarnConfiguration.set(YarnTestBase.TEST_CLUSTER_NAME_KEY, "flink-yarn-tests-fifo");
    startYARNWithConfig(yarnConfiguration);
}
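Capping NM_PMEM_MB at 768 MB keeps the test's YARN cluster deliberately small: with a 512 MB scheduler minimum allocation, only one minimum-size container fits per NodeManager.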

From source file: org.apache.flink.yarn.YARNSessionFIFOSecuredITCase.java

License: Apache License

@BeforeClass
public static void setup() {

    LOG.info("starting secure cluster environment for testing");

    yarnConfiguration.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    yarnConfiguration.setInt(YarnConfiguration.NM_PMEM_MB, 768);
    yarnConfiguration.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512);
    yarnConfiguration.set(YarnTestBase.TEST_CLUSTER_NAME_KEY, "flink-yarn-tests-fifo-secured");

    SecureTestEnvironment.prepare(tmp);

    populateYarnSecureConfigurations(yarnConfiguration, SecureTestEnvironment.getHadoopServicePrincipal(),
            SecureTestEnvironment.getTestKeytab());

    Configuration flinkConfig = new Configuration();
    flinkConfig.setString(SecurityOptions.KERBEROS_LOGIN_KEYTAB, SecureTestEnvironment.getTestKeytab());
    flinkConfig.setString(SecurityOptions.KERBEROS_LOGIN_PRINCIPAL,
            SecureTestEnvironment.getHadoopServicePrincipal());

    SecurityUtils.SecurityConfiguration ctx = new SecurityUtils.SecurityConfiguration(flinkConfig,
            yarnConfiguration);
    try {
        TestingSecurityContext.install(ctx, SecureTestEnvironment.getClientSecurityConfigurationMap());

        SecurityUtils.getInstalledContext().runSecured(new Callable<Object>() {
            @Override
            public Integer call() {
                startYARNSecureMode(yarnConfiguration, SecureTestEnvironment.getHadoopServicePrincipal(),
                        SecureTestEnvironment.getTestKeytab());
                return null;
            }
        });

    } catch (Exception e) {
        throw new RuntimeException("Exception occurred while setting up secure test context.", e);
    }

}
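This secured variant uses the same NM_PMEM_MB and minimum-allocation settings as the FIFO test above, but starts the cluster inside a Kerberized security context built from the test keytab and principal.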