Example usage for org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt RMAppAttempt getAppAttemptId

List of usage examples for org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt RMAppAttempt getAppAttemptId

Introduction

On this page you can find example usage for org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt RMAppAttempt getAppAttemptId.

Prototype

ApplicationAttemptId getAppAttemptId();

Document

Get the application attempt id for this RMAppAttempt.
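
Below is a minimal, self-contained sketch of the typical pattern, not taken from the listings that follow: the ApplicationAttemptId returned by getAppAttemptId() is used in its string form as a map key and also gives access to the parent ApplicationId. MockRM and submitApp(int) are test helpers from the hadoop-yarn-server-resourcemanager test sources whose signatures vary between Hadoop versions, so treat the snippet as an illustration rather than a drop-in test.

// A minimal sketch, assuming the MockRM test helpers used in the listings
// below are on the classpath; submitApp(int) has changed across Hadoop versions.
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;

public class GetAppAttemptIdSketch {

    public static void main(String[] args) throws Exception {
        MockRM rm = new MockRM(new YarnConfiguration());
        rm.start();

        // submit an application and grab its current attempt
        RMApp app = rm.submitApp(1024);
        RMAppAttempt attempt = app.getCurrentAppAttempt();

        // getAppAttemptId() returns the ApplicationAttemptId of this attempt
        ApplicationAttemptId attemptId = attempt.getAppAttemptId();

        // its string form is commonly used as a stable map key
        // (see the TransactionStateImpl examples below) ...
        Map<String, RMAppAttempt> attempts = new HashMap<String, RMAppAttempt>();
        attempts.put(attemptId.toString(), attempt);

        // ... and it also carries the id of the owning application
        ApplicationId appId = attemptId.getApplicationId();
        System.out.println("attempt " + attemptId + " belongs to " + appId);

        rm.stop();
    }
}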

Usage

From source file: io.hops.ha.common.TransactionStateImpl.java

License: Apache License

public void addAppAttempt(RMAppAttempt appAttempt) {
    this.appAttempts.put(appAttempt.getAppAttemptId().toString(), appAttempt);
}

From source file: io.hops.ha.common.TransactionStateImpl.java

License: Apache License

private void persistAppAttempt() throws IOException {
    if (!appAttempts.isEmpty()) {
        ApplicationAttemptStateDataAccess DA = (ApplicationAttemptStateDataAccess) RMStorageFactory
                .getDataAccess(ApplicationAttemptStateDataAccess.class);
        List<ApplicationAttemptState> toAdd = new ArrayList<ApplicationAttemptState>();
        for (String appAttemptIdStr : appAttempts.keySet()) {
            RMAppAttempt appAttempt = appAttempts.get(appAttemptIdStr);
            String appIdStr = appAttempt.getAppAttemptId().getApplicationId().toString();

            Credentials credentials = appAttempt.getCredentials();
            ByteBuffer appAttemptTokens = null;

            if (credentials != null) {
                DataOutputBuffer dob = new DataOutputBuffer();
                credentials.writeTokenStorageToStream(dob);
                appAttemptTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
            }
            ApplicationAttemptStateDataPBImpl attemptStateData = (ApplicationAttemptStateDataPBImpl) ApplicationAttemptStateDataPBImpl
                    .newApplicationAttemptStateData(appAttempt.getAppAttemptId(),
                            appAttempt.getMasterContainer(), appAttemptTokens, appAttempt.getStartTime(),
                            appAttempt.getState(), appAttempt.getOriginalTrackingUrl(),
                            appAttempt.getDiagnostics(), appAttempt.getFinalApplicationStatus(),
                            appAttempt.getRanNodes(), appAttempt.getJustFinishedContainers(),
                            appAttempt.getProgress(), appAttempt.getHost(), appAttempt.getRpcPort());

            byte[] attemptIdByteArray = attemptStateData.getProto().toByteArray();
            LOG.debug("adding appAttempt : " + appAttempt.getAppAttemptId() + " with state "
                    + appAttempt.getState());
            toAdd.add(new ApplicationAttemptState(appIdStr, appAttemptIdStr, attemptIdByteArray,
                    appAttempt.getHost(), appAttempt.getRpcPort(), appAttemptTokens,
                    appAttempt.getTrackingUrl()));
        }
        DA.addAll(toAdd);
    }
}

From source file: io.hops.metadata.util.TestFairSchedulerUtilities.java

License: Apache License

@Test
public void TestSimpleFairShareCalculation() throws Exception {
    MockRM rm = new MockRM(conf);
    rm.start();

    MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB);
    MockNM nm2 = rm.registerNode("127.0.0.2:5678", 6 * GB);

    //submit two applications, one of 3 GB and one of 1 GB
    RMApp app1 = rm.submitApp(3 * GB, "", "user1", null, "queue1");
    RMApp app2 = rm.submitApp(1 * GB, "", "user2", null, "queue2");

    nm1.nodeHeartbeat(true);

    RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
    RMAppAttempt attempt2 = app2.getCurrentAppAttempt();

    MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
    am1.registerAppAttempt();

    nm1.nodeHeartbeat(true);

    MockAM am2 = rm.sendAMLaunched(attempt2.getAppAttemptId());
    am2.registerAppAttempt();

    Thread.sleep(3000);

    //get the Scheduler
    FairScheduler fairScheduler = (FairScheduler) rm.getResourceScheduler();

    Collection<FSLeafQueue> queues = fairScheduler.getQueueManager().getLeafQueues();
    assertEquals(3, queues.size());

    rm.stop();

}

From source file: io.hops.metadata.util.TestHopYarnAPIUtilities.java

License: Apache License

@Test(timeout = 30000)
public void random() throws Exception {
    MockRM rm = new MockRM(conf);
    rm.start();
    //register one NodeManager
    MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB);

    //submit two applications of 3 GB each
    RMApp app1 = rm.submitApp(3 * GB, "", "user1", null, "queue1");
    RMApp app2 = rm.submitApp(3 * GB, "", "user2", null, "queue2");

    /*
     * This test passes as it is. If we increase the memory of one of the two
     * apps it will fail, because the entire cluster capacity is 6 GB: the app
     * attempt will never reach the ALLOCATED state and an exception will be
     * thrown.
     */
    nm1.nodeHeartbeat(true);

    RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
    RMAppAttempt attempt2 = app2.getCurrentAppAttempt();

    MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
    am1.registerAppAttempt();

    MockAM am2 = rm.sendAMLaunched(attempt2.getAppAttemptId());
    am2.registerAppAttempt();
    Thread.sleep(2000);
    rm.stop();
    Thread.sleep(2000);

}

From source file: io.hops.metadata.util.TestHopYarnAPIUtilities.java

License: Apache License

@Test(timeout = 60000)
public void test() throws Exception {
    MockRM rm = new MockRM(conf);
    rm.start();
    //register two NodeManagers
    MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB);
    MockNM nm2 = rm.registerNode("127.0.0.2:5678", 4 * GB);
    Thread.sleep(1000);
    //verify that they are persisted in the db
    List<FiCaSchedulerNode> dbNodes = RMUtilities.getAllFiCaSchedulerNodes();
    assertEquals(2, dbNodes.size());

    //submit an application of 2GB memory
    RMApp app1 = rm.submitApp(2048);
    Thread.sleep(2000);
    //at this point the tables ha_fifoscheduler_apps and ha_schedulerapplication
    //should contain one row for this app. You can verify that by looking at the db.

    //the same applies to the table ha_appschedulinginfo,
    RMAppAttempt attempt1 = app1.getCurrentAppAttempt();

    //get the scheduler
    FifoScheduler fifoScheduler = (FifoScheduler) rm.getResourceScheduler();

    //get applications map
    Map<ApplicationId, org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplication> apps = fifoScheduler
            .getSchedulerApplications();
    //get nodes map
    Map<NodeId, org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode> nodes = fifoScheduler
            .getNodes();

    //get current application
    org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplication sA = apps
            .get(app1.getApplicationId());
    //get the current application attempt
    SchedulerApplicationAttempt saAttempt = sA.getCurrentAppAttempt();

    List<org.apache.hadoop.yarn.api.records.ResourceRequest> reqs = saAttempt.getAppSchedulingInfo()
            .getAllResourceRequests();
    //retrieve requests from the database
    Map<String, List<ResourceRequest>> requests = RMUtilities.getAllResourceRequests();
    List<ResourceRequest> dbReqs = requests.get(attempt1.getAppAttemptId().toString());

    //compare
    assertEquals(reqs.size(), dbReqs.size());

    //it's time to kick the scheduling: 2 GB given to AM1, 4 GB remaining on nm1
    nm1.nodeHeartbeat(true);
    //Thread.sleep(1000);

    MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
    am1.registerAppAttempt();

    SchedulerNodeReport report_nm1 = fifoScheduler.getNodeReport(nm1.getNodeId());
    Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());

    //retrieve used Resource from database
    Resource resource = RMUtilities.getResource(nm1.getNodeId().toString(), Resource.USED,
            Resource.FICASCHEDULERNODE);
    assertEquals(2 * GB, resource.getMemory());

    //get newlyAllocatedContainers
    List<RMContainer> newlyAllocatedContainers = saAttempt.getNewlyAllocatedContainers();
    //get launchedContainers
    org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode ficaNode = nodes
            .get(nm1.getNodeId());
    List<RMContainer> launchedContainers = ficaNode.getRunningContainers();

    //get liveContainers
    Map<ContainerId, RMContainer> liveContainers = saAttempt.getLiveContainersMap();

    //retrieve newlyAllocatedContainers from the database
    List<FiCaSchedulerAppNewlyAllocatedContainers> dbNewlyAlCont = RMUtilities
            .getNewlyAllocatedContainers(attempt1.getAppAttemptId().toString());
    //retrieve launchedContainers from the database
    Map<String, List<LaunchedContainers>> map = RMUtilities.getAllLaunchedContainers();
    List<LaunchedContainers> dbLaunchCont = map.get(nm1.getNodeId().toString());
    //retrieve liveContainers from the database
    Map<String, List<FiCaSchedulerAppLiveContainers>> mapLiveCont = RMUtilities.getAllLiveContainers();
    List<FiCaSchedulerAppLiveContainers> dbLiveCont = mapLiveCont.get(attempt1.getAppAttemptId().toString());

    assertEquals(newlyAllocatedContainers.size(), dbNewlyAlCont.size());
    assertEquals(launchedContainers.size(), dbLaunchCont.size());
    assertEquals(liveContainers.size(), dbLiveCont.size());

    //submit a second application of 2GB memory
    RMApp app2 = rm.submitApp(2048);
    // kick the scheduling, 2GB given to AM, remaining 2 GB on nm2
    nm2.nodeHeartbeat(true);
    RMAppAttempt attempt2 = app2.getCurrentAppAttempt();
    MockAM am2 = rm.sendAMLaunched(attempt2.getAppAttemptId());
    am2.registerAppAttempt();
    SchedulerNodeReport report_nm2 = fifoScheduler.getNodeReport(nm2.getNodeId());
    Assert.assertEquals(2 * GB, report_nm2.getUsedResource().getMemory());

    //retrieve used Resource from database
    Resource hopResource2 = RMUtilities.getResource(nm2.getNodeId().toString(), Resource.USED,
            Resource.FICASCHEDULERNODE);
    assertEquals(2 * GB, hopResource2.getMemory());

    // add request for containers
    am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, GB, 1, 1);
    AllocateResponse alloc1Response = am1.schedule(); // send the request

    // add request for containers
    am2.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 3 * GB, 0, 1);
    AllocateResponse alloc2Response = am2.schedule(); // send the request

    // kick the scheduler: 1 GB and 3 GB given to AM1 and AM2, 0 GB remaining on nm1
    nm1.nodeHeartbeat(true);
    //Thread.sleep(1000);
    while (alloc1Response.getAllocatedContainers().size() < 1) {
        LOG.info("Waiting for containers to be created for app 1...");
        Thread.sleep(1000);
        alloc1Response = am1.schedule();
    }
    while (alloc2Response.getAllocatedContainers().size() < 1) {
        LOG.info("Waiting for containers to be created for app 2...");
        Thread.sleep(1000);
        alloc2Response = am2.schedule();
    }
    // kick the scheduler, nothing given; 2 GB remaining on nm2
    nm2.nodeHeartbeat(true);
    Thread.sleep(1000);

    List<Container> allocated1 = alloc1Response.getAllocatedContainers();
    Assert.assertEquals(1, allocated1.size());
    Assert.assertEquals(1 * GB, allocated1.get(0).getResource().getMemory());
    Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId());

    List<Container> allocated2 = alloc2Response.getAllocatedContainers();
    Assert.assertEquals(1, allocated2.size());
    Assert.assertEquals(3 * GB, allocated2.get(0).getResource().getMemory());
    Assert.assertEquals(nm1.getNodeId(), allocated2.get(0).getNodeId());

    report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
    report_nm2 = rm.getResourceScheduler().getNodeReport(nm2.getNodeId());
    Assert.assertEquals(0, report_nm1.getAvailableResource().getMemory());
    Assert.assertEquals(2 * GB, report_nm2.getAvailableResource().getMemory());

    Resource nm1AvailableResource = RMUtilities.getResource(nm1.getNodeId().toString(), Resource.AVAILABLE,
            Resource.FICASCHEDULERNODE);
    Resource nm2AvailableResource = RMUtilities.getResource(nm2.getNodeId().toString(), Resource.AVAILABLE,
            Resource.FICASCHEDULERNODE);

    assertEquals(0, nm1AvailableResource.getMemory());
    assertEquals(2 * GB, nm2AvailableResource.getMemory());

    Assert.assertEquals(6 * GB, report_nm1.getUsedResource().getMemory());
    Assert.assertEquals(2 * GB, report_nm2.getUsedResource().getMemory());

    Resource nm1UsedResource = RMUtilities.getResource(nm1.getNodeId().toString(), Resource.USED,
            Resource.FICASCHEDULERNODE);
    Resource nm2UsedResource = RMUtilities.getResource(nm2.getNodeId().toString(), Resource.USED,
            Resource.FICASCHEDULERNODE);

    assertEquals(6 * GB, nm1UsedResource.getMemory());
    assertEquals(2 * GB, nm2UsedResource.getMemory());

    Thread.sleep(2000);
    rm.stop();
    Thread.sleep(2000);
}