Example usage for org.apache.hadoop.yarn.client.api YarnClient init

List of usage examples for org.apache.hadoop.yarn.client.api YarnClient init

Introduction

On this page you can find example usages of org.apache.hadoop.yarn.client.api YarnClient init.

Prototype

@Override
public void init(Configuration conf) 

Source Link

Document

This invokes #serviceInit.
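
The snippet below is a minimal, self-contained sketch of the typical lifecycle around init: the client is created via the createYarnClient factory, init(Configuration) is called once (which drives #serviceInit), start() connects the client to the ResourceManager, and stop() releases it. The class name and the getNodeReports call are illustrative only, not taken from the examples below; every usage example on this page follows the same init/start/stop pattern.

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class YarnClientInitSketch { // illustrative class name

    public static void main(String[] args) throws Exception {
        // Loads yarn-site.xml / core-site.xml from the classpath.
        Configuration conf = new YarnConfiguration();

        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(conf); // moves the service to INITED and invokes serviceInit(conf)
        yarnClient.start();    // moves the service to STARTED; RPC to the ResourceManager is now possible
        try {
            // Any YarnClient call works once the client is started; node reports are just an example.
            List<NodeReport> nodes = yarnClient.getNodeReports(NodeState.RUNNING);
            System.out.println("Running nodes: " + nodes.size());
        } finally {
            yarnClient.stop(); // always stop the client to release ResourceManager connections
        }
    }
}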

Usage

From source file:com.datatorrent.stram.security.StramUserLogin.java

License:Apache License

public static long refreshTokens(long tokenLifeTime, String destinationDir, String destinationFile,
        final Configuration conf, String hdfsKeyTabFile, final Credentials credentials,
        final InetSocketAddress rmAddress, final boolean renewRMToken) throws IOException {
    long expiryTime = System.currentTimeMillis() + tokenLifeTime;
    //renew tokens
    final String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
    if (tokenRenewer == null || tokenRenewer.length() == 0) {
        throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
    }
    FileSystem fs = FileSystem.newInstance(conf);
    File keyTabFile;
    try {
        keyTabFile = FSUtil.copyToLocalFileSystem(fs, destinationDir, destinationFile, hdfsKeyTabFile, conf);
    } finally {
        fs.close();
    }
    UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
            UserGroupInformation.getCurrentUser().getUserName(), keyTabFile.getAbsolutePath());
    try {
        ugi.doAs(new PrivilegedExceptionAction<Object>() {
            @Override
            public Object run() throws Exception {
                FileSystem fs1 = FileSystem.newInstance(conf);
                YarnClient yarnClient = null;
                if (renewRMToken) {
                    yarnClient = YarnClient.createYarnClient();
                    yarnClient.init(conf);
                    yarnClient.start();
                }
                Credentials creds = new Credentials();
                try {
                    fs1.addDelegationTokens(tokenRenewer, creds);
                    if (renewRMToken) {
                        org.apache.hadoop.yarn.api.records.Token rmDelToken = yarnClient
                                .getRMDelegationToken(new Text(tokenRenewer));
                        Token<RMDelegationTokenIdentifier> rmToken = ConverterUtils.convertFromYarn(rmDelToken,
                                rmAddress);
                        creds.addToken(rmToken.getService(), rmToken);
                    }
                } finally {
                    fs1.close();
                    if (renewRMToken) {
                        yarnClient.stop();
                    }
                }
                credentials.addAll(creds);
                return null;
            }
        });
        UserGroupInformation.getCurrentUser().addCredentials(credentials);
    } catch (InterruptedException e) {
        LOG.error("Error while renewing tokens ", e);
        expiryTime = System.currentTimeMillis();
    } catch (IOException e) {
        LOG.error("Error while renewing tokens ", e);
        expiryTime = System.currentTimeMillis();
    }
    LOG.debug("number of tokens: {}", credentials.getAllTokens().size());
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.debug("updated token: {}", token);
    }
    keyTabFile.delete();
    return expiryTime;
}

From source file:com.datatorrent.stram.StreamingAppMasterService.java

License:Apache License

/**
 * Main run function for the application master
 *
 * @throws YarnException
 */
@SuppressWarnings("SleepWhileInLoop")
private void execute() throws YarnException, IOException {
    LOG.info("Starting ApplicationMaster");
    final Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    LOG.info("number of tokens: {}", credentials.getAllTokens().size());
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.debug("token: {}", token);
    }
    final Configuration conf = getConfig();
    long tokenLifeTime = (long) (dag.getValue(LogicalPlan.TOKEN_REFRESH_ANTICIPATORY_FACTOR) * Math
            .min(dag.getValue(LogicalPlan.HDFS_TOKEN_LIFE_TIME), dag.getValue(LogicalPlan.RM_TOKEN_LIFE_TIME)));
    long expiryTime = System.currentTimeMillis() + tokenLifeTime;
    LOG.debug(" expiry token time {}", tokenLifeTime);
    String hdfsKeyTabFile = dag.getValue(LogicalPlan.KEY_TAB_FILE);

    // Register self with ResourceManager
    RegisterApplicationMasterResponse response = amRmClient.registerApplicationMaster(appMasterHostname, 0,
            appMasterTrackingUrl);

    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = response.getMaximumResourceCapability().getMemory();
    int maxVcores = response.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max mem {}m and vcores {} capabililty of resources in this cluster ", maxMem, maxVcores);

    // for locality relaxation fall back
    Map<StreamingContainerAgent.ContainerStartRequest, MutablePair<Integer, ContainerRequest>> requestedResources = Maps
            .newHashMap();

    // Setup heartbeat emitter
    // TODO poll RM every now and then with an empty request to let RM know that we are alive
    // The heartbeat interval after which an AM is timed out by the RM is defined by a config setting:
    // RM_AM_EXPIRY_INTERVAL_MS with default defined by DEFAULT_RM_AM_EXPIRY_INTERVAL_MS
    // The allocate calls to the RM count as heartbeat so, for now, this additional heartbeat emitter
    // is not required.

    int loopCounter = -1;
    List<ContainerId> releasedContainers = new ArrayList<ContainerId>();
    int numTotalContainers = 0;
    // keep track of already requested containers to not request them again while waiting for allocation
    int numRequestedContainers = 0;
    int numReleasedContainers = 0;
    int nextRequestPriority = 0;
    ResourceRequestHandler resourceRequestor = new ResourceRequestHandler();

    YarnClient clientRMService = YarnClient.createYarnClient();

    try {
        // YARN-435
        // we need getClusterNodes to populate the initial node list,
        // subsequent updates come through the heartbeat response
        clientRMService.init(conf);
        clientRMService.start();

        ApplicationReport ar = StramClientUtils.getStartedAppInstanceByName(clientRMService,
                dag.getAttributes().get(DAG.APPLICATION_NAME),
                UserGroupInformation.getLoginUser().getUserName(), dag.getAttributes().get(DAG.APPLICATION_ID));
        if (ar != null) {
            appDone = true;
            dnmgr.shutdownDiagnosticsMessage = String.format(
                    "Application master failed due to application %s with duplicate application name \"%s\" by the same user \"%s\" is already started.",
                    ar.getApplicationId().toString(), ar.getName(), ar.getUser());
            LOG.info("Forced shutdown due to {}", dnmgr.shutdownDiagnosticsMessage);
            finishApplication(FinalApplicationStatus.FAILED, numTotalContainers);
            return;
        }
        resourceRequestor.updateNodeReports(clientRMService.getNodeReports());
    } catch (Exception e) {
        throw new RuntimeException("Failed to retrieve cluster nodes report.", e);
    } finally {
        clientRMService.stop();
    }

    // check for previously allocated containers
    // as of 2.2, containers won't survive AM restart, but this will change in the future - YARN-1490
    checkContainerStatus();
    FinalApplicationStatus finalStatus = FinalApplicationStatus.SUCCEEDED;
    final InetSocketAddress rmAddress = conf.getSocketAddr(YarnConfiguration.RM_ADDRESS,
            YarnConfiguration.DEFAULT_RM_ADDRESS, YarnConfiguration.DEFAULT_RM_PORT);

    while (!appDone) {
        loopCounter++;

        if (UserGroupInformation.isSecurityEnabled() && System.currentTimeMillis() >= expiryTime
                && hdfsKeyTabFile != null) {
            String applicationId = appAttemptID.getApplicationId().toString();
            expiryTime = StramUserLogin.refreshTokens(tokenLifeTime, "." + File.separator + "tmp",
                    applicationId, conf, hdfsKeyTabFile, credentials, rmAddress, true);
        }

        Runnable r;
        while ((r = this.pendingTasks.poll()) != null) {
            r.run();
        }

        // log current state
        /*
         * LOG.info("Current application state: loop=" + loopCounter + ", appDone=" + appDone + ", total=" +
         * numTotalContainers + ", requested=" + numRequestedContainers + ", completed=" + numCompletedContainers +
         * ", failed=" + numFailedContainers + ", currentAllocated=" + this.allAllocatedContainers.size());
         */
        // Sleep before each loop when asking RM for containers
        // to avoid flooding RM with spurious requests when it
        // need not have any available containers
        try {
            sleep(1000);
        } catch (InterruptedException e) {
            LOG.info("Sleep interrupted " + e.getMessage());
        }

        // Setup request to be sent to RM to allocate containers
        List<ContainerRequest> containerRequests = new ArrayList<ContainerRequest>();
        List<ContainerRequest> removedContainerRequests = new ArrayList<ContainerRequest>();

        // request containers for pending deploy requests
        if (!dnmgr.containerStartRequests.isEmpty()) {
            StreamingContainerAgent.ContainerStartRequest csr;
            while ((csr = dnmgr.containerStartRequests.poll()) != null) {
                if (csr.container.getRequiredMemoryMB() > maxMem) {
                    LOG.warn("Container memory {}m above max threshold of cluster. Using max value {}m.",
                            csr.container.getRequiredMemoryMB(), maxMem);
                    csr.container.setRequiredMemoryMB(maxMem);
                }
                if (csr.container.getRequiredVCores() > maxVcores) {
                    LOG.warn("Container vcores {} above max threshold of cluster. Using max value {}.",
                            csr.container.getRequiredVCores(), maxVcores);
                    csr.container.setRequiredVCores(maxVcores);
                }
                csr.container.setResourceRequestPriority(nextRequestPriority++);
                ContainerRequest cr = resourceRequestor.createContainerRequest(csr, true);
                MutablePair<Integer, ContainerRequest> pair = new MutablePair<Integer, ContainerRequest>(
                        loopCounter, cr);
                requestedResources.put(csr, pair);
                containerRequests.add(cr);
            }
        }

        if (!requestedResources.isEmpty()) {
            //resourceRequestor.clearNodeMapping();
            for (Map.Entry<StreamingContainerAgent.ContainerStartRequest, MutablePair<Integer, ContainerRequest>> entry : requestedResources
                    .entrySet()) {
                if ((loopCounter - entry.getValue().getKey()) > NUMBER_MISSED_HEARTBEATS) {
                    StreamingContainerAgent.ContainerStartRequest csr = entry.getKey();
                    removedContainerRequests.add(entry.getValue().getRight());
                    ContainerRequest cr = resourceRequestor.createContainerRequest(csr, false);
                    entry.getValue().setLeft(loopCounter);
                    entry.getValue().setRight(cr);
                    containerRequests.add(cr);
                }
            }
        }

        numTotalContainers += containerRequests.size();
        numRequestedContainers += containerRequests.size();
        AllocateResponse amResp = sendContainerAskToRM(containerRequests, removedContainerRequests,
                releasedContainers);
        if (amResp.getAMCommand() != null) {
            LOG.info(" statement executed:{}", amResp.getAMCommand());
            switch (amResp.getAMCommand()) {
            case AM_RESYNC:
            case AM_SHUTDOWN:
                throw new YarnRuntimeException("Received the " + amResp.getAMCommand() + " command from RM");
            default:
                throw new YarnRuntimeException("Received the " + amResp.getAMCommand() + " command from RM");

            }
        }
        releasedContainers.clear();

        // Retrieve list of allocated containers from the response
        List<Container> newAllocatedContainers = amResp.getAllocatedContainers();
        // LOG.info("Got response from RM for container ask, allocatedCnt=" + newAllocatedContainers.size());
        numRequestedContainers -= newAllocatedContainers.size();
        long timestamp = System.currentTimeMillis();
        for (Container allocatedContainer : newAllocatedContainers) {

            LOG.info("Got new container." + ", containerId=" + allocatedContainer.getId() + ", containerNode="
                    + allocatedContainer.getNodeId() + ", containerNodeURI="
                    + allocatedContainer.getNodeHttpAddress() + ", containerResourceMemory"
                    + allocatedContainer.getResource().getMemory() + ", priority"
                    + allocatedContainer.getPriority());
            // + ", containerToken" + allocatedContainer.getContainerToken().getIdentifier().toString());

            boolean alreadyAllocated = true;
            StreamingContainerAgent.ContainerStartRequest csr = null;
            for (Map.Entry<StreamingContainerAgent.ContainerStartRequest, MutablePair<Integer, ContainerRequest>> entry : requestedResources
                    .entrySet()) {
                if (entry.getKey().container.getResourceRequestPriority() == allocatedContainer.getPriority()
                        .getPriority()) {
                    alreadyAllocated = false;
                    csr = entry.getKey();
                    break;
                }
            }

            if (alreadyAllocated) {
                LOG.info("Releasing {} as resource with priority {} was already assigned",
                        allocatedContainer.getId(), allocatedContainer.getPriority());
                releasedContainers.add(allocatedContainer.getId());
                numReleasedContainers++;
                numRequestedContainers++;
                continue;
            }
            if (csr != null) {
                requestedResources.remove(csr);
            }

            // allocate resource to container
            ContainerResource resource = new ContainerResource(allocatedContainer.getPriority().getPriority(),
                    allocatedContainer.getId().toString(), allocatedContainer.getNodeId().toString(),
                    allocatedContainer.getResource().getMemory(),
                    allocatedContainer.getResource().getVirtualCores(),
                    allocatedContainer.getNodeHttpAddress());
            StreamingContainerAgent sca = dnmgr.assignContainer(resource, null);

            if (sca == null) {
                // allocated container no longer needed, add release request
                LOG.warn("Container {} allocated but nothing to deploy, going to release this container.",
                        allocatedContainer.getId());
                releasedContainers.add(allocatedContainer.getId());
            } else {
                AllocatedContainer allocatedContainerHolder = new AllocatedContainer(allocatedContainer);
                this.allocatedContainers.put(allocatedContainer.getId().toString(), allocatedContainerHolder);
                ByteBuffer tokens = null;
                if (UserGroupInformation.isSecurityEnabled()) {
                    UserGroupInformation ugi = UserGroupInformation.getLoginUser();
                    Token<StramDelegationTokenIdentifier> delegationToken = allocateDelegationToken(
                            ugi.getUserName(), heartbeatListener.getAddress());
                    allocatedContainerHolder.delegationToken = delegationToken;
                    //ByteBuffer tokens = LaunchContainerRunnable.getTokens(delegationTokenManager, heartbeatListener.getAddress());
                    tokens = LaunchContainerRunnable.getTokens(ugi, delegationToken);
                }
                LaunchContainerRunnable launchContainer = new LaunchContainerRunnable(allocatedContainer,
                        nmClient, sca, tokens);
                // Thread launchThread = new Thread(runnableLaunchContainer);
                // launchThreads.add(launchThread);
                // launchThread.start();
                launchContainer.run(); // communication with NMs is now async

                // record container start event
                StramEvent ev = new StramEvent.StartContainerEvent(allocatedContainer.getId().toString(),
                        allocatedContainer.getNodeId().toString());
                ev.setTimestamp(timestamp);
                dnmgr.recordEventAsync(ev);
            }
        }

        // track node updates for future locality constraint allocations
        // TODO: it seems 2.0.4-alpha doesn't give us any updates
        resourceRequestor.updateNodeReports(amResp.getUpdatedNodes());

        // Check the completed containers
        List<ContainerStatus> completedContainers = amResp.getCompletedContainersStatuses();
        // LOG.debug("Got response from RM for container ask, completedCnt=" + completedContainers.size());
        for (ContainerStatus containerStatus : completedContainers) {
            LOG.info("Completed containerId=" + containerStatus.getContainerId() + ", state="
                    + containerStatus.getState() + ", exitStatus=" + containerStatus.getExitStatus()
                    + ", diagnostics=" + containerStatus.getDiagnostics());

            // non complete containers should not be here
            assert (containerStatus.getState() == ContainerState.COMPLETE);

            AllocatedContainer allocatedContainer = allocatedContainers
                    .remove(containerStatus.getContainerId().toString());
            if (allocatedContainer != null && allocatedContainer.delegationToken != null) {
                UserGroupInformation ugi = UserGroupInformation.getLoginUser();
                delegationTokenManager.cancelToken(allocatedContainer.delegationToken, ugi.getUserName());
            }
            int exitStatus = containerStatus.getExitStatus();
            if (0 != exitStatus) {
                if (allocatedContainer != null) {
                    numFailedContainers.incrementAndGet();
                }
                //          if (exitStatus == 1) {
                //            // non-recoverable StreamingContainer failure
                //            appDone = true;
                //            finalStatus = FinalApplicationStatus.FAILED;
                //            dnmgr.shutdownDiagnosticsMessage = "Unrecoverable failure " + containerStatus.getContainerId();
                //            LOG.info("Exiting due to: {}", dnmgr.shutdownDiagnosticsMessage);
                //          }
                //          else {
                // Recoverable failure or process killed (externally or via stop request by AM)
                // also occurs when a container was released by the application but never assigned/launched
                LOG.debug("Container {} failed or killed.", containerStatus.getContainerId());
                dnmgr.scheduleContainerRestart(containerStatus.getContainerId().toString());
                //          }
            } else {
                // container completed successfully
                numCompletedContainers.incrementAndGet();
                LOG.info("Container completed successfully." + ", containerId="
                        + containerStatus.getContainerId());
            }

            String containerIdStr = containerStatus.getContainerId().toString();
            dnmgr.removeContainerAgent(containerIdStr);

            // record container stop event
            StramEvent ev = new StramEvent.StopContainerEvent(containerIdStr, containerStatus.getExitStatus());
            ev.setReason(containerStatus.getDiagnostics());
            dnmgr.recordEventAsync(ev);
        }

        if (dnmgr.forcedShutdown) {
            LOG.info("Forced shutdown due to {}", dnmgr.shutdownDiagnosticsMessage);
            finalStatus = FinalApplicationStatus.FAILED;
            appDone = true;
        } else if (allocatedContainers.isEmpty() && numRequestedContainers == 0
                && dnmgr.containerStartRequests.isEmpty()) {
            LOG.debug("Exiting as no more containers are allocated or requested");
            finalStatus = FinalApplicationStatus.SUCCEEDED;
            appDone = true;
        }

        LOG.debug("Current application state: loop=" + loopCounter + ", appDone=" + appDone + ", total="
                + numTotalContainers + ", requested=" + numRequestedContainers + ", released="
                + numReleasedContainers + ", completed=" + numCompletedContainers + ", failed="
                + numFailedContainers + ", currentAllocated=" + allocatedContainers.size());

        // monitor child containers
        dnmgr.monitorHeartbeat();
    }

    finishApplication(finalStatus, numTotalContainers);
}

From source file:com.flyhz.avengers.framework.application.InitEnvApplication.java

License:Apache License

private void initJar() {
    LOG.info("initJar");
    numTotalContainers = 1;
    containerMemory = 10;
    if (numTotalContainers == 0) {
        throw new IllegalArgumentException("Cannot run distributed shell with no containers");
    }
    requestPriority = 0;
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();
    List<NodeReport> clusterNodeReports;
    try {
        clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
        LOG.info("Got all node!");
        for (NodeReport node : clusterNodeReports) {
            nodeSet.add(node.getNodeId().getHost());
        }

        LOG.info("initJar Completed setting up app master command {}", initJarCmd);
        numTotalContainers = nodeSet.size();
        for (int i = 0; i < numTotalContainers; i++) {
            ContainerRequest containerAsk = setupContainerAskForRM();
            amRMClient.addContainerRequest(containerAsk);
        }

        numRequestedContainers.set(numTotalContainers);

    } catch (YarnException e) {
        LOG.error("initJarAndClasspath", e);
    } catch (IOException e) {
        LOG.error("initJarAndClasspath", e);
    } finally {
        try {
            yarnClient.close();
        } catch (IOException e) {
        }
    }
}

From source file:com.flyhz.avengers.framework.AvengersAppMaster.java

License:Apache License

private void initJar() {
    LOG.info("initJar");
    initCommon();
    this.currentProcess = "initJar";
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();
    List<NodeReport> clusterNodeReports;
    try {
        clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
        LOG.info("Got all node!");
        for (NodeReport node : clusterNodeReports) {
            LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress"
                    + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers"
                    + node.getNumContainers());
            nodeSet.add(node.getNodeId().getHost());
        }
        Vector<CharSequence> vargs = new Vector<CharSequence>(30);
        String appTempDir = conf.get("hadoop.tmp.dir");
        FileSystem fs = DistributedFileSystem.get(conf);
        String hdfsJar = fs.getHomeDirectory() + "/avengers/" + this.appAttemptID.getApplicationId().getId()
                + "/AvengersAppMaster.jar";
        vargs.add("if [ ! -e " + appTempDir + "/" + this.appAttemptID.getApplicationId().getId()
                + "/AvengersAppMaster.jar" + " ];");
        vargs.add("then");
        vargs.add("/bin/mkdir -p");
        vargs.add(appTempDir + "/" + this.appAttemptID.getApplicationId().getId() + ";");
        vargs.add(Environment.HADOOP_YARN_HOME.$() + "/bin/hadoop");
        vargs.add("fs");
        vargs.add("-copyToLocal");
        vargs.add(hdfsJar);
        vargs.add(appTempDir + "/" + this.appAttemptID.getApplicationId().getId() + ";");
        vargs.add("fi");
        // Add log redirect params
        vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/Avengers.stdout");
        vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/Avengers.stderr");

        // Get final command
        StringBuilder command = new StringBuilder();
        for (CharSequence str : vargs) {
            command.append(str).append(" ");
        }

        initJarCmd = command.toString();
        LOG.info("initJar Completed setting up app master command {}", initJarCmd);
        numTotalContainers = nodeSet.size();
        for (int i = 0; i < numTotalContainers; i++) {
            ContainerRequest containerAsk = setupContainerAskForRM();
            amRMClient.addContainerRequest(containerAsk);
        }

        numRequestedContainers.set(numTotalContainers);

        while (!done && (numCompletedContainers.get() != numTotalContainers)) {
            try {
                Thread.sleep(200);
            } catch (InterruptedException ex) {
            }
        }
    } catch (YarnException e) {
        LOG.error("initJarAndClasspath", e);
    } catch (IOException e) {
        LOG.error("initJarAndClasspath", e);
    } finally {
        try {
            yarnClient.close();
        } catch (IOException e) {
        }
    }
}

From source file:com.github.sakserv.minicluster.simpleyarnapp.Client.java

License:Apache License

public void run(String[] args) throws Exception {
    final String command = args[0];
    final int n = Integer.valueOf(args[1]);
    final Path jarPath = new Path(args[2]);
    final String resourceManagerAddress = args[3];
    final String resourceManagerHostname = args[4];
    final String resourceManagerSchedulerAddress = args[5];
    final String resourceManagerResourceTrackerAddress = args[6];

    // Create yarnClient
    YarnConfiguration conf = new YarnConfiguration();
    conf.set("yarn.resourcemanager.address", resourceManagerAddress);
    conf.set("yarn.resourcemanager.hostname", resourceManagerHostname);
    conf.set("yarn.resourcemanager.scheduler.address", resourceManagerSchedulerAddress);
    conf.set("yarn.resourcemanager.resource-tracker.address", resourceManagerResourceTrackerAddress);
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();

    // Create application via yarnClient
    YarnClientApplication app = yarnClient.createApplication();

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
    amContainer.setCommands(Collections.singletonList("$JAVA_HOME/bin/java" + " -Xmx256M"
            + " com.hortonworks.simpleyarnapp.ApplicationMaster" + " " + command + " " + String.valueOf(n) + " "
            + resourceManagerAddress + " " + resourceManagerHostname + " " + resourceManagerSchedulerAddress
            + " " + resourceManagerResourceTrackerAddress + " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR
            + "/stdout" + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"));

    // Setup jar for ApplicationMaster
    LocalResource appMasterJar = Records.newRecord(LocalResource.class);
    setupAppMasterJar(jarPath, appMasterJar);
    amContainer.setLocalResources(Collections.singletonMap("simple-yarn-app-1.1.0.jar", appMasterJar));

    // Setup CLASSPATH for ApplicationMaster
    Map<String, String> appMasterEnv = new HashMap<String, String>();
    setupAppMasterEnv(appMasterEnv);
    amContainer.setEnvironment(appMasterEnv);

    // Set up resource type requirements for ApplicationMaster
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(256);
    capability.setVirtualCores(1);

    // Finally, set-up ApplicationSubmissionContext for the application
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    appContext.setApplicationName("simple-yarn-app"); // application name
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    appContext.setQueue("default"); // queue

    // Submit application
    ApplicationId appId = appContext.getApplicationId();
    System.out.println("Submitting application " + appId);
    yarnClient.submitApplication(appContext);

    ApplicationReport appReport = yarnClient.getApplicationReport(appId);
    YarnApplicationState appState = appReport.getYarnApplicationState();
    while (appState != YarnApplicationState.FINISHED && appState != YarnApplicationState.KILLED
            && appState != YarnApplicationState.FAILED) {
        Thread.sleep(100);
        appReport = yarnClient.getApplicationReport(appId);
        appState = appReport.getYarnApplicationState();
    }

    System.out.println("Application " + appId + " finished with" + " state " + appState + " at "
            + appReport.getFinishTime());

}

From source file:com.google.mr4c.hadoop.yarn.YarnBinding.java

License:Open Source License

public void addClusterLimits(JobConf jobConf, ResourceConfig resConf) throws IOException {
    if (resConf.isEmpty()) {
        return;
    }
    Configuration conf = new Configuration(jobConf);
    YarnClient client = YarnClient.createYarnClient();
    client.init(conf);
    client.start();
    try {
        for (ResourceLimit limit : computeResourceLimits(client.getNodeReports())) {
            resConf.addLimit(limit);
        }
    } catch (YarnException ye) {
        throw new IOException(ye);
    } finally {
        client.stop();
    }
}

From source file:com.gpiskas.yarn.Client.java

License:Open Source License

private void run() throws Exception {
    conf = new YarnConfiguration();

    // Create Yarn Client
    YarnClient client = YarnClient.createYarnClient();
    client.init(conf);
    client.start();

    // Create Application
    YarnClientApplication app = client.createApplication();

    // Create AM Container
    ContainerLaunchContext amCLC = Records.newRecord(ContainerLaunchContext.class);
    amCLC.setCommands(Collections.singletonList("$JAVA_HOME/bin/java" + " -Xmx256M"
            + " com.gpiskas.yarn.AppMaster" + " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
            + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"));

    // Set AM jar
    LocalResource jar = Records.newRecord(LocalResource.class);
    Utils.setUpLocalResource(Utils.YARNAPP_JAR_PATH, jar, conf);
    amCLC.setLocalResources(Collections.singletonMap(Utils.YARNAPP_JAR_NAME, jar));

    // Set AM CLASSPATH
    Map<String, String> env = new HashMap<String, String>();
    Utils.setUpEnv(env, conf);
    amCLC.setEnvironment(env);

    // Set AM resources
    Resource res = Records.newRecord(Resource.class);
    res.setMemory(256);
    res.setVirtualCores(1);

    // Create ApplicationSubmissionContext
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    appContext.setApplicationName("YARNAPP");
    appContext.setQueue("default");
    appContext.setAMContainerSpec(amCLC);
    appContext.setResource(res);

    // Submit Application
    ApplicationId id = appContext.getApplicationId();
    System.out.println("Client: Submitting " + id);
    client.submitApplication(appContext);

    ApplicationReport appReport = client.getApplicationReport(id);
    YarnApplicationState appState = appReport.getYarnApplicationState();
    while (appState != YarnApplicationState.FINISHED && appState != YarnApplicationState.KILLED
            && appState != YarnApplicationState.FAILED) {
        Thread.sleep(1000);
        appReport = client.getApplicationReport(id);
        appState = appReport.getYarnApplicationState();
    }

    System.out.println("Client: Finished " + id + " with state " + appState);
}

From source file:com.hazelcast.yarn.HazelcastYarnClient.java

License:Open Source License

private YarnClient startYarnClient(YarnConfiguration conf) {
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();
    return yarnClient;
}

From source file:com.ibm.bi.dml.yarn.DMLYarnClient.java

License:Open Source License

/**
 * Method to launch the dml yarn app master and execute the given dml script
 * with the given configuration and jar file.
 *
 * NOTE: on launching the yarn app master, we do not explicitly probe whether we
 *     are running on a YARN or an MR1 cluster. In the case of MR1, the class
 *     YarnConfiguration will not be found and a ClassNotFoundException is raised. In case of any
 *     exception we fall back to running CP directly in the client process.
 * 
 * @return true if dml program successfully executed as yarn app master
 * @throws IOException 
 */
protected boolean launchDMLYarnAppmaster() throws IOException, DMLScriptException {
    boolean ret = false;
    String hdfsWD = null;

    try {
        Timing time = new Timing(true);

        // load yarn configuration
        YarnConfiguration yconf = new YarnConfiguration();

        // create yarn client
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(yconf);
        yarnClient.start();

        // create application and get the ApplicationID
        YarnClientApplication app = yarnClient.createApplication();
        ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
        ApplicationId appId = appContext.getApplicationId();
        LOG.debug("Created application (applicationID: " + appId + ")");

        // prepare hdfs working directory via ApplicationID
        // copy script, config, jar file to hdfs
        hdfsWD = DMLAppMasterUtils.constructHDFSWorkingDir(_dmlConfig, appId);
        copyResourcesToHdfsWorkingDir(yconf, hdfsWD);

        //construct command line argument
        String command = constructAMCommand(_args, _dmlConfig);
        LOG.debug("Constructed application master command: \n" + command);

        // set up the container launch context for the application master
        ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
        amContainer.setCommands(Collections.singletonList(command));
        amContainer.setLocalResources(constructLocalResourceMap(yconf));
        amContainer.setEnvironment(constructEnvionmentMap(yconf));

        // Set up resource type requirements for ApplicationMaster
        int memHeap = _dmlConfig.getIntValue(DMLConfig.YARN_APPMASTERMEM);
        int memAlloc = (int) computeMemoryAllocation(memHeap);
        Resource capability = Records.newRecord(Resource.class);
        capability.setMemory(memAlloc);
        capability.setVirtualCores(NUM_CORES);
        LOG.debug("Requested application resources: memory=" + memAlloc + ", vcores=" + NUM_CORES);

        // Finally, set-up ApplicationSubmissionContext for the application
        String qname = _dmlConfig.getTextValue(DMLConfig.YARN_APPQUEUE);
        appContext.setApplicationName(APPMASTER_NAME); // application name
        appContext.setAMContainerSpec(amContainer);
        appContext.setResource(capability);
        appContext.setQueue(qname); // queue
        LOG.debug("Configured application meta data: name=" + APPMASTER_NAME + ", queue=" + qname);

        // submit application (non-blocking)
        yarnClient.submitApplication(appContext);

        // Check application status periodically (and output web ui address)
        ApplicationReport appReport = yarnClient.getApplicationReport(appId);
        LOG.info("Application tracking-URL: " + appReport.getTrackingUrl());
        YarnApplicationState appState = appReport.getYarnApplicationState();
        YarnApplicationState oldState = appState;
        LOG.info("Application state: " + appState);
        while (appState != YarnApplicationState.FINISHED && appState != YarnApplicationState.KILLED
                && appState != YarnApplicationState.FAILED) {
            Thread.sleep(APP_STATE_INTERVAL); //wait for 200ms
            appReport = yarnClient.getApplicationReport(appId);
            appState = appReport.getYarnApplicationState();
            if (appState != oldState) {
                oldState = appState;
                LOG.info("Application state: " + appState);
            }
        }
        //check final status (failed or succeeded)
        FinalApplicationStatus finalState = appReport.getFinalApplicationStatus();
        LOG.info("Application final status: " + finalState);

        //show application and total runtime
        double appRuntime = (double) (appReport.getFinishTime() - appReport.getStartTime()) / 1000;
        LOG.info("Application runtime: " + appRuntime + " sec.");
        LOG.info("Total runtime: " + String.format("%.3f", time.stop() / 1000) + " sec.");

        //raise script-level error in case of failed final status
        if (finalState != FinalApplicationStatus.SUCCEEDED) {
            //propagate script-level stop call message
            String stop_msg = readMessageToHDFSWorkingDir(_dmlConfig, yconf, appId);
            if (stop_msg != null)
                throw new DMLScriptException(stop_msg);

            //generic failure message
            throw new DMLRuntimeException(
                    "DML yarn app master finished with final status: " + finalState + ".");
        }

        ret = true;
    } catch (DMLScriptException ex) {
        //rethrow DMLScriptException to propagate stop call
        throw ex;
    } catch (Exception ex) {
        LOG.error("Failed to run DML yarn app master.", ex);
        ret = false;
    } finally {
        //cleanup working directory
        if (hdfsWD != null)
            MapReduceTool.deleteFileIfExistOnHDFS(hdfsWD);
    }

    return ret;
}

From source file:com.ibm.bi.dml.yarn.ropt.YarnClusterAnalyzer.java

License:Open Source License

public static void analyzeYarnCluster(boolean verbose) {
    YarnConfiguration conf = new YarnConfiguration();
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();
    analyzeYarnCluster(yarnClient, conf, verbose);
}