Example usage for org.apache.hadoop.yarn.api.records.ApplicationId.getClusterTimestamp()

List of usage examples for org.apache.hadoop.yarn.api.records.ApplicationId.getClusterTimestamp()

Introduction

On this page you can find example usages of org.apache.hadoop.yarn.api.records.ApplicationId.getClusterTimestamp().

Prototype

@Public
@Stable
public abstract long getClusterTimestamp();

Document

Get the start time of the ResourceManager, which is used to generate a globally unique ApplicationId.
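
Before the usage examples, here is a minimal, self-contained sketch of what the method returns. It assumes the ApplicationId.newInstance(long, int) factory from the same records package; the timestamp and sequence values are illustrative only and are not taken from any example below.

import org.apache.hadoop.yarn.api.records.ApplicationId;

public class ClusterTimestampExample {
    public static void main(String[] args) {
        // Build an ApplicationId from a cluster timestamp and a sequence number
        // (illustrative values only).
        ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);

        // getClusterTimestamp() returns the ResourceManager start time encoded in the id,
        // i.e. the middle component of the "application_<timestamp>_<sequence>" string.
        long clusterTimestamp = appId.getClusterTimestamp();
        System.out.println("cluster timestamp: " + clusterTimestamp);
        System.out.println("application id:    " + appId);
    }
}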

Usage

From source file: com.continuuity.weave.internal.appmaster.ApplicationMasterProcessLauncher.java

License: Apache License

@Override
@SuppressWarnings("unchecked")
protected <R> ProcessController<R> doLaunch(YarnLaunchContext launchContext) {
    final ApplicationId appId = getContainerInfo();

    // Set the resource requirement for AM
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(Constants.APP_MASTER_MEMORY_MB);
    YarnUtils.setVirtualCores(capability, 1);

    // Put in extra environments
    Map<String, String> env = ImmutableMap.<String, String>builder().putAll(launchContext.getEnvironment())
            .put(EnvKeys.YARN_APP_ID, Integer.toString(appId.getId()))
            .put(EnvKeys.YARN_APP_ID_CLUSTER_TIME, Long.toString(appId.getClusterTimestamp()))
            .put(EnvKeys.YARN_APP_ID_STR, appId.toString())
            .put(EnvKeys.YARN_CONTAINER_MEMORY_MB, Integer.toString(Constants.APP_MASTER_MEMORY_MB))
            .put(EnvKeys.YARN_CONTAINER_VIRTUAL_CORES, Integer.toString(YarnUtils.getVirtualCores(capability)))
            .build();

    launchContext.setEnvironment(env);
    return (ProcessController<R>) submitter.submit(launchContext, capability);
}

From source file: com.continuuity.weave.yarn.YarnWeavePreparer.java

License: Open Source License

@Override
public WeaveController start() {
    // TODO: Unify this with {@link ProcessLauncher}
    try {
        GetNewApplicationResponse response = yarnClient.getNewApplication();
        ApplicationId applicationId = response.getApplicationId();

        ApplicationSubmissionContext appSubmissionContext = Records
                .newRecord(ApplicationSubmissionContext.class);
        appSubmissionContext.setApplicationId(applicationId);
        appSubmissionContext.setApplicationName(weaveSpec.getName());

        Map<String, LocalResource> localResources = Maps.newHashMap();

        Multimap<String, LocalFile> transformedLocalFiles = HashMultimap.create();

        createAppMasterJar(createBundler(), localResources);
        createContainerJar(createBundler(), localResources);
        populateRunnableResources(weaveSpec, transformedLocalFiles);
        saveWeaveSpec(weaveSpec, transformedLocalFiles, localResources);
        saveLogback(localResources);
        saveLauncher(localResources);
        saveKafka(localResources);
        saveArguments(arguments, runnableArgs, localResources);
        saveLocalFiles(localResources, ImmutableSet.of("weaveSpec.json", "logback-template.xml",
                "container.jar", "launcher.jar", "arguments.json"));

        ContainerLaunchContext containerLaunchContext = Records.newRecord(ContainerLaunchContext.class);
        containerLaunchContext.setLocalResources(localResources);

        // java -cp launcher.jar:$HADOOP_CONF_DIR -XmxMemory
        //     com.continuuity.weave.internal.WeaveLauncher
        //     appMaster.jar
        //     com.continuuity.weave.internal.appmaster.ApplicationMasterMain
        //     false
        containerLaunchContext.setCommands(ImmutableList.of("java", "-cp", "launcher.jar:$HADOOP_CONF_DIR",
                "-Xmx" + APP_MASTER_MEMORY_MB + "m", WeaveLauncher.class.getName(), "appMaster.jar",
                ApplicationMasterMain.class.getName(), Boolean.FALSE.toString(),
                " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout",
                " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"));

        containerLaunchContext.setEnvironment(ImmutableMap.<String, String>builder()
                .put(EnvKeys.WEAVE_APP_ID, Integer.toString(applicationId.getId()))
                .put(EnvKeys.WEAVE_APP_ID_CLUSTER_TIME, Long.toString(applicationId.getClusterTimestamp()))
                .put(EnvKeys.WEAVE_APP_DIR, getAppLocation().toURI().toASCIIString())
                .put(EnvKeys.WEAVE_ZK_CONNECT, zkClient.getConnectString())
                .put(EnvKeys.WEAVE_RUN_ID, runId.getId()).build());
        Resource capability = Records.newRecord(Resource.class);
        capability.setMemory(APP_MASTER_MEMORY_MB);
        containerLaunchContext.setResource(capability);

        appSubmissionContext.setAMContainerSpec(containerLaunchContext);

        yarnClient.submitApplication(appSubmissionContext);

        return createController(applicationId, runId, logHandlers);
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}

From source file: com.mellanox.hadoop.mapred.UdaShuffleHandler.java

License: Apache License

@Override
public void initializeApplication(ApplicationInitializationContext context) {
    LOG.info("starting initializeApplication of UdaShuffleHandler");

    String user = context.getUser();
    ApplicationId appId = context.getApplicationId();

    JobID jobId = new JobID(Long.toString(appId.getClusterTimestamp()), appId.getId());
    //     rdmaChannel = new UdaPluginSH(conf, user, jobId);     
    rdmaChannel.addJob(user, jobId);
    LOG.info("finished initializeApplication of UdaShuffleHandler");
}

From source file: com.mellanox.hadoop.mapred.UdaShuffleHandler.java

License: Apache License

@Override
public void stopApplication(ApplicationTerminationContext context) {
    ApplicationId appId = context.getApplicationId();
    LOG.info("stopApplication of UdaShuffleHandler");
    JobID jobId = new JobID(Long.toString(appId.getClusterTimestamp()), appId.getId());
    rdmaChannel.removeJob(jobId);
    LOG.info("stopApplication of UdaShuffleHandler is done");

}

From source file: io.amient.yarn1.YarnClient.java

License: Open Source License

/**
 * This method should be called by the implementing application static main
 * method. It does all the work around creating a yarn application and
 * submitting the request to the yarn resource manager. The class given in
 * the appClass argument will be run inside the yarn-allocated master
 * container.
 */
public static void submitApplicationMaster(Properties appConfig, Class<? extends YarnMaster> masterClass,
        String[] args, Boolean awaitCompletion) throws Exception {
    log.info("Yarn1 App Configuration:");
    for (Object param : appConfig.keySet()) {
        log.info(param.toString() + " = " + appConfig.get(param).toString());
    }
    String yarnConfigPath = appConfig.getProperty("yarn1.site", "/etc/hadoop");
    String masterClassName = masterClass.getName();
    appConfig.setProperty("yarn1.master.class", masterClassName);
    String applicationName = appConfig.getProperty("yarn1.application.name", masterClassName);
    log.info("--------------------------------------------------------------");

    if (Boolean.valueOf(appConfig.getProperty("yarn1.local.mode", "false"))) {
        YarnMaster.run(appConfig, args);
        return;
    }

    int masterPriority = Integer.valueOf(
            appConfig.getProperty("yarn1.master.priority", String.valueOf(YarnMaster.DEFAULT_MASTER_PRIORITY)));
    int masterMemoryMb = Integer.valueOf(appConfig.getProperty("yarn1.master.memory.mb",
            String.valueOf(YarnMaster.DEFAULT_MASTER_MEMORY_MB)));
    int masterNumCores = Integer.valueOf(
            appConfig.getProperty("yarn1.master.num.cores", String.valueOf(YarnMaster.DEFAULT_MASTER_CORES)));
    String queue = appConfig.getProperty("yarn1.queue");

    Configuration yarnConfig = new YarnConfiguration();
    yarnConfig.addResource(new FileInputStream(yarnConfigPath + "/core-site.xml"));
    yarnConfig.addResource(new FileInputStream(yarnConfigPath + "/hdfs-site.xml"));
    yarnConfig.addResource(new FileInputStream(yarnConfigPath + "/yarn-site.xml"));
    for (Map.Entry<Object, Object> entry : appConfig.entrySet()) {
        yarnConfig.set(entry.getKey().toString(), entry.getValue().toString());
    }

    final org.apache.hadoop.yarn.client.api.YarnClient yarnClient = org.apache.hadoop.yarn.client.api.YarnClient
            .createYarnClient();
    yarnClient.init(yarnConfig);
    yarnClient.start();

    for (NodeReport report : yarnClient.getNodeReports(NodeState.RUNNING)) {
        log.debug("Node report:" + report.getNodeId() + " @ " + report.getHttpAddress() + " | "
                + report.getCapability());
    }

    log.info("Submitting application master class " + masterClassName);

    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    final ApplicationId appId = appResponse.getApplicationId();
    if (appId == null) {
        System.exit(111);
    } else {
        appConfig.setProperty("am.timestamp", String.valueOf(appId.getClusterTimestamp()));
        appConfig.setProperty("am.id", String.valueOf(appId.getId()));
    }

    YarnClient.distributeResources(yarnConfig, appConfig, applicationName);

    String masterJvmArgs = appConfig.getProperty("yarn1.master.jvm.args", "");
    YarnContainerContext masterContainer = new YarnContainerContext(yarnConfig, appConfig, masterJvmArgs,
            masterPriority, masterMemoryMb, masterNumCores, applicationName, YarnMaster.class, args);

    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    appContext.setApplicationName(masterClassName);
    appContext.setResource(masterContainer.capability);
    appContext.setPriority(masterContainer.priority);
    appContext.setQueue(queue);
    appContext.setApplicationType(appConfig.getProperty("yarn1.application.type", "YARN"));
    appContext.setAMContainerSpec(masterContainer.createContainerLaunchContext());

    log.info("Master container spec: " + masterContainer.capability);

    yarnClient.submitApplication(appContext);

    ApplicationReport report = yarnClient.getApplicationReport(appId);
    log.info("Tracking URL: " + report.getTrackingUrl());

    if (awaitCompletion) {
        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override
            public void run() {
                if (!yarnClient.isInState(Service.STATE.STOPPED)) {
                    log.info("Killing yarn application in shutdown hook");
                    try {
                        yarnClient.killApplication(appId);
                    } catch (Throwable e) {
                        log.error("Failed to kill yarn application - please check YARN Resource Manager", e);
                    }
                }
            }
        });

        float lastProgress = -0.0f;
        while (true) {
            try {
                Thread.sleep(10000);
                report = yarnClient.getApplicationReport(appId);
                if (lastProgress != report.getProgress()) {
                    lastProgress = report.getProgress();
                    log.info(report.getApplicationId() + " " + (report.getProgress() * 100.00) + "% "
                            + (System.currentTimeMillis() - report.getStartTime()) + "(ms) "
                            + report.getDiagnostics());
                }
                if (!report.getFinalApplicationStatus().equals(FinalApplicationStatus.UNDEFINED)) {
                    log.info(report.getApplicationId() + " " + report.getFinalApplicationStatus());
                    log.info("Tracking url: " + report.getTrackingUrl());
                    log.info("Finish time: " + ((System.currentTimeMillis() - report.getStartTime()) / 1000)
                            + "(s)");
                    break;
                }
            } catch (Throwable e) {
                log.error("Master Heart Beat Error - terminating", e);
                yarnClient.killApplication(appId);
                Thread.sleep(2000);
            }
        }
        yarnClient.stop();

        if (!report.getFinalApplicationStatus().equals(FinalApplicationStatus.SUCCEEDED)) {
            System.exit(112);
        }
    }
    yarnClient.stop();
}

From source file: org.apache.ambari.view.slider.SliderAppsViewControllerImpl.java

License: Apache License

private String getApplicationIdString(ApplicationId appId) {
    return Long.toString(appId.getClusterTimestamp()) + "_" + Integer.toString(appId.getId());
}

From source file: org.apache.hoya.yarn.client.HoyaYarnClientImpl.java

License: Apache License

/**
 * Kill a running application
 * @param applicationId
 * @return the response
 * @throws YarnException YARN problems
 * @throws IOException IO problems
 */
public KillApplicationResponse killRunningApplication(ApplicationId applicationId, String reason)
        throws YarnException, IOException {
    log.info("Killing application {} - {}", applicationId.getClusterTimestamp(), reason);
    KillApplicationRequest request = Records.newRecord(KillApplicationRequest.class);
    request.setApplicationId(applicationId);
    return getRmClient().forceKillApplication(request);
}

From source file: org.apache.tajo.master.container.TajoContainerId.java

License: Apache License

@Override
public String toString() {
    NumberFormat fmt = NumberFormat.getInstance();
    fmt.setGroupingUsed(false);
    fmt.setMinimumIntegerDigits(4);

    StringBuilder sb = new StringBuilder();
    sb.append("container_");
    ApplicationId appId = getApplicationAttemptId().getApplicationId();
    sb.append(appId.getClusterTimestamp()).append("_");
    sb.append(fmt.format(appId.getId())).append("_");
    sb.append(appAttemptIdFormat.get().format(getApplicationAttemptId().getAttemptId())).append("_");
    sb.append(containerIdFormat.get().format(getId()));
    return sb.toString();
}

From source file: org.apache.tez.auxservices.ShuffleHandler.java

License: Apache License

@Override
public void initializeApplication(ApplicationInitializationContext context) {

    String user = context.getUser();
    ApplicationId appId = context.getApplicationId();
    ByteBuffer secret = context.getApplicationDataForService();
    // TODO these bytes should be versioned
    try {
        Token<JobTokenIdentifier> jt = deserializeServiceData(secret);
        // TODO: Once SHuffle is out of NM, this can use MR APIs
        JobID jobId = new JobID(Long.toString(appId.getClusterTimestamp()), appId.getId());
        recordJobShuffleInfo(jobId, user, jt);
    } catch (IOException e) {
        LOG.error("Error during initApp", e);
        // TODO add API to AuxiliaryServices to report failures
    }
}

From source file: org.apache.tez.auxservices.ShuffleHandler.java

License: Apache License

@Override
public void stopApplication(ApplicationTerminationContext context) {
    ApplicationId appId = context.getApplicationId();
    JobID jobId = new JobID(Long.toString(appId.getClusterTimestamp()), appId.getId());
    try {
        removeJobShuffleInfo(jobId);
    } catch (IOException e) {
        LOG.error("Error during stopApp", e);
        // TODO add API to AuxiliaryServices to report failures
    }
}