Example usage for org.apache.hadoop.yarn.api.records ContainerId getApplicationAttemptId

List of usage examples for org.apache.hadoop.yarn.api.records ContainerId getApplicationAttemptId

Introduction

On this page you can find usage examples for org.apache.hadoop.yarn.api.records.ContainerId.getApplicationAttemptId().

Prototype

@Public
@Stable
public abstract ApplicationAttemptId getApplicationAttemptId();

Document

Get the ApplicationAttemptId of the application to which the Container was assigned.
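
Before the per-project examples below, here is a minimal, self-contained sketch of the typical pattern: a process launched by YARN reads the CONTAINER_ID environment variable, parses it into a ContainerId, and calls getApplicationAttemptId() to recover the attempt (and, from it, the application) the container belongs to. The class name ContainerIdExample is illustrative only; on recent Hadoop versions ContainerId.fromString(...) can replace the deprecated ConverterUtils.toContainerId(...).

import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.util.ConverterUtils;

public class ContainerIdExample {
    public static void main(String[] args) throws Exception {
        // YARN exports the ID of the current container through the
        // CONTAINER_ID environment variable when it launches the process.
        String containerIdStr = System.getenv(ApplicationConstants.Environment.CONTAINER_ID.name());
        if (containerIdStr == null) {
            throw new IllegalStateException("CONTAINER_ID not set in the environment");
        }

        // Parse the container ID, then derive the attempt and application IDs.
        ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
        ApplicationAttemptId attemptId = containerId.getApplicationAttemptId();
        ApplicationId appId = attemptId.getApplicationId();

        System.out.println("Running in attempt " + attemptId.getAttemptId()
                + " of application " + appId);
    }
}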

Usage

From source file:org.apache.hama.bsp.ApplicationMaster.java

License:Apache License

/**
 * Gets the application attempt ID from the environment. This should be set by
 * YARN when the container has been launched.
 *
 * @return a new ApplicationAttemptId which is unique and identifies this
 *         task.
 */
private static ApplicationAttemptId getApplicationAttemptId() throws IOException {
    Map<String, String> envs = System.getenv();
    if (!envs.containsKey(ApplicationConstants.Environment.CONTAINER_ID.name())) {
        throw new IllegalArgumentException("ApplicationAttemptId not set in the environment");
    }

    ContainerId containerId = ConverterUtils
            .toContainerId(envs.get(ApplicationConstants.Environment.CONTAINER_ID.name()));
    return containerId.getApplicationAttemptId();
}

From source file:org.apache.helix.provisioning.yarn.AppMasterLauncher.java

License:Apache License

public static void main(String[] args) throws Exception {
    Map<String, String> env = System.getenv();
    LOG.info("Starting app master with the following environment variables");
    for (String key : env.keySet()) {
        LOG.info(key + "\t\t=" + env.get(key));
    }

    Options opts;
    opts = new Options();
    opts.addOption("num_containers", true, "Number of containers");

    // START ZOOKEEPER
    String dataDir = "dataDir";
    String logDir = "logDir";
    IDefaultNameSpace defaultNameSpace = new IDefaultNameSpace() {
        @Override
        public void createDefaultNameSpace(ZkClient zkClient) {

        }
    };
    try {
        FileUtils.deleteDirectory(new File(dataDir));
        FileUtils.deleteDirectory(new File(logDir));
    } catch (IOException e) {
        LOG.error(e);
    }

    final ZkServer server = new ZkServer(dataDir, logDir, defaultNameSpace);
    server.start();

    // start Generic AppMaster that interacts with Yarn RM
    AppMasterConfig appMasterConfig = new AppMasterConfig();
    String containerIdStr = appMasterConfig.getContainerId();
    ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
    ApplicationAttemptId appAttemptID = containerId.getApplicationAttemptId();

    String configFile = AppMasterConfig.AppEnvironment.APP_SPEC_FILE.toString();
    String className = appMasterConfig.getApplicationSpecFactory();

    GenericApplicationMaster genericApplicationMaster = new GenericApplicationMaster(appAttemptID);
    try {
        genericApplicationMaster.start();
    } catch (Exception e) {
        LOG.error("Unable to start application master: ", e);
    }
    ApplicationSpecFactory factory = HelixYarnUtil.createInstance(className);

    // TODO: Avoid setting static variable.
    YarnProvisioner.applicationMaster = genericApplicationMaster;
    YarnProvisioner.applicationMasterConfig = appMasterConfig;
    ApplicationSpec applicationSpec = factory.fromYaml(new FileInputStream(configFile));
    YarnProvisioner.applicationSpec = applicationSpec;
    String zkAddress = appMasterConfig.getZKAddress();
    String clusterName = appMasterConfig.getAppName();

    // CREATE CLUSTER and setup the resources
    // connect
    ZkHelixConnection connection = new ZkHelixConnection(zkAddress);
    connection.connect();

    // create the cluster
    ClusterId clusterId = ClusterId.from(clusterName);
    ClusterAccessor clusterAccessor = connection.createClusterAccessor(clusterId);
    StateModelDefinition statelessService = new StateModelDefinition(
            StateModelConfigGenerator.generateConfigForStatelessService());
    StateModelDefinition taskStateModel = new StateModelDefinition(
            StateModelConfigGenerator.generateConfigForTaskStateModel());
    clusterAccessor.createCluster(new ClusterConfig.Builder(clusterId).addStateModelDefinition(statelessService)
            .addStateModelDefinition(taskStateModel).build());
    for (String service : applicationSpec.getServices()) {
        String resourceName = service;
        // add the resource with the local provisioner
        ResourceId resourceId = ResourceId.from(resourceName);

        ServiceConfig serviceConfig = applicationSpec.getServiceConfig(resourceName);
        serviceConfig.setSimpleField("service_name", service);
        int numContainers = serviceConfig.getIntField("num_containers", 1);

        YarnProvisionerConfig provisionerConfig = new YarnProvisionerConfig(resourceId);
        provisionerConfig.setNumContainers(numContainers);

        AutoRebalanceModeISBuilder idealStateBuilder = new AutoRebalanceModeISBuilder(resourceId);
        idealStateBuilder.setStateModelDefId(statelessService.getStateModelDefId());
        idealStateBuilder.add(PartitionId.from(resourceId, "0"));
        idealStateBuilder.setNumReplica(1);
        ResourceConfig.Builder resourceConfigBuilder = new ResourceConfig.Builder(
                ResourceId.from(resourceName));
        ResourceConfig resourceConfig = resourceConfigBuilder.provisionerConfig(provisionerConfig)
                .idealState(idealStateBuilder.build()) //
                .build();
        clusterAccessor.addResource(resourceConfig);
    }
    // start controller
    ControllerId controllerId = ControllerId.from("controller1");
    HelixController controller = connection.createController(clusterId, controllerId);
    controller.start();

    // Start any pre-specified jobs
    List<TaskConfig> taskConfigs = applicationSpec.getTaskConfigs();
    if (taskConfigs != null) {
        YarnConfiguration conf = new YarnConfiguration();
        FileSystem fs;
        fs = FileSystem.get(conf);
        for (TaskConfig taskConfig : taskConfigs) {
            URI yamlUri = taskConfig.getYamlURI();
            if (yamlUri != null && taskConfig.name != null) {
                InputStream is = readFromHDFS(fs, taskConfig.name, yamlUri, applicationSpec,
                        appAttemptID.getApplicationId());
                Workflow workflow = Workflow.parse(is);
                TaskDriver taskDriver = new TaskDriver(new ZKHelixManager(controller));
                taskDriver.start(workflow);
            }
        }
    }

    Thread shutdownhook = new Thread(new Runnable() {
        @Override
        public void run() {
            server.shutdown();
        }
    });
    Runtime.getRuntime().addShutdownHook(shutdownhook);
    Thread.sleep(10000);

}

From source file:org.apache.metron.maas.service.ApplicationMaster.java

License:Apache License

/**
 * Parse command line options
 *
 * @param args Command line args
 * @return Whether init successful and run should be invoked
 * @throws ParseException
 * @throws IOException
 */
public boolean init(String[] args) throws ParseException, IOException {
    CommandLine cliParser = AMOptions.parse(new GnuParser(), args);

    //Check whether customer log4j.properties file exists
    if (fileExist(log4jPath)) {
        try {
            Log4jPropertyHelper.updateLog4jConfiguration(ApplicationMaster.class, log4jPath);
        } catch (Exception e) {
            LOG.warn("Can not set up custom log4j properties. " + e);
        }
    }

    if (AMOptions.HELP.has(cliParser)) {
        AMOptions.printHelp();
        return false;
    }

    zkQuorum = AMOptions.ZK_QUORUM.get(cliParser);
    zkRoot = AMOptions.ZK_ROOT.get(cliParser);
    appJarPath = new Path(AMOptions.APP_JAR_PATH.get(cliParser));

    Map<String, String> envs = System.getenv();

    if (!envs.containsKey(Environment.CONTAINER_ID.name())) {
        if (AMOptions.APP_ATTEMPT_ID.has(cliParser)) {
            String appIdStr = AMOptions.APP_ATTEMPT_ID.get(cliParser, "");
            appAttemptID = ConverterUtils.toApplicationAttemptId(appIdStr);
        } else {
            throw new IllegalArgumentException("Application Attempt Id not set in the environment");
        }
    } else {
        ContainerId containerId = ConverterUtils.toContainerId(envs.get(Environment.CONTAINER_ID.name()));
        appAttemptID = containerId.getApplicationAttemptId();
    }

    if (!envs.containsKey(ApplicationConstants.APP_SUBMIT_TIME_ENV)) {
        throw new RuntimeException(ApplicationConstants.APP_SUBMIT_TIME_ENV + " not set in the environment");
    }
    if (!envs.containsKey(Environment.NM_HOST.name())) {
        throw new RuntimeException(Environment.NM_HOST.name() + " not set in the environment");
    }
    if (!envs.containsKey(Environment.NM_HTTP_PORT.name())) {
        throw new RuntimeException(Environment.NM_HTTP_PORT + " not set in the environment");
    }
    if (!envs.containsKey(Environment.NM_PORT.name())) {
        throw new RuntimeException(Environment.NM_PORT.name() + " not set in the environment");
    }

    LOG.info("Application master for app" + ", appId=" + appAttemptID.getApplicationId().getId()
            + ", clustertimestamp=" + appAttemptID.getApplicationId().getClusterTimestamp() + ", attemptId="
            + appAttemptID.getAttemptId());

    if (cliParser.hasOption("shell_env")) {
        String shellEnvs[] = cliParser.getOptionValues("shell_env");
        for (String env : shellEnvs) {
            env = env.trim();
            int index = env.indexOf('=');
            if (index == -1) {
                shellEnv.put(env, "");
                continue;
            }
            String key = env.substring(0, index);
            String val = "";
            if (index < (env.length() - 1)) {
                val = env.substring(index + 1);
            }
            shellEnv.put(key, val);
        }
    }

    if (envs.containsKey(Constants.TIMELINEDOMAIN)) {
        domainId = envs.get(Constants.TIMELINEDOMAIN);
    }
    return true;
}

From source file:org.apache.myriad.TestObjectFactory.java

License:Apache License

/**
 * Returns a new RMContainer corresponding to the RMNode and RMContext. The RMContainer is the 
 * ResourceManager's view of an application container per the Hadoop docs
 *
 * @param node
 * @param context
 * @param appId
 * @param cores
 * @param memory
 * @return RMContainer
 */
public static RMContainer getRMContainer(RMNode node, RMContext context, int appId, int cores, int memory) {
    ContainerId containerId = ContainerId.newContainerId(
            ApplicationAttemptId.newInstance(ApplicationId.newInstance(123456789, 1), 1), appId);

    Container container = Container.newInstance(containerId, node.getNodeID(), node.getHttpAddress(),
            Resources.createResource(memory, cores), null, null);
    return new RMContainerImpl(container, containerId.getApplicationAttemptId(), node.getNodeID(), "user1",
            context);
}

From source file:org.apache.reef.runtime.yarn.driver.YarnDriverRuntimeRestartManager.java

License:Apache License

private static ApplicationAttemptId getAppAttemptId(final String containerIdString) {
    if (containerIdString == null) {
        return null;
    }

    try {
        final ContainerId containerId = ConverterUtils.toContainerId(containerIdString);
        return containerId.getApplicationAttemptId();
    } catch (Exception e) {
        LOG.log(Level.WARNING, "Unable to get the applicationAttempt ID from the environment, exception " + e
                + " was thrown.");
        return null;
    }
}

From source file:org.apache.reef.runtime.yarn.util.YarnUtilities.java

License:Apache License

/**
 * @param containerIdString the Container ID of the running Container.
 * @return the Application Attempt ID of the YARN application.
 */
public static ApplicationAttemptId getAppAttemptId(final String containerIdString) {
    if (containerIdString == null) {
        return null;
    }

    try {
        final ContainerId containerId = ConverterUtils.toContainerId(containerIdString);
        return containerId.getApplicationAttemptId();
    } catch (Exception e) {
        LOG.log(Level.WARNING, "Unable to get the applicationAttempt ID from the environment, exception " + e
                + " was thrown.");
        return null;
    }
}

From source file:org.apache.samza.job.yarn.refactor.YarnAppState.java

License:Apache License

public YarnAppState(JobModelManager jobModelManager, int taskId, ContainerId amContainerId, String nodeHost,
        int nodePort, int nodeHttpPort, SamzaAppState state) {
    this.jobModelManager = jobModelManager;
    this.taskId = taskId;
    this.amContainerId = amContainerId;
    this.nodeHost = nodeHost;
    this.nodePort = nodePort;
    this.nodeHttpPort = nodeHttpPort;
    this.appAttemptId = amContainerId.getApplicationAttemptId();
    this.samzaAppState = state;
}

From source file:org.apache.samza.job.yarn.SamzaAppState.java

License:Apache License

public SamzaAppState(JobCoordinator jobCoordinator, int taskId, ContainerId amContainerId, String nodeHost,
        int nodePort, int nodeHttpPort) {
    this.jobCoordinator = jobCoordinator;
    this.taskId = taskId;
    this.amContainerId = amContainerId;
    this.nodeHost = nodeHost;
    this.nodePort = nodePort;
    this.nodeHttpPort = nodeHttpPort;
    this.appAttemptId = amContainerId.getApplicationAttemptId();

}

From source file:org.apache.samza.job.yarn.YarnAppState.java

License:Apache License

public YarnAppState(int taskId, ContainerId amContainerId, String nodeHost, int nodePort, int nodeHttpPort) {
    this.taskId = taskId;
    this.amContainerId = amContainerId;
    this.nodeHost = nodeHost;
    this.nodePort = nodePort;
    this.nodeHttpPort = nodeHttpPort;
    this.appAttemptId = amContainerId.getApplicationAttemptId();
}

From source file:org.apache.sysml.yarn.DMLAppMaster.java

License:Apache License

public void runApplicationMaster(String[] args) throws YarnException, IOException {
    _conf = new YarnConfiguration();

    //obtain application ID
    String containerIdString = System.getenv(Environment.CONTAINER_ID.name());
    ContainerId containerId = ConverterUtils.toContainerId(containerIdString);
    _appId = containerId.getApplicationAttemptId().getApplicationId();
    LOG.info("SystemML appplication master (applicationID: " + _appId + ")");

    //initialize clients to ResourceManager
    AMRMClient<ContainerRequest> rmClient = AMRMClient.createAMRMClient();
    rmClient.init(_conf);
    rmClient.start();

    //register with ResourceManager
    rmClient.registerApplicationMaster("", 0, ""); //host, port for rm communication
    LOG.debug("Registered the SystemML application master with resource manager");

    //start status reporter to ResourceManager
    DMLAppMasterStatusReporter reporter = new DMLAppMasterStatusReporter(rmClient, 10000);
    reporter.start();
    LOG.debug("Started status reporter (heartbeat to resource manager)");

    //set DMLscript app master context
    DMLScript.setActiveAM();

    //parse input arguments
    String[] otherArgs = new GenericOptionsParser(_conf, args).getRemainingArgs();

    //run SystemML CP
    FinalApplicationStatus status = null;
    try {
        //core dml script execution (equivalent to non-AM runtime)
        boolean success = DMLScript.executeScript(_conf, otherArgs);

        if (success)
            status = FinalApplicationStatus.SUCCEEDED;
        else
            status = FinalApplicationStatus.FAILED;
    } catch (DMLScriptException ex) {
        LOG.error(DMLYarnClient.APPMASTER_NAME + ": Failed to executed DML script due to stop call:\n\t"
                + ex.getMessage());
        status = FinalApplicationStatus.FAILED;
        writeMessageToHDFSWorkingDir(ex.getMessage());
    } catch (Exception ex) {
        LOG.error(DMLYarnClient.APPMASTER_NAME + ": Failed to executed DML script.", ex);
        status = FinalApplicationStatus.FAILED;
    } finally {
        //stop periodic status reports
        reporter.stopStatusReporter();
        LOG.debug("Stopped status reporter");

        //unregister resource manager client
        rmClient.unregisterApplicationMaster(status, "", "");
        LOG.debug("Unregistered the SystemML application master");
    }
}