Example usage for org.apache.hadoop.yarn.api.records ContainerLaunchContext setEnvironment

List of usage examples for org.apache.hadoop.yarn.api.records ContainerLaunchContext setEnvironment

Introduction

On this page you can find example usage for org.apache.hadoop.yarn.api.records ContainerLaunchContext setEnvironment.

Prototype

@Public
@Stable
public abstract void setEnvironment(Map<String, String> environment);

Document

Add environment variables for the container.
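
For reference, here is a minimal sketch of the call in isolation, assuming the standard YARN client API is on the classpath; the class name SetEnvironmentSketch, the helper method newContextWithEnv, and the MY_APP_CONF_DIR variable are illustrative, not taken from the examples below.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.util.Records;

public class SetEnvironmentSketch {

    public static ContainerLaunchContext newContextWithEnv() {
        // Create an empty launch context and attach an environment map to it.
        ContainerLaunchContext context = Records.newRecord(ContainerLaunchContext.class);

        Map<String, String> env = new HashMap<String, String>();

        // Propagate JAVA_HOME from the submitting process when it is set (illustrative).
        String javaHome = System.getenv(ApplicationConstants.Environment.JAVA_HOME.name());
        if (javaHome != null) {
            env.put(ApplicationConstants.Environment.JAVA_HOME.name(), javaHome);
        }

        // MY_APP_CONF_DIR is a hypothetical variable used only for illustration.
        env.put("MY_APP_CONF_DIR", "$PWD/conf");

        context.setEnvironment(env);
        return context;
    }
}

The resulting context can then be attached to an ApplicationSubmissionContext or a StartContainerRequest, as the examples below do.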

Usage

From source file: org.apache.reef.runtime.yarn.driver.unmanaged.UnmanagedAmTest.java

License: Apache License

@Test
public void testAmShutdown() throws IOException, YarnException {

    Assume.assumeTrue("This test requires a YARN Resource Manager to connect to",
            Boolean.parseBoolean(System.getenv("REEF_TEST_YARN")));

    final YarnConfiguration yarnConfig = new YarnConfiguration();

    // Start YARN client and register the application

    final YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(yarnConfig);
    yarnClient.start();

    final ContainerLaunchContext containerContext = Records.newRecord(ContainerLaunchContext.class);
    containerContext.setCommands(Collections.<String>emptyList());
    containerContext.setLocalResources(Collections.<String, LocalResource>emptyMap());
    containerContext.setEnvironment(Collections.<String, String>emptyMap());
    containerContext.setTokens(getTokens());

    final ApplicationSubmissionContext appContext = yarnClient.createApplication()
            .getApplicationSubmissionContext();
    appContext.setApplicationName("REEF_Unmanaged_AM_Test");
    appContext.setAMContainerSpec(containerContext);
    appContext.setUnmanagedAM(true);
    appContext.setQueue("default");

    final ApplicationId applicationId = appContext.getApplicationId();
    LOG.log(Level.INFO, "Registered YARN application: {0}", applicationId);

    yarnClient.submitApplication(appContext);

    LOG.log(Level.INFO, "YARN application submitted: {0}", applicationId);

    addToken(yarnClient.getAMRMToken(applicationId));

    // Start the AM

    final AMRMClientAsync<AMRMClient.ContainerRequest> rmClient = AMRMClientAsync.createAMRMClientAsync(1000,
            this);
    rmClient.init(yarnConfig);
    rmClient.start();

    final NMClientAsync nmClient = new NMClientAsyncImpl(this);
    nmClient.init(yarnConfig);
    nmClient.start();

    final RegisterApplicationMasterResponse registration = rmClient
            .registerApplicationMaster(NetUtils.getHostname(), -1, null);

    LOG.log(Level.INFO, "Unmanaged AM is running: {0}", registration);

    rmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, "Success!", null);

    LOG.log(Level.INFO, "Unregistering AM: state {0}", rmClient.getServiceState());

    // Shutdown the AM

    rmClient.stop();
    nmClient.stop();

    // Get the final application report

    final ApplicationReport appReport = yarnClient.getApplicationReport(applicationId);
    final YarnApplicationState appState = appReport.getYarnApplicationState();
    final FinalApplicationStatus finalAttemptStatus = appReport.getFinalApplicationStatus();

    LOG.log(Level.INFO, "Application {0} final attempt {1} status: {2}/{3}", new Object[] { applicationId,
            appReport.getCurrentApplicationAttemptId(), appState, finalAttemptStatus });

    Assert.assertEquals("Application must be in FINISHED state", YarnApplicationState.FINISHED, appState);
    Assert.assertEquals("Final status must be SUCCEEDED", FinalApplicationStatus.SUCCEEDED, finalAttemptStatus);

    // Shutdown YARN client

    yarnClient.stop();
}

From source file: org.apache.samza.job.yarn.ContainerUtil.java

License: Apache License

protected void startContainer(Path packagePath, Container container, Map<String, String> env,
        final String cmd) {
    log.info("starting container {} {} {} {}", new Object[] { packagePath, container, env, cmd });

    // set the local package so that the containers and app master are provisioned with it
    LocalResource packageResource = Records.newRecord(LocalResource.class);
    URL packageUrl = ConverterUtils.getYarnUrlFromPath(packagePath);
    FileStatus fileStatus;
    try {
        fileStatus = packagePath.getFileSystem(yarnConfiguration).getFileStatus(packagePath);
    } catch (IOException ioe) {
        log.error("IO Exception when accessing the package status from the filesystem", ioe);
        throw new SamzaException("IO Exception when accessing the package status from the filesystem");
    }

    packageResource.setResource(packageUrl);
    packageResource.setSize(fileStatus.getLen());
    packageResource.setTimestamp(fileStatus.getModificationTime());
    packageResource.setType(LocalResourceType.ARCHIVE);
    packageResource.setVisibility(LocalResourceVisibility.APPLICATION);

    ByteBuffer allTokens;
    // copy tokens (copied from dist shell example)
    try {
        Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);

        // now remove the AM->RM token so that containers cannot access it
        Iterator iter = credentials.getAllTokens().iterator();
        while (iter.hasNext()) {
            TokenIdentifier token = ((Token) iter.next()).decodeIdentifier();
            if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
                iter.remove();
            }
        }
        allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    } catch (IOException ioe) {
        ioe.printStackTrace();
        throw new SamzaException("IO Exception when writing credentials to output buffer");
    }

    ContainerLaunchContext context = Records.newRecord(ContainerLaunchContext.class);
    context.setEnvironment(env);
    context.setTokens(allTokens.duplicate());
    context.setCommands(new ArrayList<String>() {
        {
            add(cmd);
        }
    });
    context.setLocalResources(Collections.singletonMap("__package", packageResource));

    log.debug("setting package to {}", packageResource);
    log.debug("setting context to {}", context);

    StartContainerRequest startContainerRequest = Records.newRecord(StartContainerRequest.class);
    startContainerRequest.setContainerLaunchContext(context);
    try {
        nmClient.startContainer(container, context);
    } catch (YarnException ye) {
        log.error("Received YarnException when starting container: " + container.getId(), ye);
        throw new SamzaException("Received YarnException when starting container: " + container.getId());
    } catch (IOException ioe) {
        log.error("Received IOException when starting container: " + container.getId(), ioe);
        throw new SamzaException("Received IOException when starting container: " + container.getId());
    }
}

From source file: org.apache.samza.job.yarn.refactor.YarnContainerRunner.java

License: Apache License

/**
 *    Runs a command as a process on the container. All binaries needed by the physical process are packaged in the URL
 *    specified by packagePath.
 */
private void startContainer(Path packagePath, Container container, Map<String, String> env, final String cmd)
        throws SamzaContainerLaunchException {
    log.info("starting container {} {} {} {}", new Object[] { packagePath, container, env, cmd });

    // set the local package so that the containers and app master are provisioned with it
    LocalResource packageResource = Records.newRecord(LocalResource.class);
    URL packageUrl = ConverterUtils.getYarnUrlFromPath(packagePath);
    FileStatus fileStatus;
    try {
        fileStatus = packagePath.getFileSystem(yarnConfiguration).getFileStatus(packagePath);
    } catch (IOException ioe) {
        log.error("IO Exception when accessing the package status from the filesystem", ioe);
        throw new SamzaContainerLaunchException(
                "IO Exception when accessing the package status from the filesystem");
    }

    packageResource.setResource(packageUrl);
    packageResource.setSize(fileStatus.getLen());
    packageResource.setTimestamp(fileStatus.getModificationTime());
    packageResource.setType(LocalResourceType.ARCHIVE);
    packageResource.setVisibility(LocalResourceVisibility.APPLICATION);

    ByteBuffer allTokens;
    // copy tokens (copied from dist shell example)
    try {
        Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);

        // now remove the AM->RM token so that containers cannot access it
        Iterator iter = credentials.getAllTokens().iterator();
        while (iter.hasNext()) {
            TokenIdentifier token = ((Token) iter.next()).decodeIdentifier();
            if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
                iter.remove();
            }
        }
        allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    } catch (IOException ioe) {
        log.error("IOException when writing credentials.", ioe);
        throw new SamzaContainerLaunchException("IO Exception when writing credentials to output buffer");
    }

    ContainerLaunchContext context = Records.newRecord(ContainerLaunchContext.class);
    context.setEnvironment(env);
    context.setTokens(allTokens.duplicate());
    context.setCommands(new ArrayList<String>() {
        {
            add(cmd);
        }
    });
    context.setLocalResources(Collections.singletonMap("__package", packageResource));

    log.debug("setting package to {}", packageResource);
    log.debug("setting context to {}", context);

    StartContainerRequest startContainerRequest = Records.newRecord(StartContainerRequest.class);
    startContainerRequest.setContainerLaunchContext(context);
    try {
        nmClient.startContainer(container, context);
    } catch (YarnException ye) {
        log.error("Received YarnException when starting container: " + container.getId(), ye);
        throw new SamzaContainerLaunchException(
                "Received YarnException when starting container: " + container.getId(), ye);
    } catch (IOException ioe) {
        log.error("Received IOException when starting container: " + container.getId(), ioe);
        throw new SamzaContainerLaunchException(
                "Received IOException when starting container: " + container.getId(), ioe);
    }
}

From source file: org.apache.samza.job.yarn.YarnClusterResourceManager.java

License: Apache License

/**
 * Runs a command as a process on the container. All binaries needed by the physical process are packaged in the URL
 * specified by packagePath.
 */
private void startContainer(Path packagePath, Container container, Map<String, String> env, final String cmd)
        throws IOException {
    LocalResource packageResource = Records.newRecord(LocalResource.class);
    URL packageUrl = ConverterUtils.getYarnUrlFromPath(packagePath);
    FileStatus fileStatus;
    fileStatus = packagePath.getFileSystem(yarnConfiguration).getFileStatus(packagePath);
    packageResource.setResource(packageUrl);
    log.debug("Set package resource in YarnContainerRunner for {}", packageUrl);
    packageResource.setSize(fileStatus.getLen());
    packageResource.setTimestamp(fileStatus.getModificationTime());
    packageResource.setType(LocalResourceType.ARCHIVE);
    packageResource.setVisibility(LocalResourceVisibility.APPLICATION);

    ByteBuffer allTokens;
    // copy tokens to start the container
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);

    // now remove the AM->RM token so that containers cannot access it
    Iterator iter = credentials.getAllTokens().iterator();
    while (iter.hasNext()) {
        TokenIdentifier token = ((org.apache.hadoop.security.token.Token) iter.next()).decodeIdentifier();
        if (token != null && token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    Map<String, LocalResource> localResourceMap = new HashMap<>();
    localResourceMap.put("__package", packageResource);

    // include the resources from the universal resource configurations
    LocalizerResourceMapper resourceMapper = new LocalizerResourceMapper(new LocalizerResourceConfig(config),
            yarnConfiguration);
    localResourceMap.putAll(resourceMapper.getResourceMap());

    ContainerLaunchContext context = Records.newRecord(ContainerLaunchContext.class);
    context.setEnvironment(env);
    context.setTokens(allTokens.duplicate());
    context.setCommands(new ArrayList<String>() {
        {
            add(cmd);
        }
    });
    context.setLocalResources(localResourceMap);

    if (UserGroupInformation.isSecurityEnabled()) {
        Map<ApplicationAccessType, String> acls = yarnConfig.getYarnApplicationAcls();
        if (!acls.isEmpty()) {
            context.setApplicationACLs(acls);
        }
    }

    log.debug("Setting localResourceMap to {}", localResourceMap);
    log.debug("Setting context to {}", context);

    StartContainerRequest startContainerRequest = Records.newRecord(StartContainerRequest.class);
    startContainerRequest.setContainerLaunchContext(context);

    log.info(
            "Making an async start request for Container ID: {} on host: {} with local resource map: {} and context: {}",
            container.getId(), container.getNodeHttpAddress(), localResourceMap.toString(), context);
    nmClientAsync.startContainerAsync(container, context);
}

From source file: org.apache.samza.job.yarn.YarnContainerRunner.java

License: Apache License

/**
 *    Runs a command as a process on the container. All binaries needed by the physical process are packaged in the URL
 *    specified by packagePath.
 */
private void startContainer(Path packagePath, Container container, Map<String, String> env, final String cmd)
        throws SamzaContainerLaunchException {
    log.info("starting container {} {} {} {}", new Object[] { packagePath, container, env, cmd });

    // TODO: SAMZA-1144 remove the customized approach for package resource and use the common one.
    // But keep it now for backward compatibility.
    // set the local package so that the containers and app master are provisioned with it
    LocalResource packageResource = Records.newRecord(LocalResource.class);
    URL packageUrl = ConverterUtils.getYarnUrlFromPath(packagePath);
    FileStatus fileStatus;
    try {
        fileStatus = packagePath.getFileSystem(yarnConfiguration).getFileStatus(packagePath);
    } catch (IOException ioe) {
        log.error("IO Exception when accessing the package status from the filesystem", ioe);
        throw new SamzaContainerLaunchException(
                "IO Exception when accessing the package status from the filesystem");
    }

    packageResource.setResource(packageUrl);
    log.info("set package Resource in YarnContainerRunner for {}", packageUrl);
    packageResource.setSize(fileStatus.getLen());
    packageResource.setTimestamp(fileStatus.getModificationTime());
    packageResource.setType(LocalResourceType.ARCHIVE);
    packageResource.setVisibility(LocalResourceVisibility.APPLICATION);

    ByteBuffer allTokens;
    // copy tokens (copied from dist shell example)
    try {
        Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);

        // now remove the AM->RM token so that containers cannot access it
        Iterator iter = credentials.getAllTokens().iterator();
        while (iter.hasNext()) {
            TokenIdentifier token = ((Token) iter.next()).decodeIdentifier();
            if (token != null && token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
                iter.remove();
            }
        }
        allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    } catch (IOException ioe) {
        log.error("IOException when writing credentials.", ioe);
        throw new SamzaContainerLaunchException("IO Exception when writing credentials to output buffer");
    }

    Map<String, LocalResource> localResourceMap = new HashMap<>();
    localResourceMap.put("__package", packageResource);

    // include the resources from the universal resource configurations
    LocalizerResourceMapper resourceMapper = new LocalizerResourceMapper(new LocalizerResourceConfig(config),
            yarnConfiguration);
    localResourceMap.putAll(resourceMapper.getResourceMap());

    ContainerLaunchContext context = Records.newRecord(ContainerLaunchContext.class);
    context.setEnvironment(env);
    context.setTokens(allTokens.duplicate());
    context.setCommands(new ArrayList<String>() {
        {
            add(cmd);
        }
    });
    context.setLocalResources(localResourceMap);

    log.debug("setting localResourceMap to {}", localResourceMap);
    log.debug("setting context to {}", context);

    StartContainerRequest startContainerRequest = Records.newRecord(StartContainerRequest.class);
    startContainerRequest.setContainerLaunchContext(context);
    try {
        nmClient.startContainer(container, context);
    } catch (YarnException ye) {
        log.error("Received YarnException when starting container: " + container.getId(), ye);
        throw new SamzaContainerLaunchException(
                "Received YarnException when starting container: " + container.getId(), ye);
    } catch (IOException ioe) {
        log.error("Received IOException when starting container: " + container.getId(), ioe);
        throw new SamzaContainerLaunchException(
                "Received IOException when starting container: " + container.getId(), ioe);
    }
}

From source file: org.apache.tajo.master.TaskRunnerLauncherImpl.java

License: Apache License

private ContainerLaunchContext createCommonContainerLaunchContext() {
    TajoConf conf = (TajoConf) getConfig();

    ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
    try {
        ctx.setUser(UserGroupInformation.getCurrentUser().getShortUserName());
    } catch (IOException e) {
        e.printStackTrace();
    }

    ////////////////////////////////////////////////////////////////////////////
    // Set the env variables to be setup
    ////////////////////////////////////////////////////////////////////////////
    LOG.info("Set the environment for the application master");

    Map<String, String> environment = new HashMap<String, String>();
    //String initialClassPath = getInitialClasspath(conf);
    environment.put(Environment.SHELL.name(), "/bin/bash");
    environment.put(Environment.JAVA_HOME.name(), System.getenv(Environment.JAVA_HOME.name()));

    // TODO - to be improved with org.apache.tajo.sh shell script
    Properties prop = System.getProperties();
    if (prop.getProperty("tajo.test", "FALSE").equalsIgnoreCase("TRUE")) {
        environment.put(Environment.CLASSPATH.name(), prop.getProperty("java.class.path", null));
    } else {
        // Add AppMaster.jar location to classpath
        // At some point we should not be required to add
        // the hadoop specific classpaths to the env.
        // It should be provided out of the box.
        // For now setting all required classpaths including
        // the classpath to "." for the application jar
        StringBuilder classPathEnv = new StringBuilder("./");
        //for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH)) {
        for (String c : YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH) {
            classPathEnv.append(':');
            classPathEnv.append(c.trim());
        }

        classPathEnv.append(":" + System.getenv("TAJO_BASE_CLASSPATH"));
        classPathEnv.append(":./log4j.properties:./*");
        environment.put("HADOOP_HOME", System.getenv("HADOOP_HOME"));
        environment.put(Environment.HADOOP_COMMON_HOME.name(), System.getenv("HADOOP_HOME"));
        environment.put(Environment.HADOOP_HDFS_HOME.name(), System.getenv("HADOOP_HOME"));
        environment.put(Environment.HADOOP_YARN_HOME.name(), System.getenv("HADOOP_HOME"));
        environment.put("TAJO_BASE_CLASSPATH", System.getenv("TAJO_BASE_CLASSPATH"));
        environment.put(Environment.CLASSPATH.name(), classPathEnv.toString());
    }

    ctx.setEnvironment(environment);

    ////////////////////////////////////////////////////////////////////////////
    // Set the local resources
    ////////////////////////////////////////////////////////////////////////////
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    FileSystem fs = null;

    LOG.info("defaultFS: " + conf.get("fs.default.name"));
    LOG.info("defaultFS: " + conf.get("fs.defaultFS"));
    try {
        fs = FileSystem.get(conf);
    } catch (IOException e) {
        e.printStackTrace();
    }

    FileContext fsCtx = null;
    try {
        fsCtx = FileContext.getFileContext(getConfig());
    } catch (UnsupportedFileSystemException e) {
        e.printStackTrace();
    }

    LOG.info("Writing a QueryConf to HDFS and add to local environment");
    Path queryConfPath = new Path(fs.getHomeDirectory(), QueryConf.FILENAME);
    try {
        writeConf(conf, queryConfPath);

        LocalResource queryConfSrc = createApplicationResource(fsCtx, queryConfPath, LocalResourceType.FILE);
        localResources.put(QueryConf.FILENAME, queryConfSrc);

        ctx.setLocalResources(localResources);
    } catch (IOException e) {
        e.printStackTrace();
    }

    // Add shuffle token
    Map<String, ByteBuffer> serviceData = new HashMap<String, ByteBuffer>();
    try {
        //LOG.info("Putting shuffle token in serviceData");
        serviceData.put(PullServerAuxService.PULLSERVER_SERVICEID, PullServerAuxService.serializeMetaData(0));
    } catch (IOException ioe) {
        LOG.error(ioe);
    }
    ctx.setServiceData(serviceData);

    return ctx;
}

From source file: org.apache.tajo.master.YarnContainerProxy.java

License: Apache License

public static ContainerLaunchContext createCommonContainerLaunchContext(Configuration config, String queryId,
        boolean isMaster) {
    TajoConf conf = (TajoConf) config;

    ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);

    try {
        ByteBuffer userToken = ByteBuffer
                .wrap(UserGroupInformation.getCurrentUser().getShortUserName().getBytes());
        ctx.setTokens(userToken);
    } catch (IOException e) {
        e.printStackTrace();
    }

    ////////////////////////////////////////////////////////////////////////////
    // Set the env variables to be setup
    ////////////////////////////////////////////////////////////////////////////
    LOG.info("Set the environment for the application master");

    Map<String, String> environment = new HashMap<String, String>();
    //String initialClassPath = getInitialClasspath(conf);
    environment.put(ApplicationConstants.Environment.SHELL.name(), "/bin/bash");
    if (System.getenv(ApplicationConstants.Environment.JAVA_HOME.name()) != null) {
        environment.put(ApplicationConstants.Environment.JAVA_HOME.name(),
                System.getenv(ApplicationConstants.Environment.JAVA_HOME.name()));
    }

    // TODO - to be improved with org.apache.tajo.sh shell script
    Properties prop = System.getProperties();

    if (prop.getProperty("tajo.test", "FALSE").equalsIgnoreCase("TRUE")
            || (System.getenv("tajo.test") != null && System.getenv("tajo.test").equalsIgnoreCase("TRUE"))) {
        LOG.info("tajo.test is TRUE");
        environment.put(ApplicationConstants.Environment.CLASSPATH.name(),
                prop.getProperty("java.class.path", null));
        environment.put("tajo.test", "TRUE");
    } else {
        // Add AppMaster.jar location to classpath
        // At some point we should not be required to add
        // the hadoop specific classpaths to the env.
        // It should be provided out of the box.
        // For now setting all required classpaths including
        // the classpath to "." for the application jar
        StringBuilder classPathEnv = new StringBuilder("./");
        //for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH)) {
        for (String c : YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH) {
            classPathEnv.append(':');
            classPathEnv.append(c.trim());
        }

        classPathEnv.append(":" + System.getenv("TAJO_BASE_CLASSPATH"));
        classPathEnv.append(":./log4j.properties:./*");
        if (System.getenv("HADOOP_HOME") != null) {
            environment.put("HADOOP_HOME", System.getenv("HADOOP_HOME"));
            environment.put(ApplicationConstants.Environment.HADOOP_COMMON_HOME.name(),
                    System.getenv("HADOOP_HOME"));
            environment.put(ApplicationConstants.Environment.HADOOP_HDFS_HOME.name(),
                    System.getenv("HADOOP_HOME"));
            environment.put(ApplicationConstants.Environment.HADOOP_YARN_HOME.name(),
                    System.getenv("HADOOP_HOME"));
        }

        if (System.getenv("TAJO_BASE_CLASSPATH") != null) {
            environment.put("TAJO_BASE_CLASSPATH", System.getenv("TAJO_BASE_CLASSPATH"));
        }
        environment.put(ApplicationConstants.Environment.CLASSPATH.name(), classPathEnv.toString());
    }

    ctx.setEnvironment(environment);

    if (LOG.isDebugEnabled()) {
        LOG.debug("=================================================");
        for (Map.Entry<String, String> entry : environment.entrySet()) {
            LOG.debug(entry.getKey() + "=" + entry.getValue());
        }
        LOG.debug("=================================================");
    }
    ////////////////////////////////////////////////////////////////////////////
    // Set the local resources
    ////////////////////////////////////////////////////////////////////////////
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    LOG.info("defaultFS: " + conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));

    try {
        FileSystem fs = FileSystem.get(conf);
        FileContext fsCtx = FileContext.getFileContext(conf);
        Path systemConfPath = TajoConf.getSystemConfPath(conf);
        if (!fs.exists(systemConfPath)) {
            LOG.error("system_conf.xml (" + systemConfPath.toString() + ") Not Found");
        }
        LocalResource systemConfResource = createApplicationResource(fsCtx, systemConfPath,
                LocalResourceType.FILE);
        localResources.put(TajoConstants.SYSTEM_CONF_FILENAME, systemConfResource);
        ctx.setLocalResources(localResources);
    } catch (IOException e) {
        LOG.error(e.getMessage(), e);
    }

    Map<String, ByteBuffer> serviceData = new HashMap<String, ByteBuffer>();
    try {
        serviceData.put(PullServerAuxService.PULLSERVER_SERVICEID, PullServerAuxService.serializeMetaData(0));
    } catch (IOException ioe) {
        LOG.error(ioe);
    }
    ctx.setServiceData(serviceData);

    return ctx;
}

From source file: org.apache.tajo.yarn.command.LaunchCommand.java

License: Apache License

private void setupEnv(ContainerLaunchContext amContainer) throws IOException {
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();

    // Add AppMaster.jar location to classpath
    // At some point we should not be required to add
    // the hadoop specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$()).append(File.pathSeparatorChar)
            .append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }

    classPathEnv.append(File.pathSeparatorChar).append("./").append(libDir).append("/*");

    classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");

    // add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }

    env.put("CLASSPATH", classPathEnv.toString());
    env.put(Constants.TAJO_ARCHIVE_PATH, tajoArchive);
    env.put(Constants.TAJO_ARCHIVE_ROOT, "tajo/" + getTajoRootInArchive(tajoArchive));
    env.put(Constants.TAJO_HOME, "$PWD/${" + Constants.TAJO_ARCHIVE_ROOT + "}");
    env.put(Constants.TAJO_CONF_DIR, "$PWD/conf");
    env.put(Constants.TAJO_LOG_DIR, ApplicationConstants.LOG_DIR_EXPANSION_VAR);
    env.put(Constants.TAJO_CLASSPATH, "/export/apps/hadoop/site/lib/*:$PWD/" + libDir + "/*");
    amContainer.setEnvironment(env);
}

From source file: org.apache.tajo.yarn.container.WorkerContainerTask.java

License: Apache License

private void setupEnv(ContainerLaunchContext amContainer) throws IOException {
    LOG.info("Set the environment for the worker");
    Map<String, String> env = new HashMap<String, String>();

    // Add AppMaster.jar location to classpath
    // At some point we should not be required to add
    // the hadoop specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(ApplicationConstants.Environment.CLASSPATH.$())
            .append(File.pathSeparatorChar).append("./*");

    env.put("CLASSPATH", classPathEnv.toString());
    env.put(Constants.TAJO_ARCHIVE_ROOT, System.getenv(Constants.TAJO_ARCHIVE_ROOT));
    env.put(Constants.TAJO_HOME, "$PWD/${" + Constants.TAJO_ARCHIVE_ROOT + "}");
    env.put(Constants.TAJO_CONF_DIR, "$PWD/conf");
    env.put(Constants.TAJO_LOG_DIR, ApplicationConstants.LOG_DIR_EXPANSION_VAR);
    env.put(Constants.TAJO_CLASSPATH, "/export/apps/hadoop/site/lib/*");
    amContainer.setEnvironment(env);
}

From source file: org.deeplearning4j.iterativereduce.runtime.yarn.ContainerManagerHandler.java

License: Apache License

public StartContainerResponse startContainer(List<String> commands, Map<String, LocalResource> localResources,
        Map<String, String> env) throws IOException {

    if (containerManager == null)
        throw new IllegalStateException("Cannot start a continer before connecting to the container manager!");

    ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
    ctx.setContainerId(container.getId());
    ctx.setResource(container.getResource());
    ctx.setLocalResources(localResources);
    ctx.setCommands(commands);
    ctx.setUser(UserGroupInformation.getCurrentUser().getShortUserName());
    ctx.setEnvironment(env);

    if (LOG.isDebugEnabled()) {
        LOG.debug("Using ContainerLaunchContext with" + ", containerId=" + ctx.getContainerId() + ", memory="
                + ctx.getResource().getMemory() + ", localResources=" + ctx.getLocalResources().toString()
                + ", commands=" + ctx.getCommands().toString() + ", env=" + ctx.getEnvironment().toString());
    }

    StartContainerRequest request = Records.newRecord(StartContainerRequest.class);
    request.setContainerLaunchContext(ctx);

    LOG.info("Starting container, containerId=" + container.getId().toString() + ", host="
            + container.getNodeId().getHost() + ", http=" + container.getNodeHttpAddress());

    return containerManager.startContainer(request);
}