Example usage for org.apache.hadoop.io DataOutputBuffer DataOutputBuffer

Introduction

On this page you can find example usages of the org.apache.hadoop.io.DataOutputBuffer DataOutputBuffer() constructor.

Prototype

public DataOutputBuffer() 

Document

Constructs a new empty buffer.
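Before the project-specific examples below, here is a minimal, self-contained sketch (not taken from any of the quoted projects) of the typical pattern: construct an empty buffer, write data into it, and bound any use of getData() by getLength(), since getData() returns the whole backing array.

import java.nio.ByteBuffer;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public class DataOutputBufferExample {
    public static void main(String[] args) throws Exception {
        // Construct a new empty buffer and write some data into it.
        DataOutputBuffer out = new DataOutputBuffer();
        out.writeUTF("hello");
        out.writeInt(42);

        // getData() returns the full backing array; only the first
        // getLength() bytes are valid, so bound the wrap accordingly.
        ByteBuffer wrapped = ByteBuffer.wrap(out.getData(), 0, out.getLength());
        System.out.println("serialized " + wrapped.remaining() + " bytes");

        // Read the values back with the companion DataInputBuffer.
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        System.out.println(in.readUTF() + " " + in.readInt());
    }
}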

Usage

From source file:org.apache.flink.yarn.YarnApplicationMasterRunner.java

License:Apache License

/**
 * Creates the launch context, which describes how to bring up a TaskManager process in
 * an allocated YARN container.
 * 
 * <p>This code is extremely YARN specific and registers all the resources that the TaskManager
 * needs (such as JAR file, config file, ...) and all environment variables in a YARN
 * container launch context. The launch context then ensures that those resources will be
 * copied into the containers transient working directory. 
 * 
 * <p>We do this work before we start the ResourceManager actor in order to fail early if
 * any of the operations here fail.
 * 
 * @param flinkConfig
 *         The Flink configuration object.
 * @param yarnConfig
 *         The YARN configuration object.
 * @param env
 *         The environment variables.
 * @param tmParams
 *         The TaskManager container memory parameters. 
 * @param taskManagerConfig
 *         The configuration for the TaskManagers.
 * @param workingDirectory
 *         The current application master container's working directory. 
 * @param taskManagerMainClass
 *         The class with the main method.
 * @param log
 *         The logger.
 * 
 * @return The launch context for the TaskManager processes.
 * 
 * @throws Exception Thrown if the launch context could not be created, for example if
 *                   the resources could not be copied.
 */
public static ContainerLaunchContext createTaskManagerContext(Configuration flinkConfig,
        YarnConfiguration yarnConfig, Map<String, String> env, ContaineredTaskManagerParameters tmParams,
        Configuration taskManagerConfig, String workingDirectory, Class<?> taskManagerMainClass, Logger log)
        throws Exception {

    log.info("Setting up resources for TaskManagers");

    // get and validate all relevant variables

    String remoteFlinkJarPath = env.get(YarnConfigKeys.FLINK_JAR_PATH);
    require(remoteFlinkJarPath != null, "Environment variable %s not set", YarnConfigKeys.FLINK_JAR_PATH);

    String appId = env.get(YarnConfigKeys.ENV_APP_ID);
    require(appId != null, "Environment variable %s not set", YarnConfigKeys.ENV_APP_ID);

    String clientHomeDir = env.get(YarnConfigKeys.ENV_CLIENT_HOME_DIR);
    require(clientHomeDir != null, "Environment variable %s not set", YarnConfigKeys.ENV_CLIENT_HOME_DIR);

    String shipListString = env.get(YarnConfigKeys.ENV_CLIENT_SHIP_FILES);
    require(shipListString != null, "Environment variable %s not set", YarnConfigKeys.ENV_CLIENT_SHIP_FILES);

    String yarnClientUsername = env.get(YarnConfigKeys.ENV_CLIENT_USERNAME);
    require(yarnClientUsername != null, "Environment variable %s not set", YarnConfigKeys.ENV_CLIENT_USERNAME);

    // obtain a handle to the file system used by YARN
    final org.apache.hadoop.fs.FileSystem yarnFileSystem;
    try {
        yarnFileSystem = org.apache.hadoop.fs.FileSystem.get(yarnConfig);
    } catch (IOException e) {
        throw new Exception("Could not access YARN's default file system", e);
    }

    // register Flink Jar with remote HDFS
    LocalResource flinkJar = Records.newRecord(LocalResource.class);
    {
        Path remoteJarPath = new Path(remoteFlinkJarPath);
        Utils.registerLocalResource(yarnFileSystem, remoteJarPath, flinkJar);
    }

    // register conf with local fs
    LocalResource flinkConf = Records.newRecord(LocalResource.class);
    {
        // write the TaskManager configuration to a local file
        final File taskManagerConfigFile = new File(workingDirectory,
                UUID.randomUUID() + "-taskmanager-conf.yaml");
        LOG.debug("Writing TaskManager configuration to {}", taskManagerConfigFile.getAbsolutePath());
        BootstrapTools.writeConfiguration(taskManagerConfig, taskManagerConfigFile);

        Utils.setupLocalResource(yarnFileSystem, appId, new Path(taskManagerConfigFile.toURI()), flinkConf,
                new Path(clientHomeDir));

        log.info("Prepared local resource for modified yaml: {}", flinkConf);
    }

    Map<String, LocalResource> taskManagerLocalResources = new HashMap<>();
    taskManagerLocalResources.put("flink.jar", flinkJar);
    taskManagerLocalResources.put("flink-conf.yaml", flinkConf);

    // prepare additional files to be shipped
    for (String pathStr : shipListString.split(",")) {
        if (!pathStr.isEmpty()) {
            LocalResource resource = Records.newRecord(LocalResource.class);
            Path path = new Path(pathStr);
            Utils.registerLocalResource(yarnFileSystem, path, resource);
            taskManagerLocalResources.put(path.getName(), resource);
        }
    }

    // now that all resources are prepared, we can create the launch context

    log.info("Creating container launch context for TaskManagers");

    boolean hasLogback = new File(workingDirectory, "logback.xml").exists();
    boolean hasLog4j = new File(workingDirectory, "log4j.properties").exists();

    String launchCommand = BootstrapTools.getTaskManagerShellCommand(flinkConfig, tmParams, ".",
            ApplicationConstants.LOG_DIR_EXPANSION_VAR, hasLogback, hasLog4j, taskManagerMainClass);

    log.info("Starting TaskManagers with command: " + launchCommand);

    ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
    ctx.setCommands(Collections.singletonList(launchCommand));
    ctx.setLocalResources(taskManagerLocalResources);

    Map<String, String> containerEnv = new HashMap<>();
    containerEnv.putAll(tmParams.taskManagerEnv());

    // add YARN classpath, etc to the container environment
    Utils.setupEnv(yarnConfig, containerEnv);
    containerEnv.put(YarnConfigKeys.ENV_CLIENT_USERNAME, yarnClientUsername);

    ctx.setEnvironment(containerEnv);

    try {
        UserGroupInformation user = UserGroupInformation.getCurrentUser();
        Credentials credentials = user.getCredentials();
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        ctx.setTokens(securityTokens);
    } catch (Throwable t) {
        log.error("Getting current user info failed when trying to launch the container", t);
    }

    return ctx;
}
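This example shows the canonical YARN token hand-off: the current user's Credentials are serialized into a DataOutputBuffer via writeTokenStorageToStream, and the valid region of the buffer (getData() bounded by getLength()) is wrapped in a ByteBuffer and passed to ContainerLaunchContext.setTokens(). The same pattern recurs in several of the examples below.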

From source file:org.apache.giraph.yarn.GiraphApplicationMaster.java

License:Apache License

/**
 * Populate allTokens with the tokens received, removing the AM->RM token
 * so that launched containers cannot access it.
 */
private void getAllTokens() throws IOException {
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    // Now remove the AM->RM token so that containers cannot access it.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        if (LOG.isDebugEnabled()) {
            LOG.debug("Token type :" + token.getKind());
        }
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
}
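For context, the bytes produced this way can be turned back into a Credentials object with the companion DataInputBuffer. A minimal sketch of that inverse operation (a hypothetical helper, not part of the Giraph source):

import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.security.Credentials;

/** Hypothetical helper: deserialize bytes written by writeTokenStorageToStream. */
public static Credentials readTokens(ByteBuffer tokenBytes) throws IOException {
    // Copy out of a duplicate so the caller's buffer position is untouched.
    ByteBuffer copy = tokenBytes.duplicate();
    byte[] bytes = new byte[copy.remaining()];
    copy.get(bytes);

    DataInputBuffer in = new DataInputBuffer();
    in.reset(bytes, bytes.length);

    Credentials credentials = new Credentials();
    credentials.readTokenStorageStream(in);
    return credentials;
}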

From source file:org.apache.giraph.yarn.GiraphYarnClient.java

License:Apache License

/**
 * Set delegation tokens for the AM container.
 * @param amContainer AM container
 */
private void setToken(ContainerLaunchContext amContainer) throws IOException {
    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = giraphConf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }
        FileSystem fs = FileSystem.get(giraphConf);
        // For now, only getting tokens for the default file-system.
        final Token<?>[] tokens = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }
}

From source file:org.apache.gobblin.yarn.GobblinYarnAppLauncher.java

License:Apache License

private void setupSecurityTokens(ContainerLaunchContext containerLaunchContext) throws IOException {
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();

    // Pass on the credentials from the hadoop token file if present.
    // The value in the token file takes precedence.
    if (System.getenv(HADOOP_TOKEN_FILE_LOCATION) != null) {
        Credentials tokenFileCredentials = Credentials
                .readTokenStorageFile(new File(System.getenv(HADOOP_TOKEN_FILE_LOCATION)), new Configuration());
        credentials.addAll(tokenFileCredentials);
    }

    String tokenRenewer = this.yarnConfiguration.get(YarnConfiguration.RM_PRINCIPAL);
    if (tokenRenewer == null || tokenRenewer.length() == 0) {
        throw new IOException("Failed to get master Kerberos principal for the RM to use as renewer");
    }

    // For now, only getting tokens for the default file-system.
    Token<?> tokens[] = this.fs.addDelegationTokens(tokenRenewer, credentials);
    if (tokens != null) {
        for (Token<?> token : tokens) {
            LOGGER.info("Got delegation token for " + this.fs.getUri() + "; " + token);
        }
    }

    Closer closer = Closer.create();
    try {
        DataOutputBuffer dataOutputBuffer = closer.register(new DataOutputBuffer());
        credentials.writeTokenStorageToStream(dataOutputBuffer);
        ByteBuffer fsTokens = ByteBuffer.wrap(dataOutputBuffer.getData(), 0, dataOutputBuffer.getLength());
        containerLaunchContext.setTokens(fsTokens);
    } catch (Throwable t) {
        throw closer.rethrow(t);
    } finally {
        closer.close();
    }
}
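Unlike the previous examples, this one registers the DataOutputBuffer with a Guava Closer, which guarantees the stream is closed (and the original exception rethrown) even if token serialization fails.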

From source file:org.apache.gora.util.IOUtils.java

License:Apache License

/**
 * Serializes the object to the given data output using
 * available Hadoop serializations.
 */
public static <T> byte[] serialize(Configuration conf, T obj) throws IOException {
    DataOutputBuffer buffer = new DataOutputBuffer();
    serialize(conf, buffer, obj);
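    // Caveat: getData() returns the whole backing array, which may be longer
    // than getLength(); callers that need an exact-size array must copy it.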
    return buffer.getData();
}

From source file:org.apache.gora.util.TestIOUtils.java

License:Apache License

private void testNullFieldsWith(Object... values) throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    DataInputBuffer in = new DataInputBuffer();

    IOUtils.writeNullFieldsInfo(out, values);

    in.reset(out.getData(), out.getLength());

    boolean[] ret = IOUtils.readNullFieldsInfo(in);

    // assert
    assertEquals(values.length, ret.length);

    for (int i = 0; i < values.length; i++) {
        assertEquals(values[i] == null, ret[i]);
    }
}

From source file:org.apache.hama.bsp.ApplicationMaster.java

License:Apache License

/**
 * Main run function for the application master.
 *
 * @throws org.apache.hadoop.yarn.exceptions.YarnException
 * @throws IOException
 */
@SuppressWarnings({ "unchecked" })
public void run() throws YarnException, IOException, InterruptedException {
    LOG.info("Starting ApplicationMaster");

    // Note: Credentials, Token, UserGroupInformation, DataOutputBuffer class
    // are marked as LimitedPrivate
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    // Now remove the AM->RM token so that containers cannot access it.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    LOG.info("Executing with tokens:");
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.info(token);
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    // Create appSubmitterUgi and add original tokens to it
    String appSubmitterUserName = System.getenv(ApplicationConstants.Environment.USER.name());
    appSubmitterUgi = UserGroupInformation.createRemoteUser(appSubmitterUserName);
    appSubmitterUgi.addCredentials(credentials);

    AMRMClientAsync.CallbackHandler allocListener = new RMCallbackHandler();
    amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, allocListener);
    amRMClient.init(localConf);
    amRMClient.start();

    containerListener = createNMCallbackHandler();
    nmClientAsync = new NMClientAsyncImpl(containerListener);
    nmClientAsync.init(localConf);
    nmClientAsync.start();

    // Setup local RPC Server to accept status requests directly from clients
    // TODO need to setup a protocol for client to be able to communicate to
    // the RPC server
    // TODO use the rpc port info to register with the RM for the client to
    // send requests to this app master

    // Register self with ResourceManager
    // This will start heartbeating to the RM
    appMasterHostname = NetUtils.getHostname();
    RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(appMasterHostname,
            appMasterRpcPort, appMasterTrackingUrl);
    // Dump out information about cluster capability as seen by the
    // resource manager
    int maxMem = response.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capability of resources in this cluster " + maxMem);

    int maxVCores = response.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max vcores capability of resources in this cluster " + maxVCores);

    // A resource ask cannot exceed the max.
    if (containerMemory > maxMem) {
        LOG.info("Container memory specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerMemory + ", max=" + maxMem);
        containerMemory = maxMem;
    }

    if (containerVirtualCores > maxVCores) {
        LOG.info("Container virtual cores specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerVirtualCores + ", max=" + maxVCores);
        containerVirtualCores = maxVCores;
    }

    List<Container> previousAMRunningContainers = response.getContainersFromPreviousAttempts();
    LOG.info(appAttemptID + " received " + previousAMRunningContainers.size()
            + " previous attempts' running containers on AM registration.");
    for (Container container : previousAMRunningContainers) {
        launchedContainers.add(container.getId());
    }
    numAllocatedContainers.addAndGet(previousAMRunningContainers.size());

    int numTotalContainersToRequest = numTotalContainers - previousAMRunningContainers.size();
    // Setup ask for containers from RM
    // Send request for containers to RM
    // Until we get our fully allocated quota, we keep on polling RM for
    // containers
    // Keep looping until all the containers are launched and shell script
    // executed on them ( regardless of success/failure).
    for (int i = 0; i < numTotalContainersToRequest; ++i) {
        AMRMClient.ContainerRequest containerAsk = setupContainerAskForRM();
        amRMClient.addContainerRequest(containerAsk);
    }
    numRequestedContainers.set(numTotalContainers);
}

From source file:org.apache.hama.bsp.BSPJobClient.java

License:Apache License

private static int writeSplits(BSPJob job, InputSplit[] splits, Path submitSplitFile, int maxTasks)
        throws IOException {
    final DataOutputStream out = writeSplitsFileHeader(job.getConfiguration(), submitSplitFile, splits.length);
    try {
        DataOutputBuffer buffer = new DataOutputBuffer();
        RawSplit rawSplit = new RawSplit();
        for (InputSplit split : splits) {

            // set partitionID to rawSplit
            if (split.getClass().getName().equals(FileSplit.class.getName())
                    && job.getBoolean("input.has.partitioned", false)) {
                String[] extractPartitionID = ((FileSplit) split).getPath().getName().split("[-]");
                if (extractPartitionID.length > 1)
                    rawSplit.setPartitionID(Integer.parseInt(extractPartitionID[1]));
            }

            rawSplit.setClassName(split.getClass().getName());
            buffer.reset();
            split.write(buffer);
            rawSplit.setDataLength(split.getLength());
            rawSplit.setBytes(buffer.getData(), 0, buffer.getLength());
            rawSplit.setLocations(split.getLocations());
            rawSplit.write(out);
        }
    } finally {
        out.close();
    }
    return splits.length;
}
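Note that this example constructs a single DataOutputBuffer outside the loop and calls reset() before serializing each split, reusing the backing array instead of allocating a new buffer per split.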

From source file:org.apache.hama.bsp.TestClusterStatus.java

License:Apache License

public final void testWriteAndReadFields() throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    DataInputBuffer in = new DataInputBuffer();

    ClusterStatus status1;
    Map<String, GroomServerStatus> grooms = new HashMap<String, GroomServerStatus>();

    for (int i = 0; i < 10; i++) {
        int num = rnd.nextInt();
        String groomName = "groom_" + num;
        String peerName = "peerhost:" + num;
        grooms.put(groomName, new GroomServerStatus(peerName, new ArrayList<TaskStatus>(0), 25, 2));
    }

    int tasks = rnd.nextInt(100);
    int maxTasks = rnd.nextInt(100);
    BSPMaster.State state = BSPMaster.State.RUNNING;

    status1 = new ClusterStatus(grooms, tasks, maxTasks, state);
    status1.write(out);

    in.reset(out.getData(), out.getLength());

    ClusterStatus status2 = new ClusterStatus();
    status2.readFields(in);

    for (Entry<String, GroomServerStatus> entry : status2.getActiveGroomServerStatus().entrySet()) {
        assertEquals(entry.getValue().getMaxTasks(), 2);
        assertEquals(entry.getValue().getFailures(), 25);
    }

    Map<String, String> grooms_s = new HashMap<String, String>(status1.getActiveGroomNames());
    Map<String, String> grooms_o = new HashMap<String, String>(status2.getActiveGroomNames());

    assertEquals(status1.getGroomServers(), status2.getGroomServers());

    assertTrue(grooms_s.entrySet().containsAll(grooms_o.entrySet()));
    assertTrue(grooms_o.entrySet().containsAll(grooms_s.entrySet()));

    assertEquals(status1.getTasks(), status2.getTasks());
    assertEquals(status1.getMaxTasks(), status2.getMaxTasks());
}

From source file:org.apache.hama.bsp.YARNBSPJobClient.java

License:Apache License

@Override
protected RunningJob launchJob(BSPJobID jobId, BSPJob normalJob, Path submitJobFile, FileSystem pFs)
        throws IOException {
    YARNBSPJob job = (YARNBSPJob) normalJob;

    LOG.info("Submitting job...");
    if (getConf().get("bsp.child.mem.in.mb") == null) {
        LOG.warn("BSP Child memory has not been set, YARN will guess your needs or use default values.");
    }

    FileSystem fs = pFs;
    if (fs == null) {
        fs = FileSystem.get(getConf());
    }

    if (getConf().get("bsp.user.name") == null) {
        String s = getUnixUserName();
        getConf().set("bsp.user.name", s);
        LOG.debug("Retrieved username: " + s);
    }

    yarnClient.start();
    try {
        YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
        LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers="
                + clusterMetrics.getNumNodeManagers());

        List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
        LOG.info("Got Cluster node info from ASM");
        for (NodeReport node : clusterNodeReports) {
            LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress"
                    + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers"
                    + node.getNumContainers());
        }

        QueueInfo queueInfo = yarnClient.getQueueInfo("default");
        LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity="
                + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
                + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount="
                + queueInfo.getChildQueues().size());

        List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
        for (QueueUserACLInfo aclInfo : listAclInfo) {
            for (QueueACL userAcl : aclInfo.getUserAcls()) {
                LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                        + userAcl.name());
            }
        }

        // Get a new application id
        YarnClientApplication app = yarnClient.createApplication();

        // Create a new ApplicationSubmissionContext
        //ApplicationSubmissionContext appContext = Records.newRecord(ApplicationSubmissionContext.class);
        ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();

        id = appContext.getApplicationId();

        // set the application name
        appContext.setApplicationName(job.getJobName());

        // Create a new container launch context for the AM's container
        ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

        // Define the local resources required
        Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
        // Lets assume the jar we need for our ApplicationMaster is available in
        // HDFS at a certain known path to us and we want to make it available to
        // the ApplicationMaster in the launched container
        if (job.getJar() == null) {
            throw new IllegalArgumentException("Jar must be set in order to run the application!");
        }

        Path jarPath = new Path(job.getJar());
        jarPath = fs.makeQualified(jarPath);
        getConf().set("bsp.jar", jarPath.makeQualified(fs.getUri(), jarPath).toString());

        FileStatus jarStatus = fs.getFileStatus(jarPath);
        LocalResource amJarRsrc = Records.newRecord(LocalResource.class);
        amJarRsrc.setType(LocalResourceType.FILE);
        amJarRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
        amJarRsrc.setResource(ConverterUtils.getYarnUrlFromPath(jarPath));
        amJarRsrc.setTimestamp(jarStatus.getModificationTime());
        amJarRsrc.setSize(jarStatus.getLen());

        // this creates a symlink in the working directory
        localResources.put(YARNBSPConstants.APP_MASTER_JAR_PATH, amJarRsrc);

        // add hama related jar files to localresources for container
        List<File> hamaJars;
        if (System.getProperty("hama.home.dir") != null)
            hamaJars = localJarfromPath(System.getProperty("hama.home.dir"));
        else
            hamaJars = localJarfromPath(getConf().get("hama.home.dir"));
        String hamaPath = getSystemDir() + "/hama";
        for (File fileEntry : hamaJars) {
            addToLocalResources(fs, fileEntry.getCanonicalPath(), hamaPath, fileEntry.getName(),
                    localResources);
        }

        // Set the local resources into the launch context
        amContainer.setLocalResources(localResources);

        // Set up the environment needed for the launch context
        Map<String, String> env = new HashMap<String, String>();
        // Assuming our classes or jars are available as local resources in the
        // working directory from which the command will be run, we need to append
        // "." to the path.
        // By default, all the hadoop specific classpaths will already be available
        // in $CLASSPATH, so we should be careful not to overwrite it.
        StringBuilder classPathEnv = new StringBuilder(ApplicationConstants.Environment.CLASSPATH.$())
                .append(File.pathSeparatorChar).append("./*");
        for (String c : yarnConf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
                YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
            classPathEnv.append(File.pathSeparatorChar);
            classPathEnv.append(c.trim());
        }

        env.put(YARNBSPConstants.HAMA_YARN_LOCATION, jarPath.toUri().toString());
        env.put(YARNBSPConstants.HAMA_YARN_SIZE, Long.toString(jarStatus.getLen()));
        env.put(YARNBSPConstants.HAMA_YARN_TIMESTAMP, Long.toString(jarStatus.getModificationTime()));

        env.put(YARNBSPConstants.HAMA_LOCATION, hamaPath);
        env.put("CLASSPATH", classPathEnv.toString());
        amContainer.setEnvironment(env);

        // Set the necessary command to execute on the allocated container
        Vector<CharSequence> vargs = new Vector<CharSequence>(5);
        vargs.add("${JAVA_HOME}/bin/java");
        vargs.add("-cp " + classPathEnv + "");
        vargs.add(ApplicationMaster.class.getCanonicalName());
        vargs.add(submitJobFile.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString());

        vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/hama-appmaster.stdout");
        vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/hama-appmaster.stderr");

        // Get final command
        StringBuilder command = new StringBuilder();
        for (CharSequence str : vargs) {
            command.append(str).append(" ");
        }

        List<String> commands = new ArrayList<String>();
        commands.add(command.toString());
        amContainer.setCommands(commands);

        LOG.debug("Start command: " + command);

        Resource capability = Records.newRecord(Resource.class);
        // we have at least 3 threads, which consume 1 MB each, for each BSP task and
        // a base usage of 100 MB
        capability.setMemory(3 * job.getNumBspTask() + getConf().getInt("hama.appmaster.memory.mb", 100));
        LOG.info("Set memory for the application master to " + capability.getMemory() + "mb!");

        // Set the container launch content into the ApplicationSubmissionContext
        appContext.setResource(capability);

        // Setup security tokens
        if (UserGroupInformation.isSecurityEnabled()) {
            // Note: Credentials class is marked as LimitedPrivate for HDFS and MapReduce
            Credentials credentials = new Credentials();
            String tokenRenewer = yarnConf.get(YarnConfiguration.RM_PRINCIPAL);
            if (tokenRenewer == null || tokenRenewer.length() == 0) {
                throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
            }

            // For now, only getting tokens for the default file-system.
            final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
            if (tokens != null) {
                for (Token<?> token : tokens) {
                    LOG.info("Got dt for " + fs.getUri() + "; " + token);
                }
            }
            DataOutputBuffer dob = new DataOutputBuffer();
            credentials.writeTokenStorageToStream(dob);
            ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
            amContainer.setTokens(fsTokens);
        }

        appContext.setAMContainerSpec(amContainer);

        // Create the request to send to the ApplicationsManager
        ApplicationId appId = appContext.getApplicationId();
        yarnClient.submitApplication(appContext);

        return monitorApplication(appId) ? new NetworkedJob() : null;
    } catch (YarnException e) {
        e.printStackTrace();
        return null;
    }
}