Example usage for org.apache.hadoop.io DataOutputBuffer getLength

List of usage examples for org.apache.hadoop.io DataOutputBuffer getLength

Introduction

This page collects example usages of org.apache.hadoop.io.DataOutputBuffer#getLength() from open source projects.

Prototype

public int getLength() 

Document

Returns the length of the valid data currently in the buffer.
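
A minimal, self-contained sketch of the typical pattern (not taken from any of the projects below; the class name is made up for illustration): write data into a DataOutputBuffer, then use getLength() to bound the valid region of the backing array returned by getData().

import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.io.DataOutputBuffer;

public class DataOutputBufferLengthExample {
    public static void main(String[] args) throws IOException {
        DataOutputBuffer dob = new DataOutputBuffer();
        dob.writeInt(42);
        dob.writeUTF("hello");

        // getData() returns the whole backing array, which may be larger than
        // the data written so far; getLength() bounds the valid region.
        ByteBuffer valid = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        System.out.println("valid bytes: " + valid.remaining()
                + ", backing array: " + dob.getData().length);
        dob.close();
    }
}

Every excerpt below uses this getData()/getLength() pairing so that only the bytes actually written are checksummed, copied, or wrapped.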

Usage

From source file:com.github.hdl.tensorflow.yarn.app.Client.java

License:Apache License

/**
 * Main run function for the client
 * @return true if application completed successfully
 * @throws IOException
 * @throws YarnException
 */
public boolean run() throws IOException, YarnException {

    yarnClient.start();

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());

    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    LOG.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress="
                + node.getHttpAddress() + ", nodeRackName=" + node.getRackName() + ", nodeNumContainers="
                + node.getNumContainers());
    }

    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity="
            + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
            + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount="
            + queueInfo.getChildQueues().size());

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                    + userAcl.name());
        }
    }

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    // TODO get min/max resource capabilities from RM and change memory ask if needed

    long maxMem = appResponse.getMaximumResourceCapability().getMemorySize();
    LOG.info("Max mem capability of resources in this cluster " + maxMem);

    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified="
                + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    int maxVCores = appResponse.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max virtual cores capability of resources in this cluster " + maxVCores);

    if (amVCores > maxVCores) {
        LOG.info("AM virtual cores specified above max threshold of cluster. " + "Using max value."
                + ", specified=" + amVCores + ", max=" + maxVCores);
        amVCores = maxVCores;
    }

    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();

    appContext.setApplicationName(appName);

    if (attemptFailuresValidityInterval >= 0) {
        appContext.setAttemptFailuresValidityInterval(attemptFailuresValidityInterval);
    }

    Set<String> tags = new HashSet<String>();
    appContext.setApplicationTags(tags);

    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    TFAmContainer tfAmContainer = new TFAmContainer(this);

    // Copy the application jar to the filesystem
    FileSystem fs = FileSystem.get(conf);
    String dstJarPath = copyLocalFileToDfs(fs, appId.toString(), appMasterJar, TFContainer.SERVER_JAR_PATH);
    tfAmContainer.addToLocalResources(fs, new Path(dstJarPath), TFAmContainer.APPMASTER_JAR_PATH,
            localResources);

    String jniSoDfsPath = "";
    if (jniSoFile != null && !jniSoFile.equals("")) {
        jniSoDfsPath = copyLocalFileToDfs(fs, appId.toString(), jniSoFile, "libbridge.so");
    }
    // Set the log4j properties if needed
    /*    if (!log4jPropFile.isEmpty()) {
          tfAmContainer.addToLocalResources(fs, log4jPropFile, log4jPath, appId.toString(),
              localResources, null);
        }*/

    // Set the necessary security tokens as needed
    //amContainer.setContainerTokens(containerToken);

    Map<String, String> env = tfAmContainer.setJavaEnv(conf);

    if (null != nodeLabelExpression) {
        appContext.setNodeLabelExpression(nodeLabelExpression);
    }

    StringBuilder command = tfAmContainer.makeCommands(amMemory, appMasterMainClass, containerMemory,
            containerVirtualCores, workerNum, psNum, dstJarPath, containerRetryOptions, jniSoDfsPath);

    LOG.info("AppMaster command: " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());

    ContainerLaunchContext amContainer = ContainerLaunchContext.newInstance(localResources, env, commands, null,
            null, null);

    Resource capability = Resource.newInstance(amMemory, amVCores);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application
    // Not needed in this scenario
    // amContainer.setServiceData(serviceData);

    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        // Note: Credentials class is marked as LimitedPrivate for HDFS and MapReduce
        Credentials credentials = new Credentials();
        String tokenRenewer = YarnClientUtils.getRmPrincipal(conf);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
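        // Wrap only the first getLength() bytes: getData() returns the whole backing array.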
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    // TODO - what is the range for priority? how to decide?
    Priority pri = Priority.newInstance(amPriority);
    appContext.setPriority(pri);

    appContext.setQueue(amQueue);

    LOG.info("Submitting application to ASM");

    yarnClient.submitApplication(appContext);
    handleSignal(appId);
    return monitorApplication(appId);

}

From source file:com.hadoop.compression.fourmc.FourMcOutputStream.java

License:BSD License

protected static void write4mcHeader(OutputStream out) throws IOException {
    DataOutputBuffer dob = new DataOutputBuffer();
    try {
        dob.writeInt(FourMcCodec.FOURMC_MAGIC);
        dob.writeInt(FourMcCodec.FOURMC_VERSION);
        int checksum = Lz4Compressor.xxhash32(dob.getData(), 0, 8, 0);
        dob.writeInt(checksum);
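        // Only the getLength() valid bytes of the header buffer are written to the stream.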
        out.write(dob.getData(), 0, dob.getLength());
    } finally {
        dob.close();
    }
}

From source file:com.hadoop.compression.fourmc.FourMcOutputStream.java

License:BSD License

/**
 * Before closing the stream, 4mc footer must be written.
 */
@Override
public void close() throws IOException {
    if (closed)
        return;

    finish();

    // write last block marker
    rawWriteInt(0);
    rawWriteInt(0);
    rawWriteInt(0);

    // time to write footer with block index
    int footerSize = 20 + blockOffsets.size() * 4;
    DataOutputBuffer dob = new DataOutputBuffer();
    dob.writeInt(footerSize);
    dob.writeInt(FourMcCodec.FOURMC_VERSION);

    // write block deltas
    for (int i = 0; i < blockOffsets.size(); ++i) {
        long blockDelta = i == 0 ? (blockOffsets.get(i)) : (blockOffsets.get(i) - blockOffsets.get(i - 1));
        dob.writeInt((int) blockDelta);
    }

    // tail of footer and checksum
    dob.writeInt(footerSize);
    dob.writeInt(FourMcCodec.FOURMC_MAGIC);
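    // The xxhash32 checksum covers the footer bytes written so far, i.e. the first getLength() bytes.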
    int checksum = Lz4Compressor.xxhash32(dob.getData(), 0, dob.getLength(), 0);
    dob.writeInt(checksum);
    out.write(dob.getData(), 0, dob.getLength());

    out.close();
    closed = true;

    // force release compressor and related direct buffers
    ((Lz4Compressor) compressor).releaseDirectBuffers();
    compressor = null;
}

From source file:com.hadoop.compression.fourmc.FourMzOutputStream.java

License:BSD License

protected static void write4mzHeader(OutputStream out) throws IOException {
    DataOutputBuffer dob = new DataOutputBuffer();
    try {
        dob.writeInt(FourMzCodec.FOURMZ_MAGIC);
        dob.writeInt(FourMzCodec.FOURMZ_VERSION);
        int checksum = ZstdCompressor.xxhash32(dob.getData(), 0, 8, 0);
        dob.writeInt(checksum);
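        // As in the 4mc header above, only the getLength() valid bytes are flushed.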
        out.write(dob.getData(), 0, dob.getLength());
    } finally {
        dob.close();
    }
}

From source file:com.hadoop.compression.fourmc.FourMzOutputStream.java

License:BSD License

/**
 * Before closing the stream, 4mc footer must be written.
 */
@Override
public void close() throws IOException {
    if (closed)
        return;

    finish();

    // write last block marker
    rawWriteInt(0);
    rawWriteInt(0);
    rawWriteInt(0);

    // time to write footer with block index
    int footerSize = 20 + blockOffsets.size() * 4;
    DataOutputBuffer dob = new DataOutputBuffer();
    dob.writeInt(footerSize);
    dob.writeInt(FourMzCodec.FOURMZ_VERSION);

    // write block deltas
    for (int i = 0; i < blockOffsets.size(); ++i) {
        long blockDelta = i == 0 ? (blockOffsets.get(i)) : (blockOffsets.get(i) - blockOffsets.get(i - 1));
        dob.writeInt((int) blockDelta);
    }

    // tail of footer and checksum
    dob.writeInt(footerSize);
    dob.writeInt(FourMzCodec.FOURMZ_MAGIC);
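    // Same pattern as FourMcOutputStream: checksum and write only the valid getLength() bytes.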
    int checksum = ZstdCompressor.xxhash32(dob.getData(), 0, dob.getLength(), 0);
    dob.writeInt(checksum);
    out.write(dob.getData(), 0, dob.getLength());

    out.close();
    closed = true;

    // force release compressor and related direct buffers
    ((ZstdCompressor) compressor).releaseDirectBuffers();
    compressor = null;
}

From source file:com.hadoop.compression.lzo.LzoIndex.java

License:Open Source License

/**
 * Read the index of the lzo file.
 * @param fs The index file is on this file system.
 * @param lzoFile the file whose index we are reading -- NOT the index file itself.  That is,
 * pass in filename.lzo, not filename.lzo.index, for this parameter.
 * @throws IOException
 */
public static LzoIndex readIndex(FileSystem fs, Path lzoFile) throws IOException {
    FSDataInputStream indexIn = null;
    Path indexFile = lzoFile.suffix(LZO_INDEX_SUFFIX);

    try {
        indexIn = fs.open(indexFile);
    } catch (IOException fileNotFound) {
        // return empty index, fall back to the unsplittable mode
        return new LzoIndex();
    }

    int capacity = 16 * 1024 * 8; //size for a 4GB file (with 256KB lzo blocks)
    DataOutputBuffer bytes = new DataOutputBuffer(capacity);

    // copy indexIn and close it
    IOUtils.copyBytes(indexIn, bytes, 4 * 1024, true);
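    // getLength() reflects how many index bytes were just copied in; only that valid region is wrapped below.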

    ByteBuffer bytesIn = ByteBuffer.wrap(bytes.getData(), 0, bytes.getLength());
    int blocks = bytesIn.remaining() / 8;
    LzoIndex index = new LzoIndex(blocks);

    for (int i = 0; i < blocks; i++) {
        index.set(i, bytesIn.getLong());
    }

    return index;
}

From source file:com.hadoop.compression.lzo.LzopOutputStream.java

License:Open Source License

/**
 * Write an lzop-compatible header to the OutputStream provided.
 */
protected static void writeLzopHeader(OutputStream out, LzoCompressor.CompressionStrategy strategy)
        throws IOException {
    DataOutputBuffer dob = new DataOutputBuffer();
    try {
        dob.writeShort(LzopCodec.LZOP_VERSION);
        dob.writeShort(LzoCompressor.LZO_LIBRARY_VERSION);
        dob.writeShort(LzopCodec.LZOP_COMPAT_VERSION);
        switch (strategy) {
        case LZO1X_1:
            dob.writeByte(1);
            dob.writeByte(5);
            break;
        case LZO1X_15:
            dob.writeByte(2);
            dob.writeByte(1);
            break;
        case LZO1X_999:
            dob.writeByte(3);
            dob.writeByte(9);
            break;
        default:
            throw new IOException("Incompatible lzop strategy: " + strategy);
        }
        dob.writeInt(0); // all flags 0
        dob.writeInt(0x81A4); // mode
        dob.writeInt((int) (System.currentTimeMillis() / 1000)); // mtime
        dob.writeInt(0); // gmtdiff ignored
        dob.writeByte(0); // no filename
        Adler32 headerChecksum = new Adler32();
        headerChecksum.update(dob.getData(), 0, dob.getLength());
        int hc = (int) headerChecksum.getValue();
        dob.writeInt(hc);
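        // The final write below emits dob.getLength() bytes, including the 4-byte checksum just appended.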
        out.write(LzopCodec.LZO_MAGIC);
        out.write(dob.getData(), 0, dob.getLength());
    } finally {
        dob.close();
    }
}

From source file:com.ibm.jaql.lang.expr.hadoop.ChainedMapFn.java

License:Apache License

public JsonValue eval(final Context context) throws Exception {
    JsonRecord args = baseSetup(context);

    JsonValue state = args.getRequired(new JsonString("init"));
    Function mapFn = (Function) args.getRequired(new JsonString("map"));
    JsonValue schema = args.get(new JsonString("schema"));

    JaqlUtil.enforceNonNull(mapFn);

    conf.setNumReduceTasks(0);
    conf.setMapRunnerClass(MapEval.class);

    // setup serialization
    setupSerialization(false);
    if (schema != null) {
        conf.set(SCHEMA_NAME, schema.toString());
    }

    prepareFunction("map", 2, mapFn, 0);

    InputSplit[] splits = conf.getInputFormat().getSplits(conf, conf.getNumMapTasks());

    // Override the input format to select one partition
    int targetSplits = conf.getNumMapTasks();
    String oldFormat = conf.get("mapred.input.format.class");
    conf.set(SelectSplitInputFormat.INPUT_FORMAT, oldFormat);
    // It would be nice to know how many splits we are generating to avoid 
    // using an exception to quit...
    // int numSplits = oldFormat.getSplits(conf, ??);
    // This parameter is avoided in the new API
    conf.setInputFormat(SelectSplitInputFormat.class);
    conf.setNumMapTasks(1);

    DataOutputBuffer buffer = new DataOutputBuffer();
    for (int i = 0; i < splits.length; i++) {
        // TODO: we should move the model around using hdfs files instead of serializing
        conf.setClass(SelectSplitInputFormat.SPLIT_CLASS, splits[i].getClass(), InputSplit.class);
        conf.set(SelectSplitInputFormat.STATE, state.toString());
        buffer.reset();
        splits[i].write(buffer);
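        // After reset() and write(), getLength() is the size of this split's serialized form.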
        ConfUtil.writeBinary(conf, SelectSplitInputFormat.SPLIT, buffer.getData(), 0, buffer.getLength());
        conf.setJobName("chainedMap " + (i + 1) + "/" + splits.length);

        // This causes the output file to be deleted.
        HadoopOutputAdapter outAdapter = (HadoopOutputAdapter) JaqlUtil.getAdapterStore().output
                .getAdapter(outArgs);
        outAdapter.setParallel(conf);

        try {
            JobClient.runJob(conf);
        } catch (EOFException ex) {
            // Thrown when we've processed all of the splits
            break;
        }

        // Read the new state
        final InputAdapter adapter = (InputAdapter) JaqlUtil.getAdapterStore().input.getAdapter(outArgs);
        adapter.open();
        ClosableJsonIterator reader = adapter.iter();
        state = null;
        if (reader.moveNext()) {
            state = reader.current();
        }
        reader.close();
    }

    return state;
}

From source file:com.ibm.jaql.lang.expr.index.HashtableServer.java

License:Apache License

@Override
public void run() {
    JsonValue readKey = null;
    JsonValue[] keys = new JsonValue[0];

    try {
        while (true) {
            byte command = in.readByte();
            switch (command) {
            // GET Key -> FOUND Value | NOT_FOUND
            case GET_CMD: {
                readKey = table.keySerializer.read(in, readKey);
                byte[] value = table.table.get(readKey);
                if (value == null) {
                    out.write(NOT_FOUND_CMD);
                } else {
                    out.write(FOUND_CMD);
                    out.write(value);
                }
                break;
            }
            // GETN n, [Key]*n -> OK n [FOUND Value | NOT_FOUND]*n  OK
            case GETN_CMD: {
                int n = BaseUtil.readVUInt(in);
                if (n > keys.length || // bigger array required
                        3 * n < keys.length) // array is way too big
                {
                    keys = new JsonValue[n];
                }
                for (int i = 0; i < n; i++) {
                    keys[i] = table.keySerializer.read(in, keys[i]);
                }
                out.write(OK_CMD);
                BaseUtil.writeVUInt(out, n);
                for (int i = 0; i < n; i++) {
                    byte[] value = table.table.get(keys[i]);
                    if (value == null) {
                        out.write(NOT_FOUND_CMD);
                    } else {
                        out.write(FOUND_CMD);
                        out.write(value);
                    }
                }
                out.write(OK_CMD);
                break;
            }
            // USE tableId string, age msec, lease msec
            //   -> OK lease, schema [ Key, Value ], 
            //    | BUILD 
            case USE_CMD: {
                if (table != null) {
                    HashMapCache.instance.release(table);
                    table = null;
                }
                JsonString tableId = (JsonString) defaultSerializer.read(in, null);
                long ageMS = BaseUtil.readVSLong(in);
                long leaseMS = BaseUtil.readVSLong(in);

                table = HashMapCache.instance.get(tableId.toString(), ageMS, leaseMS);
                if (table.isBuilt()) // The table is good to go
                {
                    out.write(OK_CMD);
                    BaseUtil.writeVSLong(out, 0); // TODO: implement leases
                    defaultSerializer.write(out, table.schema);
                } else // We need to build the table
                {
                    out.write(BUILD_CMD);
                    out.flush();

                    // SCHEMA schema [Key,Value] (PUT key, value)* OK -> OK
                    command = in.readByte();
                    if (command == RELEASE_CMD) {
                        // The client couldn't build the table, so just release it
                        HashMapCache.instance.release(table);
                        break;
                    }
                    if (command != SCHEMA_CMD) {
                        throw new ProtocolException("expected SCHEMA");
                    }
                    table.setSchema((JsonSchema) defaultSerializer.read(in, null));
                    DataOutputBuffer buf = new DataOutputBuffer();

                    System.err.println("building hashtable " + table.tableId);

                    while ((command = in.readByte()) == PUT_CMD) {
                        // TODO: we need to use a spilling hashtable to avoid memory overflows...
                        // TODO: we could at least pack the values more tightly 
                        buf.reset();
                        JsonValue key = table.keySerializer.read(in, null); // Be sure NOT to reuse the key here!
                        table.valueSerializer.copy(in, buf);
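                        // buf.getLength() is the number of bytes the value serializer just copied in.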
                        byte[] val = new byte[buf.getLength()];
                        System.arraycopy(buf.getData(), 0, val, 0, val.length);
                        table.table.put(key, val);
                    }
                    if (command != OK_CMD) {
                        throw new ProtocolException("expected OK");
                    }
                    HashMapCache.instance.doneBuilding(table);
                    out.write(OK_CMD);
                    System.err.println("built hashtable " + table.tableId);
                }
                break;
            }
            // RELEASE -> OK
            case RELEASE_CMD: {
                if (table != null) {
                    HashMapCache.instance.release(table);
                    table = null;
                }
                out.write(OK_CMD);
                break;
            }
            // LIST_TABLES -> (FOUND tableId built age lease schema numEntries)* OK
            // GET_ALL -> (FOUND key value)* OK
            // UNDEFINE tableId -> OK | NOT_FOUND
            // UNDEFINE_ALL -> OK
            default:
                throw new ProtocolException("invalid command code");
            }
            out.flush();
        }
    } catch (EOFException e) {
        // ignored
    } catch (Exception e) {
        // log and exit thread
        e.printStackTrace();
    } finally {
        if (table != null) {
            HashMapCache.instance.release(table);
        }
        try {
            socket.close();
        } catch (Exception e) {
            // log and exit thread
            e.printStackTrace();
        }
    }
}

From source file:com.inforefiner.hdata.ApplicationMaster.java

License:Apache License

/**
 * Main run function for the application master
 *
 * @throws YarnException
 * @throws IOException
 */
@SuppressWarnings({ "unchecked" })
public void run() throws YarnException, IOException, InterruptedException {
    LOG.info("Starting ApplicationMaster");

    // Note: Credentials, Token, UserGroupInformation, DataOutputBuffer class
    // are marked as LimitedPrivate
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    // Now remove the AM->RM token so that containers cannot access it.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    LOG.info("Executing with tokens:");
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.info(token);
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
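    // Only the first getLength() bytes of dob hold the serialized credentials.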
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    // Create appSubmitterUgi and add original tokens to it
    String appSubmitterUserName = System.getenv(ApplicationConstants.Environment.USER.name());
    appSubmitterUgi = UserGroupInformation.createRemoteUser(appSubmitterUserName);
    appSubmitterUgi.addCredentials(credentials);

    AMRMClientAsync.CallbackHandler allocListener = new RMCallbackHandler();
    amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, allocListener);
    amRMClient.init(conf);
    amRMClient.start();

    containerListener = createNMCallbackHandler();
    nmClientAsync = new NMClientAsyncImpl(containerListener);
    nmClientAsync.init(conf);
    nmClientAsync.start();

    startTimelineClient(conf);
    if (timelineClient != null) {
        publishApplicationAttemptEvent(timelineClient, appAttemptID.toString(), DSEvent.DS_APP_ATTEMPT_START,
                domainId, appSubmitterUgi);
    }

    // Setup local RPC Server to accept status requests directly from clients
    // TODO need to setup a protocol for client to be able to communicate to
    // the RPC server
    // TODO use the rpc port info to register with the RM for the client to
    // send requests to this app master

    // Register self with ResourceManager
    // This will start heartbeating to the RM
    appMasterHostname = NetUtils.getHostname();
    RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(appMasterHostname,
            appMasterRpcPort, appMasterTrackingUrl);
    // Dump out information about cluster capability as seen by the
    // resource manager
    int maxMem = response.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    int maxVCores = response.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max vcores capabililty of resources in this cluster " + maxVCores);

    // A resource ask cannot exceed the max.
    if (containerMemory > maxMem) {
        LOG.info("Container memory specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerMemory + ", max=" + maxMem);
        containerMemory = maxMem;
    }

    if (containerVirtualCores > maxVCores) {
        LOG.info("Container virtual cores specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerVirtualCores + ", max=" + maxVCores);
        containerVirtualCores = maxVCores;
    }

    List<Container> previousAMRunningContainers = response.getContainersFromPreviousAttempts();
    LOG.info(appAttemptID + " received " + previousAMRunningContainers.size()
            + " previous attempts' running containers on AM registration.");
    numAllocatedContainers.addAndGet(previousAMRunningContainers.size());

    int numTotalContainersToRequest = numTotalContainers - previousAMRunningContainers.size();
    // Setup ask for containers from RM
    // Send request for containers to RM
    // Until we get our fully allocated quota, we keep on polling RM for
    // containers
    // Keep looping until all the containers are launched and shell script
    // executed on them ( regardless of success/failure).
    for (int i = 0; i < numTotalContainersToRequest; ++i) {
        ContainerRequest containerAsk = setupContainerAskForRM();
        amRMClient.addContainerRequest(containerAsk);
    }
    numRequestedContainers.set(numTotalContainers);
}