Example usage for org.apache.hadoop.io DataOutputBuffer getLength

Introduction

This page collects example usages of the org.apache.hadoop.io.DataOutputBuffer.getLength() method.

Prototype

public int getLength() 

Document

Returns the length of the valid data currently in the buffer.
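
Before the collected examples, here is a minimal, self-contained sketch of the contract this page revolves around (the class name GetLengthDemo is illustrative, not taken from any source file below): getData() returns the buffer's whole backing array, while getLength() returns only the number of valid bytes in it, so reads must always be bounded by getLength().

import java.io.IOException;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public class GetLengthDemo {
    public static void main(String[] args) throws IOException {
        DataOutputBuffer out = new DataOutputBuffer();
        out.writeInt(42);
        out.writeUTF("hello");

        // getLength() counts the valid bytes; the backing array returned by
        // getData() is usually larger than that.
        System.out.println("valid bytes   = " + out.getLength());
        System.out.println("backing array = " + out.getData().length);

        // Bound every read by getLength(), never by getData().length.
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), 0, out.getLength());
        System.out.println(in.readInt()); // 42
        System.out.println(in.readUTF()); // hello
    }
}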

Usage

From source file:cn.ac.ncic.mastiff.io.coding.RedBlackTreeStringReader.java

License:Apache License

@Override
public byte[] ensureDecompressed() throws IOException {
    DataOutputBuffer transfer = new DataOutputBuffer();
    transfer.write(inBuf.getData(), 12, inBuf.getLength() - 12);
    DataInputBuffer dib = new DataInputBuffer();
    dib.reset(transfer.getData(), 0, transfer.getLength());
    int dictionarySize = dib.readInt();
    int length1 = dib.readInt();
    byte[] data = transfer.getData();
    transfer.close();
    // skip the two 4-byte int headers (dictionarySize and length1) before the
    // dictionary bytes
    dib.reset(data, 4 + 4, length1);
    FlexibleEncoding.ORC.StreamName name = new FlexibleEncoding.ORC.StreamName(0,
            OrcProto.Stream.Kind.DICTIONARY_DATA);
    ByteBuffer inBuf1 = ByteBuffer.allocate(length1);
    inBuf1.put(dib.getData(), 0, dib.getLength());
    inBuf1.flip();
    InStream in = InStream.create("test1", inBuf1, null, dictionarySize);
    if (in.available() > 0) {
        dictionaryBuffer = new DynamicByteArray(64, in.available());
        dictionaryBuffer.readAll(in);
        in.close();
        // read the lengths stream (OrcProto.Stream.Kind.LENGTH)
        name = new StreamName(1, OrcProto.Stream.Kind.LENGTH);
        dib.reset(data, 4 + 4 + length1, 4);
        int length2 = dib.readInt();
        dib.reset(data, 4 + 4 + length1 + 4, length2);
        ByteBuffer inBuf2 = ByteBuffer.allocate(length2);
        inBuf2.put(dib.getData(), 0, length2);
        inBuf2.flip();
        in = InStream.create("test2", inBuf2, null, dictionarySize);
        IntegerReader lenReader = createIntegerReader(OrcProto.ColumnEncoding.Kind.DIRECT_V2, in, false);
        int offset = 0;
        dictionaryOffsets = new int[dictionarySize + 1];
        for (int i = 0; i < dictionarySize; ++i) {
            dictionaryOffsets[i] = offset;
            offset += (int) lenReader.next();
        }
        dictionaryOffsets[dictionarySize] = offset;
        in.close();
        name = new FlexibleEncoding.ORC.StreamName(2, OrcProto.Stream.Kind.DATA);
        dib.reset(data, 4 + 4 + length1 + 4 + length2, 4);
        int length3 = dib.readInt();
        dib.reset(data, 4 + 4 + length1 + 4 + length2 + 4, length3);
        ByteBuffer inBuf3 = ByteBuffer.allocate(length3);
        inBuf3.put(dib.getData(), 0, length3);
        inBuf3.flip();
        in = InStream.create("test3", inBuf3, null, dictionarySize);
        reader = createIntegerReader(OrcProto.ColumnEncoding.Kind.DIRECT_V2, in, false);
    }
    inBuf.close();
    DataOutputBuffer decoding = new DataOutputBuffer();
    DataOutputBuffer offsets = new DataOutputBuffer();
    decoding.writeInt(decompressedSize);
    decoding.writeInt(numPairs);
    decoding.writeInt(startPos);
    int dataoffset = 12;
    String str;
    for (int i = 0; i < numPairs; i++) {
        str = readEachValue(null);
        decoding.writeUTF(str);
        dataoffset = decoding.size();
        offsets.writeInt(dataoffset);
    }
    System.out.println("315  offset.size() " + offsets.size() + "  decoding.szie   " + decoding.size());
    System.out.println("316  dataoffet   " + dataoffset);
    decoding.write(offsets.getData(), 0, offsets.size());
    offsets.close();
    dib.close();
    System.out.println("316   decoding   " + decoding.size() + decoding.getLength() + " decoding.getData()   "
            + decoding.getData().length);
    inBuf1.clear();
    return decoding.getData();
}
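
Note how every read in this example is bounded by a getLength() value rather than by getData().length: getData() exposes the buffer's backing array, which is typically longer than the valid payload, so slicing by the array length would read garbage bytes past the end of the data.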

From source file:cn.edu.buaa.act.petuumOnYarn.ApplicationMaster.java

License:Apache License

/**
 * Main run function for the application master
 *
 * @throws YarnException
 * @throws IOException
 */
@SuppressWarnings({ "unchecked" })
public void run() throws YarnException, IOException {
    LOG.info("Starting ApplicationMaster");

    // Note: the Credentials, Token, UserGroupInformation, and DataOutputBuffer
    // classes are marked as LimitedPrivate
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    // Now remove the AM->RM token so that containers cannot access it.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    LOG.info("Executing with tokens:");
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.info(token);
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    // Create appSubmitterUgi and add original tokens to it
    String appSubmitterUserName = System.getenv(ApplicationConstants.Environment.USER.name());
    appSubmitterUgi = UserGroupInformation.createRemoteUser(appSubmitterUserName);
    appSubmitterUgi.addCredentials(credentials);

    AMRMClientAsync.CallbackHandler allocListener = new RMCallbackHandler();
    amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, allocListener);
    amRMClient.init(conf);
    amRMClient.start();

    containerListener = createNMCallbackHandler();
    nmClientAsync = new NMClientAsyncImpl(containerListener);
    nmClientAsync.init(conf);
    nmClientAsync.start();

    // Setup local RPC Server to accept status requests directly from
    // clients
    // TODO need to setup a protocol for client to be able to communicate to
    // the RPC server
    // TODO use the rpc port info to register with the RM for the client to
    // send requests to this app master

    // Register self with ResourceManager
    // This will start heartbeating to the RM
    appMasterHostname = NetUtils.getHostname();
    RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(appMasterHostname,
            appMasterRpcPort, appMasterTrackingUrl);
    // Dump out information about cluster capability as seen by the
    // resource manager
    int maxMem = response.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    int maxVCores = response.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max vcores capabililty of resources in this cluster " + maxVCores);

    // A resource ask cannot exceed the max.
    if (containerMemory > maxMem) {
        LOG.info("Container memory specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerMemory + ", max=" + maxMem);
        containerMemory = maxMem;
    }

    if (containerVirtualCores > maxVCores) {
        LOG.info("Container virtual cores specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerVirtualCores + ", max=" + maxVCores);
        containerVirtualCores = maxVCores;
    }

    List<Container> previousAMRunningContainers = response.getContainersFromPreviousAttempts();
    LOG.info(appAttemptID + " received " + previousAMRunningContainers.size()
            + " previous attempts' running containers on AM registration.");
    numAllocatedContainers.addAndGet(previousAMRunningContainers.size());

    int numTotalContainersToRequest = numTotalContainers;
    // Setup ask for containers from RM
    // Send request for containers to RM
    // Until we get our fully allocated quota, we keep on polling RM for
    // containers
    // Keep looping until all the containers are launched and script
    // executed on them ( regardless of success/failure).
    for (int i = 0; i < numTotalContainersToRequest; ++i) {
        ContainerRequest containerAsk = setupContainerAskForRM();
        amRMClient.addContainerRequest(containerAsk);
    }
    numRequestedContainers.set(numTotalContainers);

}
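
The getLength() call to note here is allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength()): wrapping with an explicit offset and length produces a ByteBuffer view that covers only the serialized tokens, without copying them and without exposing the buffer's spare capacity to the launched containers.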

From source file:cn.edu.buaa.act.petuumOnYarn.Client.java

License:Apache License

/**
 * Main run function for the client
 * 
 * @return true if application completed successfully
 * @throws IOException
 * @throws YarnException
 */
public boolean run() throws IOException, YarnException {

    LOG.info("Running Client");
    yarnClient.start();
    String[] s = conf.getStrings(YarnConfiguration.RM_ADDRESS);
    for (String ss : s) {
        LOG.info("RM address: " + ss);
    }
    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());

    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    LOG.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress"
                + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers"
                + node.getNumContainers() + ", nodeIdHost" + node.getNodeId().getHost());
    }

    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity="
            + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
            + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount="
            + queueInfo.getChildQueues().size());

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                    + userAcl.name());
        }
    }

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    // A resource ask cannot exceed the max.
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified="
                + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    int maxVCores = appResponse.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max virtual cores capabililty of resources in this cluster " + maxVCores);

    if (amVCores > maxVCores) {
        LOG.info("AM virtual cores specified above max threshold of cluster. " + "Using max value."
                + ", specified=" + amVCores + ", max=" + maxVCores);
        amVCores = maxVCores;
    }

    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();

    appContext.setKeepContainersAcrossApplicationAttempts(keepContainers);
    appContext.setApplicationName(appName);

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of
    // the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem
    // Create a local resource to point to the destination jar path
    FileSystem fs = FileSystem.get(conf);
    YarnUtil.copyAndAddToLocalResources(fs, appMasterJar, petuumHDFSPathPrefix, appMasterJarPath,
            localResources, null);
    scriptHDFSPath = YarnUtil.copyToHDFS(fs, scriptPath, petuumHDFSPathPrefix, launchPath, null);
    // Set the log4j properties if needed
    if (!log4jPropFile.isEmpty()) {
        YarnUtil.copyAndAddToLocalResources(fs, log4jPropFile, petuumHDFSPathPrefix, log4jPath, localResources,
                null);
    }

    // Set the env variables to be setup in the env where the application
    // master will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();

    // Add AppMaster.jar location to classpath
    // At some point we should not be required to add
    // the hadoop specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$$())
            .append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH)) {
        classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./log4j.properties");

    // add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }

    env.put("CLASSPATH", classPathEnv.toString());

    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);

    // Set java executable command
    LOG.info("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    // Set class name
    vargs.add(appMasterMainClass);
    // Set params for Application Master
    vargs.add("--container_memory " + String.valueOf(containerMemory));
    vargs.add("--container_vcores " + String.valueOf(containerVirtualCores));
    vargs.add("--num_nodes " + String.valueOf(numNodes));
    vargs.add("--start_port " + String.valueOf(startPort));
    vargs.add("--priority " + String.valueOf(workerPriority));
    vargs.add("--script_hdfs_path " + scriptHDFSPath);

    for (Map.Entry<String, String> entry : shellEnv.entrySet()) {
        vargs.add("--shell_env " + entry.getKey() + "=" + entry.getValue());
    }
    if (debugFlag) {
        vargs.add("--debug");
    }

    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = ContainerLaunchContext.newInstance(localResources, env, commands, null,
            null, null);

    // Set up resource type requirements
    // For now, both memory and vcores are supported, so we set memory and
    // vcores requirements
    Resource capability = Resource.newInstance(amMemory, amVCores);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application
    // Not needed in this scenario
    // amContainer.setServiceData(serviceData);

    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        // Note: Credentials class is marked as LimitedPrivate for HDFS and
        // MapReduce
        Credentials credentials = new Credentials();
        String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Priority.newInstance(amPriority);
    appContext.setPriority(pri);

    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);

    // Submit the application to the applications manager
    // SubmitApplicationResponse submitResp =
    // applicationsManager.submitApplication(appRequest);
    // Ignore the response as either a valid response object is returned on
    // success
    // or an exception thrown to denote some form of a failure
    LOG.info("Submitting application to ASM");

    yarnClient.submitApplication(appContext);

    // Monitor the application
    currentTime = System.currentTimeMillis();
    LOG.info("submit AM in " + (currentTime - startTime) + "ms");
    return monitorApplication(appId);
}

From source file:com.asakusafw.dmdl.java.emitter.driver.WritableDriverTest.java

License:Apache License

/**
 * primitives.
 * @throws Exception if the test fails
 */
@Test
public void primitives() throws Exception {
    ModelLoader loader = generate();
    ModelWrapper object = loader.newModel("Primitives");
    assertThat(object.unwrap(), instanceOf(Writable.class));

    object.set("type_boolean", true);
    object.set("type_byte", (byte) 64);
    object.set("type_short", (short) 256);
    object.set("type_int", 100);
    object.set("type_long", 200L);
    object.set("type_float", 300.f);
    object.set("type_double", 400.d);
    object.set("type_decimal", new BigDecimal("1234.567"));
    object.set("type_text", new Text("Hello, world!"));
    object.set("type_date", new Date(2011, 3, 31));
    object.set("type_datetime", new DateTime(2011, 3, 31, 23, 30, 1));

    Writable writable = (Writable) object.unwrap();

    DataOutputBuffer output = new DataOutputBuffer();
    writable.write(output);

    Writable copy = (Writable) loader.newModel("Primitives").unwrap();
    DataInputBuffer input = new DataInputBuffer();
    input.reset(output.getData(), output.getLength());
    copy.readFields(input);

    assertThat(input.read(), is(-1));
    assertThat(writable, equalTo(copy));
}
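
This round trip uses the two-argument DataInputBuffer.reset(data, length) overload, which starts reading at offset 0; the final assertThat(input.read(), is(-1)) then verifies that readFields() consumed exactly output.getLength() bytes.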

From source file:com.asakusafw.dmdl.thundergate.driver.ModelInputDriverTest.java

License:Apache License

/**
 * input/output simple records.
 * @throws Exception if the test fails
 */
@SuppressWarnings("unchecked")
@Test
public void simple_record() throws Exception {
    ModelLoader loader = generateJava("simple_record");

    Class<?> type = loader.modelType("Simple");
    assertThat(type.isAnnotationPresent(ModelInputLocation.class), is(true));
    assertThat(type.isAnnotationPresent(ModelOutputLocation.class), is(true));

    ModelWrapper object = loader.newModel("Simple");
    DataOutputBuffer output = new DataOutputBuffer();
    try (ModelOutput<Object> modelOut = (ModelOutput<Object>) type.getAnnotation(ModelOutputLocation.class)
            .value().getDeclaredConstructor(RecordEmitter.class)
            .newInstance(new TsvEmitter(new OutputStreamWriter(output, "UTF-8")))) {
        object.set("sid", 1L);
        object.set("value", new Text("hello"));
        modelOut.write(object.unwrap());

        object.set("sid", 2L);
        object.set("value", new Text("world"));
        modelOut.write(object.unwrap());

        object.set("sid", 3L);
        object.set("value", null);
        modelOut.write(object.unwrap());
    }

    DataInputBuffer input = new DataInputBuffer();
    input.reset(output.getData(), output.getLength());
    try (ModelInput<Object> modelIn = (ModelInput<Object>) type.getAnnotation(ModelInputLocation.class).value()
            .getDeclaredConstructor(RecordParser.class)
            .newInstance(new TsvParser(new InputStreamReader(input, "UTF-8")))) {
        ModelWrapper copy = loader.newModel("Simple");

        modelIn.readTo(copy.unwrap());
        assertThat(copy.get("sid"), is((Object) 1L));
        assertThat(copy.get("value"), is((Object) new Text("hello")));

        modelIn.readTo(copy.unwrap());
        assertThat(copy.get("sid"), is((Object) 2L));
        assertThat(copy.get("value"), is((Object) new Text("world")));

        modelIn.readTo(copy.unwrap());
        assertThat(copy.get("sid"), is((Object) 3L));
        assertThat(copy.getOption("value").isNull(), is(true));

        assertThat(input.read(), is(-1));
    }
}

From source file:com.asakusafw.dmdl.thundergate.driver.ModelInputDriverTest.java

License:Apache License

/**
 * all primitive types.
 * @throws Exception if the test fails
 */
@SuppressWarnings("unchecked")
@Test
public void primitives() throws Exception {
    ModelLoader loader = generateJava("primitives");

    Class<?> type = loader.modelType("Primitives");
    assertThat(type.isAnnotationPresent(ModelInputLocation.class), is(true));
    assertThat(type.isAnnotationPresent(ModelOutputLocation.class), is(true));

    ModelWrapper object = loader.newModel("Primitives");

    object.set("type_boolean", true);
    object.set("type_byte", (byte) 64);
    object.set("type_short", (short) 256);
    object.set("type_int", 100);
    object.set("type_long", 200L);
    object.set("type_float", 300.f);
    object.set("type_double", 400.d);
    object.set("type_decimal", new BigDecimal("1234.567"));
    object.set("type_text", new Text("Hello, world!"));
    object.set("type_date", new Date(2011, 3, 31));
    object.set("type_datetime", new DateTime(2011, 3, 31, 23, 30, 1));

    DataOutputBuffer output = new DataOutputBuffer();
    try (ModelOutput<Object> modelOut = (ModelOutput<Object>) type.getAnnotation(ModelOutputLocation.class)
            .value().getDeclaredConstructor(RecordEmitter.class)
            .newInstance(new TsvEmitter(new OutputStreamWriter(output, "UTF-8")))) {
        modelOut.write(object.unwrap());
        modelOut.write(object.unwrap());
        modelOut.write(object.unwrap());
    }

    DataInputBuffer input = new DataInputBuffer();
    input.reset(output.getData(), output.getLength());
    try (ModelInput<Object> modelIn = (ModelInput<Object>) type.getAnnotation(ModelInputLocation.class).value()
            .getDeclaredConstructor(RecordParser.class)
            .newInstance(new TsvParser(new InputStreamReader(input, "UTF-8")))) {
        ModelWrapper copy = loader.newModel("Primitives");
        modelIn.readTo(copy.unwrap());
        assertThat(object.unwrap(), equalTo(copy.unwrap()));
        assertThat(input.read(), is(-1));
    }
}

From source file:com.asakusafw.runtime.io.util.WritableTestRoot.java

License:Apache License

static byte[] ser(Writable writable) throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    writable.write(out);
    byte[] results = Arrays.copyOfRange(out.getData(), 0, out.getLength());
    return results;
}

From source file:com.asakusafw.runtime.io.util.WritableTestRoot.java

License:Apache License

static byte[] ser(WritableRawComparable writable) throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    writable.write(out);
    assertThat(writable.getSizeInBytes(out.getData(), 0), is(out.getLength()));
    byte[] results = Arrays.copyOfRange(out.getData(), 0, out.getLength());
    return results;
}

From source file:com.asakusafw.runtime.stage.collector.SortableSlotTest.java

License:Apache License

static byte[] write(Writable writable) {
    DataOutputBuffer buffer = new DataOutputBuffer();
    buffer.reset();
    try {
        writable.write(buffer);
    } catch (IOException e) {
        throw new AssertionError(e);
    }
    return Arrays.copyOf(buffer.getData(), buffer.getLength());
}
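
Arrays.copyOf(buffer.getData(), buffer.getLength()) here, like the Arrays.copyOfRange calls in the ser() helpers above, snapshots only the valid bytes. The copy matters because getData() returns the buffer's internal array, which is overwritten in place once the buffer is reset and reused.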

From source file:com.bigjob.ApplicationMaster.java

License:Apache License

/**
 * Main run function for the application master
 *
 * @throws YarnException
 * @throws IOException
 */
@SuppressWarnings({ "unchecked" })
public boolean run() throws YarnException, IOException {
    LOG.info("Starting ApplicationMaster");

    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    // Now remove the AM->RM token so that containers cannot access it.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    AMRMClientAsync.CallbackHandler allocListener = new RMCallbackHandler();
    amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, allocListener);
    amRMClient.init(conf);
    amRMClient.start();

    containerListener = createNMCallbackHandler();
    nmClientAsync = new NMClientAsyncImpl(containerListener);
    nmClientAsync.init(conf);
    nmClientAsync.start();

    // Setup local RPC Server to accept status requests directly from clients
    // TODO need to setup a protocol for client to be able to communicate to
    // the RPC server
    // TODO use the rpc port info to register with the RM for the client to
    // send requests to this app master

    // Register self with ResourceManager
    // This will start heartbeating to the RM
    appMasterHostname = NetUtils.getHostname();
    RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(appMasterHostname,
            appMasterRpcPort, appMasterTrackingUrl);
    // Dump out information about cluster capability as seen by the
    // resource manager
    int maxMem = response.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    int maxVCores = response.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max vcores capabililty of resources in this cluster " + maxVCores);

    // A resource ask cannot exceed the max.
    if (containerMemory > maxMem) {
        LOG.info("Container memory specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerMemory + ", max=" + maxMem);
        containerMemory = maxMem;
    }

    if (containerVirtualCores > maxVCores) {
        LOG.info("Container virtual cores specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerVirtualCores + ", max=" + maxVCores);
        containerVirtualCores = maxVCores;
    }

    // Setup ask for containers from RM
    // Send request for containers to RM
    // Until we get our fully allocated quota, we keep on polling RM for
    // containers
    // Keep looping until all the containers are launched and shell script
    // executed on them ( regardless of success/failure).
    for (int i = 0; i < numTotalContainers; ++i) {
        ContainerRequest containerAsk = setupContainerAskForRM();
        amRMClient.addContainerRequest(containerAsk);
    }
    numRequestedContainers.set(numTotalContainers);

    while (!done && (numCompletedContainers.get() != numTotalContainers)) {
        try {
            Thread.sleep(200);
        } catch (InterruptedException ex) {
            // ignore the interrupt and keep polling for completed containers
        }
    }
    finish();

    return success;
}