Example usage for org.apache.hadoop.io DataOutputBuffer getLength

List of usage examples for org.apache.hadoop.io DataOutputBuffer getLength

Introduction

This page collects usage examples for org.apache.hadoop.io DataOutputBuffer getLength.

Prototype

public int getLength() 

Document

Returns the length of the valid data currently in the buffer.
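
A minimal self-contained sketch (our own, not taken from the sources below) of the basic contract: getLength() reports the number of valid bytes written so far, while the backing array returned by getData() may be larger.

import org.apache.hadoop.io.DataOutputBuffer;

public class GetLengthSketch {
    public static void main(String[] args) throws Exception {
        DataOutputBuffer buf = new DataOutputBuffer();
        buf.write("hello".getBytes("UTF-8"));
        System.out.println(buf.getLength()); // 5: bytes of valid data
        // The backing array is at least as large as the valid region.
        System.out.println(buf.getData().length >= buf.getLength()); // true
        buf.reset(); // discards the valid data but keeps the capacity
        System.out.println(buf.getLength()); // 0
    }
}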

Usage

From source file:CompressionTest.java

License:Open Source License

public static void main(String[] args) throws IOException {
    DataOutputBuffer chunksBuffer = new DataOutputBuffer();
    DataOutputBuffer metasBuffer = new DataOutputBuffer();

    byte[] data = "alskjdflkajsldfkja;s".getBytes();
    chunksBuffer.write(data);
    System.out.println(chunksBuffer.size());
    System.out.println(chunksBuffer.getLength());
    chunksBuffer.reset();
    chunksBuffer.write(data, 0, 10);
    System.out.println(chunksBuffer.size());
    System.out.println(chunksBuffer.getLength());

}
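
A note on the test above: DataOutputBuffer extends java.io.DataOutputStream, so size() is the stream's written-bytes counter while getLength() reads the internal buffer. Assuming a Hadoop release whose reset() also clears the inherited counter, both pairs of prints should agree (20 for the full string, then 10 after the partial rewrite); comparing the two is apparently the point of the test.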

From source file:StreamWikiDumpInputFormat.java

License:Apache License

private static byte[] writeInSequence(DataOutputBuffer[] array) {
    int size = 0;
    for (DataOutputBuffer buf : array) {
        size += buf.getLength();
    }
    byte[] dest = new byte[size];
    int n = 0;
    for (DataOutputBuffer buf : array) {
        System.arraycopy(buf.getData(), 0, dest, n, buf.getLength());
        n += buf.getLength();
    }
    return dest;
}
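
A hedged usage sketch of the same concatenation pattern (hypothetical buffer contents and class name): since getData() exposes the whole backing array, every copy is bounded by getLength() so that spare capacity is never copied.

import org.apache.hadoop.io.DataOutputBuffer;

public class ConcatSketch {
    public static void main(String[] args) throws Exception {
        DataOutputBuffer a = new DataOutputBuffer();
        DataOutputBuffer b = new DataOutputBuffer();
        a.write("head".getBytes("UTF-8"));
        b.write("tail".getBytes("UTF-8"));
        byte[] dest = new byte[a.getLength() + b.getLength()];
        System.arraycopy(a.getData(), 0, dest, 0, a.getLength());
        System.arraycopy(b.getData(), 0, dest, a.getLength(), b.getLength());
        System.out.println(new String(dest, "UTF-8")); // prints "headtail"
    }
}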

From source file:StreamWikiDumpInputFormat.java

License:Apache License

private static void offsetWrite(DataOutputBuffer to, int fromOffset, DataOutputBuffer from) throws IOException {
    if (from.getLength() <= fromOffset || fromOffset < 0) {
        throw new IllegalArgumentException(
                String.format("invalid offset: offset=%d length=%d", fromOffset, from.getLength()));
    }
    byte[] bytes = new byte[from.getLength() - fromOffset];
    System.arraycopy(from.getData(), fromOffset, bytes, 0, bytes.length);
    to.reset();
    to.write(bytes);
}
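
A possible simplification, sketched under the assumption that DataOutputBuffer inherits write(byte[], int, int) from DataOutputStream (method name is ours): the intermediate array can be skipped by writing the tail of the source buffer directly.

private static void offsetWriteDirect(DataOutputBuffer to, int fromOffset, DataOutputBuffer from)
        throws IOException {
    if (fromOffset < 0 || fromOffset >= from.getLength()) {
        throw new IllegalArgumentException(
                String.format("invalid offset: offset=%d length=%d", fromOffset, from.getLength()));
    }
    to.reset();
    // Copy straight out of the source's backing array, bounded by getLength().
    to.write(from.getData(), fromOffset, from.getLength() - fromOffset);
}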

From source file:StreamWikiDumpInputFormat.java

License:Apache License

private static int findIndex(byte[] match, DataOutputBuffer from_) throws IOException {
    // TODO: faster string pattern match (KMP etc)
    int m = 0;
    int i;
    byte[] from = from_.getData();
    for (i = 0; i < from_.getLength(); ++i) {
        if (from[i] == match[m]) {
            ++m;
        } else {
            m = 0;
        }
        if (m == match.length) {
            return i - m + 1;
        }
    }
    // throw new IllegalArgumentException("pattern not found: " + new
    // String(match) + " in " + new String(from));
    System.err.println(
            "pattern not found: " + new String(match) + " in " + new String(from, 0, from_.getLength()));// !
    return -1;
}
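
Note that resetting m to 0 on a mismatch discards the current byte without retrying it against the start of the pattern, so matches that overlap a partial match (e.g. "aab" in "aaab") can be missed; the TODO above hints at KMP as the proper fix. A hedged, still-quadratic but correct alternative (method name is ours):

private static int findIndexSafe(byte[] match, DataOutputBuffer from_) {
    byte[] from = from_.getData();
    int n = from_.getLength(); // search only the valid prefix of the backing array
    for (int i = 0; i + match.length <= n; i++) {
        int j = 0;
        while (j < match.length && from[i + j] == match[j]) {
            j++;
        }
        if (j == match.length) {
            return i; // first index of a full match
        }
    }
    return -1; // pattern not found
}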

From source file:ApplicationMaster.java

License:Apache License

/**
 * Main run function for the application master
 *
 * @throws YarnException
 * @throws IOException
 */
@SuppressWarnings({ "unchecked" })
public void run() throws YarnException, IOException {
    LOG.info("Starting ApplicationMaster");
    try {
        publishApplicationAttemptEvent(timelineClient, appAttemptID.toString(), DSEvent.DS_APP_ATTEMPT_START);
    } catch (Exception e) {
        LOG.error("App Attempt start event coud not be pulished for " + appAttemptID.toString(), e);
    }

    // Note: Credentials, Token, UserGroupInformation, DataOutputBuffer class
    // are marked as LimitedPrivate
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    // Now remove the AM->RM token so that containers cannot access it.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    LOG.info("Executing with tokens:");
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.info(token);
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    // Create appSubmitterUgi and add original tokens to it
    String appSubmitterUserName = System.getenv(ApplicationConstants.Environment.USER.name());
    appSubmitterUgi = UserGroupInformation.createRemoteUser(appSubmitterUserName);
    appSubmitterUgi.addCredentials(credentials);

    AMRMClientAsync.CallbackHandler allocListener = new RMCallbackHandler();
    amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, allocListener);
    amRMClient.init(conf);
    amRMClient.start();

    containerListener = createNMCallbackHandler();
    nmClientAsync = new NMClientAsyncImpl(containerListener);
    nmClientAsync.init(conf);
    nmClientAsync.start();

    // Setup local RPC Server to accept status requests directly from clients
    // TODO need to setup a protocol for client to be able to communicate to
    // the RPC server
    // TODO use the rpc port info to register with the RM for the client to
    // send requests to this app master

    // Register self with ResourceManager
    // This will start heartbeating to the RM
    appMasterHostname = NetUtils.getHostname();
    RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(appMasterHostname,
            appMasterRpcPort, appMasterTrackingUrl);
    // Dump out information about cluster capability as seen by the
    // resource manager
    int maxMem = response.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    int maxVCores = response.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max vcores capabililty of resources in this cluster " + maxVCores);

    // A resource ask cannot exceed the max.
    if (containerMemory > maxMem) {
        LOG.info("Container memory specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerMemory + ", max=" + maxMem);
        containerMemory = maxMem;
    }

    if (containerVirtualCores > maxVCores) {
        LOG.info("Container virtual cores specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerVirtualCores + ", max=" + maxVCores);
        containerVirtualCores = maxVCores;
    }

    List<Container> previousAMRunningContainers = response.getContainersFromPreviousAttempts();
    LOG.info(appAttemptID + " received " + previousAMRunningContainers.size()
            + " previous attempts' running containers on AM registration.");
    numAllocatedContainers.addAndGet(previousAMRunningContainers.size());

    int numTotalContainersToRequest = numTotalContainers - previousAMRunningContainers.size();
    // Setup ask for containers from RM
    // Send request for containers to RM
    // Until we get our fully allocated quota, we keep on polling RM for
    // containers
    // Keep looping until all the containers are launched and shell script
    // executed on them ( regardless of success/failure).
    for (int i = 0; i < numTotalContainersToRequest; ++i) {
        ContainerRequest containerAsk = setupContainerAskForRM();
        amRMClient.addContainerRequest(containerAsk);
    }
    numRequestedContainers.set(numTotalContainers);
    try {
        publishApplicationAttemptEvent(timelineClient, appAttemptID.toString(), DSEvent.DS_APP_ATTEMPT_END);
    } catch (Exception e) {
        LOG.error("App Attempt start event coud not be pulished for " + appAttemptID.toString(), e);
    }
}
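
The token-handling preamble in run() is a recurring YARN idiom; a distilled, hedged sketch of just the serialization step (class and method names are ours):

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;

public final class TokenBufferSketch {
    static ByteBuffer serializeTokens() throws IOException {
        Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        // getData() exposes the whole backing array; getLength() restricts the
        // wrap to the bytes actually written.
        return ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
    }
}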

From source file:TestCodec.java

License:Open Source License

public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    DefaultCodec codec = new DefaultCodec();
    codec.setConf(conf);
    DataOutputBuffer chunksWriteBuffer = new DataOutputBuffer();
    CompressionOutputStream compressionOutputStream = codec.createOutputStream(chunksWriteBuffer);

    DataInputBuffer chunkReadBuffer = new DataInputBuffer();
    CompressionInputStream compressionInputStream = codec.createInputStream(chunkReadBuffer);
    String str = "laksjldfkjalskdjfl;aksjdflkajsldkfjalksjdflkajlsdkfjlaksjdflka";
    compressionOutputStream.write(str.getBytes());
    compressionOutputStream.finish();
    byte[] data = chunksWriteBuffer.getData();
    System.out.println(str.length());
    System.out.println(chunksWriteBuffer.getLength());

    chunkReadBuffer.reset(data, chunksWriteBuffer.getLength());

    DataOutputBuffer dob = new DataOutputBuffer();
    IOUtils.copyBytes(compressionInputStream, dob, conf);
    // Print the decompressed bytes as text; printing the byte[] directly
    // would only show the array reference.
    System.out.println(new String(dob.getData(), 0, dob.getLength()));

}
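
Two details worth noting above: chunkReadBuffer.reset(data, chunksWriteBuffer.getLength()) hands the DataInputBuffer exactly the valid region of the compressed output, and the IOUtils.copyBytes call then decompresses through compressionInputStream, so dob should end up holding the original string.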

From source file:alluxio.yarn.ApplicationMaster.java

License:Apache License

/**
 * Starts the application master.
 *
 * @throws IOException if registering the application master fails due to an IO error
 * @throws YarnException if registering the application master fails due to an internal Yarn error
 */
public void start() throws IOException, YarnException {
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
        DataOutputBuffer credentialsBuffer = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(credentialsBuffer);
        // Now remove the AM -> RM token so that containers cannot access it.
        Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
        while (iter.hasNext()) {
            Token<?> token = iter.next();
            if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
                iter.remove();
            }
        }
        mAllTokens = ByteBuffer.wrap(credentialsBuffer.getData(), 0, credentialsBuffer.getLength());
    }
    mNMClient.init(mYarnConf);
    mNMClient.start();

    mRMClient.init(mYarnConf);
    mRMClient.start();

    mYarnClient.init(mYarnConf);
    mYarnClient.start();

    // Register with ResourceManager
    String hostname = NetworkAddressUtils.getLocalHostName();
    mRMClient.registerApplicationMaster(hostname, 0 /* port */, "" /* tracking url */);
    LOG.info("ApplicationMaster registered");
}

From source file:alluxio.yarn.Client.java

License:Apache License

private void setupContainerLaunchContext() throws IOException, YarnException {
    Map<String, String> applicationMasterArgs = ImmutableMap.<String, String>of("-num_workers",
            Integer.toString(mNumWorkers), "-master_address", mMasterAddress, "-resource_path", mResourcePath);

    final String amCommand = YarnUtils.buildCommand(YarnContainerType.APPLICATION_MASTER,
            applicationMasterArgs);

    System.out.println("ApplicationMaster command: " + amCommand);
    mAmContainer.setCommands(Collections.singletonList(amCommand));

    // Setup local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    localResources.put("alluxio.tar.gz",
            YarnUtils.createLocalResourceOfFile(mYarnConf, mResourcePath + "/alluxio.tar.gz"));
    localResources.put("alluxio-yarn-setup.sh",
            YarnUtils.createLocalResourceOfFile(mYarnConf, mResourcePath + "/alluxio-yarn-setup.sh"));
    localResources.put("alluxio.jar",
            YarnUtils.createLocalResourceOfFile(mYarnConf, mResourcePath + "/alluxio.jar"));
    mAmContainer.setLocalResources(localResources);

    // Setup CLASSPATH for ApplicationMaster
    Map<String, String> appMasterEnv = new HashMap<String, String>();
    setupAppMasterEnv(appMasterEnv);
    mAmContainer.setEnvironment(appMasterEnv);

    // Set up security tokens for launching our ApplicationMaster container.
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = mYarnConf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }
        org.apache.hadoop.fs.FileSystem fs = org.apache.hadoop.fs.FileSystem.get(mYarnConf);
        // getting tokens for the default file-system.
        final Token<?>[] tokens = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        // getting yarn resource manager token
        org.apache.hadoop.conf.Configuration config = mYarnClient.getConfig();
        Token<TokenIdentifier> token = ConverterUtils.convertFromYarn(
                mYarnClient.getRMDelegationToken(new org.apache.hadoop.io.Text(tokenRenewer)),
                ClientRMProxy.getRMDelegationTokenService(config));
        LOG.info("Added RM delegation token: " + token);
        credentials.addToken(token.getService(), token);

        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer buffer = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        mAmContainer.setTokens(buffer);
    }
}
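
Unlike the application-master examples above, this client-side variant first collects HDFS and ResourceManager delegation tokens into the Credentials object; the final serialization is the same getData()/getLength() pairing.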

From source file:cn.ac.ncic.mastiff.io.coding.DeltaBinaryPackingStringReader.java

License:Apache License

public byte[] CompressensureDecompressed() throws IOException {
    System.out.println("280    inBuf.length   " + inBuf.getLength());
    FlexibleEncoding.Parquet.DeltaByteArrayReader reader = new FlexibleEncoding.Parquet.DeltaByteArrayReader();
    DataOutputBuffer transfer = new DataOutputBuffer();
    //   transfer.write(inBuf.getData(), 12, inBuf.getLength()-12);
    transfer.write(inBuf.getData(), 0, inBuf.getLength());
    byte[] data = transfer.getData();
    System.out.println("286   byte [] data  " + data.length + "  numPairs  " + numPairs);
    inBuf.close();
    Binary[] bin = new Utils().readData(reader, data, numPairs);
    System.out.println("2998   Binary[] bin   " + bin.length);
    DataOutputBuffer decoding = new DataOutputBuffer();
    DataOutputBuffer offset = new DataOutputBuffer();
    decoding.writeInt(decompressedSize);
    decoding.writeInt(numPairs);
    decoding.writeInt(startPos);
    int dataoffset = 12;
    // int dataoffset=0 ;
    String str;
    for (int i = 0; i < numPairs; i++) {
        str = bin[i].toStringUsingUTF8();
        decoding.writeUTF(str);
        //      if(i<5){
        //        System.out.println("304  bin[i]  "+str+"  decoding    "+ decoding.size());
        //      }
        dataoffset = decoding.size();
        //  decoding.writeBytes(str);
        offset.writeInt(dataoffset);
    }

    //    System.out.println("315  offset.size() "+offset.getData().length+"  decoding.szie   "+decoding.size());
    //    System.out.println("316  dataoffet   "+dataoffset);
    //  System.out.println("number  of Pairs =  "+ceshi);
    decoding.write(offset.getData(), 0, offset.size());
    inBuf.close();
    offset.close();
    System.out.println("316   decoding   " + decoding.size() + "   " + decoding.getLength()
            + " decoding.getData()   " + decoding.getData().length);
    //   inBuf.reset(decoding.getData(), 0, decoding.size());
    System.out.println(" 280  " + inBuf.getLength() + "   " + inBuf.getData().length);
    return inBuf.getData();
}
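
Note that the inBuf.reset(...) call near the end is commented out, so the method returns the original input bytes rather than the re-encoded contents of decoding; the RedBlackTreeStringReader variant below returns decoding.getData() instead, which is likely what was intended here as well.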

From source file:cn.ac.ncic.mastiff.io.coding.RedBlackTreeStringReader.java

License:Apache License

public byte[] CompressensureDecompressed() throws IOException {
    FlexibleEncoding.Parquet.DeltaByteArrayReader reader = new FlexibleEncoding.Parquet.DeltaByteArrayReader();
    DataOutputBuffer transfer = new DataOutputBuffer();
    //   transfer.write(inBuf.getData(), 12, inBuf.getLength()-12);
    transfer.write(inBuf.getData(), 0, inBuf.getLength());
    byte[] data = transfer.getData();
    System.out.println("286   byte [] data  " + data.length + "  numPairs  " + numPairs);
    inBuf.close();
    Binary[] bin = new Utils().readData(reader, data, numPairs);
    System.out.println("2998   Binary[] bin   " + bin.length);
    // bb = ByteBuffer.wrap(page, 0, page.length);
    //  int  count=0 ;
    DataOutputBuffer decoding = new DataOutputBuffer();
    DataOutputBuffer offset = new DataOutputBuffer();
    decoding.writeInt(decompressedSize);
    decoding.writeInt(numPairs);
    decoding.writeInt(startPos);
    int dataoffset = 12;
    // int dataoffset=0 ;
    String str;
    for (int i = 0; i < numPairs; i++) {
        str = bin[i].toStringUsingUTF8();
        decoding.writeUTF(str);
        //      if(i<5){
        //        System.out.println("304  bin[i]  "+str+"  decoding    "+ decoding.size());
        //      }
        dataoffset = decoding.size();
        offset.writeInt(dataoffset);
    }
    System.out.println("315  offset.size() " + offset.size() + "  decoding.szie   " + decoding.size());
    System.out.println("316  dataoffet   " + dataoffset);
    //  System.out.println("number  of Pairs =  "+ceshi);
    decoding.write(offset.getData(), 0, offset.size());
    inBuf.close();
    offset.close();
    System.out.println("316   decoding   " + decoding.size() + "   " + decoding.getLength()
            + " decoding.getData()   " + decoding.getData().length);
    return decoding.getData();
}