Example usage for org.apache.hadoop.io DataOutputBuffer getLength

Introduction

On this page you can find example usages of org.apache.hadoop.io.DataOutputBuffer.getLength(), collected from open-source projects.

Prototype

public int getLength() 

Document

Returns the length of the valid data currently in the buffer.
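
Before the project excerpts below, here is a minimal, self-contained sketch of the method's contract (illustrative only; DataOutputBuffer, getData(), and getLength() are the real Hadoop APIs, while the surrounding class is invented for the example): getLength() counts only the valid bytes written so far, whereas getData() returns the whole backing array, which may be larger. Handing a consumer the triple (getData(), 0, getLength()) is therefore the canonical way to pass the buffer on, as every example below does.

import java.io.IOException;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public class GetLengthExample {
    public static void main(String[] args) throws IOException {
        DataOutputBuffer out = new DataOutputBuffer();
        out.writeInt(42); // writes 4 bytes
        out.writeLong(7L); // writes 8 bytes

        // Counts valid bytes only, not the capacity of the backing array.
        System.out.println("valid bytes: " + out.getLength()); // 12

        // Canonical companion pattern: pass (data, 0, length) to a reader.
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), 0, out.getLength());
        System.out.println(in.readInt()); // 42
        System.out.println(in.readLong()); // 7
    }
}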

Usage

From source file: crunch.MaxTemperature.java

License: Apache License

private void check(String options, Text l1, Text l2, int c) throws IOException {
        JobConf conf = new JobConf();
        conf.setKeyFieldComparatorOptions(options);
        KeyFieldBasedComparator comp = new KeyFieldBasedComparator();
        comp.configure(conf);

        DataOutputBuffer out1 = serialize(l1);
        DataOutputBuffer out2 = serialize(l2);
        assertThat(options, comp.compare(out1.getData(), 0, out1.getLength(), out2.getData(), 0, out2.getLength()),
                is(c));
    }
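
Here serialize(l1) is a helper defined elsewhere in the test that writes the Text key into a fresh DataOutputBuffer; compare() then works directly on the raw serialized bytes, with getLength() bounding the valid span of each buffer.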

From source file: edu.cmu.graphchi.toolkits.collaborative_filtering.yarn.ApplicationMaster.java

License: Apache License

/**
 * Main run function for the application master
 *
 * @throws YarnException
 * @throws IOException
 */
@SuppressWarnings({ "unchecked" })
public boolean run() throws YarnException, IOException {
    yarnClient.start();
    LOG.info("Starting ApplicationMaster");

    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    // Now remove the AM->RM token so that containers cannot access it.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    amRMClient = AMRMClient.createAMRMClient();
    amRMClient.init(conf);
    amRMClient.start();

    containerListener = createNMCallbackHandler();
    nmClientAsync = new NMClientAsyncImpl(containerListener);
    nmClientAsync.init(conf);
    nmClientAsync.start();

    // Register self with ResourceManager
    // This will start heartbeating to the RM
    appMasterHostname = NetUtils.getHostname();
    RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(appMasterHostname,
            appMasterRpcPort, appMasterTrackingUrl);

    //TODO: Figure out how to do this.
    List<NodeReport> reports = this.yarnClient.getNodeReports();
    LOG.info("Cluster Status");
    List<Resource> availableResources = new ArrayList<Resource>();
    for (NodeReport nr : reports) {
        LOG.info("    NodeId: " + nr.getNodeId() + " Capabilities " + nr.getCapability() + " Used Resources "
                + nr.getUsed());
        int availableMem = nr.getCapability().getMemory() - nr.getUsed().getMemory();
        int availableVCores = nr.getCapability().getVirtualCores() - nr.getUsed().getVirtualCores();
        Resource resource = Resource.newInstance(availableMem, availableVCores);

        availableResources.add(resource);
    }

    /* Based on the available resources, the scheduler should decide the best allocation
    of recommenders to resources and return a list of resources that should be requested
    from the ResourceManager. */
    DataSetDescription datasetDesc = new DataSetDescription(this.setup.dataMetadataFile);
    List<RecommenderPool> recommenderPools = RecommenderScheduler.splitRecommenderPool(availableResources,
            recommenders, datasetDesc, this.setup.nShards);

    for (RecommenderPool res : recommenderPools) {
        ContainerRequest containerAsk = setupContainerAskForRM(res.getTotalMemory(), requestPriority);
        LOG.info("CONTAINER ASK: " + containerAsk);
        amRMClient.addContainerRequest(containerAsk);
    }

    float progress = 0;

    List<RecommenderPool> pendingPools = new ArrayList<RecommenderPool>(recommenderPools);

    this.numTotalContainers = recommenderPools.size();
    this.numCompletedContainers.set(0);
    this.numAllocatedContainers.set(0);

    while (numCompletedContainers.get() != numTotalContainers) {
        try {
            Thread.sleep(200);

            AllocateResponse allocResp = amRMClient.allocate(progress);
            List<Container> newContainers = allocResp.getAllocatedContainers();
            List<ContainerStatus> completedContainers = allocResp.getCompletedContainersStatuses();

            if (this.numAllocatedContainers.get() >= this.numTotalContainers && pendingPools.size() != 0) {
                //Ask for new containers for pending pools
                LOG.warn("The number of allocated containers has exceeded number of total containers, but "
                        + "the pending pool size is still not 0. Asking for new containers from RM");
                for (RecommenderPool res : pendingPools) {
                    ContainerRequest containerAsk = setupContainerAskForRM(res.getTotalMemory(),
                            requestPriority);
                    LOG.info("NEW CONTAINER ASK: " + containerAsk);
                    amRMClient.addContainerRequest(containerAsk);
                }

            }

            if (newContainers.size() > 0) {
                LOG.info("Allocated " + newContainers.size() + " new containers");
                numAllocatedContainers.addAndGet(newContainers.size());
                for (Container container : newContainers) {
                    //Find matching recommender pool from pendingRecommender pools.
                    RecommenderPool pool = null;
                    for (RecommenderPool p : pendingPools) {
                        if (p.getTotalMemory() == container.getResource().getMemory()) {
                            pool = p;
                            break;
                        }
                    }
                    if (pool == null) {
                        LOG.warn("No Takers for Container " + container + " Releasing container");
                        amRMClient.releaseAssignedContainer(container.getId());
                    } else {
                        startContainer(container, pool);
                        //This pool has now got a container. Remove it from pending pools
                        pendingPools.remove(pool);
                    }
                }

            }

            onContainersCompleted(completedContainers);

        } catch (InterruptedException ex) {
            // Interrupted while sleeping; keep polling the RM.
        }
    }
    finish();

    return success;
}

From source file: eu.stratosphere.yarn.ApplicationMaster.java

License: Apache License

private void run() throws Exception {
    //Utils.logFilesInCurrentDirectory(LOG);
    // Initialize clients to ResourceManager and NodeManagers
    Configuration conf = Utils.initializeYarnConfiguration();
    FileSystem fs = FileSystem.get(conf);
    Map<String, String> envs = System.getenv();
    final String currDir = envs.get(Environment.PWD.key());
    final String logDirs = envs.get(Environment.LOG_DIRS.key());
    final String ownHostname = envs.get(Environment.NM_HOST.key());
    final String appId = envs.get(Client.ENV_APP_ID);
    final String clientHomeDir = envs.get(Client.ENV_CLIENT_HOME_DIR);
    final String applicationMasterHost = envs.get(Environment.NM_HOST.key());
    final String remoteStratosphereJarPath = envs.get(Client.STRATOSPHERE_JAR_PATH);
    final String shipListString = envs.get(Client.ENV_CLIENT_SHIP_FILES);
    final String yarnClientUsername = envs.get(Client.ENV_CLIENT_USERNAME);
    final int taskManagerCount = Integer.valueOf(envs.get(Client.ENV_TM_COUNT));
    final int memoryPerTaskManager = Integer.valueOf(envs.get(Client.ENV_TM_MEMORY));
    final int coresPerTaskManager = Integer.valueOf(envs.get(Client.ENV_TM_CORES));

    int heapLimit = Utils.calculateHeapSize(memoryPerTaskManager);

    if (currDir == null) {
        throw new RuntimeException("Current directory unknown");
    }
    if (ownHostname == null) {
        throw new RuntimeException("Own hostname (" + Environment.NM_HOST + ") not set.");
    }
    LOG.info("Working directory " + currDir);

    // load Stratosphere configuration.
    Utils.getStratosphereConfiguration(currDir);

    final String localWebInterfaceDir = currDir + "/resources/"
            + ConfigConstants.DEFAULT_JOB_MANAGER_WEB_PATH_NAME;

    // Update yaml conf -> set jobManager address to this machine's address.
    FileInputStream fis = new FileInputStream(currDir + "/stratosphere-conf.yaml");
    BufferedReader br = new BufferedReader(new InputStreamReader(fis));
    Writer output = new BufferedWriter(new FileWriter(currDir + "/stratosphere-conf-modified.yaml"));
    String line;
    while ((line = br.readLine()) != null) {
        if (line.contains(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY)) {
            output.append(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY + ": " + ownHostname + "\n");
        } else if (line.contains(ConfigConstants.JOB_MANAGER_WEB_ROOT_PATH_KEY)) {
            output.append(ConfigConstants.JOB_MANAGER_WEB_ROOT_PATH_KEY + ": " + "\n");
        } else {
            output.append(line + "\n");
        }
    }
    // just to make sure.
    output.append(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY + ": " + ownHostname + "\n");
    output.append(ConfigConstants.JOB_MANAGER_WEB_ROOT_PATH_KEY + ": " + localWebInterfaceDir + "\n");
    output.append(ConfigConstants.JOB_MANAGER_WEB_LOG_PATH_KEY + ": " + logDirs + "\n");
    output.close();
    br.close();
    File newConf = new File(currDir + "/stratosphere-conf-modified.yaml");
    if (!newConf.exists()) {
        LOG.warn("modified yaml does not exist!");
    }

    Utils.copyJarContents("resources/" + ConfigConstants.DEFAULT_JOB_MANAGER_WEB_PATH_NAME,
            ApplicationMaster.class.getProtectionDomain().getCodeSource().getLocation().getPath());

    JobManager jm;
    {
        String pathToNepheleConfig = currDir + "/stratosphere-conf-modified.yaml";
        String[] args = { "-executionMode", "cluster", "-configDir", pathToNepheleConfig };

        // start the job manager
        jm = JobManager.initialize(args);

        // Start info server for jobmanager
        jm.startInfoServer();
    }

    AMRMClient<ContainerRequest> rmClient = AMRMClient.createAMRMClient();
    rmClient.init(conf);
    rmClient.start();

    NMClient nmClient = NMClient.createNMClient();
    nmClient.init(conf);
    nmClient.start();

    // Register with ResourceManager
    LOG.info("registering ApplicationMaster");
    rmClient.registerApplicationMaster(applicationMasterHost, 0, "http://" + applicationMasterHost + ":"
            + GlobalConfiguration.getString(ConfigConstants.JOB_MANAGER_WEB_PORT_KEY, "undefined"));

    // Priority for worker containers - priorities are intra-application
    Priority priority = Records.newRecord(Priority.class);
    priority.setPriority(0);

    // Resource requirements for worker containers
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(memoryPerTaskManager);
    capability.setVirtualCores(coresPerTaskManager);

    // Make container requests to ResourceManager
    for (int i = 0; i < taskManagerCount; ++i) {
        ContainerRequest containerAsk = new ContainerRequest(capability, null, null, priority);
        LOG.info("Requesting TaskManager container " + i);
        rmClient.addContainerRequest(containerAsk);
    }

    LocalResource stratosphereJar = Records.newRecord(LocalResource.class);
    LocalResource stratosphereConf = Records.newRecord(LocalResource.class);

    // register Stratosphere Jar with remote HDFS
    final Path remoteJarPath = new Path(remoteStratosphereJarPath);
    Utils.registerLocalResource(fs, remoteJarPath, stratosphereJar);

    // register conf with local fs.
    Path remoteConfPath = Utils.setupLocalResource(conf, fs, appId,
            new Path("file://" + currDir + "/stratosphere-conf-modified.yaml"), stratosphereConf,
            new Path(clientHomeDir));
    LOG.info("Prepared localresource for modified yaml: " + stratosphereConf);

    boolean hasLog4j = new File(currDir + "/log4j.properties").exists();
    // prepare the files to ship
    LocalResource[] remoteShipRsc = null;
    String[] remoteShipPaths = shipListString.split(",");
    if (!shipListString.isEmpty()) {
        remoteShipRsc = new LocalResource[remoteShipPaths.length];
        { // scope for i
            int i = 0;
            for (String remoteShipPathStr : remoteShipPaths) {
                if (remoteShipPathStr == null || remoteShipPathStr.isEmpty()) {
                    continue;
                }
                remoteShipRsc[i] = Records.newRecord(LocalResource.class);
                Path remoteShipPath = new Path(remoteShipPathStr);
                Utils.registerLocalResource(fs, remoteShipPath, remoteShipRsc[i]);
                i++;
            }
        }
    }

    // respect custom JVM options in the YAML file
    final String javaOpts = GlobalConfiguration.getString(ConfigConstants.STRATOSPHERE_JVM_OPTIONS, "");

    // Obtain allocated containers and launch
    int allocatedContainers = 0;
    int completedContainers = 0;
    while (allocatedContainers < taskManagerCount) {
        AllocateResponse response = rmClient.allocate(0);
        for (Container container : response.getAllocatedContainers()) {
            LOG.info("Got new Container for TM " + container.getId() + " on host "
                    + container.getNodeId().getHost());
            ++allocatedContainers;

            // Launch the container by creating a ContainerLaunchContext
            ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);

            String tmCommand = "$JAVA_HOME/bin/java -Xmx" + heapLimit + "m " + javaOpts;
            if (hasLog4j) {
                tmCommand += " -Dlog.file=\"" + ApplicationConstants.LOG_DIR_EXPANSION_VAR
                        + "/taskmanager-log4j.log\" -Dlog4j.configuration=file:log4j.properties";
            }
            tmCommand += " eu.stratosphere.yarn.YarnTaskManagerRunner -configDir . " + " 1>"
                    + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/taskmanager-stdout.log" + " 2>"
                    + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/taskmanager-stderr.log";
            ctx.setCommands(Collections.singletonList(tmCommand));

            LOG.info("Starting TM with command=" + tmCommand);

            // copy resources to the TaskManagers.
            Map<String, LocalResource> localResources = new HashMap<String, LocalResource>(2);
            localResources.put("stratosphere.jar", stratosphereJar);
            localResources.put("stratosphere-conf.yaml", stratosphereConf);

            // add ship resources
            if (!shipListString.isEmpty()) {
                Preconditions.checkNotNull(remoteShipRsc);
                for (int i = 0; i < remoteShipPaths.length; i++) {
                    localResources.put(new Path(remoteShipPaths[i]).getName(), remoteShipRsc[i]);
                }
            }

            ctx.setLocalResources(localResources);

            // Setup CLASSPATH for Container (=TaskTracker)
            Map<String, String> containerEnv = new HashMap<String, String>();
            Utils.setupEnv(conf, containerEnv); //add stratosphere.jar to class path.
            containerEnv.put(Client.ENV_CLIENT_USERNAME, yarnClientUsername);

            ctx.setEnvironment(containerEnv);

            UserGroupInformation user = UserGroupInformation.getCurrentUser();
            try {
                Credentials credentials = user.getCredentials();
                DataOutputBuffer dob = new DataOutputBuffer();
                credentials.writeTokenStorageToStream(dob);
                ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
                ctx.setTokens(securityTokens);
            } catch (IOException e) {
                LOG.warn("Getting current user info failed when trying to launch the container"
                        + e.getMessage());
            }

            LOG.info("Launching container " + allocatedContainers);
            nmClient.startContainer(container, ctx);
        }
        for (ContainerStatus status : response.getCompletedContainersStatuses()) {
            ++completedContainers;
            LOG.info("Completed container (while allocating) " + status.getContainerId() + ". Total Completed:"
                    + completedContainers);
            LOG.info("Diagnostics " + status.getDiagnostics());
        }
        Thread.sleep(100);
    }

    // Now wait for containers to complete

    while (completedContainers < taskManagerCount) {
        // Use floating-point division so the reported progress is not truncated to 0.
        AllocateResponse response = rmClient.allocate((float) completedContainers / taskManagerCount);
        for (ContainerStatus status : response.getCompletedContainersStatuses()) {
            ++completedContainers;
            LOG.info("Completed container " + status.getContainerId() + ". Total Completed:"
                    + completedContainers);
            LOG.info("Diagnostics " + status.getDiagnostics());
        }
        Thread.sleep(5000);
    }
    LOG.info("Shutting down JobManager");
    jm.shutdown();

    // Un-register with ResourceManager
    rmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, "", "");

}

From source file: eu.stratosphere.yarn.Utils.java

License: Apache License

public static void setTokensFor(ContainerLaunchContext amContainer, Path[] paths, Configuration conf)
        throws IOException {
    Credentials credentials = new Credentials();
    // for HDFS
    TokenCache.obtainTokensForNamenodes(credentials, paths, conf);
    // for user
    UserGroupInformation currUsr = UserGroupInformation.getCurrentUser();

    Collection<Token<? extends TokenIdentifier>> usrTok = currUsr.getTokens();
    for (Token<? extends TokenIdentifier> token : usrTok) {
        final Text id = new Text(token.getIdentifier());
        LOG.info("Adding user token " + id + " with " + token);
        credentials.addToken(id, token);
    }
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    LOG.debug("Wrote tokens. Credentials buffer length: " + dob.getLength());

    ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
    amContainer.setTokens(securityTokens);
}
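
Note the pattern this and the other YARN examples share: the Credentials are serialized into a DataOutputBuffer, and the result is wrapped with ByteBuffer.wrap(dob.getData(), 0, dob.getLength()). Passing getLength() explicitly matters because getData() exposes the entire backing array, which is typically larger than the valid token data.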

From source file: FormatStorage.Unit.java

License: Open Source License

public void transfer(long newOffset) throws Exception {
    long adjust = newOffset - offset;

    boolean VAR = segment.formatData.isVar();
    if (VAR) {
        if (!compressed) {
            int tnum = ((DataOutputBuffer) metasBuffer).getLength() / ConstVar.Sizeof_Long;
            if (tnum != recordNum) {
                throw new SEException.InnerException("tnum != recordNum");
            }

            DataOutputBuffer tmpOutputBuffer = new DataOutputBuffer();
            DataInputBuffer tmpInput = new DataInputBuffer();
            tmpInput.reset(((DataOutputBuffer) metasBuffer).getData(), 0,
                    ((DataOutputBuffer) metasBuffer).getLength());
            for (int i = 0; i < recordNum; i++) {
                long value = tmpInput.readLong() + adjust;
                tmpOutputBuffer.writeLong(value);
            }

            tmpInput.reset(tmpOutputBuffer.getData(), 0, tmpOutputBuffer.getLength());
            ((DataOutputBuffer) metasBuffer).reset();
            for (int i = 0; i < recordNum; i++) {
                ((DataOutputBuffer) metasBuffer).writeLong(tmpInput.readLong());
            }

            tmpOutputBuffer = null;
            tmpInput = null;
        } else {
            compressedMetasOutput.finish();

            InputStream tmpMetasInputStream = new DataInputBuffer();
            ((DataInputBuffer) tmpMetasInputStream).reset(((DataOutputBuffer) metasBuffer).getData(), 0,
                    ((DataOutputBuffer) metasBuffer).getLength());
            CompressionInputStream tmpCompressedMetasInput = codec.createInputStream(tmpMetasInputStream);

            DataOutputBuffer tmpOutputBuffer = new DataOutputBuffer();
            for (int i = 0; i < recordNum; i++) {
                int count = 0;
                try {
                    count = tmpCompressedMetasInput.read(metaOffsetBytes, 0, ConstVar.Sizeof_Long);
                    long meta = Util.bytes2long(metaOffsetBytes, 0, ConstVar.Sizeof_Long) + adjust;

                    tmpOutputBuffer.writeLong(meta);

                } catch (Exception e) {
                    e.printStackTrace();
                    System.out.println("i:" + i + ",count:" + count);

                    throw e;
                }
            }

            ((DataOutputBuffer) metasBuffer).reset();
            compressedMetasOutput.resetState();

            DataInputBuffer tmpInputBuffer = new DataInputBuffer();
            tmpInputBuffer.reset(tmpOutputBuffer.getData(), 0, tmpOutputBuffer.getLength());
            for (int i = 0; i < recordNum; i++) {
                long newMeta = tmpInputBuffer.readLong();
                Util.long2bytes(metaOffsetBytes, newMeta);
                compressedMetasOutput.write(metaOffsetBytes, 0, ConstVar.Sizeof_Long);
            }
        }
    }

    metaOffset += adjust;
    setOffset(newOffset);
}

From source file: gobblin.yarn.GobblinYarnAppLauncher.java

License: Apache License

private void setupSecurityTokens(ContainerLaunchContext containerLaunchContext) throws IOException {
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    String tokenRenewer = this.yarnConfiguration.get(YarnConfiguration.RM_PRINCIPAL);
    if (tokenRenewer == null || tokenRenewer.length() == 0) {
        throw new IOException("Failed to get master Kerberos principal for the RM to use as renewer");
    }

    // For now, only getting tokens for the default file-system.
    Token<?> tokens[] = this.fs.addDelegationTokens(tokenRenewer, credentials);
    if (tokens != null) {
        for (Token<?> token : tokens) {
            LOGGER.info("Got delegation token for " + this.fs.getUri() + "; " + token);
        }
    }

    Closer closer = Closer.create();
    try {
        DataOutputBuffer dataOutputBuffer = closer.register(new DataOutputBuffer());
        credentials.writeTokenStorageToStream(dataOutputBuffer);
        ByteBuffer fsTokens = ByteBuffer.wrap(dataOutputBuffer.getData(), 0, dataOutputBuffer.getLength());
        containerLaunchContext.setTokens(fsTokens);
    } catch (Throwable t) {
        throw closer.rethrow(t);
    } finally {
        closer.close();
    }
}

From source file: gobblin.yarn.YarnService.java

License: Apache License

private ByteBuffer getSecurityTokens() throws IOException {
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    Closer closer = Closer.create();
    try {
        DataOutputBuffer dataOutputBuffer = closer.register(new DataOutputBuffer());
        credentials.writeTokenStorageToStream(dataOutputBuffer);

        // Remove the AM->RM token so that containers cannot access it
        Iterator<Token<?>> tokenIterator = credentials.getAllTokens().iterator();
        while (tokenIterator.hasNext()) {
            Token<?> token = tokenIterator.next();
            if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
                tokenIterator.remove();
            }
        }

        return ByteBuffer.wrap(dataOutputBuffer.getData(), 0, dataOutputBuffer.getLength());
    } catch (Throwable t) {
        throw closer.rethrow(t);
    } finally {
        closer.close();
    }
}

From source file: hadoop.yarn.distributedshell.DshellClient.java

License: Apache License

/**
 * Main run function for the client
 * 
 * @return true if application completed successfully
 * @throws IOException
 * @throws YarnException
 */
public boolean run() throws IOException, YarnException {

    LOG.info("Running Client");
    yarnClient.start();

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());

    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    LOG.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress"
                + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers"
                + node.getNumContainers());
    }

    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity="
            + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
            + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount="
            + queueInfo.getChildQueues().size());

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                    + userAcl.name());
        }
    }

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    // TODO get min/max resource capabilities from RM and change memory ask
    // if needed
    // If we do not have min/max, we may not be able to correctly request
    // the required resources from the RM for the app master
    // Memory ask has to be a multiple of min and less than max.
    // Dump out information about cluster capability as seen by the resource
    // manager
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    // A resource ask cannot exceed the max.
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified="
                + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    int maxVCores = appResponse.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max virtual cores capabililty of resources in this cluster " + maxVCores);

    if (amVCores > maxVCores) {
        LOG.info("AM virtual cores specified above max threshold of cluster. " + "Using max value."
                + ", specified=" + amVCores + ", max=" + maxVCores);
        amVCores = maxVCores;
    }

    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();

    appContext.setKeepContainersAcrossApplicationAttempts(keepContainers);
    appContext.setApplicationName(appName);

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of
    // the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem
    // Create a local resource to point to the destination jar path
    FileSystem fs = FileSystem.get(conf);
    addToLocalResources(fs, appMasterJar, appMasterJarPath, appId.toString(), localResources, null);

    // Set the log4j properties if needed
    if (!log4jPropFile.isEmpty()) {
        addToLocalResources(fs, log4jPropFile, log4jPath, appId.toString(), localResources, null);
    }

    // The shell script has to be made available on the final container(s)
    // where it will be executed.
    // To do this, we need to first copy into the filesystem that is visible
    // to the yarn framework.
    // We do not need to set this as a local resource for the application
    // master as the application master does not need it.
    String hdfsShellScriptLocation = "";
    long hdfsShellScriptLen = 0;
    long hdfsShellScriptTimestamp = 0;
    if (!shellScriptPath.isEmpty()) {
        Path shellSrc = new Path(shellScriptPath);
        String shellPathSuffix = appName + "/" + appId.toString() + "/" + SCRIPT_PATH;
        Path shellDst = new Path(fs.getHomeDirectory(), shellPathSuffix);
        fs.copyFromLocalFile(false, true, shellSrc, shellDst);
        hdfsShellScriptLocation = shellDst.toUri().toString();
        FileStatus shellFileStatus = fs.getFileStatus(shellDst);
        hdfsShellScriptLen = shellFileStatus.getLen();
        hdfsShellScriptTimestamp = shellFileStatus.getModificationTime();
    }

    if (!shellCommand.isEmpty()) {
        addToLocalResources(fs, null, shellCommandPath, appId.toString(), localResources, shellCommand);
    }

    if (shellArgs.length > 0) {
        addToLocalResources(fs, null, shellArgsPath, appId.toString(), localResources,
                StringUtils.join(shellArgs, " "));
    }
    // Set local resource info into app master container launch context
    amContainer.setLocalResources(localResources);

    // Set the necessary security tokens as needed
    // amContainer.setContainerTokens(containerToken);

    // Set the env variables to be setup in the env where the application
    // master will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();

    // put location of shell script into env
    // using the env info, the application master will create the correct
    // local resource for the
    // eventual containers that will be launched to execute the shell
    // scripts
    env.put(DshellDSConstants.DISTRIBUTEDSHELLSCRIPTLOCATION, hdfsShellScriptLocation);
    env.put(DshellDSConstants.DISTRIBUTEDSHELLSCRIPTTIMESTAMP, Long.toString(hdfsShellScriptTimestamp));
    env.put(DshellDSConstants.DISTRIBUTEDSHELLSCRIPTLEN, Long.toString(hdfsShellScriptLen));

    // ======================================== container jars
    if (containerJarPaths.length != 0) {
        for (int i = 0; i < containerJarPaths.length; i++) {
            String hdfsJarLocation = "";
            String[] jarNameSplit = containerJarPaths[i].split("/");
            String jarName = jarNameSplit[jarNameSplit.length - 1];

            long hdfsJarLen = 0;
            long hdfsJarTimestamp = 0;
            if (!containerJarPaths[i].isEmpty()) {
                Path jarSrc = new Path(containerJarPaths[i]);
                String jarPathSuffix = appName + "/" + appId.toString() + "/" + jarName;
                Path jarDst = new Path(fs.getHomeDirectory(), jarPathSuffix);
                fs.copyFromLocalFile(false, true, jarSrc, jarDst);
                hdfsJarLocation = jarDst.toUri().toString();
                FileStatus jarFileStatus = fs.getFileStatus(jarDst);
                hdfsJarLen = jarFileStatus.getLen();
                hdfsJarTimestamp = jarFileStatus.getModificationTime();
                env.put(DshellDSConstants.DISTRIBUTEDJARLOCATION + i, hdfsJarLocation);
                env.put(DshellDSConstants.DISTRIBUTEDJARTIMESTAMP + i, Long.toString(hdfsJarTimestamp));
                env.put(DshellDSConstants.DISTRIBUTEDJARLEN + i, Long.toString(hdfsJarLen));
            }
        }
    }
    // ======================================== end container jars

    // ======================================== container archives
    if (containerArchivePaths.length != 0) {
        for (int i = 0; i < containerArchivePaths.length; i++) {
            String hdfsArchiveLocation = "";
            String[] archiveNameSplit = containerArchivePaths[i].split("/");
            String archiveName = archiveNameSplit[archiveNameSplit.length - 1];

            long hdfsArchiveLen = 0;
            long hdfsArchiveTimestamp = 0;
            if (!containerArchivePaths[i].isEmpty()) {
                Path archiveSrc = new Path(containerArchivePaths[i]);
                String archivePathSuffix = appName + "/" + appId.toString() + "/" + archiveName;
                Path archiveDst = new Path(fs.getHomeDirectory(), archivePathSuffix);
                fs.copyFromLocalFile(false, true, archiveSrc, archiveDst);
                hdfsArchiveLocation = archiveDst.toUri().toString();
                FileStatus archiveFileStatus = fs.getFileStatus(archiveDst);
                hdfsArchiveLen = archiveFileStatus.getLen();
                hdfsArchiveTimestamp = archiveFileStatus.getModificationTime();
                env.put(DshellDSConstants.DISTRIBUTEDARCHIVELOCATION + i, hdfsArchiveLocation);
                env.put(DshellDSConstants.DISTRIBUTEDARCHIVETIMESTAMP + i, Long.toString(hdfsArchiveTimestamp));
                env.put(DshellDSConstants.DISTRIBUTEDARCHIVELEN + i, Long.toString(hdfsArchiveLen));
            }
        }
    }
    // ======================================== end container archives

    // Add AppMaster.jar location to classpath
    // At some point we should not be required to add
    // the hadoop specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$$())
            .append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH)) {
        classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./log4j.properties");

    // add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }

    env.put("CLASSPATH", classPathEnv.toString());

    amContainer.setEnvironment(env);

    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);

    // Set java executable command
    LOG.info("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    // Set class name
    vargs.add(appMasterMainClass);
    // Set params for Application Master
    vargs.add("--container_memory " + String.valueOf(containerMemory));
    vargs.add("--container_vcores " + String.valueOf(containerVirtualCores));
    vargs.add("--num_containers " + String.valueOf(numContainers));
    vargs.add("--priority " + String.valueOf(shellCmdPriority));

    for (Map.Entry<String, String> entry : shellEnv.entrySet()) {
        vargs.add("--shell_env " + entry.getKey() + "=" + entry.getValue());
    }
    if (debugFlag) {
        vargs.add("--debug");
    }

    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    amContainer.setCommands(commands);

    // Set up resource type requirements
    // For now, both memory and vcores are supported, so we set memory and
    // vcores requirements
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMemory);
    capability.setVirtualCores(amVCores);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application
    // Not needed in this scenario
    // amContainer.setServiceData(serviceData);

    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    // TODO - what is the range for priority? how to decide?
    pri.setPriority(amPriority);
    appContext.setPriority(pri);

    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);

    // Submit the application to the applications manager
    // SubmitApplicationResponse submitResp =
    // applicationsManager.submitApplication(appRequest);
    // Ignore the response as either a valid response object is returned on
    // success
    // or an exception thrown to denote some form of a failure
    LOG.info("Submitting application to ASM");

    yarnClient.submitApplication(appContext);

    // TODO
    // Try submitting the same request again
    // app submission failure?

    // Monitor the application
    return monitorApplication(appId);

}

From source file: io.github.dlmarion.clowncar.hdfs.TestBloscCompressorDecompressor.java

License: Apache License

@Test
public void testCompressorDecompressorLogicWithCompressionStreams() {
    DataOutputStream deflateOut = null;
    DataInputStream inflateIn = null;
    int BYTE_SIZE = 1024 * 100;
    byte[] bytes = generate(BYTE_SIZE);
    int bufferSize = 262144;
    int compressionOverhead = (bufferSize / 6) + 32;
    try {
        Configuration conf = new Configuration(false);
        conf.set(BloscCompressor.COMPRESSOR_NAME_KEY, compressor);
        conf.set(BloscCompressor.COMPRESSION_LEVEL_KEY, Integer.toString(level));
        conf.set(BloscCompressor.BYTES_FOR_TYPE_KEY, Integer.toString(Integer.BYTES));
        conf.set(BloscCompressor.SHUFFLE_TYPE_KEY, Integer.toString(shuffle));
        conf.set(BloscCompressor.NUM_THREADS_KEY, Integer.toString(threads));
        DataOutputBuffer compressedDataBuffer = new DataOutputBuffer();
        CompressionOutputStream deflateFilter = new BlockCompressorStream(compressedDataBuffer,
                new BloscCompressor(bufferSize, conf), bufferSize, compressionOverhead);
        deflateOut = new DataOutputStream(new BufferedOutputStream(deflateFilter));
        deflateOut.write(bytes, 0, bytes.length);
        deflateOut.flush();
        deflateFilter.finish();

        DataInputBuffer deCompressedDataBuffer = new DataInputBuffer();
        deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0, compressedDataBuffer.getLength());

        CompressionInputStream inflateFilter = new BlockDecompressorStream(deCompressedDataBuffer,
                new BloscDecompressor(bufferSize), bufferSize);

        inflateIn = new DataInputStream(new BufferedInputStream(inflateFilter));

        byte[] result = new byte[BYTE_SIZE];
        inflateIn.read(result);

        assertArrayEquals("original array not equals compress/decompressed array", result, bytes);
    } catch (IOException e) {
        e.printStackTrace();
        fail("testBloscCompressorDecopressorLogicWithCompressionStreams ex error !!!");
    } finally {
        try {
            if (deflateOut != null)
                deflateOut.close();
            if (inflateIn != null)
                inflateIn.close();
        } catch (Exception e) {
            // Ignore failures while closing the streams.
        }
    }
}

From source file: io.hops.ha.common.TransactionStateImpl.java

License: Apache License

private void persistAppAttempt() throws IOException {
    if (!appAttempts.isEmpty()) {
        ApplicationAttemptStateDataAccess DA = (ApplicationAttemptStateDataAccess) RMStorageFactory
                .getDataAccess(ApplicationAttemptStateDataAccess.class);
        List<ApplicationAttemptState> toAdd = new ArrayList<ApplicationAttemptState>();
        for (String appAttemptIdStr : appAttempts.keySet()) {
            RMAppAttempt appAttempt = appAttempts.get(appAttemptIdStr);
            String appIdStr = appAttempt.getAppAttemptId().getApplicationId().toString();

            Credentials credentials = appAttempt.getCredentials();
            ByteBuffer appAttemptTokens = null;

            if (credentials != null) {
                DataOutputBuffer dob = new DataOutputBuffer();
                credentials.writeTokenStorageToStream(dob);
                appAttemptTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
            }
            ApplicationAttemptStateDataPBImpl attemptStateData = (ApplicationAttemptStateDataPBImpl) ApplicationAttemptStateDataPBImpl
                    .newApplicationAttemptStateData(appAttempt.getAppAttemptId(),
                            appAttempt.getMasterContainer(), appAttemptTokens, appAttempt.getStartTime(),
                            appAttempt.getState(), appAttempt.getOriginalTrackingUrl(),
                            appAttempt.getDiagnostics(), appAttempt.getFinalApplicationStatus(),
                            appAttempt.getRanNodes(), appAttempt.getJustFinishedContainers(),
                            appAttempt.getProgress(), appAttempt.getHost(), appAttempt.getRpcPort());

            byte[] attemptIdByteArray = attemptStateData.getProto().toByteArray();
            LOG.debug("adding appAttempt : " + appAttempt.getAppAttemptId() + " with state "
                    + appAttempt.getState());
            toAdd.add(new ApplicationAttemptState(appIdStr, appAttemptIdStr, attemptIdByteArray,
                    appAttempt.getHost(), appAttempt.getRpcPort(), appAttemptTokens,
                    appAttempt.getTrackingUrl()));
        }
        DA.addAll(toAdd);
    }
}