Example usage for org.apache.hadoop.security.Credentials Credentials()

Introduction

This page collects usage examples for the no-argument constructor of org.apache.hadoop.security.Credentials, Credentials().

Prototype

public Credentials() 

Document

Create an empty credentials instance.
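
Before the usage listings, here is a minimal, self-contained sketch of the constructor on its own: create an empty Credentials, store a secret key under an alias, and round-trip it through Hadoop's token-storage serialization. The alias name and the buffer classes are illustrative choices for this sketch, not taken from the examples below.

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;

public class CredentialsSketch {
    public static void main(String[] args) throws IOException {
        // Create an empty credentials instance.
        Credentials creds = new Credentials();

        // Credentials stores secret keys and tokens under Text aliases.
        // The alias and key bytes here are placeholders for illustration.
        creds.addSecretKey(new Text("example.alias"), "s3cr3t".getBytes());

        // Write to Hadoop's token-storage format...
        DataOutputBuffer out = new DataOutputBuffer();
        creds.writeTokenStorageToStream(out);

        // ...and read it back into a fresh empty instance.
        Credentials copy = new Credentials();
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        copy.readTokenStorageStream(in);

        System.out.println("secret keys: " + copy.numberOfSecretKeys());
    }
}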

Usage

From source file: com.cloudera.recordservice.mapreduce.MapReduceTest.java

License: Apache License

@Test
public void testGetSplits() throws IOException {
    Configuration config = new Configuration();

    boolean exceptionThrown = false;
    try {
        PlanUtil.getSplits(config, new Credentials());
    } catch (IllegalArgumentException e) {
        exceptionThrown = true;
        assertTrue(e.getMessage().contains("No input specified"));
    }
    assertTrue(exceptionThrown);

    // Set db/table and make sure it works.
    config.set(ConfVars.TBL_NAME_CONF.name, "tpch.nation");
    PlanUtil.getSplits(config, new Credentials());

    // Also set input. This should fail.
    config.set(FileInputFormat.INPUT_DIR, "/test");
    exceptionThrown = false;
    try {
        PlanUtil.getSplits(config, new Credentials());
    } catch (IllegalArgumentException e) {
        exceptionThrown = true;
        assertTrue(e.getMessage(), e.getMessage().contains("More than one input specified"));
    }
    assertTrue(exceptionThrown);

    // Unset the table and set columns. Column projections combined with INPUT_DIR should fail.
    config.unset(ConfVars.TBL_NAME_CONF.name);
    config.setStrings(ConfVars.COL_NAMES_CONF.name, "a");
    exceptionThrown = false;
    try {
        PlanUtil.getSplits(config, new Credentials());
    } catch (IllegalArgumentException e) {
        exceptionThrown = true;
        assertTrue(e.getMessage().contains("Column projections can only be specified with table inputs."));
    }
    assertTrue(exceptionThrown);

    // Test some cases that work
    verifyInputSplitsTable(1, 4, "tpch.nation");
    verifyInputSplitsTable(2, 12, "rs.alltypes");
    verifyInputSplitsTable(1, 1, "tpch.nation", "n_name");
    verifyInputSplitsTable(2, 3, "rs.alltypes", "int_col", "double_col", "string_col");
    verifyInputSplitsPath(1, 1, "/test-warehouse/tpch.nation");

    // Test some cases using the config utility.
    config.clear();
    RecordServiceConfig.setInputTable(config, null, "tpch.nation", "n_nationkey", "n_comment");
    verifyInputSplits(1, 2, config);

    exceptionThrown = false;
    try {
        verifyInputSplitsTable(1, 1, "tpch.nation", "bad");
    } catch (IOException e) {
        exceptionThrown = true;
        assertTrue(e.getCause() instanceof RecordServiceException);
        RecordServiceException ex = (RecordServiceException) e.getCause();
        assertEquals(RecordServiceException.ErrorCode.INVALID_REQUEST, ex.code);
    }
    assertTrue(exceptionThrown);

    exceptionThrown = false;
    try {
        verifyInputSplitsPath(1, 1, "/test-warehouse/tpch.nation,/test-warehouse/tpch.nation");
    } catch (IllegalArgumentException e) {
        exceptionThrown = true;
        assertTrue(e.getMessage().contains("Only reading a single directory is currently supported."));
    }
    assertTrue(exceptionThrown);
}

From source file: com.cloudera.recordservice.mapreduce.MapReduceTest.java

License: Apache License

@Test
// TODO: make this generic. This should be extensible to test all the input
// formats we support. How do we do this?
public void testReadNation() throws IOException, InterruptedException {
    Configuration config = new Configuration();
    RecordServiceInputFormat.RecordServiceRecordReader reader = new RecordServiceInputFormat.RecordServiceRecordReader();

    try {
        RecordServiceConfig.setInputTable(config, null, "tpch.nation");
        List<InputSplit> splits = PlanUtil.getSplits(config, new Credentials()).splits;
        reader.initialize(splits.get(0), new TaskAttemptContextImpl(new JobConf(config), new TaskAttemptID()));

        int numRows = 0;
        while (reader.nextKeyValue()) {
            RecordServiceRecord value = reader.getCurrentValue();
            ++numRows;

            if (numRows == 10) {
                assertEquals("INDONESIA", value.getColumnValue(1).toString());
            }
        }
        assertFalse(reader.nextKeyValue());
        assertFalse(reader.nextRecord());
        assertEquals(25, numRows);

        config.clear();
        RecordServiceConfig.setInputTable(config, "tpch", "nation", "n_comment");
        splits = PlanUtil.getSplits(config, new Credentials()).splits;
        reader.initialize(splits.get(0), new TaskAttemptContextImpl(new JobConf(config), new TaskAttemptID()));
        numRows = 0;
        while (reader.nextKeyValue()) {
            RecordServiceRecord value = reader.getCurrentValue();
            if (numRows == 12) {
                assertEquals("ously. final, express gifts cajole a", value.getColumnValue(0).toString());
            }
            ++numRows;
        }
        assertEquals(25, numRows);
    } finally {
        reader.close();
    }
}

From source file: com.cloudera.recordservice.mapreduce.MapReduceTest.java

License: Apache License

@Test
public void testReadAllTypes() throws IOException, InterruptedException {
    Configuration config = new Configuration();
    RecordServiceInputFormat.RecordServiceRecordReader reader = new RecordServiceInputFormat.RecordServiceRecordReader();

    SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd");
    format.setTimeZone(TimeZone.getTimeZone("GMT"));

    try {
        RecordServiceConfig.setInputTable(config, null, "rs.alltypes");
        List<InputSplit> splits = PlanUtil.getSplits(config, new Credentials()).splits;

        int numRows = 0;
        for (InputSplit split : splits) {
            reader.initialize(split, new TaskAttemptContextImpl(new JobConf(config), new TaskAttemptID()));
            while (reader.nextKeyValue()) {
                RecordServiceRecord value = reader.getCurrentValue();
                if (((BooleanWritable) value.getColumnValue(0)).get()) {
                    assertEquals(0, ((ByteWritable) value.getColumnValue(1)).get());
                    assertEquals(1, ((ShortWritable) value.getColumnValue(2)).get());
                    assertEquals(2, ((IntWritable) value.getColumnValue(3)).get());
                    assertEquals(3, ((LongWritable) value.getColumnValue(4)).get());
                    assertEquals(4.0, ((FloatWritable) value.getColumnValue(5)).get(), 0.1);
                    assertEquals(5.0, ((DoubleWritable) value.getColumnValue(6)).get(), 0.1);
                    assertEquals("hello", value.getColumnValue(7).toString());
                    assertEquals("vchar1", value.getColumnValue(8).toString());
                    assertEquals("char1", value.getColumnValue(9).toString());
                    assertEquals("2015-01-01", format
                            .format(((TimestampNanosWritable) value.getColumnValue(10)).get().toTimeStamp()));
                    assertEquals(new BigDecimal("3.1415920000"),
                            ((DecimalWritable) value.getColumnValue(11)).get().toBigDecimal());
                } else {
                    assertEquals(6, ((ByteWritable) value.getColumnValue(1)).get());
                    assertEquals(7, ((ShortWritable) value.getColumnValue(2)).get());
                    assertEquals(8, ((IntWritable) value.getColumnValue(3)).get());
                    assertEquals(9, ((LongWritable) value.getColumnValue(4)).get());
                    assertEquals(10.0, ((FloatWritable) value.getColumnValue(5)).get(), 0.1);
                    assertEquals(11.0, ((DoubleWritable) value.getColumnValue(6)).get(), 0.1);
                    assertEquals("world", value.getColumnValue(7).toString());
                    assertEquals("vchar2", value.getColumnValue(8).toString());
                    assertEquals("char2", value.getColumnValue(9).toString());
                    assertEquals("2016-01-01", format
                            .format(((TimestampNanosWritable) value.getColumnValue(10)).get().toTimeStamp()));
                    assertEquals(new BigDecimal("1234.5678900000"),
                            ((DecimalWritable) value.getColumnValue(11)).get().toBigDecimal());
                }
                ++numRows;
            }
        }
        assertEquals(2, numRows);
    } finally {
        reader.close();
    }
}

From source file: com.cloudera.recordservice.mapreduce.MapReduceTest.java

License: Apache License

@Test
public void testReadAllTypesNull() throws IOException, InterruptedException {
    Configuration config = new Configuration();
    RecordServiceInputFormat.RecordServiceRecordReader reader = new RecordServiceInputFormat.RecordServiceRecordReader();

    try {
        RecordServiceConfig.setInputTable(config, null, "rs.alltypes_null");
        List<InputSplit> splits = PlanUtil.getSplits(config, new Credentials()).splits;

        int numRows = 0;
        for (InputSplit split : splits) {
            reader.initialize(split, new TaskAttemptContextImpl(new JobConf(config), new TaskAttemptID()));
            while (reader.nextKeyValue()) {
                RecordServiceRecord value = reader.getCurrentValue();
                for (int i = 0; i < value.getSchema().getNumColumns(); ++i) {
                    assertNull(value.getColumnValue(i));
                }
                ++numRows;
            }
        }
        assertEquals(1, numRows);
    } finally {
        reader.close();
    }
}

From source file: com.cloudera.recordservice.mapreduce.MapReduceTest.java

License: Apache License

@Test
public void testCountStar() throws IOException, InterruptedException {
    Configuration config = new Configuration();
    TextInputFormat.TextRecordReader reader = new TextInputFormat.TextRecordReader();

    try {
        RecordServiceConfig.setInputQuery(config, "select count(*) from tpch.nation");
        List<InputSplit> splits = PlanUtil.getSplits(config, new Credentials()).splits;
        int numRows = 0;
        for (InputSplit split : splits) {
            reader.initialize(split, new TaskAttemptContextImpl(new JobConf(config), new TaskAttemptID()));
            while (reader.nextKeyValue()) {
                ++numRows;
            }
        }
        assertEquals(25, numRows);
    } finally {
        reader.close();
    }
}

From source file: com.cloudera.recordservice.pig.HCatRSLoader.java

License: Apache License

@Override
public void setLocation(String location, Job job) throws IOException {
    HCatContext.INSTANCE.setConf(job.getConfiguration()).getConf().get()
            .setBoolean(HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION, true);
    UDFContext udfContext = UDFContext.getUDFContext();
    Properties udfProps = udfContext.getUDFProperties(this.getClass(), new String[] { signature });
    job.getConfiguration().set(INNER_SIGNATURE, INNER_SIGNATURE_PREFIX + "_" + signature);

    RequiredFieldList requiredFieldsInfo = (RequiredFieldList) udfProps.get(PRUNE_PROJECTION_INFO);
    // Get the partitionFilterString stored in the UDFContext - it would have
    // been stored there by an earlier call to setPartitionFilter.
    // Call setInput on HCatInputFormat only in the frontend, because internally
    // it makes calls to the HCat server - we don't want these to happen in
    // the backend.
    // In the Hadoop frontend, the mapred.task.id property will not be set in
    // the Configuration.
    if (udfProps.containsKey(HCatConstants.HCAT_PIG_LOADER_LOCATION_SET)) {
        for (Enumeration<Object> emr = udfProps.keys(); emr.hasMoreElements();) {
            PigHCatUtil.getConfigFromUDFProperties(udfProps, job.getConfiguration(),
                    emr.nextElement().toString());
        }
        if (!HCatUtil.checkJobContextIfRunningFromBackend(job)) {
            // Combine credentials; the credentials from the job take precedence for freshness.
            Credentials crd = jobCredentials.get(INNER_SIGNATURE_PREFIX + "_" + signature);
            job.getCredentials().addAll(crd);
        }
    } else {
        Job clone = new Job(job.getConfiguration());
        HCatRSInputFormat.setInput(job, location, getPartitionFilterString());
        InputJobInfo inputJobInfo = (InputJobInfo) HCatRSUtil
                .deserialize(job.getConfiguration().get(HCatConstants.HCAT_KEY_JOB_INFO));

        // TODO: Add back special cases call when I find out where the code has moved.
        addSpecialCasesParametersForHCatLoader(job.getConfiguration(), inputJobInfo.getTableInfo());

        // We will store all the new/changed properties of the job in the
        // UDF context, so that the HCatInputFormat.setInput method need not
        // be called many times.
        for (Entry<String, String> keyValue : job.getConfiguration()) {
            String oldValue = clone.getConfiguration().getRaw(keyValue.getKey());
            if ((oldValue == null) || !keyValue.getValue().equals(oldValue)) {
                udfProps.put(keyValue.getKey(), keyValue.getValue());
            }
        }
        udfProps.put(HCatConstants.HCAT_PIG_LOADER_LOCATION_SET, true);
        // Store credentials in a private hash map rather than the UDF context
        // to make sure they are not public.
        Credentials crd = new Credentials();
        crd.addAll(job.getCredentials());
        jobCredentials.put(INNER_SIGNATURE_PREFIX + "_" + signature, crd);
        clone.setInputFormatClass(HCatRSInputFormat.class);
    }

    // Need to also push projections by calling setOutputSchema on
    // HCatInputFormat - we have to get the RequiredFields information
    // from the UDFContext, translate it to a Schema, and then pass it on.
    // The reason we do this here is that setLocation() is called by the
    // Pig runtime at InputFormat.getSplits() and
    // InputFormat.createRecordReader() time - we are not sure when
    // HCatInputFormat needs to know about pruned projections - so doing it
    // here ensures we communicate the pruned projections to HCatInputFormat
    // at getSplits() and createRecordReader() time.

    if (requiredFieldsInfo != null) {
        // convert to hcatschema and pass to HCatInputFormat
        try {
            outputSchema = phutil.getHCatSchema(requiredFieldsInfo.getFields(), signature, this.getClass());
            HCatRSInputFormat.setOutputSchema(job, outputSchema);
        } catch (Exception e) {
            throw new IOException(e);
        }
    } else {
        // else - this means pig's optimizer never invoked the pushProjection
        // method - so we need all fields and hence we should not call the
        // setOutputSchema on HCatInputFormat
        if (HCatUtil.checkJobContextIfRunningFromBackend(job)) {
            try {
                HCatSchema hcatTableSchema = (HCatSchema) udfProps.get(HCatConstants.HCAT_TABLE_SCHEMA);
                outputSchema = hcatTableSchema;
                HCatRSInputFormat.setOutputSchema(job, outputSchema);
            } catch (Exception e) {
                throw new IOException(e);
            }
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("outputSchema=" + outputSchema);
    }
    job.setInputFormatClass(HCatRSInputFormat.class);
}

From source file: com.continuuity.weave.yarn.YarnSecureStore.java

License: Apache License

public static SecureStore create() {
    return create(new Credentials());
}

From source file: com.datatorrent.stram.LaunchContainerRunnable.java

License: Apache License

public static ByteBuffer getTokens(UserGroupInformation ugi,
        Token<StramDelegationTokenIdentifier> delegationToken) {
    try {
        Collection<Token<? extends TokenIdentifier>> tokens = ugi.getTokens();
        Credentials credentials = new Credentials();
        for (Token<? extends TokenIdentifier> token : tokens) {
            if (!token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
                credentials.addToken(token.getService(), token);
                LOG.info("Passing container token {}", token);
            }
        }
        credentials.addToken(delegationToken.getService(), delegationToken);
        DataOutputBuffer dataOutput = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dataOutput);
        byte[] tokenBytes = dataOutput.getData();
        ByteBuffer cTokenBuf = ByteBuffer.wrap(tokenBytes);
        return cTokenBuf.duplicate();
    } catch (IOException e) {
        throw new RuntimeException("Error generating delegation token", e);
    }
}
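
The ByteBuffer built above is what gets attached to a container launch context. As a complementary sketch (not part of the original source), the receiving side can decode such a buffer back into a Credentials instance with readTokenStorageStream; this assumes the buffer is array-backed, as ByteBuffer.wrap() guarantees, and that org.apache.hadoop.io.DataInputBuffer is imported.

// Sketch: decode a token buffer produced by getTokens() above.
// Assumes an array-backed ByteBuffer, as returned by ByteBuffer.wrap().
public static Credentials readTokens(ByteBuffer tokenBuf) throws IOException {
    DataInputBuffer in = new DataInputBuffer();
    in.reset(tokenBuf.array(), tokenBuf.arrayOffset(), tokenBuf.remaining());
    Credentials credentials = new Credentials();
    credentials.readTokenStorageStream(in);
    return credentials;
}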

From source file: com.datatorrent.stram.security.StramUserLogin.java

License: Apache License

public static long refreshTokens(long tokenLifeTime, String destinationDir, String destinationFile,
        final Configuration conf, String hdfsKeyTabFile, final Credentials credentials,
        final InetSocketAddress rmAddress, final boolean renewRMToken) throws IOException {
    long expiryTime = System.currentTimeMillis() + tokenLifeTime;
    //renew tokens
    final String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
    if (tokenRenewer == null || tokenRenewer.length() == 0) {
        throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
    }
    FileSystem fs = FileSystem.newInstance(conf);
    File keyTabFile;
    try {
        keyTabFile = FSUtil.copyToLocalFileSystem(fs, destinationDir, destinationFile, hdfsKeyTabFile, conf);
    } finally {
        fs.close();
    }
    UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
            UserGroupInformation.getCurrentUser().getUserName(), keyTabFile.getAbsolutePath());
    try {
        ugi.doAs(new PrivilegedExceptionAction<Object>() {
            @Override
            public Object run() throws Exception {
                FileSystem fs1 = FileSystem.newInstance(conf);
                YarnClient yarnClient = null;
                if (renewRMToken) {
                    yarnClient = YarnClient.createYarnClient();
                    yarnClient.init(conf);
                    yarnClient.start();
                }
                Credentials creds = new Credentials();
                try {
                    fs1.addDelegationTokens(tokenRenewer, creds);
                    if (renewRMToken) {
                        org.apache.hadoop.yarn.api.records.Token rmDelToken = yarnClient
                                .getRMDelegationToken(new Text(tokenRenewer));
                        Token<RMDelegationTokenIdentifier> rmToken = ConverterUtils.convertFromYarn(rmDelToken,
                                rmAddress);
                        creds.addToken(rmToken.getService(), rmToken);
                    }
                } finally {
                    fs1.close();
                    if (renewRMToken) {
                        yarnClient.stop();
                    }
                }
                credentials.addAll(creds);
                return null;
            }
        });
        UserGroupInformation.getCurrentUser().addCredentials(credentials);
    } catch (InterruptedException e) {
        LOG.error("Error while renewing tokens ", e);
        expiryTime = System.currentTimeMillis();
    } catch (IOException e) {
        LOG.error("Error while renewing tokens ", e);
        expiryTime = System.currentTimeMillis();
    }
    LOG.debug("number of tokens: {}", credentials.getAllTokens().size());
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.debug("updated token: {}", token);
    }
    keyTabFile.delete();
    return expiryTime;
}

From source file: com.datatorrent.stram.StramClient.java

License: Apache License

/**
 * Launch application for the dag represented by this client.
 *
 * @throws YarnException
 * @throws IOException
 */
public void startApplication() throws YarnException, IOException {
    Class<?>[] defaultClasses;

    if (applicationType.equals(YARN_APPLICATION_TYPE)) {
        //TODO restrict the security check to only check if security is enabled for webservices.
        if (UserGroupInformation.isSecurityEnabled()) {
            defaultClasses = DATATORRENT_SECURITY_CLASSES;
        } else {
            defaultClasses = DATATORRENT_CLASSES;
        }
    } else {
        throw new IllegalStateException(applicationType + " is not a valid application type.");
    }

    LinkedHashSet<String> localJarFiles = findJars(dag, defaultClasses);

    if (resources != null) {
        localJarFiles.addAll(resources);
    }

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());

    //GetClusterNodesRequest clusterNodesReq = Records.newRecord(GetClusterNodesRequest.class);
    //GetClusterNodesResponse clusterNodesResp = rmClient.clientRM.getClusterNodes(clusterNodesReq);
    //LOG.info("Got Cluster node info from ASM");
    //for (NodeReport node : clusterNodesResp.getNodeReports()) {
    //  LOG.info("Got node report from ASM for"
    //           + ", nodeId=" + node.getNodeId()
    //           + ", nodeAddress" + node.getHttpAddress()
    //           + ", nodeRackName" + node.getRackName()
    //           + ", nodeNumContainers" + node.getNumContainers()
    //           + ", nodeHealthStatus" + node.getHealthReport());
    //}
    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                    + userAcl.name());
        }
    }

    // Get a new application id
    YarnClientApplication newApp = yarnClient.createApplication();
    appId = newApp.getNewApplicationResponse().getApplicationId();

    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = newApp.getNewApplicationResponse().getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);
    int amMemory = dag.getMasterMemoryMB();
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified="
                + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    if (dag.getAttributes().get(LogicalPlan.APPLICATION_ID) == null) {
        dag.setAttribute(LogicalPlan.APPLICATION_ID, appId.toString());
    }

    // Create launch context for app master
    LOG.info("Setting up application submission context for ASM");
    ApplicationSubmissionContext appContext = Records.newRecord(ApplicationSubmissionContext.class);

    // set the application id
    appContext.setApplicationId(appId);
    // set the application name
    appContext.setApplicationName(dag.getValue(LogicalPlan.APPLICATION_NAME));
    appContext.setApplicationType(this.applicationType);
    if (YARN_APPLICATION_TYPE.equals(this.applicationType)) {
        //appContext.setMaxAppAttempts(1); // no retries until Stram is HA
    }

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    // Setup security tokens
    // If security is enabled get ResourceManager and NameNode delegation tokens.
    // Set these tokens on the container so that they are sent as part of application submission.
    // This also sets them up for renewal by ResourceManager. The NameNode delegation token
    // is also used by ResourceManager to fetch the jars from HDFS and set them up for the
    // application master launch.
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        FileSystem fs = StramClientUtils.newFileSystemInstance(conf);
        try {
            final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
            if (tokens != null) {
                for (Token<?> token : tokens) {
                    LOG.info("Got dt for " + fs.getUri() + "; " + token);
                }
            }
        } finally {
            fs.close();
        }

        addRMDelegationToken(tokenRenewer, credentials);

        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    // copy required jar files to dfs, to be localized for containers
    FileSystem fs = StramClientUtils.newFileSystemInstance(conf);
    try {
        Path appsBasePath = new Path(StramClientUtils.getDTDFSRootDir(fs, conf), StramClientUtils.SUBDIR_APPS);
        Path appPath = new Path(appsBasePath, appId.toString());

        String libJarsCsv = copyFromLocal(fs, appPath, localJarFiles.toArray(new String[] {}));

        LOG.info("libjars: {}", libJarsCsv);
        dag.getAttributes().put(LogicalPlan.LIBRARY_JARS, libJarsCsv);
        LaunchContainerRunnable.addFilesToLocalResources(LocalResourceType.FILE, libJarsCsv, localResources,
                fs);

        if (archives != null) {
            String[] localFiles = archives.split(",");
            String archivesCsv = copyFromLocal(fs, appPath, localFiles);
            LOG.info("archives: {}", archivesCsv);
            dag.getAttributes().put(LogicalPlan.ARCHIVES, archivesCsv);
            LaunchContainerRunnable.addFilesToLocalResources(LocalResourceType.ARCHIVE, archivesCsv,
                    localResources, fs);
        }

        if (files != null) {
            String[] localFiles = files.split(",");
            String filesCsv = copyFromLocal(fs, appPath, localFiles);
            LOG.info("files: {}", filesCsv);
            dag.getAttributes().put(LogicalPlan.FILES, filesCsv);
            LaunchContainerRunnable.addFilesToLocalResources(LocalResourceType.FILE, filesCsv, localResources,
                    fs);
        }

        dag.getAttributes().put(LogicalPlan.APPLICATION_PATH, appPath.toString());
        if (dag.getAttributes()
                .get(OperatorContext.STORAGE_AGENT) == null) { /* which would be the most likely case */
            Path checkpointPath = new Path(appPath, LogicalPlan.SUBDIR_CHECKPOINTS);
            // use conf client side to pickup any proxy settings from dt-site.xml
            dag.setAttribute(OperatorContext.STORAGE_AGENT,
                    new FSStorageAgent(checkpointPath.toString(), conf));
        }
        if (dag.getAttributes().get(LogicalPlan.CONTAINER_OPTS_CONFIGURATOR) == null) {
            dag.setAttribute(LogicalPlan.CONTAINER_OPTS_CONFIGURATOR, new BasicContainerOptConfigurator());
        }

        // Set the log4j properties if needed
        if (!log4jPropFile.isEmpty()) {
            Path log4jSrc = new Path(log4jPropFile);
            Path log4jDst = new Path(appPath, "log4j.props");
            fs.copyFromLocalFile(false, true, log4jSrc, log4jDst);
            FileStatus log4jFileStatus = fs.getFileStatus(log4jDst);
            LocalResource log4jRsrc = Records.newRecord(LocalResource.class);
            log4jRsrc.setType(LocalResourceType.FILE);
            log4jRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
            log4jRsrc.setResource(ConverterUtils.getYarnUrlFromURI(log4jDst.toUri()));
            log4jRsrc.setTimestamp(log4jFileStatus.getModificationTime());
            log4jRsrc.setSize(log4jFileStatus.getLen());
            localResources.put("log4j.properties", log4jRsrc);
        }

        if (originalAppId != null) {
            Path origAppPath = new Path(appsBasePath, this.originalAppId);
            LOG.info("Restart from {}", origAppPath);
            copyInitialState(origAppPath);
        }

        // push logical plan to DFS location
        Path cfgDst = new Path(appPath, LogicalPlan.SER_FILE_NAME);
        FSDataOutputStream outStream = fs.create(cfgDst, true);
        LogicalPlan.write(this.dag, outStream);
        outStream.close();

        Path launchConfigDst = new Path(appPath, LogicalPlan.LAUNCH_CONFIG_FILE_NAME);
        outStream = fs.create(launchConfigDst, true);
        conf.writeXml(outStream);
        outStream.close();

        FileStatus topologyFileStatus = fs.getFileStatus(cfgDst);
        LocalResource topologyRsrc = Records.newRecord(LocalResource.class);
        topologyRsrc.setType(LocalResourceType.FILE);
        topologyRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
        topologyRsrc.setResource(ConverterUtils.getYarnUrlFromURI(cfgDst.toUri()));
        topologyRsrc.setTimestamp(topologyFileStatus.getModificationTime());
        topologyRsrc.setSize(topologyFileStatus.getLen());
        localResources.put(LogicalPlan.SER_FILE_NAME, topologyRsrc);

        // Set local resource info into app master container launch context
        amContainer.setLocalResources(localResources);

        // Set the necessary security tokens as needed
        //amContainer.setContainerTokens(containerToken);
        // Set the env variables to be setup in the env where the application master will be run
        LOG.info("Set the environment for the application master");
        Map<String, String> env = new HashMap<String, String>();

        // Add application jar(s) location to classpath
        // At some point we should not be required to add
        // the hadoop specific classpaths to the env.
        // It should be provided out of the box.
        // For now setting all required classpaths including
        // the classpath to "." for the application jar(s)
        // including ${CLASSPATH} will duplicate the class path in app master, removing it for now
        //StringBuilder classPathEnv = new StringBuilder("${CLASSPATH}:./*");
        StringBuilder classPathEnv = new StringBuilder("./*");
        String classpath = conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH);
        for (String c : StringUtils.isBlank(classpath) ? YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH
                : classpath.split(",")) {
            if (c.equals("$HADOOP_CLIENT_CONF_DIR")) {
                // SPOI-2501
                continue;
            }
            classPathEnv.append(':');
            classPathEnv.append(c.trim());
        }
        env.put("CLASSPATH", classPathEnv.toString());
        // propagate to replace node managers user name (effective in non-secure mode)
        env.put("HADOOP_USER_NAME", UserGroupInformation.getLoginUser().getUserName());

        amContainer.setEnvironment(env);

        // Set the necessary command to execute the application master
        ArrayList<CharSequence> vargs = new ArrayList<CharSequence>(30);

        // Set java executable command
        LOG.info("Setting up app master command");
        vargs.add(javaCmd);
        if (dag.isDebug()) {
            vargs.add("-agentlib:jdwp=transport=dt_socket,server=y,suspend=n");
        }
        // Set Xmx based on am memory size
        // default heap size 75% of total memory
        if (dag.getMasterJVMOptions() != null) {
            vargs.add(dag.getMasterJVMOptions());
        }
        vargs.add("-Xmx" + (amMemory * 3 / 4) + "m");
        vargs.add("-XX:+HeapDumpOnOutOfMemoryError");
        vargs.add("-XX:HeapDumpPath=/tmp/dt-heap-" + appId.getId() + ".bin");
        vargs.add("-Dhadoop.root.logger=" + (dag.isDebug() ? "DEBUG" : "INFO") + ",RFA");
        vargs.add("-Dhadoop.log.dir=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR);
        vargs.add(String.format("-D%s=%s", StreamingContainer.PROP_APP_PATH, dag.assertAppPath()));
        if (dag.isDebug()) {
            vargs.add("-Dlog4j.debug=true");
        }

        String loggersLevel = conf.get(DTLoggerFactory.DT_LOGGERS_LEVEL);
        if (loggersLevel != null) {
            vargs.add(String.format("-D%s=%s", DTLoggerFactory.DT_LOGGERS_LEVEL, loggersLevel));
        }
        vargs.add(StreamingAppMaster.class.getName());
        vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
        vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

        // Get final command
        StringBuilder command = new StringBuilder(9 * vargs.size());
        for (CharSequence str : vargs) {
            command.append(str).append(" ");
        }

        LOG.info("Completed setting up app master command " + command.toString());
        List<String> commands = new ArrayList<String>();
        commands.add(command.toString());
        amContainer.setCommands(commands);

        // Set up resource type requirements
        // For now, only memory is supported so we set memory requirements
        Resource capability = Records.newRecord(Resource.class);
        capability.setMemory(amMemory);
        appContext.setResource(capability);

        // Service data is a binary blob that can be passed to the application
        // Not needed in this scenario
        // amContainer.setServiceData(serviceData);
        appContext.setAMContainerSpec(amContainer);

        // Set the priority for the application master
        Priority pri = Records.newRecord(Priority.class);
        pri.setPriority(amPriority);
        appContext.setPriority(pri);
        // Set the queue to which this application is to be submitted in the RM
        appContext.setQueue(queueName);

        // Submit the application to the applications manager
        // SubmitApplicationResponse submitResp = rmClient.submitApplication(appRequest);
        // Ignore the response as either a valid response object is returned on success
        // or an exception thrown to denote some form of a failure
        String specStr = Objects.toStringHelper("Submitting application: ")
                .add("name", appContext.getApplicationName()).add("queue", appContext.getQueue())
                .add("user", UserGroupInformation.getLoginUser()).add("resource", appContext.getResource())
                .toString();
        LOG.info(specStr);
        if (dag.isDebug()) {
            //LOG.info("Full submission context: " + appContext);
        }
        yarnClient.submitApplication(appContext);
    } finally {
        fs.close();
    }
}