Example usage for org.apache.hadoop.security Credentials addToken

Introduction

This page collects example usages of org.apache.hadoop.security.Credentials#addToken drawn from open source projects.

Prototype

public void addToken(Text alias, Token<? extends TokenIdentifier> t) 

Document

Add a token in the storage (in memory).
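
For reference, here is a minimal, self-contained sketch of the call itself. It is not taken from any of the projects listed below; the alias name and the empty placeholder token are illustrative only.

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class AddTokenSketch {
    public static void main(String[] args) {
        Credentials credentials = new Credentials();

        // An empty Token stands in for a real delegation token here.
        Token<TokenIdentifier> token = new Token<TokenIdentifier>();

        // Store the token in the in-memory map under an alias of our choosing.
        Text alias = new Text("my.service.token");
        credentials.addToken(alias, token);

        // Lookups use the same alias; an unknown alias yields null.
        System.out.println("stored tokens: " + credentials.numberOfTokens());
        System.out.println("found by alias: " + (credentials.getToken(alias) != null));
        System.out.println("unknown alias: " + credentials.getToken(new Text("missing")));
    }
}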

Usage

From source file: org.apache.tez.dag.app.rm.container.TestAMContainer.java

License: Apache License

@SuppressWarnings("unchecked")
@Test(timeout = 5000)
public void testCredentialsTransfer() {
    WrappedContainerMultipleDAGs wc = new WrappedContainerMultipleDAGs();

    TezDAGID dagID2 = TezDAGID.getInstance("800", 500, 2);
    TezDAGID dagID3 = TezDAGID.getInstance("800", 500, 3);
    TezVertexID vertexID2 = TezVertexID.getInstance(dagID2, 1);
    TezVertexID vertexID3 = TezVertexID.getInstance(dagID3, 1);
    TezTaskID taskID2 = TezTaskID.getInstance(vertexID2, 1);
    TezTaskID taskID3 = TezTaskID.getInstance(vertexID3, 1);

    TezTaskAttemptID attempt11 = TezTaskAttemptID.getInstance(wc.taskID, 200);
    TezTaskAttemptID attempt12 = TezTaskAttemptID.getInstance(wc.taskID, 300);
    TezTaskAttemptID attempt21 = TezTaskAttemptID.getInstance(taskID2, 200);
    TezTaskAttemptID attempt22 = TezTaskAttemptID.getInstance(taskID2, 300);
    TezTaskAttemptID attempt31 = TezTaskAttemptID.getInstance(taskID3, 200);
    TezTaskAttemptID attempt32 = TezTaskAttemptID.getInstance(taskID3, 300);

    Map<String, LocalResource> LRs = new HashMap<String, LocalResource>();
    AMContainerTask fetchedTask = null;
    ArgumentCaptor<AMContainerTask> argumentCaptor = null;

    Token<TokenIdentifier> amGenToken = mock(Token.class);
    Token<TokenIdentifier> token1 = mock(Token.class);
    Token<TokenIdentifier> token3 = mock(Token.class);

    Credentials containerCredentials = new Credentials();
    TokenCache.setSessionToken(amGenToken, containerCredentials);

    Text token1Name = new Text("tokenDag1");
    Text token3Name = new Text("tokenDag3");

    Credentials dag1Credentials = new Credentials();
    dag1Credentials.addToken(new Text(token1Name), token1);
    Credentials dag3Credentials = new Credentials();
    dag3Credentials.addToken(new Text(token3Name), token3);

    wc.launchContainer(new HashMap<String, LocalResource>(), containerCredentials);
    wc.containerLaunched();
    wc.assignTaskAttempt(attempt11, LRs, dag1Credentials);
    argumentCaptor = ArgumentCaptor.forClass(AMContainerTask.class);
    verify(wc.tal, times(1)).registerTaskAttempt(argumentCaptor.capture(), eq(wc.containerID));
    fetchedTask = argumentCaptor.getAllValues().get(0);
    assertTrue(fetchedTask.haveCredentialsChanged());
    assertNotNull(fetchedTask.getCredentials());
    assertNotNull(fetchedTask.getCredentials().getToken(token1Name));
    wc.taskAttemptSucceeded(attempt11);

    wc.assignTaskAttempt(attempt12, LRs, dag1Credentials);
    argumentCaptor = ArgumentCaptor.forClass(AMContainerTask.class);
    verify(wc.tal, times(2)).registerTaskAttempt(argumentCaptor.capture(), eq(wc.containerID));
    fetchedTask = argumentCaptor.getAllValues().get(1);
    assertFalse(fetchedTask.haveCredentialsChanged());
    assertNull(fetchedTask.getCredentials());
    wc.taskAttemptSucceeded(attempt12);

    // Move to running a second DAG, with no credentials.
    wc.setNewDAGID(dagID2);
    wc.assignTaskAttempt(attempt21, LRs, null);
    argumentCaptor = ArgumentCaptor.forClass(AMContainerTask.class);
    verify(wc.tal, times(3)).registerTaskAttempt(argumentCaptor.capture(), eq(wc.containerID));
    fetchedTask = argumentCaptor.getAllValues().get(2);
    assertTrue(fetchedTask.haveCredentialsChanged());
    assertNull(fetchedTask.getCredentials());
    wc.taskAttemptSucceeded(attempt21);

    wc.assignTaskAttempt(attempt22, LRs, null);
    argumentCaptor = ArgumentCaptor.forClass(AMContainerTask.class);
    verify(wc.tal, times(4)).registerTaskAttempt(argumentCaptor.capture(), eq(wc.containerID));
    fetchedTask = argumentCaptor.getAllValues().get(3);
    assertFalse(fetchedTask.haveCredentialsChanged());
    assertNull(fetchedTask.getCredentials());
    wc.taskAttemptSucceeded(attempt22);

    // Move to running a third DAG, with Credentials this time
    wc.setNewDAGID(dagID3);
    wc.assignTaskAttempt(attempt31, LRs, dag3Credentials);
    argumentCaptor = ArgumentCaptor.forClass(AMContainerTask.class);
    verify(wc.tal, times(5)).registerTaskAttempt(argumentCaptor.capture(), eq(wc.containerID));
    fetchedTask = argumentCaptor.getAllValues().get(4);
    assertTrue(fetchedTask.haveCredentialsChanged());
    assertNotNull(fetchedTask.getCredentials());
    assertNotNull(fetchedTask.getCredentials().getToken(token3Name));
    assertNull(fetchedTask.getCredentials().getToken(token1Name));
    wc.taskAttemptSucceeded(attempt31);

    wc.assignTaskAttempt(attempt32, LRs, dag1Credentials);
    argumentCaptor = ArgumentCaptor.forClass(AMContainerTask.class);
    verify(wc.tal, times(6)).registerTaskAttempt(argumentCaptor.capture(), eq(wc.containerID));
    fetchedTask = argumentCaptor.getAllValues().get(5);
    assertFalse(fetchedTask.haveCredentialsChanged());
    assertNull(fetchedTask.getCredentials());
    wc.taskAttemptSucceeded(attempt32);
}

From source file: org.apache.tez.dag.app.TestDAGAppMaster.java

License: Apache License

@SuppressWarnings("deprecation")
private void testDagCredentials(boolean doMerge) throws IOException {
    TezConfiguration conf = new TezConfiguration();
    conf.setBoolean(TezConfiguration.TEZ_AM_CREDENTIALS_MERGE, doMerge);
    conf.setBoolean(TezConfiguration.TEZ_LOCAL_MODE, true);
    conf.set(TezConfiguration.TEZ_AM_STAGING_DIR, TEST_DIR.toString());
    ApplicationId appId = ApplicationId.newInstance(1, 1);
    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);

    // create some sample AM credentials
    Credentials amCreds = new Credentials();
    JobTokenSecretManager jtsm = new JobTokenSecretManager();
    JobTokenIdentifier identifier = new JobTokenIdentifier(new Text(appId.toString()));
    Token<JobTokenIdentifier> sessionToken = new Token<JobTokenIdentifier>(identifier, jtsm);
    sessionToken.setService(identifier.getJobId());
    TokenCache.setSessionToken(sessionToken, amCreds);
    TestTokenSecretManager ttsm = new TestTokenSecretManager();
    Text tokenAlias1 = new Text("alias1");
    Token<TestTokenIdentifier> amToken1 = new Token<TestTokenIdentifier>(
            new TestTokenIdentifier(new Text("amtoken1")), ttsm);
    amCreds.addToken(tokenAlias1, amToken1);
    Text tokenAlias2 = new Text("alias2");
    Token<TestTokenIdentifier> amToken2 = new Token<TestTokenIdentifier>(
            new TestTokenIdentifier(new Text("amtoken2")), ttsm);
    amCreds.addToken(tokenAlias2, amToken2);

    FileSystem fs = FileSystem.getLocal(conf);
    FSDataOutputStream sessionJarsPBOutStream = TezCommonUtils.createFileForAM(fs,
            new Path(TEST_DIR.toString(), TezConstants.TEZ_AM_LOCAL_RESOURCES_PB_FILE_NAME));
    DAGProtos.PlanLocalResourcesProto.getDefaultInstance().writeDelimitedTo(sessionJarsPBOutStream);
    sessionJarsPBOutStream.close();
    DAGAppMaster am = new DAGAppMaster(attemptId, ContainerId.newInstance(attemptId, 1), "127.0.0.1", 0, 0,
            new SystemClock(), 1, true, TEST_DIR.toString(), new String[] { TEST_DIR.toString() },
            new String[] { TEST_DIR.toString() }, new TezApiVersionInfo().getVersion(), 1, amCreds, "someuser",
            null);
    am.init(conf);
    am.start();

    // create some sample DAG credentials
    Credentials dagCreds = new Credentials();
    Token<TestTokenIdentifier> dagToken1 = new Token<TestTokenIdentifier>(
            new TestTokenIdentifier(new Text("dagtoken1")), ttsm);
    dagCreds.addToken(tokenAlias2, dagToken1);
    Text tokenAlias3 = new Text("alias3");
    Token<TestTokenIdentifier> dagToken2 = new Token<TestTokenIdentifier>(
            new TestTokenIdentifier(new Text("dagtoken2")), ttsm);
    dagCreds.addToken(tokenAlias3, dagToken2);

    TezDAGID dagId = TezDAGID.getInstance(appId, 1);
    DAGPlan dagPlan = DAGPlan.newBuilder().setName("somedag")
            .setCredentialsBinary(DagTypeConverters.convertCredentialsToProto(dagCreds)).build();
    DAGImpl dag = am.createDAG(dagPlan, dagId);
    Credentials fetchedDagCreds = dag.getCredentials();
    am.stop();

    Token<? extends TokenIdentifier> fetchedToken1 = fetchedDagCreds.getToken(tokenAlias1);
    if (doMerge) {
        assertNotNull("AM creds missing from DAG creds", fetchedToken1);
        compareTestTokens(amToken1, fetchedDagCreds.getToken(tokenAlias1));
    } else {
        assertNull("AM creds leaked to DAG creds", fetchedToken1);
    }
    compareTestTokens(dagToken1, fetchedDagCreds.getToken(tokenAlias2));
    compareTestTokens(dagToken2, fetchedDagCreds.getToken(tokenAlias3));
}
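
The merge behaviour exercised by this test can be reproduced with the plain Credentials API. The sketch below is not Tez code; it only illustrates how DAG credentials can take precedence over AM credentials for a shared alias, since mergeAll keeps existing entries while addAll overwrites them.

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class CredentialsMergeSketch {
    public static void main(String[] args) {
        Text shared = new Text("alias2");

        Credentials amCreds = new Credentials();
        amCreds.addToken(new Text("alias1"), new Token<TokenIdentifier>());
        amCreds.addToken(shared, new Token<TokenIdentifier>());

        Credentials dagCreds = new Credentials();
        Token<TokenIdentifier> dagToken = new Token<TokenIdentifier>();
        dagCreds.addToken(shared, dagToken);

        // Start from the DAG credentials, then fold in the AM credentials
        // without overwriting aliases the DAG already defines.
        Credentials merged = new Credentials();
        merged.addAll(dagCreds);
        merged.mergeAll(amCreds);

        System.out.println("tokens after merge: " + merged.numberOfTokens());      // 2
        System.out.println("DAG token wins for shared alias: "
                + (merged.getToken(shared) == dagToken));                          // true
    }
}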

From source file: org.apache.tez.engine.common.security.TokenCache.java

License: Apache License

/**
 * Store the job token in the given credentials.
 * @param t the job token
 * @param credentials the credential store to add it to
 */
@InterfaceAudience.Private
public static void setJobToken(Token<? extends TokenIdentifier> t, Credentials credentials) {
    credentials.addToken(JOB_TOKEN, t);
}
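
The helper stores the token under a fixed alias (JOB_TOKEN). A matching read side would look roughly like the sketch below; the alias value used here is a placeholder, not the actual constant from the TokenCache class above.

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public final class JobTokenSketch {
    // Hypothetical alias; the real value is defined inside TokenCache.
    private static final Text JOB_TOKEN = new Text("job.token");

    public static void setJobToken(Token<? extends TokenIdentifier> t, Credentials credentials) {
        credentials.addToken(JOB_TOKEN, t);
    }

    public static Token<? extends TokenIdentifier> getJobToken(Credentials credentials) {
        // Retrieval uses the same fixed alias the setter used.
        return credentials.getToken(JOB_TOKEN);
    }
}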

From source file: org.apache.twill.internal.yarn.Hadoop23YarnAppClient.java

License: Apache License

/**
 * Overrides the parent method to add the RM delegation token to the given context. If YARN is running
 * with an HA RM, delegation tokens for each RM service will be added.
 */
protected void addRMToken(ContainerLaunchContext context, YarnClient yarnClient, ApplicationId appId) {
    if (!UserGroupInformation.isSecurityEnabled()) {
        return;
    }

    try {
        Text renewer = new Text(UserGroupInformation.getCurrentUser().getShortUserName());
        org.apache.hadoop.yarn.api.records.Token rmDelegationToken = yarnClient.getRMDelegationToken(renewer);

        // The following logic is copied from ClientRMProxy.getRMDelegationTokenService, which is not available in
        // YARN older than 2.4
        List<String> services = new ArrayList<>();
        if (HAUtil.isHAEnabled(configuration)) {
            // If HA is enabled, we need to enumerate all RM hosts
            // and add the corresponding service name to the token service
            // Copy the yarn conf since we need to modify it to get the RM addresses
            YarnConfiguration yarnConf = new YarnConfiguration(configuration);
            for (String rmId : HAUtil.getRMHAIds(configuration)) {
                yarnConf.set(YarnConfiguration.RM_HA_ID, rmId);
                InetSocketAddress address = yarnConf.getSocketAddr(YarnConfiguration.RM_ADDRESS,
                        YarnConfiguration.DEFAULT_RM_ADDRESS, YarnConfiguration.DEFAULT_RM_PORT);
                services.add(SecurityUtil.buildTokenService(address).toString());
            }
        } else {
            services.add(SecurityUtil.buildTokenService(YarnUtils.getRMAddress(configuration)).toString());
        }

        Credentials credentials = YarnUtils.decodeCredentials(context.getTokens());

        // casting needed for later Hadoop version
        @SuppressWarnings("RedundantCast")
        Token<TokenIdentifier> token = ConverterUtils.convertFromYarn(rmDelegationToken,
                (InetSocketAddress) null);

        token.setService(new Text(Joiner.on(',').join(services)));
        credentials.addToken(token.getService(), token);

        LOG.debug("Added RM delegation token {} for application {}", token, appId);

        context.setTokens(YarnUtils.encodeCredentials(credentials));

    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
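
Twill's YarnUtils.encodeCredentials/decodeCredentials helpers hide the serialization step. A plausible equivalent using only stock Hadoop and YARN classes looks roughly like the sketch below; it is not the Twill implementation.

import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;

public final class LaunchContextTokens {
    /** Serialize the credentials and attach them to a container launch context. */
    public static void setTokens(ContainerLaunchContext context, Credentials credentials)
            throws IOException {
        DataOutputBuffer out = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(out);
        // Only the first getLength() bytes of the backing array are valid.
        context.setTokens(ByteBuffer.wrap(out.getData(), 0, out.getLength()));
    }
}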

From source file: org.apache.twill.internal.yarn.YarnUtils.java

License: Apache License

/**
 * Helper method to get delegation tokens for the given LocationFactory.
 * @param config The hadoop configuration.
 * @param locationFactory The LocationFactory for generating tokens.
 * @param credentials Credentials for storing tokens acquired.
 * @return List of delegation Tokens acquired.
 */
public static List<Token<?>> addDelegationTokens(Configuration config, LocationFactory locationFactory,
        Credentials credentials) throws IOException {
    if (!UserGroupInformation.isSecurityEnabled()) {
        LOG.debug("Security is not enabled");
        return ImmutableList.of();
    }

    LocationFactory factory = unwrap(locationFactory);
    String renewer = getYarnTokenRenewer(config);
    List<Token<?>> tokens = ImmutableList.of();

    if (factory instanceof HDFSLocationFactory) {
        FileSystem fs = ((HDFSLocationFactory) factory).getFileSystem();
        Token<?>[] fsTokens = fs.addDelegationTokens(renewer, credentials);
        if (fsTokens != null) {
            tokens = ImmutableList.copyOf(fsTokens);
        }
    } else if (factory instanceof FileContextLocationFactory) {
        FileContext fc = ((FileContextLocationFactory) locationFactory).getFileContext();
        tokens = fc.getDelegationTokens(new Path(locationFactory.create("/").toURI()), renewer);
    }

    for (Token<?> token : tokens) {
        credentials.addToken(token.getService(), token);
    }

    return ImmutableList.copyOf(tokens);
}
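
Stripped of the Twill LocationFactory plumbing, the core pattern is the standard FileSystem.addDelegationTokens call, which both returns the newly obtained tokens and adds them (keyed by their service) to the supplied Credentials. A minimal sketch, with the renewer left as a parameter:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public final class FsDelegationTokens {
    public static void collect(Configuration conf, String renewer, Credentials credentials)
            throws IOException {
        FileSystem fs = FileSystem.get(conf);

        // addDelegationTokens returns the tokens it obtained and also adds them
        // to the Credentials passed in, keyed by each token's service.
        Token<?>[] obtained = fs.addDelegationTokens(renewer, credentials);
        System.out.println("obtained " + (obtained == null ? 0 : obtained.length)
                + " token(s); credentials now hold " + credentials.numberOfTokens());
    }
}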

From source file: org.openflamingo.remote.thrift.thriftfs.ThriftUtils.java

License: Apache License

public static ThriftDelegationToken toThrift(Token<? extends AbstractDelegationTokenIdentifier> delegationToken,
        InetSocketAddress address) throws java.io.IOException {
    String serviceAddress = InetAddress.getByName(address.getHostName()).getHostAddress() + ":"
            + address.getPort();
    delegationToken.setService(new Text(serviceAddress));

    DataOutputBuffer out = new DataOutputBuffer();
    Credentials ts = new Credentials();
    ts.addToken(new Text(serviceAddress), delegationToken);
    ts.writeTokenStorageToStream(out);

    byte[] tokenData = new byte[out.getLength()];
    System.arraycopy(out.getData(), 0, tokenData, 0, tokenData.length);
    return new ThriftDelegationToken(ByteBuffer.wrap(tokenData));
}
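
The byte array produced by writeTokenStorageToStream can be turned back into tokens on the receiving side with the matching read API. A sketch of the reverse direction, assuming the token was stored under its service address as in the example above:

import java.io.IOException;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public final class TokenStorageRoundTrip {
    /** Rebuild the token from bytes written by Credentials.writeTokenStorageToStream. */
    public static Token<? extends TokenIdentifier> readBack(byte[] tokenData, String serviceAddress)
            throws IOException {
        DataInputBuffer in = new DataInputBuffer();
        in.reset(tokenData, tokenData.length);

        Credentials ts = new Credentials();
        ts.readTokenStorageStream(in);

        // The token was stored with its "host:port" service address as the alias.
        return ts.getToken(new Text(serviceAddress));
    }
}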

From source file: origin.hadoop.yarn.unmanagedamlauncher.UnmanagedAMLauncher.java

License: Apache License

public void launchAM(ApplicationAttemptId attemptId) throws IOException, YarnException {
    Credentials credentials = new Credentials();
    Token<AMRMTokenIdentifier> token = rmClient.getAMRMToken(attemptId.getApplicationId());
    // Service will be empty but that's okay; we are just passing the
    // AMRMToken down to the real AM, which eventually sets the correct
    // service address.
    credentials.addToken(token.getService(), token);
    File tokenFile = File.createTempFile("unmanagedAMRMToken", "", new File(System.getProperty("user.dir")));
    try {
        FileUtil.chmod(tokenFile.getAbsolutePath(), "600");
    } catch (InterruptedException ex) {
        throw new RuntimeException(ex);
    }
    tokenFile.deleteOnExit();
    DataOutputStream os = new DataOutputStream(new FileOutputStream(tokenFile, true));
    credentials.writeTokenStorageToStream(os);
    os.close();

    Map<String, String> env = System.getenv();
    ArrayList<String> envAMList = new ArrayList<String>();
    boolean setClasspath = false;
    for (Map.Entry<String, String> entry : env.entrySet()) {
        String key = entry.getKey();
        String value = entry.getValue();
        if (key.equals("CLASSPATH")) {
            setClasspath = true;
            if (classpath != null) {
                value = value + File.pathSeparator + classpath;
            }
        }
        envAMList.add(key + "=" + value);
    }

    if (!setClasspath && classpath != null) {
        envAMList.add("CLASSPATH=" + classpath);
    }
    ContainerId containerId = ContainerId.newInstance(attemptId, 0);

    String hostname = InetAddress.getLocalHost().getHostName();
    envAMList.add(Environment.CONTAINER_ID.name() + "=" + containerId);
    envAMList.add(Environment.NM_HOST.name() + "=" + hostname);
    envAMList.add(Environment.NM_HTTP_PORT.name() + "=0");
    envAMList.add(Environment.NM_PORT.name() + "=0");
    envAMList.add(Environment.LOCAL_DIRS.name() + "= /tmp");
    envAMList.add(ApplicationConstants.APP_SUBMIT_TIME_ENV + "=" + System.currentTimeMillis());

    envAMList.add(ApplicationConstants.CONTAINER_TOKEN_FILE_ENV_NAME + "=" + tokenFile.getAbsolutePath());

    String[] envAM = new String[envAMList.size()];
    Process amProc = Runtime.getRuntime().exec(amCmd, envAMList.toArray(envAM));

    final BufferedReader errReader = new BufferedReader(new InputStreamReader(amProc.getErrorStream()));
    final BufferedReader inReader = new BufferedReader(new InputStreamReader(amProc.getInputStream()));

    // read error and input streams as this would free up the buffers
    // free the error stream buffer
    Thread errThread = new Thread() {
        @Override
        public void run() {
            try {
                String line = errReader.readLine();
                while ((line != null) && !isInterrupted()) {
                    System.err.println(line);
                    line = errReader.readLine();
                }
            } catch (IOException ioe) {
                LOG.warn("Error reading the error stream", ioe);
            }
        }
    };
    Thread outThread = new Thread() {
        @Override
        public void run() {
            try {
                String line = inReader.readLine();
                while ((line != null) && !isInterrupted()) {
                    System.out.println(line);
                    line = inReader.readLine();
                }
            } catch (IOException ioe) {
                LOG.warn("Error reading the out stream", ioe);
            }
        }
    };
    try {
        errThread.start();
        outThread.start();
    } catch (IllegalStateException ise) {
    }

    // wait for the process to finish and check the exit code
    try {
        int exitCode = amProc.waitFor();
        LOG.info("AM process exited with value: " + exitCode);
    } catch (InterruptedException e) {
        e.printStackTrace();
    } finally {
        amCompleted = true;
    }

    try {
        // make sure that the error thread exits
        // on Windows these threads sometimes get stuck and hang the execution
        // timeout and join later after destroying the process.
        errThread.join();
        outThread.join();
        errReader.close();
        inReader.close();
    } catch (InterruptedException ie) {
        LOG.info("ShellExecutor: Interrupted while reading the error/out stream", ie);
    } catch (IOException ioe) {
        LOG.warn("Error while closing the error/out stream", ioe);
    }
    amProc.destroy();
}
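
On the AM side, a token file like the one written above is typically loaded back into a Credentials object and attached to the current UGI. A hedged sketch of that read path; the file path is whatever the CONTAINER_TOKEN_FILE_ENV_NAME environment variable points at:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;

public final class TokenFileLoader {
    public static void loadTokenFile(String tokenFilePath) throws IOException {
        Configuration conf = new Configuration();

        // readTokenStorageFile understands the format written by writeTokenStorageToStream.
        Credentials credentials = Credentials.readTokenStorageFile(new Path(tokenFilePath), conf);

        // Make the tokens visible to RPC clients running as the current user.
        UserGroupInformation.getCurrentUser().addCredentials(credentials);
    }
}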

From source file: uk.ac.gla.terrier.probos.controller.ControllerServer.java

License: Open Source License

protected int yarnJob(final JobInformation ji, final String requestorUserName) throws IOException {
    assert ji.scriptLocation != null;
    assert ji.folderLocation != null;
    final PBSJob job = ji.jobSpec;
    PrivilegedExceptionAction<Integer> submitAction = new PrivilegedExceptionAction<Integer>() {
        public Integer run() throws Exception {
            File luaFile = writeJobKittenSpec(job, ji.scriptLocation, ji.jobId, false);
            Configuration kConf = new Configuration(yConf);
            kConf.set(LocalDataHelper.APP_BASE_DIR, ji.folderLocation.toUri().toString());
            YarnClientParameters params = new LuaYarnClientParameters(luaFile.toString(),
                    Constants.PRODUCT_NAME, kConf, extraLuaValues, extraLocalResources);
            ji.jobSpec.setQueue(params.getQueue());

            Credentials creds = new Credentials();

            //create delegation tokens
            //interactive rpc
            InetSocketAddress addr = NetUtils.getConnectAddress(interactiveRpcserver);
            Text host = new Text(addr.getAddress().getHostAddress() + ":" + addr.getPort());
            ProbosDelegationTokenIdentifier tokenId = secretManager.createIdentifier();
            Token<ProbosDelegationTokenIdentifier> delgationToken = new Token<ProbosDelegationTokenIdentifier>(
                    tokenId, secretManager);
            delgationToken.setService(host);
            creds.addToken(host, delgationToken);
            LOG.info("Interactive: Generated token for " + creds.toString() + " : " + delgationToken);

            //client rpc
            tokenId = secretManager.createIdentifier();
            delgationToken = new Token<ProbosDelegationTokenIdentifier>(tokenId, secretManager);
            addr = NetUtils.getConnectAddress(clientRpcserver);
            host = new Text(addr.getAddress().getHostAddress() + ":" + addr.getPort());
            delgationToken.setService(host);
            creds.addToken(host, delgationToken);
            LOG.info("Client: Generated token for " + creds.toString() + " : " + delgationToken);

            //master rpc
            tokenId = secretManager.createIdentifier();
            delgationToken = new Token<ProbosDelegationTokenIdentifier>(tokenId, secretManager);
            addr = NetUtils.getConnectAddress(masterRpcserver);
            host = new Text(addr.getAddress().getHostAddress() + ":" + addr.getPort());
            delgationToken.setService(host);
            creds.addToken(host, delgationToken);
            LOG.info("Master: Generated token for " + creds.toString() + " : " + delgationToken);

            YarnClientService service = new YarnClientServiceImpl(params, creds);
            service.startAndWait();
            if (!service.isRunning()) {
                LOG.error("YarnClientService failed to startup, exiting...");
                jobArray.remove(ji.jobId);
                return ji.jobId;
            }
            ji.kitten = service;
            ji.modify();
            return ji.jobId;
        }
    };
    //setuid to the requestor's user id
    UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(requestorUserName,
            UserGroupInformation.getLoginUser());
    Integer rtr = null;
    try {
        if (UserGroupInformation.isSecurityEnabled())
            rtr = proxyUser.doAs(submitAction);
        else
            rtr = submitAction.run();
        ji.proxyUser = proxyUser;
        ji.modify();
        runningJobs.inc();
        return rtr.intValue();
    } catch (Exception e) {
        LOG.error("job did not submit!", e);
        return -1;
    }

}
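
Each token in this example is stored under, and has its service set to, the RPC endpoint's "ip:port" text, so a consumer can look it up by rebuilding the same Text. A small illustrative sketch (the address is a placeholder):

import java.net.InetSocketAddress;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public final class EndpointTokenLookup {
    public static Token<? extends TokenIdentifier> tokenFor(Credentials creds, InetSocketAddress addr) {
        // The same "ip:port" string the server used both as the alias and as the token service.
        Text service = new Text(addr.getAddress().getHostAddress() + ":" + addr.getPort());
        return creds.getToken(service);
    }
}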