Example usage for java.security.PrivilegedExceptionAction

Introduction

This page collects example usages of java.security.PrivilegedExceptionAction drawn from open source projects.

Prototype

public interface PrivilegedExceptionAction<T> {
    T run() throws Exception;
}
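
A minimal sketch of the typical pattern (illustrative only, not taken from the projects below; the class and method names are made up): run() may throw a checked exception, which doPrivileged delivers wrapped in a PrivilegedActionException for the caller to unwrap.

import java.io.FileInputStream;
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

public class PrivilegedReadExample {

    static FileInputStream openPrivileged(final String path) throws Exception {
        try {
            // The checked exception thrown by run() is wrapped in a
            // PrivilegedActionException by doPrivileged.
            return AccessController.doPrivileged(new PrivilegedExceptionAction<FileInputStream>() {
                @Override
                public FileInputStream run() throws Exception {
                    return new FileInputStream(path);
                }
            });
        } catch (PrivilegedActionException e) {
            // Recover and rethrow the original checked exception.
            throw e.getException();
        }
    }
}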

Usage

From source file:org.apache.hadoop.hbase.security.SaslClientHandler.java

private byte[] evaluateChallenge(final byte[] challenge) throws Exception {
    return ticket.doAs(new PrivilegedExceptionAction<byte[]>() {

        @Override
        public byte[] run() throws Exception {
            return saslClient.evaluateChallenge(challenge);
        }
    });
}

From source file:org.apache.hadoop.hive.ql.txn.compactor.CompactorThread.java

/**
 * Determine which user to run an operation as, based on the owner of the directory to be
 * compacted.  It is asserted that either the user running the hive metastore or the table
 * owner must be able to stat the directory and determine the owner.
 * @param location directory that will be read or written to.
 * @param t metastore table object
 * @return username of the owner of the location.
 * @throws java.io.IOException if neither the hive metastore user nor the table owner can stat
 * the location.
 */
protected String findUserToRunAs(String location, Table t) throws IOException, InterruptedException {
    LOG.debug("Determining who to run the job as.");
    final Path p = new Path(location);
    final FileSystem fs = p.getFileSystem(conf);
    try {
        FileStatus stat = fs.getFileStatus(p);
        LOG.debug("Running job as " + stat.getOwner());
        return stat.getOwner();
    } catch (AccessControlException e) {
        // TODO not sure this is the right exception
        LOG.debug("Unable to stat file as current user, trying as table owner");

        // Now, try it as the table owner and see if we get better luck.
        final List<String> wrapper = new ArrayList<String>(1);
        UserGroupInformation ugi = UserGroupInformation.createProxyUser(t.getOwner(),
                UserGroupInformation.getLoginUser());
        ugi.doAs(new PrivilegedExceptionAction<Object>() {
            @Override
            public Object run() throws Exception {
                FileStatus stat = fs.getFileStatus(p);
                wrapper.add(stat.getOwner());
                return null;
            }
        });

        if (wrapper.size() == 1) {
            LOG.debug("Running job as " + wrapper.get(0));
            return wrapper.get(0);
        }
    }
    LOG.error("Unable to stat file as either current user or table owner, giving up");
    throw new IOException("Unable to stat file");
}
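
The single-element wrapper list above is needed only because the anonymous class cannot assign to a local variable of the enclosing method; a sketch of the same lookup that returns the owner directly from run() instead (illustrative, not the project's code):

    UserGroupInformation ugi = UserGroupInformation.createProxyUser(t.getOwner(),
            UserGroupInformation.getLoginUser());
    // doAs returns the value produced by run(), so no wrapper list is required.
    String owner = ugi.doAs(new PrivilegedExceptionAction<String>() {
        @Override
        public String run() throws Exception {
            return fs.getFileStatus(p).getOwner();
        }
    });
    LOG.debug("Running job as " + owner);
    return owner;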

From source file:org.apache.hadoop.hdfs.server.namenode.TestSubtreeLockACL.java

@Test
public void testSubtreeMoveNotBlockedByDeepAcl() throws IOException, InterruptedException {
    try {
        setup();

        //Deny access via default acl down level1folder1
        setDenyUserAccessAcl(user2.getShortUserName(), level2folder1);

        //Try to move subtree1. Should succeed because the deny acl sits deeper in the tree.
        FileSystem user2fs = user2.doAs(new PrivilegedExceptionAction<FileSystem>() {
            @Override
            public FileSystem run() throws Exception {
                return FileSystem.get(conf);
            }
        });

        try {
            user2fs.rename(subtree1, new Path(subtree2, "newname"));
        } catch (AccessControlException expected) {
            fail("Operation should complete without errors");
        }

    } finally {
        teardown();
    }
}

From source file:org.apache.hadoop.hdfs.server.namenode.ha.EditLogTailer.java

public void catchupDuringFailover() throws IOException {
    Preconditions.checkState(tailerThread == null || !tailerThread.isAlive(),
            "Tailer thread should not be running once failover starts");
    // Important to do tailing as the login user, in case the shared
    // edits storage is implemented by a JournalManager that depends
    // on security credentials to access the logs (eg QuorumJournalManager).
    SecurityUtil.doAsLoginUser(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            try {
                // It is already under the full name system lock and the checkpointer
                // thread is already stopped. No need to acquire any other lock.
                doTailEdits();
            } catch (InterruptedException e) {
                throw new IOException(e);
            }
            return null;
        }
    });
}

From source file:com.streamsets.pipeline.stage.destination.hdfs.metadataexecutor.HdfsConnectionConfig.java

public void init(Stage.Context context, String prefix, List<Stage.ConfigIssue> issues) {
    conf = new Configuration();
    conf.setClass("fs.file.impl", RawLocalFileSystem.class, FileSystem.class);

    if (hdfsKerberos) {
        conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
                UserGroupInformation.AuthenticationMethod.KERBEROS.name());
        try {
            conf.set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
                    "hdfs/_HOST@" + HadoopSecurityUtil.getDefaultRealm());
        } catch (Exception ex) {
            if (!hdfsConfigs.containsKey(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)) {
                issues.add(context.createConfigIssue(Groups.HDFS.name(), null,
                        HdfsMetadataErrors.HDFS_METADATA_001, ex.toString()));
            }
        }
    }

    if (hdfsConfDir != null && !hdfsConfDir.isEmpty()) {
        File hadoopConfigDir = new File(hdfsConfDir);
        if (!hadoopConfigDir.isAbsolute()) {
            hadoopConfigDir = new File(context.getResourcesDirectory(), hdfsConfDir).getAbsoluteFile();
        }
        if (!hadoopConfigDir.exists()) {
            issues.add(context.createConfigIssue(Groups.HDFS.name(), JOIN.join(prefix, "hdfsConfDir"),
                    HdfsMetadataErrors.HDFS_METADATA_002, hadoopConfigDir.getPath()));
        } else if (!hadoopConfigDir.isDirectory()) {
            issues.add(context.createConfigIssue(Groups.HDFS.name(), JOIN.join(prefix, "hdfsConfDir"),
                    HdfsMetadataErrors.HDFS_METADATA_003, hadoopConfigDir.getPath()));
        } else {
            File coreSite = new File(hadoopConfigDir, "core-site.xml");
            if (coreSite.exists()) {
                if (!coreSite.isFile()) {
                    issues.add(context.createConfigIssue(Groups.HDFS.name(), JOIN.join(prefix, "hdfsConfDir"),
                            HdfsMetadataErrors.HDFS_METADATA_004, coreSite.getPath()));
                }
                conf.addResource(new Path(coreSite.getAbsolutePath()));
            }
            File hdfsSite = new File(hadoopConfigDir, "hdfs-site.xml");
            if (hdfsSite.exists()) {
                if (!hdfsSite.isFile()) {
                    issues.add(context.createConfigIssue(Groups.HDFS.name(), JOIN.join(prefix, "hdfsConfDir"),
                            HdfsMetadataErrors.HDFS_METADATA_004, hdfsSite.getPath()));
                }
                conf.addResource(new Path(hdfsSite.getAbsolutePath()));
            }
        }
    }

    // Unless the user specified a non-empty, non-null HDFS URI, retrieve its value from the configuration.
    if (StringUtils.isEmpty(hdfsUri)) {
        hdfsUri = conf.get("fs.defaultFS");
    }

    for (Map.Entry<String, String> config : hdfsConfigs.entrySet()) {
        conf.set(config.getKey(), config.getValue());
    }

    try {
        loginUgi = HadoopSecurityUtil.getLoginUser(conf);
    } catch (IOException e) {
        LOG.error("Can't create login UGI", e);
        issues.add(context.createConfigIssue(Groups.HDFS.name(), null, HdfsMetadataErrors.HDFS_METADATA_005,
                e.getMessage(), e));
    }

    if (!issues.isEmpty()) {
        return;
    }

    try {
        fs = getUGI().doAs(new PrivilegedExceptionAction<FileSystem>() {
            @Override
            public FileSystem run() throws Exception {
                return FileSystem.newInstance(new URI(hdfsUri), conf);
            }
        });
    } catch (Exception ex) {
        LOG.error("Can't retrieve FileSystem instance", ex);
        issues.add(context.createConfigIssue(Groups.HDFS.name(), null, HdfsMetadataErrors.HDFS_METADATA_005,
                ex.getMessage(), ex));
    }
}

From source file:com.bigstep.datalake.KerberosIdentityAuthenticator.java

/**
 * Implements the SPNEGO authentication sequence interaction using the current default principal
 * in the Kerberos cache (normally set via kinit).
 *
 * @param atoken the authentication token being used for the user.
 * @throws IOException             if an IO error occurred.
 * @throws AuthenticationException if an authentication error occurred.
 */
private void doSpnegoSequence(AuthenticatedURL.Token atoken) throws IOException, AuthenticationException {
    try {

        kerberosIdentity.doAsPriviledged(new PrivilegedExceptionAction<Void>() {

            @Override
            public Void run() throws Exception {
                final Oid KERB_V5_OID = new Oid("1.2.840.113554.1.2.2");

                GSSContext gssContext = null;
                try {
                    GSSManager gssManager = GSSManager.getInstance();

                    final GSSName clientName = gssManager.createName(kerberosIdentity.getPrincipalName(),
                            GSSName.NT_USER_NAME);
                    LOG.info("doSpnegoSequence() using principal:" + kerberosIdentity.getPrincipalName());
                    final GSSCredential clientCred = gssManager.createCredential(clientName, 8 * 3600,
                            KERB_V5_OID, GSSCredential.INITIATE_ONLY);

                    final String applicationPrincipal = "HTTP@" + kerberosIdentity.getRealm();

                    final GSSName serverName = gssManager.createName(applicationPrincipal,
                            GSSName.NT_HOSTBASED_SERVICE);

                    gssContext = gssManager.createContext(serverName, KERB_V5_OID, clientCred,
                            GSSContext.DEFAULT_LIFETIME);

                    gssContext.requestCredDeleg(true);
                    gssContext.requestMutualAuth(true);
                    gssContext.requestConf(false);
                    gssContext.requestInteg(true);

                    byte[] inToken = new byte[0];
                    byte[] outToken;
                    boolean established = false;

                    // Loop while the context is still not established
                    while (!established) {
                        LOG.info("doSpnegoSequence() using token:" + new BASE64Encoder().encode(inToken));
                        outToken = gssContext.initSecContext(inToken, 0, 0);
                        LOG.info("initSecContext() out token:" + new BASE64Encoder().encode(outToken));
                        if (outToken != null) {
                            sendToken(outToken);
                        }

                        if (!gssContext.isEstablished()) {
                            inToken = readToken();
                        } else {
                            established = true;
                        }
                    }
                } finally {
                    if (gssContext != null) {
                        gssContext.dispose();
                        gssContext = null;
                    }
                }
                return null;
            }
        });
    } catch (PrivilegedActionException ex) {
        throw new AuthenticationException(ex.getException());
    }
    AuthenticatedURL.extractToken(conn, atoken);
}

From source file:org.apache.hadoop.hdfs.security.TestDelegationToken.java

@Test
public void testDelegationTokenDFSApi() throws Exception {
    DelegationTokenSecretManager dtSecretManager = cluster.getNameNode().getNamesystem()
            .getDelegationTokenSecretManager();
    DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
    final Token<DelegationTokenIdentifier> token = dfs.getDelegationToken(new Text("JobTracker"));
    DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
    byte[] tokenId = token.getIdentifier();
    identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
    LOG.info("A valid token should have non-null password, and should be renewed successfully");
    Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
    dtSecretManager.renewToken(token, "JobTracker");
    UserGroupInformation.createRemoteUser("JobTracker").doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
            token.renew(config);
            token.cancel(config);
            return null;
        }
    });
}

From source file:org.apache.hadoop.hive.ql.txn.compactor.Worker.java

@Override
public void run() {
    do {
        boolean launchedJob = false;
        // Make sure nothing escapes this run method and kills the metastore at large,
        // so wrap it in a big catch Throwable statement.
        try {
            CompactionInfo ci = txnHandler.findNextToCompact(name);

            if (ci == null && !stop.get()) {
                try {
                    Thread.sleep(SLEEP_TIME);
                    continue;
                } catch (InterruptedException e) {
                    LOG.warn("Worker thread sleep interrupted " + e.getMessage());
                    continue;
                }
            }

            // Find the table we will be working with.
            Table t1 = null;
            try {
                t1 = resolveTable(ci);
                if (t1 == null) {
                    LOG.info("Unable to find table " + ci.getFullTableName()
                            + ", assuming it was dropped and moving on.");
                    txnHandler.markCleaned(ci);
                    continue;
                }
            } catch (MetaException e) {
                txnHandler.markCleaned(ci);
                continue;
            }
            // This chicanery is to get around the fact that the table needs to be final in order to
            // go into the doAs below.
            final Table t = t1;

            // Find the partition we will be working with, if there is one.
            Partition p = null;
            try {
                p = resolvePartition(ci);
                if (p == null && ci.partName != null) {
                    LOG.info("Unable to find partition " + ci.getFullPartitionName()
                            + ", assuming it was dropped and moving on.");
                    txnHandler.markCleaned(ci);
                    continue;
                }
            } catch (Exception e) {
                txnHandler.markCleaned(ci);
                continue;
            }

            // Find the appropriate storage descriptor
            final StorageDescriptor sd = resolveStorageDescriptor(t, p);

            // Check that the table or partition isn't sorted, as we don't yet support that.
            if (sd.getSortCols() != null && !sd.getSortCols().isEmpty()) {
                LOG.error("Attempt to compact sorted table, which is not yet supported!");
                txnHandler.markCleaned(ci);
                continue;
            }

            final boolean isMajor = ci.isMajorCompaction();
            final ValidTxnList txns = CompactionTxnHandler
                    .createValidCompactTxnList(txnHandler.getOpenTxnsInfo());
            LOG.debug("ValidCompactTxnList: " + txns.writeToString());
            final StringBuilder jobName = new StringBuilder(name);
            jobName.append("-compactor-");
            jobName.append(ci.getFullPartitionName());

            // Determine who to run as
            String runAs;
            if (ci.runAs == null) {
                runAs = findUserToRunAs(sd.getLocation(), t);
                txnHandler.setRunAs(ci.id, runAs);
            } else {
                runAs = ci.runAs;
            }

            LOG.info("Starting " + ci.type.toString() + " compaction for " + ci.getFullPartitionName());

            final StatsUpdater su = StatsUpdater.init(ci, txnHandler.findColumnsWithStats(ci), conf,
                    runJobAsSelf(runAs) ? runAs : t.getOwner());
            final CompactorMR mr = new CompactorMR();
            launchedJob = true;
            try {
                if (runJobAsSelf(runAs)) {
                    mr.run(conf, jobName.toString(), t, sd, txns, isMajor, su);
                } else {
                    UserGroupInformation ugi = UserGroupInformation.createProxyUser(t.getOwner(),
                            UserGroupInformation.getLoginUser());
                    ugi.doAs(new PrivilegedExceptionAction<Object>() {
                        @Override
                        public Object run() throws Exception {
                            mr.run(conf, jobName.toString(), t, sd, txns, isMajor, su);
                            return null;
                        }
                    });
                }
                txnHandler.markCompacted(ci);
            } catch (Exception e) {
                LOG.error("Caught exception while trying to compact " + ci.getFullPartitionName()
                        + ".  Marking clean to avoid repeated failures, " + StringUtils.stringifyException(e));
                txnHandler.markCleaned(ci);
            }
        } catch (Throwable t) {
            LOG.error("Caught an exception in the main loop of compactor worker " + name + ", "
                    + StringUtils.stringifyException(t));
        }

        // If we didn't try to launch a job it either means there was no work to do or we got
        // here as the result of a communication failure with the DB.  Either way we want to wait
        // a bit before we restart the loop.
        if (!launchedJob && !stop.get()) {
            try {
                Thread.sleep(SLEEP_TIME);
            } catch (InterruptedException e) {
            }
        }
    } while (!stop.get());
}

From source file:org.apache.axis2.jaxws.message.databinding.impl.DataSourceBlockImpl.java

/**
 * Return the class for this name
 * @return Class
 */
private static Class forName(final String className) throws ClassNotFoundException {
    // NOTE: This method must remain private because it uses AccessController
    Class cl = null;
    try {
        cl = (Class) AccessController.doPrivileged(new PrivilegedExceptionAction() {
            public Object run() throws ClassNotFoundException {
                return Class.forName(className);
            }
        });
    } catch (PrivilegedActionException e) {
        if (log.isDebugEnabled()) {
            log.debug("Exception thrown from AccessController: " + e);
        }
        throw (ClassNotFoundException) e.getException();
    }

    return cl;
}
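
A parameterized variant of the same lookup (a sketch; forNameTyped is an illustrative name, not part of the project): typing the action as PrivilegedExceptionAction<Class<?>> removes the raw types and the cast on the return value.

private static Class<?> forNameTyped(final String className) throws ClassNotFoundException {
    // NOTE: Must also remain private because it uses AccessController.
    try {
        return AccessController.doPrivileged(new PrivilegedExceptionAction<Class<?>>() {
            public Class<?> run() throws ClassNotFoundException {
                return Class.forName(className);
            }
        });
    } catch (PrivilegedActionException e) {
        // doPrivileged wraps the checked exception thrown by run().
        throw (ClassNotFoundException) e.getException();
    }
}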

From source file:com.trendmicro.hdfs.webdav.test.MiniClusterTestUtil.java

public void startHDFSWebDAVServlet(UserGroupInformation gatewayUser) throws Exception {
    gatewayPort = getConfiguration().getInt("hadoop.webdav.port", DEFAULT_GATEWAY_PORT);
    while (true)
        try {
            gatewayUser.doAs(new PrivilegedExceptionAction<Void>() {
                public Void run() throws Exception {
                    startServletServer(gatewayPort);
                    return null;
                }
            });
            break;
        } catch (Exception e) {
            LOG.info("Unable to start Jetty on port " + gatewayPort, e);
            gatewayPort++;
        }
    getConfiguration().setInt("hadoop.webdav.port", gatewayPort);
}