Example usage for java.security PrivilegedExceptionAction

List of usage examples for java.security PrivilegedExceptionAction

Introduction

On this page you can find example usages of java.security.PrivilegedExceptionAction, a computation to be performed with privileges enabled that may throw one or more checked exceptions.

Prototype

public interface PrivilegedExceptionAction<T> {
    T run() throws Exception;
}
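
A minimal, self-contained sketch of the basic pattern (not taken from any of the sources below): an anonymous PrivilegedExceptionAction passed to AccessController.doPrivileged, with the checked exception recovered from the PrivilegedActionException wrapper. The class name and file path here are illustrative assumptions.

import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

public class PrivilegedOpenSketch {
    public static FileInputStream openPrivileged(final String path) throws FileNotFoundException {
        try {
            // run() may throw a checked exception; doPrivileged wraps it
            // in a PrivilegedActionException.
            return AccessController.doPrivileged(new PrivilegedExceptionAction<FileInputStream>() {
                @Override
                public FileInputStream run() throws FileNotFoundException {
                    return new FileInputStream(path);
                }
            });
        } catch (PrivilegedActionException e) {
            // getException() returns the checked exception thrown by run();
            // unchecked exceptions from run() propagate unwrapped.
            throw (FileNotFoundException) e.getException();
        }
    }
}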

Usage

From source file:org.apache.hadoop.crypto.key.kms.server.KMS.java

@SuppressWarnings("rawtypes")
@POST
@Path(KMSRESTConstants.KEY_VERSION_RESOURCE + "/{versionName:.*}/" + KMSRESTConstants.EEK_SUB_RESOURCE)
@Produces(MediaType.APPLICATION_JSON)
public Response decryptEncryptedKey(@PathParam("versionName") final String versionName,
        @QueryParam(KMSRESTConstants.EEK_OP) String eekOp, Map jsonPayload) throws Exception {
    try {
        LOG.trace("Entering decryptEncryptedKey method.");
        UserGroupInformation user = HttpUserGroupInformation.get();
        KMSClientProvider.checkNotEmpty(versionName, "versionName");
        KMSClientProvider.checkNotNull(eekOp, "eekOp");
        LOG.debug("Decrypting key for {}, the edek Operation is {}.", versionName, eekOp);

        final String keyName = (String) jsonPayload.get(KMSRESTConstants.NAME_FIELD);
        String ivStr = (String) jsonPayload.get(KMSRESTConstants.IV_FIELD);
        String encMaterialStr = (String) jsonPayload.get(KMSRESTConstants.MATERIAL_FIELD);
        Object retJSON;
        if (eekOp.equals(KMSRESTConstants.EEK_DECRYPT)) {
            assertAccess(KMSACLs.Type.DECRYPT_EEK, user, KMSOp.DECRYPT_EEK, keyName);
            KMSClientProvider.checkNotNull(ivStr, KMSRESTConstants.IV_FIELD);
            final byte[] iv = Base64.decodeBase64(ivStr);
            KMSClientProvider.checkNotNull(encMaterialStr, KMSRESTConstants.MATERIAL_FIELD);
            final byte[] encMaterial = Base64.decodeBase64(encMaterialStr);

            KeyProvider.KeyVersion retKeyVersion = user.doAs(new PrivilegedExceptionAction<KeyVersion>() {
                @Override
                public KeyVersion run() throws Exception {
                    return provider.decryptEncryptedKey(new KMSClientProvider.KMSEncryptedKeyVersion(keyName,
                            versionName, iv, KeyProviderCryptoExtension.EEK, encMaterial));
                }
            });

            retJSON = KMSServerJSONUtils.toJSON(retKeyVersion);
            kmsAudit.ok(user, KMSOp.DECRYPT_EEK, keyName, "");
        } else {
            StringBuilder error;
            error = new StringBuilder("IllegalArgumentException Wrong ");
            error.append(KMSRESTConstants.EEK_OP);
            error.append(" value, it must be ");
            error.append(KMSRESTConstants.EEK_GENERATE);
            error.append(" or ");
            error.append(KMSRESTConstants.EEK_DECRYPT);
            LOG.error(error.toString());
            throw new IllegalArgumentException(error.toString());
        }
        KMSWebApp.getDecryptEEKCallsMeter().mark();
        LOG.trace("Exiting decryptEncryptedKey method.");
        return Response.ok().type(MediaType.APPLICATION_JSON).entity(retJSON).build();
    } catch (Exception e) {
        LOG.debug("Exception in decryptEncryptedKey.", e);
        throw e;
    }
}

From source file:org.apache.hadoop.crypto.key.kms.KMSClientProvider.java

private HttpURLConnection createConnection(final URL url, String method) throws IOException {
    HttpURLConnection conn;
    try {
        final String doAsUser = getDoAsUser();
        conn = getActualUgi().doAs(new PrivilegedExceptionAction<HttpURLConnection>() {
            @Override
            public HttpURLConnection run() throws Exception {
                DelegationTokenAuthenticatedURL authUrl = new DelegationTokenAuthenticatedURL(configurator);
                return authUrl.openConnection(url, authToken, doAsUser);
            }
        });
    } catch (IOException ex) {
        if (ex instanceof SocketTimeoutException) {
            LOG.warn("Failed to connect to {}:{}", url.getHost(), url.getPort());
        }
        throw ex;
    } catch (UndeclaredThrowableException ex) {
        throw new IOException(ex.getUndeclaredThrowable());
    } catch (Exception ex) {
        throw new IOException(ex);
    }
    conn.setUseCaches(false);
    conn.setRequestMethod(method);
    if (method.equals(HTTP_POST) || method.equals(HTTP_PUT)) {
        conn.setDoOutput(true);
    }
    conn = configureConnection(conn);
    return conn;
}
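
Since PrivilegedExceptionAction has a single abstract method, on Java 8 or later the same pattern can be written with a lambda. A hedged, self-contained sketch (the class and method names and the URL handling are assumptions, not part of KMSClientProvider); the cast is what selects the checked-exception overload:

import java.io.IOException;
import java.net.URL;
import java.net.URLConnection;
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

public class LambdaFormSketch {
    // Same idea as createConnection above, but with a Java 8 lambda instead
    // of an anonymous class. The cast selects the
    // doPrivileged(PrivilegedExceptionAction) overload rather than
    // doPrivileged(PrivilegedAction).
    static URLConnection openPrivileged(final URL url) throws IOException {
        try {
            return AccessController.doPrivileged(
                    (PrivilegedExceptionAction<URLConnection>) () -> url.openConnection());
        } catch (PrivilegedActionException e) {
            // The only checked exception url.openConnection() can throw is IOException.
            throw (IOException) e.getException();
        }
    }
}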

From source file:org.apache.axis2.jaxws.server.EndpointController.java

/**
 * Return the class for this name.
 *
 * @return Class
 */
private static Class forName(final String className, final boolean initialize, final ClassLoader classloader)
        throws ClassNotFoundException {
    // NOTE: This method must remain private because it uses AccessController
    Class cl = null;
    try {
        cl = (Class) AccessController.doPrivileged(new PrivilegedExceptionAction() {
            public Object run() throws ClassNotFoundException {
                return Class.forName(className, initialize, classloader);
            }
        });
    } catch (PrivilegedActionException e) {
        if (log.isDebugEnabled()) {
            log.debug("PrivilegedActionException thrown from AccessController: " + e);
            log.debug("Real Cause is " + e.getException().getCause());
        }
        throw (ClassNotFoundException) e.getException();
    }

    return cl;
}

From source file:org.apache.hadoop.hbase.regionserver.TestHRegion.java

@Test(timeout = 60000)
public void testCloseWithFailingFlush() throws Exception {
    final Configuration conf = HBaseConfiguration.create(CONF);
    // Only retry once.
    conf.setInt("hbase.hstore.flush.retries.number", 1);
    final User user = User.createUserForTesting(conf, this.name.getMethodName(), new String[] { "foo" });
    // Inject our faulty LocalFileSystem
    conf.setClass("fs.file.impl", FaultyFileSystem.class, FileSystem.class);
    user.runAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
            // Make sure it worked (above is sensitive to caching details in hadoop core)
            FileSystem fs = FileSystem.get(conf);
            Assert.assertEquals(FaultyFileSystem.class, fs.getClass());
            FaultyFileSystem ffs = (FaultyFileSystem) fs;
            HRegion region = null;
            try {
                // Initialize region
                region = initHRegion(tableName, name.getMethodName(), conf, COLUMN_FAMILY_BYTES);
                long size = region.getMemstoreSize().get();
                Assert.assertEquals(0, size);
                // Put one item into memstore.  Measure the size of one item in memstore.
                Put p1 = new Put(row);
                p1.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual1, 1, (byte[]) null));
                region.put(p1);
                // Manufacture an outstanding snapshot -- fake a failed flush by doing prepare step only.
                Store store = region.getStore(COLUMN_FAMILY_BYTES);
                StoreFlushContext storeFlushCtx = store.createFlushContext(12345);
                storeFlushCtx.prepare();
                // Now add two entries to the foreground memstore.
                Put p2 = new Put(row);
                p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual2, 2, (byte[]) null));
                p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual3, 3, (byte[]) null));
                region.put(p2);
                // Now try close on top of a failing flush.
                region.close();
                fail();
            } catch (DroppedSnapshotException dse) {
                // Expected
                LOG.info("Expected DroppedSnapshotException");
            } finally {
                // Make it so all writes succeed from here on out so we can close cleanly
                ffs.fault.set(false);
                HRegion.closeHRegion(region);
            }
            return null;
        }
    });
    FileSystem.closeAllForUGI(user.getUGI());
}

From source file:org.apache.hadoop.hbase.regionserver.wal.TestWALReplay.java

/**
 * Test writing edits into an HRegion, closing it, splitting logs, opening
 * Region again.  Verify seqids.
 * @throws IOException
 * @throws IllegalAccessException
 * @throws NoSuchFieldException
 * @throws IllegalArgumentException
 * @throws SecurityException
 */
@Test
public void testReplayEditsWrittenViaHRegion() throws IOException, SecurityException, IllegalArgumentException,
        NoSuchFieldException, IllegalAccessException, InterruptedException {
    final TableName tableName = TableName.valueOf("testReplayEditsWrittenViaHRegion");
    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = FSUtils.getTableDir(this.hbaseRootDir, tableName);
    deleteDir(basedir);
    final byte[] rowName = tableName.getName();
    final int countPerFamily = 10;
    final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region3 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
    HRegion.closeHRegion(region3);
    // Write countPerFamily edits into the three families.  Do a flush on one
    // of the families during the load of edits so its seqid is not the same as
    // the others', to test that we do the right thing when seqids differ.
    HLog wal = createWAL(this.conf);
    HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal);
    long seqid = region.getOpenSeqNum();
    boolean first = true;
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x");
        if (first) {
            // If first, flush so we have at least one family with a seqid different from the rest.
            region.flushcache();
            first = false;
        }
    }
    // Now assert edits made it in.
    final Get g = new Get(rowName);
    Result result = region.get(g);
    assertEquals(countPerFamily * htd.getFamilies().size(), result.size());
    // Now close the region (without flush), split the log, reopen the region and assert that
    // replay of log has the correct effect, that our seqids are calculated correctly so
    // all edits in logs are seen as 'stale'/old.
    region.close(true);
    wal.close();
    runWALSplit(this.conf);
    HLog wal2 = createWAL(this.conf);
    HRegion region2 = HRegion.openHRegion(conf, this.fs, hbaseRootDir, hri, htd, wal2);
    long seqid2 = region2.getOpenSeqNum();
    assertTrue(seqid + result.size() < seqid2);
    final Result result1b = region2.get(g);
    assertEquals(result.size(), result1b.size());

    // Next test.  Add more edits, then 'crash' this region by stealing its wal
    // out from under it and assert that replay of the log adds the edits back
    // correctly when region is opened again.
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region2, "y");
    }
    // Get count of edits.
    final Result result2 = region2.get(g);
    assertEquals(2 * result.size(), result2.size());
    wal2.sync();
    // Lower the maximum recovery error count so the dfsclient doesn't linger
    // retrying something long gone.
    HBaseTestingUtility.setMaxRecoveryErrorCount(((FSHLog) wal2).getOutputStream(), 1);
    final Configuration newConf = HBaseConfiguration.create(this.conf);
    User user = HBaseTestingUtility.getDifferentUser(newConf, tableName.getNameAsString());
    user.runAs(new PrivilegedExceptionAction() {
        public Object run() throws Exception {
            runWALSplit(newConf);
            FileSystem newFS = FileSystem.get(newConf);
            // Make a new wal for new region open.
            HLog wal3 = createWAL(newConf);
            final AtomicInteger countOfRestoredEdits = new AtomicInteger(0);
            HRegion region3 = new HRegion(basedir, wal3, newFS, newConf, hri, htd, null) {
                @Override
                protected boolean restoreEdit(Store s, KeyValue kv) {
                    boolean b = super.restoreEdit(s, kv);
                    countOfRestoredEdits.incrementAndGet();
                    return b;
                }
            };
            long seqid3 = region3.initialize();
            Result result3 = region3.get(g);
            // Assert that count of cells is same as before crash.
            assertEquals(result2.size(), result3.size());
            assertEquals(htd.getFamilies().size() * countPerFamily, countOfRestoredEdits.get());

            // I can't close wal1.  It's been appropriated when we split.
            region3.close();
            wal3.closeAndDelete();
            return null;
        }
    });
}

From source file:com.thinkbiganalytics.datalake.authorization.SentryAuthorizationService.java

@Override
public void deleteHivePolicy(String categoryName, String feedName) {
    if (this.sentryConnection.getKerberosTicketConfiguration().isKerberosEnabled()) {
        try {

            UserGroupInformation ugi = authenticatePolicyCreatorWithKerberos();
            if (ugi == null) {
                log.error(UserGroupObjectError);
            } else {
                ugi.doAs(new PrivilegedExceptionAction<Void>() {
                    @Override
                    public Void run() throws Exception {
                        String sentryPolicyName = getHivePolicyName(categoryName, feedName);
                        if (sentryClientObject.checkIfRoleExists(sentryPolicyName)) {
                            try {
                                sentryClientObject.dropRole(sentryPolicyName);
                            } catch (SentryClientException e) {
                                log.error("Unable to delete policy  " + sentryPolicyName + " in Sentry  "
                                        + e.getMessage());
                                throw new RuntimeException(e);
                            }
                        }
                        return null;
                    }
                });
            }
        } catch (Exception e) {
            log.error("Failed to Delete Hive Policy With Kerberos" + e.getMessage());
            throw new RuntimeException(e);
        }
    } else {
        String sentryPolicyName = getHivePolicyName(categoryName, feedName);
        if (sentryClientObject.checkIfRoleExists(sentryPolicyName)) {
            try {
                sentryClientObject.dropRole(sentryPolicyName);
            } catch (SentryClientException e) {
                log.error("Unable to delete policy  " + sentryPolicyName + " in Sentry  " + e.getMessage());
                throw new RuntimeException(e);
            }
        }

    }

}

From source file:org.apache.axis2.deployment.AxisConfigBuilder.java

/**
 * Processes AxisObservers.
 *
 * @param oservers
 */
private void processObservers(Iterator oservers) {
    while (oservers.hasNext()) {
        try {
            OMElement observerelement = (OMElement) oservers.next();
            AxisObserver observer;
            OMAttribute trsClas = observerelement.getAttribute(new QName(TAG_CLASS_NAME));
            if (trsClas == null) {
                log.info(Messages.getMessage(DeploymentErrorMsgs.OBSERVER_ERROR));
                return;
            }
            final String clasName = trsClas.getAttributeValue();

            Class observerclass;
            try {
                observerclass = (Class) org.apache.axis2.java.security.AccessController
                        .doPrivileged(new PrivilegedExceptionAction() {
                            public Object run() throws ClassNotFoundException {
                                return Loader.loadClass(clasName);
                            }
                        });
            } catch (PrivilegedActionException e) {
                throw (ClassNotFoundException) e.getException();
            }
            observer = (AxisObserver) observerclass.newInstance();
            // processing Parameters
            // Processing service level parameters
            Iterator itr = observerelement.getChildrenWithName(new QName(TAG_PARAMETER));
            processParameters(itr, observer, axisConfig);
            // initialization
            try {
                observer.init(axisConfig);
            } catch (Throwable e) {
                // Observer init may throw a runtime exception, but we can still
                // start Axis2.
                log.info(e.getMessage());
            }
            axisConfig.addObservers(observer);
        } catch (Exception e) {
            log.info(e.getMessage());
        }
    }
}

From source file:org.apache.hadoop.hdfs.server.namenode.JspHelper.java

public static DFSClient getDFSClient(final UserGroupInformation user, final InetSocketAddress addr,
        final Configuration conf) throws IOException, InterruptedException {
    return user.doAs(new PrivilegedExceptionAction<DFSClient>() {
        public DFSClient run() throws IOException {
            return new DFSClient(addr, conf);
        }
    });
}
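
A hedged usage sketch for the helper above; the user name, namenode host, and port are assumptions for illustration, not values from JspHelper:

// Illustrative caller (assumed values). getDFSClient runs
// new DFSClient(addr, conf) as the given user via ugi.doAs(...).
static DFSClient clientForAlice(Configuration conf) throws IOException, InterruptedException {
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("alice");
    return JspHelper.getDFSClient(ugi, new InetSocketAddress("namenode.example.com", 8020), conf);
}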

From source file:org.apache.hadoop.fs.TestFileSystem.java

@SuppressWarnings("unchecked")
public <T extends TokenIdentifier> void testCacheForUgi() throws Exception {
    final Configuration conf = new Configuration();
    conf.set("fs.cachedfile.impl", conf.get("fs.file.impl"));
    UserGroupInformation ugiA = UserGroupInformation.createRemoteUser("foo");
    UserGroupInformation ugiB = UserGroupInformation.createRemoteUser("bar");
    FileSystem fsA = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
        public FileSystem run() throws Exception {
            return FileSystem.get(new URI("cachedfile://a"), conf);
        }
    });
    FileSystem fsA1 = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
        public FileSystem run() throws Exception {
            return FileSystem.get(new URI("cachedfile://a"), conf);
        }
    });
    //Since the UGIs are the same, we should have the same filesystem for both
    assertSame(fsA, fsA1);

    FileSystem fsB = ugiB.doAs(new PrivilegedExceptionAction<FileSystem>() {
        public FileSystem run() throws Exception {
            return FileSystem.get(new URI("cachedfile://a"), conf);
        }
    });
    //Since the UGIs are different, we should end up with different filesystems
    //corresponding to the two UGIs
    assertNotSame(fsA, fsB);

    Token<T> t1 = mock(Token.class);
    UserGroupInformation ugiA2 = UserGroupInformation.createRemoteUser("foo");

    fsA = ugiA2.doAs(new PrivilegedExceptionAction<FileSystem>() {
        public FileSystem run() throws Exception {
            return FileSystem.get(new URI("cachedfile://a"), conf);
        }
    });
    // Although the users in the UGIs are the same, they have different subjects
    // and so are different.
    assertNotSame(fsA, fsA1);

    ugiA.addToken(t1);

    fsA = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
        public FileSystem run() throws Exception {
            return FileSystem.get(new URI("cachedfile://a"), conf);
        }
    });
    // Make sure that different UGIs with the same subject lead to the same
    // file system.
    assertSame(fsA, fsA1);
}

From source file:org.apache.hadoop.hbase.master.procedure.EnableTableProcedure.java

/**
 * Coprocessor Action.
 * @param env MasterProcedureEnv
 * @param state the procedure state
 * @throws IOException
 * @throws InterruptedException
 */
private void runCoprocessorAction(final MasterProcedureEnv env, final EnableTableState state)
        throws IOException, InterruptedException {
    final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
    if (cpHost != null) {
        user.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                switch (state) {
                case ENABLE_TABLE_PRE_OPERATION:
                    cpHost.preEnableTableHandler(getTableName());
                    break;
                case ENABLE_TABLE_POST_OPERATION:
                    cpHost.postEnableTableHandler(getTableName());
                    break;
                default:
                    throw new UnsupportedOperationException(this + " unhandled state=" + state);
                }
                return null;
            }
        });
    }
}