Example usage for java.security.PrivilegedExceptionAction

Introduction

On this page you can find example usages of java.security.PrivilegedExceptionAction.

Prototype

public interface PrivilegedExceptionAction<T> {
    T run() throws Exception;
}
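
The interface declares a single method, run(), which may throw any checked exception; AccessController.doPrivileged wraps such an exception in a PrivilegedActionException. Below is a minimal sketch of the typical calling pattern (the class name, method name, and file path are illustrative only, not taken from the examples that follow):

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

public class PrivilegedReadExample {
    public static InputStream openPrivileged(final String path) throws IOException {
        try {
            // doPrivileged runs the action with this class's own privileges,
            // regardless of less-privileged callers further up the stack.
            return AccessController.doPrivileged(new PrivilegedExceptionAction<InputStream>() {
                public InputStream run() throws IOException {
                    return new FileInputStream(path); // illustrative resource
                }
            });
        } catch (PrivilegedActionException e) {
            // The checked exception thrown by run() arrives wrapped; unwrap and rethrow.
            throw (IOException) e.getException();
        }
    }
}

Subject.doAs and Hadoop's UserGroupInformation.doAs/runAs accept the same interface, as the examples below illustrate.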

Usage

From source file:org.apache.axis2.jaxws.description.impl.URIResolverImpl.java

/**
 * Gets an input stream for the given URI. If no stream can be opened, <code>null</code>
 * is returned.
 *
 * @param uri the URI to resolve
 * @return an input stream for the URI, or <code>null</code> if none could be opened
 */
protected InputStream getInputStreamForURI(String uri) {
    URL streamURL = null;
    InputStream is = null;
    URI pathURI = null;

    try {
        streamURL = new URL(uri);
        is = openStream_doPriv(streamURL);
    } catch (Throwable t) {
        //Exception handling not needed
        if (log.isDebugEnabled()) {
            log.debug("Exception occured in getInputStreamForURI, ignoring exception continuing processing: "
                    + t.getMessage());
        }
    }

    if (is == null) {
        try {
            pathURI = new URI(uri);
            streamURL = pathURI.toURL();
            is = openStream_doPriv(streamURL);
        } catch (Throwable t) {
            //Exception handling not needed
            if (log.isDebugEnabled()) {
                log.debug(
                        "Exception occurred in getInputStreamForURI, ignoring exception and continuing processing: "
                                + t.getMessage());
            }
        }
    }

    if (is == null) {
        try {
            final File file = new File(uri);
            streamURL = AccessController.doPrivileged(new PrivilegedExceptionAction<URL>() {
                public URL run() throws MalformedURLException {
                    return file.toURL();
                }
            });
            is = openStream_doPriv(streamURL);
        } catch (Throwable t) {
            //Exception handling not needed
            if (log.isDebugEnabled()) {
                log.debug(
                        "Exception occurred in getInputStreamForURI, ignoring exception and continuing processing: "
                                + t.getMessage());
            }
        }
    }
    return is;
}
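
The method above delegates to a private helper, openStream_doPriv, which is not shown in this snippet. Presumably it wraps URL.openStream() in a privileged block; a minimal sketch under that assumption (not the verbatim Axis2 source) could look like:

private InputStream openStream_doPriv(final URL streamURL) throws IOException {
    try {
        // Assumed shape: only the stream opening needs elevated privileges.
        return AccessController.doPrivileged(new PrivilegedExceptionAction<InputStream>() {
            public InputStream run() throws IOException {
                return streamURL.openStream();
            }
        });
    } catch (PrivilegedActionException e) {
        // run() declares only IOException, so the wrapped cause can be rethrown as such.
        throw (IOException) e.getException();
    }
}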

From source file:org.apache.hadoop.hbase.wal.TestWALSplit.java

/**
 * Simulates splitting a WAL out from under a regionserver that is still trying to write it.
 * Ensures we do not lose edits.
 * @throws IOException
 * @throws InterruptedException
 */
@Test(timeout = 300000)
public void testLogCannotBeWrittenOnceParsed() throws IOException, InterruptedException {
    final AtomicLong counter = new AtomicLong(0);
    AtomicBoolean stop = new AtomicBoolean(false);
    // Region we'll write edits to and then later examine to make sure they all made it in.
    final String region = REGIONS.get(0);
    final int numWriters = 3;
    Thread zombie = new ZombieLastLogWriterRegionServer(counter, stop, region, numWriters);
    try {
        long startCount = counter.get();
        zombie.start();
        // Wait till writer starts going.
        while (startCount == counter.get())
            Threads.sleep(1);
        // Give it a second to write a few appends.
        Threads.sleep(1000);
        final Configuration conf2 = HBaseConfiguration.create(this.conf);
        final User robber = User.createUserForTesting(conf2, ROBBER, GROUP);
        int count = robber.runAs(new PrivilegedExceptionAction<Integer>() {
            @Override
            public Integer run() throws Exception {
                StringBuilder ls = new StringBuilder("Contents of WALDIR (").append(WALDIR).append("):\n");
                for (FileStatus status : fs.listStatus(WALDIR)) {
                    ls.append("\t").append(status.toString()).append("\n");
                }
                LOG.debug(ls);
                LOG.info("Splitting WALs out from under zombie. Expecting " + numWriters + " files.");
                WALSplitter.split(HBASEDIR, WALDIR, OLDLOGDIR, fs, conf2, wals);
                LOG.info("Finished splitting out from under zombie.");
                Path[] logfiles = getLogForRegion(HBASEDIR, TABLE_NAME, region);
                assertEquals("wrong number of split files for region", numWriters, logfiles.length);
                int count = 0;
                for (Path logfile : logfiles) {
                    count += countWAL(logfile);
                }
                return count;
            }
        });
        LOG.info("zombie=" + counter.get() + ", robber=" + count);
        assertTrue(
                "The log file could have at most 1 extra log entry, but cannot have fewer. "
                        + "Zombie could write " + counter.get() + " and logfile had only " + count,
                counter.get() == count || counter.get() + 1 == count);
    } finally {
        stop.set(true);
        zombie.interrupt();
        Threads.threadDumpingIsAlive(zombie);
    }
}

From source file:org.apache.hadoop.mapred.JobLocalizer.java

public static void main(String[] argv) throws IOException, InterruptedException {
    // $logdir
    // let $x = $root/tasktracker for some $mapred.local.dir
    //   create $x/$user/jobcache/$jobid/work
    //   fetch  $x/$user/jobcache/$jobid/jars/job.jar
    //   setup  $x/$user/distcache
    //   verify $x/distcache
    //   write  $x/$user/jobcache/$jobid/job.xml
    final String user = argv[0];
    final String jobid = argv[1];
    final InetSocketAddress ttAddr = new InetSocketAddress(argv[2], Integer.parseInt(argv[3]));
    final String uid = UserGroupInformation.getCurrentUser().getShortUserName();
    if (!user.equals(uid)) {
        LOG.warn("Localization running as " + uid + " not " + user);
    }

    // Pull in user's tokens to complete setup
    final JobConf conf = new JobConf();
    final JobLocalizer localizer = new JobLocalizer(conf, user, jobid);
    final Path jobTokenFile = localizer.findCredentials();
    final Credentials creds = TokenCache.loadTokens(jobTokenFile.toUri().toString(), conf);
    LOG.debug("Loaded tokens from " + jobTokenFile);
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
    for (Token<? extends TokenIdentifier> token : creds.getAllTokens()) {
        ugi.addToken(token);
    }

    UserGroupInformation ugiJob = UserGroupInformation.createRemoteUser(jobid);
    Token<JobTokenIdentifier> jt = TokenCache.getJobToken(creds);
    SecurityUtil.setTokenService(jt, ttAddr);
    ugiJob.addToken(jt);

    final TaskUmbilicalProtocol taskTracker = ugiJob
            .doAs(new PrivilegedExceptionAction<TaskUmbilicalProtocol>() {
                public TaskUmbilicalProtocol run() throws IOException {
                    TaskUmbilicalProtocol taskTracker = (TaskUmbilicalProtocol) RPC.getProxy(
                            TaskUmbilicalProtocol.class, TaskUmbilicalProtocol.versionID, ttAddr, conf);
                    return taskTracker;
                }
            });
    System.exit(ugi.doAs(new PrivilegedExceptionAction<Integer>() {
        public Integer run() {
            try {
                return localizer.runSetup(user, jobid, jobTokenFile, taskTracker);
            } catch (Throwable e) {
                e.printStackTrace(System.out);
                return -1;
            }
        }
    }));
}

From source file:com.lucidworks.security.authentication.client.KerberosAuthenticator.java

/**
 * Implements the SPNEGO authentication sequence interaction using the current default principal
 * in the Kerberos cache (normally set via kinit).
 *
 * @param token the authentication token being used for the user.
 *
 * @throws IOException if an IO error occurred.
 * @throws AuthenticationException if an authentication error occurred.
 */
private void doSpnegoSequence(AuthenticatedURL.Token token) throws IOException, AuthenticationException {
    try {
        AccessControlContext context = AccessController.getContext();
        Subject subject = Subject.getSubject(context);
        if (subject == null) {
            LOG.debug("No subject in context, logging in");
            subject = new Subject();
            LoginContext login = new LoginContext("", subject, null, new KerberosConfiguration());
            login.login();
        }

        if (LOG.isDebugEnabled()) {
            LOG.debug("Using subject: " + subject);
        }
        Subject.doAs(subject, new PrivilegedExceptionAction<Void>() {

            @Override
            public Void run() throws Exception {
                GSSContext gssContext = null;
                try {
                    GSSManager gssManager = GSSManager.getInstance();
                    String servicePrincipal = KerberosUtil.getServicePrincipal("HTTP",
                            KerberosAuthenticator.this.url.getHost());
                    Oid oid = KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL");
                    GSSName serviceName = gssManager.createName(servicePrincipal, oid);
                    oid = KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID");
                    gssContext = gssManager.createContext(serviceName, oid, null, GSSContext.DEFAULT_LIFETIME);
                    gssContext.requestCredDeleg(true);
                    gssContext.requestMutualAuth(true);

                    byte[] inToken = new byte[0];
                    byte[] outToken;
                    boolean established = false;

                    // Loop while the context is still not established
                    while (!established) {
                        outToken = gssContext.initSecContext(inToken, 0, inToken.length);
                        if (outToken != null) {
                            sendToken(outToken);
                        }

                        if (!gssContext.isEstablished()) {
                            inToken = readToken();
                        } else {
                            established = true;
                        }
                    }
                } finally {
                    if (gssContext != null) {
                        gssContext.dispose();
                        gssContext = null;
                    }
                }
                return null;
            }
        });
    } catch (PrivilegedActionException ex) {
        throw new AuthenticationException(ex.getException());
    } catch (LoginException ex) {
        throw new AuthenticationException(ex);
    }
    AuthenticatedURL.extractToken(conn, token);
}
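
The sendToken and readToken helpers called inside the action are not shown. In a SPNEGO client of this shape they presumably exchange the GSS tokens over HTTP as base64-encoded "Negotiate" headers on the connection field conn; a hedged sketch under that assumption (the header handling and the AuthenticationException(String) constructor are assumed, not confirmed by this snippet):

private void sendToken(byte[] outToken) throws IOException {
    // Assumed: send the token base64-encoded in the Authorization header.
    String token = java.util.Base64.getEncoder().encodeToString(outToken);
    conn.setRequestProperty("Authorization", "Negotiate " + token);
    conn.connect();
}

private byte[] readToken() throws IOException, AuthenticationException {
    int status = conn.getResponseCode();
    if (status == java.net.HttpURLConnection.HTTP_OK
            || status == java.net.HttpURLConnection.HTTP_UNAUTHORIZED) {
        // Assumed: the server answers with "WWW-Authenticate: Negotiate <base64-token>".
        String authHeader = conn.getHeaderField("WWW-Authenticate");
        if (authHeader == null || !authHeader.trim().startsWith("Negotiate")) {
            throw new AuthenticationException("Invalid SPNEGO sequence, 'WWW-Authenticate' header missing");
        }
        return java.util.Base64.getDecoder()
                .decode(authHeader.trim().substring("Negotiate".length()).trim());
    }
    throw new AuthenticationException("Invalid SPNEGO sequence, status code: " + status);
}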

From source file:org.apache.hadoop.hbase.coprocessor.TestWALObserver.java

/**
 * Test WAL replay behavior with WALObserver.
 */
@Test
public void testWALCoprocessorReplay() throws Exception {
    // WAL replay is handled at HRegion::replayRecoveredEdits(), which is
    // ultimately called by HRegion::initialize()
    TableName tableName = TableName.valueOf("testWALCoprocessorReplay");
    final HTableDescriptor htd = getBasic3FamilyHTableDescriptor(tableName);
    final AtomicLong sequenceId = new AtomicLong(0);
    // final HRegionInfo hri =
    // createBasic3FamilyHRegionInfo(Bytes.toString(tableName));
    // final HRegionInfo hri1 =
    // createBasic3FamilyHRegionInfo(Bytes.toString(tableName));
    final HRegionInfo hri = new HRegionInfo(tableName, null, null);

    final Path basedir = FSUtils.getTableDir(this.hbaseRootDir, tableName);
    deleteDir(basedir);
    fs.mkdirs(new Path(basedir, hri.getEncodedName()));

    final Configuration newConf = HBaseConfiguration.create(this.conf);

    // HLog wal = new HLog(this.fs, this.dir, this.oldLogDir, this.conf);
    HLog wal = createWAL(this.conf);
    // Put p = creatPutWith2Families(TEST_ROW);
    WALEdit edit = new WALEdit();
    long now = EnvironmentEdgeManager.currentTimeMillis();
    // addFamilyMapToWALEdit(p.getFamilyMap(), edit);
    final int countPerFamily = 1000;
    // for (HColumnDescriptor hcd: hri.getTableDesc().getFamilies()) {
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        // addWALEdits(tableName, hri, TEST_ROW, hcd.getName(), countPerFamily,
        // EnvironmentEdgeManager.getDelegate(), wal);
        addWALEdits(tableName, hri, TEST_ROW, hcd.getName(), countPerFamily,
                EnvironmentEdgeManager.getDelegate(), wal, htd, sequenceId);
    }
    wal.append(hri, tableName, edit, now, htd, sequenceId);
    // sync to fs.
    wal.sync();

    User user = HBaseTestingUtility.getDifferentUser(newConf, ".replay.wal.secondtime");
    user.runAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
            Path p = runWALSplit(newConf);
            LOG.info("WALSplit path == " + p);
            FileSystem newFS = FileSystem.get(newConf);
            // Make a new wal for new region open.
            HLog wal2 = createWAL(newConf);
            HRegion region = HRegion.openHRegion(newConf, FileSystem.get(newConf), hbaseRootDir, hri, htd, wal2,
                    TEST_UTIL.getHBaseCluster().getRegionServer(0), null);
            long seqid2 = region.getOpenSeqNum();

            SampleRegionWALObserver cp2 = (SampleRegionWALObserver) region.getCoprocessorHost()
                    .findCoprocessor(SampleRegionWALObserver.class.getName());
            // TODO: asserting here is problematic.
            assertNotNull(cp2);
            assertTrue(cp2.isPreWALRestoreCalled());
            assertTrue(cp2.isPostWALRestoreCalled());
            region.close();
            wal2.closeAndDelete();
            return null;
        }
    });
}

From source file:org.apache.hadoop.hive.ql.txn.compactor.Cleaner.java

private void clean(CompactionInfo ci) throws MetaException {
    LOG.info("Starting cleaning for " + ci.getFullPartitionName());
    try {
        Table t = resolveTable(ci);
        if (t == null) {
            // The table was dropped before we got around to cleaning it.
            LOG.info("Unable to find table " + ci.getFullTableName() + ", assuming it was dropped");
            return;
        }
        Partition p = null;
        if (ci.partName != null) {
            p = resolvePartition(ci);
            if (p == null) {
                // The partition was dropped before we got around to cleaning it.
                LOG.info("Unable to find partition " + ci.getFullPartitionName() + ", assuming it was dropped");
                return;
            }
        }
        StorageDescriptor sd = resolveStorageDescriptor(t, p);
        final String location = sd.getLocation();

        // Create a bogus validTxnList with a high water mark set to MAX_LONG and no open
        // transactions.  This ensures that all deltas are treated as valid and that everything
        // we return is an obsolete file.
        final ValidTxnList txnList = new ValidReadTxnList();

        if (runJobAsSelf(ci.runAs)) {
            removeFiles(location, txnList);
        } else {
            LOG.info("Cleaning as user " + ci.runAs);
            UserGroupInformation ugi = UserGroupInformation.createProxyUser(ci.runAs,
                    UserGroupInformation.getLoginUser());
            ugi.doAs(new PrivilegedExceptionAction<Object>() {
                @Override
                public Object run() throws Exception {
                    removeFiles(location, txnList);
                    return null;
                }
            });
        }

    } catch (Exception e) {
        LOG.error("Caught exception when cleaning, unable to complete cleaning "
                + StringUtils.stringifyException(e));
    } finally {
        // We need to clean this out one way or another.
        txnHandler.markCleaned(ci);
    }
}

From source file:org.apache.axiom.om.util.StAXUtils.java

public static XMLStreamReader createXMLStreamReader(StAXParserConfiguration configuration, final Reader in)
        throws XMLStreamException {

    final XMLInputFactory inputFactory = getXMLInputFactory(configuration);
    try {
        XMLStreamReader reader = AccessController
                .doPrivileged(new PrivilegedExceptionAction<XMLStreamReader>() {
                    public XMLStreamReader run() throws XMLStreamException {
                        return inputFactory.createXMLStreamReader(in);
                    }
                });
        if (isDebugEnabled) {
            log.debug("XMLStreamReader is " + reader.getClass().getName());
        }
        return reader;
    } catch (PrivilegedActionException pae) {
        throw (XMLStreamException) pae.getException();
    }
}

From source file:org.apache.hadoop.mapred.TestJobACLs.java

private void verifyModifyJobAsAuthorizedUser(final JobConf clusterConf, final JobID jobId,
        String authorizedUser) throws IOException, InterruptedException {
    UserGroupInformation authorizedUGI = UserGroupInformation.createUserForTesting(authorizedUser,
            new String[] {});
    authorizedUGI.doAs(new PrivilegedExceptionAction<Object>() {
        @SuppressWarnings("null")
        @Override
        public Object run() throws Exception {
            RunningJob myJob = null;
            try {
                JobClient client = new JobClient(clusterConf);
                myJob = client.getJob(jobId);
            } catch (Exception e) {
                fail("Exception .." + e);
            }

            assertNotNull("Job " + jobId + " is not known to the JobTracker!", myJob);

            // Test authorization success with setJobPriority
            try {
                myJob.setJobPriority(JobPriority.HIGH.toString());
            } catch (IOException ioe) {
                fail("Unexpected.. exception.. " + ioe);
            }

            // Test authorization success with killJob
            try {
                myJob.killJob();
            } catch (IOException ioe) {
                fail("Unexpected.. exception.. " + ioe);
            }

            return null;
        }
    });
}

From source file:org.apache.hadoop.hbase.security.visibility.TestVisibilityLabelsWithDeletes.java

@Test
public void testVisibilityLabelsWithDeleteFamily() throws Exception {
    setAuths();
    final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
    try (Table table = createTableAndWriteDataWithLabels(SECRET, CONFIDENTIAL + "|" + TOPSECRET)) {
        PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                try (Connection connection = ConnectionFactory.createConnection(conf);
                        Table table = connection.getTable(tableName)) {
                    Delete d = new Delete(row2);
                    d.setCellVisibility(new CellVisibility(TOPSECRET + "|" + CONFIDENTIAL));
                    d.addFamily(fam);
                    table.delete(d);
                } catch (Throwable t) {
                    throw new IOException(t);
                }
                return null;
            }
        };
        SUPERUSER.runAs(actiona);

        TEST_UTIL.getAdmin().flush(tableName);
        Scan s = new Scan();
        s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL));
        ResultScanner scanner = table.getScanner(s);
        Result[] next = scanner.next(3);
        assertTrue(next.length == 1);
        CellScanner cellScanner = next[0].cellScanner();
        cellScanner.advance();
        Cell current = cellScanner.current();
        assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0,
                row1.length));
    }
}

From source file:org.apache.hadoop.hdfs.server.datanode.web.resources.DatanodeWebHdfsMethods.java

/** Handle HTTP POST request. */
@POST
@Path("{" + UriFsPathParam.NAME + ":.*}")
@Consumes({ "*/*" })
@Produces({ MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON })
public Response post(final InputStream in, @Context final UserGroupInformation ugi,
        @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT) final DelegationParam delegation,
        @PathParam(UriFsPathParam.NAME) final UriFsPathParam path,
        @QueryParam(PostOpParam.NAME) @DefaultValue(PostOpParam.DEFAULT) final PostOpParam op,
        @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT) final BufferSizeParam bufferSize)
        throws IOException, InterruptedException {

    init(ugi, delegation, path, op, bufferSize);

    return ugi.doAs(new PrivilegedExceptionAction<Response>() {
        @Override
        public Response run() throws IOException {

            final String fullpath = path.getAbsolutePath();
            final DataNode datanode = (DataNode) context.getAttribute("datanode");

            switch (op.getValue()) {
            case APPEND: {
                final Configuration conf = new Configuration(datanode.getConf());
                final int b = bufferSize.getValue(conf);
                DFSClient dfsclient = new DFSClient(conf);
                FSDataOutputStream out = null;
                try {
                    out = dfsclient.append(fullpath, b, null, null);
                    IOUtils.copyBytes(in, out, b);
                    out.close();
                    out = null;
                    dfsclient.close();
                    dfsclient = null;
                } finally {
                    IOUtils.cleanup(LOG, out);
                    IOUtils.cleanup(LOG, dfsclient);
                }
                return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
            }
            default:
                throw new UnsupportedOperationException(op + " is not supported");
            }
        }
    });
}