Example usage for java.security PrivilegedExceptionAction PrivilegedExceptionAction

Introduction

This page collects example usages of java.security.PrivilegedExceptionAction, constructed as anonymous classes and passed to AccessController.doPrivileged or to doAs/runAs-style methods such as Hadoop's UserGroupInformation.doAs.
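
Before the project-specific examples below, here is a minimal, self-contained sketch of the common pattern: the action is built as an anonymous class, its run() method does work that may throw a checked exception, and the caller unwraps that exception from PrivilegedActionException. The class name PrivilegedReadExample and the path "example.txt" are placeholders for this sketch, not part of any project shown on this page.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

public class PrivilegedReadExample {

    public static byte[] readPrivileged(final String path) throws IOException {
        try {
            // run() may throw a checked exception; doPrivileged wraps it
            // in a PrivilegedActionException.
            return AccessController.doPrivileged(new PrivilegedExceptionAction<byte[]>() {
                @Override
                public byte[] run() throws IOException {
                    return Files.readAllBytes(Paths.get(path));
                }
            });
        } catch (PrivilegedActionException pae) {
            // getException() returns the checked exception thrown by run().
            throw (IOException) pae.getException();
        }
    }

    public static void main(String[] args) throws IOException {
        // "example.txt" is only a placeholder path for this sketch.
        System.out.println(readPrivileged("example.txt").length + " bytes read");
    }
}

The Hadoop and HBase examples that follow use the same idea, but hand the action to UserGroupInformation.doAs or User.runAs so that run() executes on behalf of a different user.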

Prototype

PrivilegedExceptionAction

Usage

From source file:org.apache.hadoop.hdfs.server.datanode.web.resources.DatanodeWebHdfsMethods.java

/** Handle HTTP GET request. */
@GET
@Path("{" + UriFsPathParam.NAME + ":.*}")
@Produces({ MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON })
public Response get(@Context final UserGroupInformation ugi,
        @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT) final DelegationParam delegation,
        @PathParam(UriFsPathParam.NAME) final UriFsPathParam path,
        @QueryParam(GetOpParam.NAME) @DefaultValue(GetOpParam.DEFAULT) final GetOpParam op,
        @QueryParam(OffsetParam.NAME) @DefaultValue(OffsetParam.DEFAULT) final OffsetParam offset,
        @QueryParam(LengthParam.NAME) @DefaultValue(LengthParam.DEFAULT) final LengthParam length,
        @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT) final BufferSizeParam bufferSize)
        throws IOException, InterruptedException {

    init(ugi, delegation, path, op, offset, length, bufferSize);

    return ugi.doAs(new PrivilegedExceptionAction<Response>() {
        @Override
        public Response run() throws IOException {

            final String fullpath = path.getAbsolutePath();
            final DataNode datanode = (DataNode) context.getAttribute("datanode");
            final Configuration conf = new Configuration(datanode.getConf());
            final InetSocketAddress nnRpcAddr = NameNode.getAddress(conf);

            switch (op.getValue()) {
            case OPEN: {
                final int b = bufferSize.getValue(conf);
                final DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
                DFSDataInputStream in = null;
                try {
                    in = new DFSClient.DFSDataInputStream(dfsclient.open(fullpath, b, true, null));
                    in.seek(offset.getValue());
                } catch (IOException ioe) {
                    IOUtils.cleanup(LOG, in);
                    IOUtils.cleanup(LOG, dfsclient);
                    throw ioe;
                }
                final DFSDataInputStream dis = in;
                final StreamingOutput streaming = new StreamingOutput() {
                    @Override
                    public void write(final OutputStream out) throws IOException {
                        final Long n = length.getValue();
                        DFSDataInputStream dfsin = dis;
                        DFSClient client = dfsclient;
                        try {
                            if (n == null) {
                                IOUtils.copyBytes(dfsin, out, b);
                            } else {
                                IOUtils.copyBytes(dfsin, out, n, b, false);
                            }
                            dfsin.close();
                            dfsin = null;
                            client.close();
                            client = null;
                        } finally {
                            IOUtils.cleanup(LOG, dfsin);
                            IOUtils.cleanup(LOG, client);
                        }
                    }
                };

                return Response.ok(streaming).type(MediaType.APPLICATION_OCTET_STREAM).build();
            }
            case GETFILECHECKSUM: {
                MD5MD5CRC32FileChecksum checksum = null;
                DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
                try {
                    checksum = dfsclient.getFileChecksum(fullpath);
                    dfsclient.close();
                    dfsclient = null;
                } finally {
                    IOUtils.cleanup(LOG, dfsclient);
                }
                final String js = JsonUtil.toJsonString(checksum);
                return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
            }
            default:
                throw new UnsupportedOperationException(op + " is not supported");
            }
        }
    });
}

From source file:org.apache.hadoop.hdfs.server.namenode.TestCacheDirectives.java

private static long addAsUnprivileged(final CacheDirectiveInfo directive) throws Exception {
    return unprivilegedUser.doAs(new PrivilegedExceptionAction<Long>() {
        @Override
        public Long run() throws IOException {
            DistributedFileSystem myDfs = (DistributedFileSystem) FileSystem.get(conf);
            return myDfs.addCacheDirective(directive);
        }
    });
}

From source file:org.apache.hadoop.hdfs.server.namenode.ha.TestPipelinesFailover.java

private DistributedFileSystem createFsAsOtherUser(final MiniDFSCluster cluster, final Configuration conf)
        throws IOException, InterruptedException {
    return (DistributedFileSystem) UserGroupInformation
            .createUserForTesting("otheruser", new String[] { "othergroup" })
            .doAs(new PrivilegedExceptionAction<FileSystem>() {
                @Override
                public FileSystem run() throws Exception {
                    return cluster.getFileSystem(0);
                }
            });
}

From source file:org.apache.hadoop.hbase.security.visibility.TestVisibilityLabelsWithDeletes.java

@Test
public void testVisibilityLabelsWithDeleteColumnExactVersion() throws Exception {
    setAuths();
    final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
    long[] ts = new long[] { 123l, 125l };
    try (Table table = createTableAndWriteDataWithLabels(ts, CONFIDENTIAL + "|" + TOPSECRET, SECRET);) {
        PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                try (Connection connection = ConnectionFactory.createConnection(conf);
                        Table table = connection.getTable(tableName)) {
                    Delete d = new Delete(row1);
                    d.setCellVisibility(new CellVisibility(TOPSECRET + "|" + CONFIDENTIAL));
                    d.addColumn(fam, qual, 123l);
                    table.delete(d);
                } catch (Throwable t) {
                    throw new IOException(t);
                }
                return null;
            }
        };
        SUPERUSER.runAs(actiona);

        TEST_UTIL.getAdmin().flush(tableName);
        Scan s = new Scan();
        s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL));
        ResultScanner scanner = table.getScanner(s);
        Result[] next = scanner.next(3);
        assertTrue(next.length == 1);
        CellScanner cellScanner = next[0].cellScanner();
        cellScanner.advance();
        Cell current = cellScanner.current();
        assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row2, 0,
                row2.length));
    }
}

From source file:org.apache.hadoop.hdfs.server.namenode.ha.TestDelegationTokensWithHA.java

private static void doRenewOrCancel(final Token<DelegationTokenIdentifier> token, final Configuration conf,
        final TokenTestAction action) throws IOException, InterruptedException {
    UserGroupInformation.createRemoteUser("JobTracker").doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            switch (action) {
            case RENEW:
                token.renew(conf);
                break;
            case CANCEL:
                token.cancel(conf);
                break;
            default:
                fail("bad action:" + action);
            }
            return null;
        }
    });
}

From source file:org.apache.axiom.om.util.StAXUtils.java

public static XMLStreamWriter createXMLStreamWriter(StAXWriterConfiguration configuration,
        final OutputStream out) throws XMLStreamException {
    final XMLOutputFactory outputFactory = getXMLOutputFactory(configuration);
    try {
        XMLStreamWriter writer = (XMLStreamWriter) AccessController
                .doPrivileged(new PrivilegedExceptionAction() {
                    public Object run() throws XMLStreamException {
                        return outputFactory.createXMLStreamWriter(out, OMConstants.DEFAULT_CHAR_SET_ENCODING);
                    }
                });

        if (isDebugEnabled) {
            log.debug("XMLStreamWriter is " + writer.getClass().getName());
        }
        return writer;
    } catch (PrivilegedActionException pae) {
        throw (XMLStreamException) pae.getException();
    }
}

From source file:org.apache.falcon.recipe.RecipeTool.java

private FileSystem createFileSystem(UserGroupInformation ugi, final URI uri, final Configuration conf)
        throws Exception {
    try {
        final String proxyUserName = ugi.getShortUserName();
        if (proxyUserName.equals(UserGroupInformation.getLoginUser().getShortUserName())) {
            return FileSystem.get(uri, conf);
        }

        return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
            public FileSystem run() throws Exception {
                return FileSystem.get(uri, conf);
            }
        });
    } catch (InterruptedException ex) {
        throw new IOException("Exception creating FileSystem:" + ex.getMessage(), ex);
    }
}

From source file:org.apache.hadoop.mapred.TestJobACLs.java

private void verifyACLPersistence() throws IOException, InterruptedException {

    // Set the job up.
    final JobConf myConf = mr.createJobConf();
    myConf.set(JobContext.JOB_ACL_VIEW_JOB, viewColleague + " group2");

    // Submit the job as user1
    RunningJob job = submitJobAsUser(myConf, jobSubmitter);

    final JobID jobId = job.getID();

    // Kill the job and wait till it is actually killed so that it is written to
    // CompletedJobStore
    job.killJob();
    while (job.getJobState() != JobStatus.KILLED) {
        LOG.info("Waiting for the job to be killed successfully..");
        Thread.sleep(200);
    }

    // Now kill the cluster, so that the job is 'forgotten'
    tearDown();

    // Re-start the cluster
    startCluster(true);

    final JobConf myNewJobConf = mr.createJobConf();
    // Now verify view-job works off CompletedJobStore
    verifyViewJobAsAuthorizedUser(myNewJobConf, jobId, viewColleague);
    verifyViewJobAsAuthorizedUser(myNewJobConf, jobId, qAdmin);

    // Only JobCounters is persisted on the JobStore. So test counters only.
    UserGroupInformation unauthorizedUGI = UserGroupInformation.createUserForTesting(modifyColleague,
            new String[] {});

    unauthorizedUGI.doAs(new PrivilegedExceptionAction<Object>() {
        @SuppressWarnings("null")
        @Override
        public Object run() {
            RunningJob myJob = null;
            try {
                JobClient client = new JobClient(myNewJobConf);
                myJob = client.getJob(jobId);
            } catch (Exception e) {
                fail("Exception .." + e);
            }

            assertNotNull("Job " + jobId + " is not known to the JobTracker!", myJob);

            // Tests authorization failure with getCounters
            try {
                myJob.getCounters();
                fail("AccessControlException expected..");
            } catch (IOException ioe) {
                assertTrue(ioe.getMessage().contains("AccessControlException"));
            }

            return null;
        }
    });

}

From source file:org.apache.hadoop.hbase.master.procedure.AddColumnFamilyProcedure.java

/**
 * Coprocessor Action.
 * @param env MasterProcedureEnv
 * @param state the procedure state
 * @throws IOException
 * @throws InterruptedException
 */
private void runCoprocessorAction(final MasterProcedureEnv env, final AddColumnFamilyState state)
        throws IOException, InterruptedException {
    final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
    if (cpHost != null) {
        user.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                switch (state) {
                case ADD_COLUMN_FAMILY_PRE_OPERATION:
                    cpHost.preAddColumnHandler(tableName, cfDescriptor);
                    break;
                case ADD_COLUMN_FAMILY_POST_OPERATION:
                    cpHost.postAddColumnHandler(tableName, cfDescriptor);
                    break;
                default:
                    throw new UnsupportedOperationException(this + " unhandled state=" + state);
                }
                return null;
            }
        });
    }
}

From source file:org.apache.hadoop.hbase.regionserver.wal.TestWALReplay.java

/**
 * Test case of HRegion that is only made out of bulk loaded files.  Assert
 * that we don't 'crash'.
 * @throws IOException
 * @throws IllegalAccessException
 * @throws NoSuchFieldException
 * @throws IllegalArgumentException
 * @throws SecurityException
 */
@Test
public void testRegionMadeOfBulkLoadedFilesOnly() throws IOException, SecurityException,
        IllegalArgumentException, NoSuchFieldException, IllegalAccessException, InterruptedException {
    final TableName tableName = TableName.valueOf("testRegionMadeOfBulkLoadedFilesOnly");
    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = new Path(this.hbaseRootDir, tableName.getNameAsString());
    deleteDir(basedir);
    final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region2 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
    HRegion.closeHRegion(region2);
    HLog wal = createWAL(this.conf);
    HRegion region = HRegion.openHRegion(hri, htd, wal, this.conf);

    byte[] family = htd.getFamilies().iterator().next().getName();
    Path f = new Path(basedir, "hfile");
    HFileTestUtil.createHFile(this.conf, fs, f, family, family, Bytes.toBytes(""), Bytes.toBytes("z"), 10);
    List<Pair<byte[], String>> hfs = new ArrayList<Pair<byte[], String>>(1);
    hfs.add(Pair.newPair(family, f.toString()));
    region.bulkLoadHFiles(hfs, true);

    // Add an edit so something in the WAL
    byte[] row = tableName.getName();
    region.put((new Put(row)).add(family, family, family));
    wal.sync();
    final int rowsInsertedCount = 11;

    assertEquals(rowsInsertedCount, getScannedCount(region.getScanner(new Scan())));

    // Now 'crash' the region by stealing its wal
    final Configuration newConf = HBaseConfiguration.create(this.conf);
    User user = HBaseTestingUtility.getDifferentUser(newConf, tableName.getNameAsString());
    user.runAs(new PrivilegedExceptionAction() {
        public Object run() throws Exception {
            runWALSplit(newConf);
            HLog wal2 = createWAL(newConf);

            HRegion region2 = HRegion.openHRegion(newConf, FileSystem.get(newConf), hbaseRootDir, hri, htd,
                    wal2);
            long seqid2 = region2.getOpenSeqNum();
            assertTrue(seqid2 > -1);
            assertEquals(rowsInsertedCount, getScannedCount(region2.getScanner(new Scan())));

            // I can't close wal1.  Its been appropriated when we split.
            region2.close();
            wal2.closeAndDelete();
            return null;
        }
    });
}