Example usage for java.security PrivilegedExceptionAction PrivilegedExceptionAction

Introduction

On this page you can find usage examples for java.security.PrivilegedExceptionAction, showing how open-source projects such as Apache Hadoop and HBase create and run anonymous PrivilegedExceptionAction instances.

Prototype

public interface PrivilegedExceptionAction<T> {
    T run() throws Exception;
}
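
The action is usually written as an anonymous class (or, since Java 8, a lambda) whose run() method does the privileged work and may throw a checked exception. Below is a minimal, hypothetical sketch of the plain-JDK pattern; the class name, openPrivileged helper, and file path are illustrative only. AccessController.doPrivileged runs the action and wraps any checked exception thrown by run() in a PrivilegedActionException, which the caller unwraps. (AccessController is deprecated in recent JDK releases, but this is still the classic usage.)

import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

public class PrivilegedOpenExample {
    // Opens a file inside a privileged block; the helper name and path are placeholders.
    public static FileInputStream openPrivileged(final String path) throws FileNotFoundException {
        try {
            return AccessController.doPrivileged(new PrivilegedExceptionAction<FileInputStream>() {
                @Override
                public FileInputStream run() throws FileNotFoundException {
                    return new FileInputStream(path);
                }
            });
        } catch (PrivilegedActionException e) {
            // doPrivileged wraps checked exceptions thrown by run(); unwrap and rethrow.
            throw (FileNotFoundException) e.getException();
        }
    }
}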

Usage
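
The examples below come from Hadoop-ecosystem projects, where the action is normally executed under a specific user identity with UserGroupInformation.doAs (or runAs on a test User) rather than AccessController.doPrivileged. As a quick orientation, here is a minimal, hypothetical sketch of that pattern; the DoAsSketch class, user name, and URI are placeholders, not code taken from the examples.

import java.net.URI;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsSketch {
    // Runs FileSystem.get() as the given remote user; doAs rethrows
    // IOException/InterruptedException thrown by run().
    public static FileSystem openAs(final String user, final String uri) throws Exception {
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
        return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
            @Override
            public FileSystem run() throws Exception {
                return FileSystem.get(new URI(uri), new Configuration());
            }
        });
    }
}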

From source file:org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods.java

/** Handle HTTP GET request. */
@GET
@Path("{" + UriFsPathParam.NAME + ":.*}")
@Produces({ MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON })
public Response get(@Context final UserGroupInformation ugi,
        @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT) final DelegationParam delegation,
        @QueryParam(UserParam.NAME) @DefaultValue(UserParam.DEFAULT) final UserParam username,
        @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) final DoAsParam doAsUser,
        @PathParam(UriFsPathParam.NAME) final UriFsPathParam path,
        @QueryParam(GetOpParam.NAME) @DefaultValue(GetOpParam.DEFAULT) final GetOpParam op,
        @QueryParam(OffsetParam.NAME) @DefaultValue(OffsetParam.DEFAULT) final OffsetParam offset,
        @QueryParam(LengthParam.NAME) @DefaultValue(LengthParam.DEFAULT) final LengthParam length,
        @QueryParam(RenewerParam.NAME) @DefaultValue(RenewerParam.DEFAULT) final RenewerParam renewer,
        @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT) final BufferSizeParam bufferSize)
        throws IOException, InterruptedException {

    init(ugi, delegation, username, doAsUser, path, op, offset, length, renewer, bufferSize);

    return ugi.doAs(new PrivilegedExceptionAction<Response>() {
        @Override
        public Response run() throws IOException, URISyntaxException {
            REMOTE_ADDRESS.set(request.getRemoteAddr());
            try {

                final NameNode namenode = (NameNode) context.getAttribute("name.node");
                final String fullpath = path.getAbsolutePath();

                switch (op.getValue()) {
                case OPEN: {
                    final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser, fullpath,
                            op.getValue(), offset.getValue(), offset, length, bufferSize);
                    return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
                }
                case GET_BLOCK_LOCATIONS: {
                    final long offsetValue = offset.getValue();
                    final Long lengthValue = length.getValue();
                    final LocatedBlocks locatedblocks = namenode.getBlockLocations(fullpath, offsetValue,
                            lengthValue != null ? lengthValue : Long.MAX_VALUE);
                    final String js = JsonUtil.toJsonString(locatedblocks);
                    return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
                }
                case GETFILESTATUS: {
                    final HdfsFileStatus status = namenode.getFileInfo(fullpath);
                    if (status == null) {
                        throw new FileNotFoundException("File does not exist: " + fullpath);
                    }

                    final String js = JsonUtil.toJsonString(status, true);
                    return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
                }
                case LISTSTATUS: {
                    final StreamingOutput streaming = getListingStream(namenode, fullpath);
                    return Response.ok(streaming).type(MediaType.APPLICATION_JSON).build();
                }
                case GETCONTENTSUMMARY: {
                    final ContentSummary contentsummary = namenode.getContentSummary(fullpath);
                    final String js = JsonUtil.toJsonString(contentsummary);
                    return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
                }
                case GETFILECHECKSUM: {
                    final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser, fullpath,
                            op.getValue(), -1L);
                    return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
                }
                case GETDELEGATIONTOKEN: {
                    if (delegation.getValue() != null) {
                        throw new IllegalArgumentException(delegation.getName() + " parameter is not null.");
                    }
                    final Token<? extends TokenIdentifier> token = generateDelegationToken(namenode, ugi,
                            renewer.getValue());
                    final String js = JsonUtil.toJsonString(token);
                    return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
                }
                case GETHOMEDIRECTORY: {
                    final String js = JsonUtil.toJsonString(org.apache.hadoop.fs.Path.class.getSimpleName(),
                            WebHdfsFileSystem.getHomeDirectoryString(ugi));
                    return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
                }
                default:
                    throw new UnsupportedOperationException(op + " is not supported");
                }

            } finally {
                REMOTE_ADDRESS.set(null);
            }
        }
    });
}

From source file:org.apache.hadoop.ha.ZKFailoverController.java

/**
 * Coordinate a graceful failover to this node.
 * @throws ServiceFailedException if the node fails to become active
 * @throws IOException if some other error occurs
 */
void gracefulFailoverToYou() throws ServiceFailedException, IOException {
    try {
        UserGroupInformation.getLoginUser().doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                doGracefulFailover();
                return null;
            }

        });
    } catch (InterruptedException e) {
        throw new IOException(e);
    }
}

From source file:org.apache.hadoop.hbase.security.visibility.TestVisibilityLabelsWithDeletes.java

@Test
public void testVisibilityLabelsWithDeleteFamilyWithMultipleVersionsNoTimestamp() throws Exception {
    setAuths();
    final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
    try (Table table = doPuts(tableName)) {
        TEST_UTIL.getAdmin().flush(tableName);
        PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                try (Connection connection = ConnectionFactory.createConnection(conf);
                        Table table = connection.getTable(tableName)) {
                    Delete d1 = new Delete(row1);
                    d1.setCellVisibility(new CellVisibility(CONFIDENTIAL));
                    d1.addFamily(fam);
                    table.delete(d1);

                    Delete d2 = new Delete(row1);
                    d2.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET));
                    d2.addFamily(fam);
                    table.delete(d2);

                    Delete d3 = new Delete(row1);
                    d3.setCellVisibility(new CellVisibility(
                            "(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" + SECRET + "&" + TOPSECRET + ")"));
                    d3.addFamily(fam);
                    table.delete(d3);
                } catch (Throwable t) {
                    throw new IOException(t);
                }
                return null;
            }
        };
        SUPERUSER.runAs(actiona);
        Scan s = new Scan();
        s.setMaxVersions(5);
        s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));
        ResultScanner scanner = table.getScanner(s);
        Result[] next = scanner.next(3);
        assertEquals(1, next.length);
        CellScanner cellScanner = next[0].cellScanner();
        cellScanner.advance();
        Cell current = cellScanner.current();
        assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row2, 0,
                row2.length));
    }
}

From source file:org.apache.hadoop.crypto.key.kms.server.KMS.java

@GET
@Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}/" + KMSRESTConstants.VERSIONS_SUB_RESOURCE)
@Produces(MediaType.APPLICATION_JSON)
public Response getKeyVersions(@PathParam("name") final String name) throws Exception {
    try {
        LOG.trace("Entering getKeyVersions method.");
        UserGroupInformation user = HttpUserGroupInformation.get();
        KMSClientProvider.checkNotEmpty(name, "name");
        KMSWebApp.getKeyCallsMeter().mark();
        assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_KEY_VERSIONS, name);
        LOG.debug("Getting key versions for key {}", name);

        List<KeyVersion> ret = user.doAs(new PrivilegedExceptionAction<List<KeyVersion>>() {
            @Override
            public List<KeyVersion> run() throws Exception {
                return provider.getKeyVersions(name);
            }
        });

        Object json = KMSServerJSONUtils.toJSON(ret);
        kmsAudit.ok(user, KMSOp.GET_KEY_VERSIONS, name, "");
        LOG.trace("Exiting getKeyVersions method.");
        return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
    } catch (Exception e) {
        LOG.debug("Exception in getKeyVersions.", e);
        throw e;
    }
}

From source file:com.streamsets.pipeline.stage.origin.hdfs.cluster.ClusterHdfsSource.java

private void validateHadoopFS(List<ConfigIssue> issues) {
    boolean validHadoopFsUri = true;
    hadoopConf = getHadoopConfiguration(issues);
    String hdfsUriInConf;
    if (conf.hdfsUri != null && !conf.hdfsUri.isEmpty()) {
        hadoopConf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, conf.hdfsUri);
    } else {
        hdfsUriInConf = hadoopConf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY);
        if (hdfsUriInConf == null) {
            issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(),
                    ClusterHdfsConfigBean.CLUSTER_HDFS_CONFIG_BEAN_PREFIX + "hdfsUri", Errors.HADOOPFS_19));
            return;
        } else {
            conf.hdfsUri = hdfsUriInConf;
        }
    }
    if (conf.hdfsUri.contains("://")) {
        try {
            URI uri = new URI(conf.hdfsUri);
            if (!"hdfs".equals(uri.getScheme())) {
                issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(),
                        ClusterHdfsConfigBean.CLUSTER_HDFS_CONFIG_BEAN_PREFIX + "hdfsUri", Errors.HADOOPFS_12,
                        conf.hdfsUri, uri.getScheme()));
                validHadoopFsUri = false;
            } else if (uri.getAuthority() == null) {
                issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(),
                        ClusterHdfsConfigBean.CLUSTER_HDFS_CONFIG_BEAN_PREFIX + "hdfsUri", Errors.HADOOPFS_13,
                        conf.hdfsUri));
                validHadoopFsUri = false;
            }
        } catch (Exception ex) {
            issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(),
                    ClusterHdfsConfigBean.CLUSTER_HDFS_CONFIG_BEAN_PREFIX + "hdfsUri", Errors.HADOOPFS_22,
                    conf.hdfsUri, ex.getMessage(), ex));
            validHadoopFsUri = false;
        }
    } else {
        issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(),
                ClusterHdfsConfigBean.CLUSTER_HDFS_CONFIG_BEAN_PREFIX + "hdfsUri", Errors.HADOOPFS_02,
                conf.hdfsUri));
        validHadoopFsUri = false;
    }

    StringBuilder logMessage = new StringBuilder();
    try {
        loginUgi = HadoopSecurityUtil.getLoginUser(hadoopConf);
        if (conf.hdfsKerberos) {
            logMessage.append("Using Kerberos");
            if (loginUgi.getAuthenticationMethod() != UserGroupInformation.AuthenticationMethod.KERBEROS) {
                issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(),
                        ClusterHdfsConfigBean.CLUSTER_HDFS_CONFIG_BEAN_PREFIX + "hdfsKerberos",
                        Errors.HADOOPFS_00, loginUgi.getAuthenticationMethod(),
                        UserGroupInformation.AuthenticationMethod.KERBEROS));
            }
        } else {
            logMessage.append("Using Simple");
            hadoopConf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
                    UserGroupInformation.AuthenticationMethod.SIMPLE.name());
        }
        if (validHadoopFsUri) {
            getUGI().doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    try (FileSystem fs = getFileSystemForInitDestroy()) { //to trigger the close
                    }
                    return null;
                }
            });
        }
    } catch (Exception ex) {
        LOG.info("Error connecting to FileSystem: " + ex, ex);
        issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), null, Errors.HADOOPFS_11,
                conf.hdfsUri, String.valueOf(ex), ex));
    }
    LOG.info("Authentication Config: " + logMessage);
}

From source file:com.thinkbiganalytics.datalake.authorization.SentryAuthorizationService.java

@Override
public void deleteHdfsPolicy(String categoryName, String feedName, List<String> hdfsPaths) {

    /**
     * Delete ACL from list of HDFS Paths
     */
    if (this.sentryConnection.getKerberosTicketConfiguration().isKerberosEnabled()) {
        try {

            UserGroupInformation ugi = authenticatePolicyCreatorWithKerberos();
            if (ugi != null) {
                ugi.doAs(new PrivilegedExceptionAction<Void>() {
                    @Override
                    public Void run() throws Exception {
                        String allPathForAclDeletion = convertListToString(hdfsPaths, ",");
                        try {
                            sentryClientObject.flushACL(sentryConnection.getHadoopConfiguration(),
                                    allPathForAclDeletion);
                        } catch (Exception e) {
                            log.error("Unable to remove ACL from HDFS paths: " + e.getMessage());
                            throw new RuntimeException(e);
                        }
                        return null;
                    }
                });
            }
        } catch (Exception e) {
            log.error("Failed to clear HDFS ACL policy with Kerberos" + e.getMessage());
            throw new RuntimeException(e);
        }
    } else {
        String allPathForAclDeletion = convertListToString(hdfsPaths, ",");
        try {
            sentryClientObject.flushACL(sentryConnection.getHadoopConfiguration(), allPathForAclDeletion);
        } catch (Exception e) {
            log.error("Unable to remove ACL from HDFS Paths" + e.getMessage());
            throw new RuntimeException(e);
        }

    }
}

From source file:org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.java

/**
 * Test writing edits into an HRegion, closing it, splitting logs, opening
 * Region again.  Verify seqids.
 * @throws IOException
 * @throws IllegalAccessException
 * @throws NoSuchFieldException
 * @throws IllegalArgumentException
 * @throws SecurityException
 */
@Test
public void testReplayEditsWrittenViaHRegion() throws IOException, SecurityException, IllegalArgumentException,
        NoSuchFieldException, IllegalAccessException, InterruptedException {
    final TableName tableName = TableName.valueOf("testReplayEditsWrittenViaHRegion");
    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = FSUtils.getTableDir(this.hbaseRootDir, tableName);
    deleteDir(basedir);
    final byte[] rowName = tableName.getName();
    final int countPerFamily = 10;
    final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region3 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
    HBaseTestingUtility.closeRegionAndWAL(region3);
    // Write countPerFamily edits into the three families.  Do a flush on one
    // of the families during the load of edits so its seqid is not same as
    // others to test we do right thing when different seqids.
    WAL wal = createWAL(this.conf, hbaseRootDir, logName);
    HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal);
    long seqid = region.getOpenSeqNum();
    boolean first = true;
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x");
        if (first) {
            // If first, so we have at least one family w/ different seqid to rest.
            region.flush(true);
            first = false;
        }
    }
    // Now assert edits made it in.
    final Get g = new Get(rowName);
    Result result = region.get(g);
    assertEquals(countPerFamily * htd.getFamilies().size(), result.size());
    // Now close the region (without flush), split the log, reopen the region and assert that
    // replay of log has the correct effect, that our seqids are calculated correctly so
    // all edits in logs are seen as 'stale'/old.
    region.close(true);
    wal.shutdown();
    runWALSplit(this.conf);
    WAL wal2 = createWAL(this.conf, hbaseRootDir, logName);
    HRegion region2 = HRegion.openHRegion(conf, this.fs, hbaseRootDir, hri, htd, wal2);
    long seqid2 = region2.getOpenSeqNum();
    assertTrue(seqid + result.size() < seqid2);
    final Result result1b = region2.get(g);
    assertEquals(result.size(), result1b.size());

    // Next test.  Add more edits, then 'crash' this region by stealing its wal
    // out from under it and assert that replay of the log adds the edits back
    // correctly when region is opened again.
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region2, "y");
    }
    // Get count of edits.
    final Result result2 = region2.get(g);
    assertEquals(2 * result.size(), result2.size());
    wal2.sync();
    final Configuration newConf = HBaseConfiguration.create(this.conf);
    User user = HBaseTestingUtility.getDifferentUser(newConf, tableName.getNameAsString());
    user.runAs(new PrivilegedExceptionAction() {
        @Override
        public Object run() throws Exception {
            runWALSplit(newConf);
            FileSystem newFS = FileSystem.get(newConf);
            // Make a new wal for new region open.
            WAL wal3 = createWAL(newConf, hbaseRootDir, logName);
            final AtomicInteger countOfRestoredEdits = new AtomicInteger(0);
            HRegion region3 = new HRegion(basedir, wal3, newFS, newConf, hri, htd, null) {
                @Override
                protected boolean restoreEdit(Store s, Cell cell) {
                    boolean b = super.restoreEdit(s, cell);
                    countOfRestoredEdits.incrementAndGet();
                    return b;
                }
            };
            long seqid3 = region3.initialize();
            Result result3 = region3.get(g);
            // Assert that count of cells is same as before crash.
            assertEquals(result2.size(), result3.size());
            assertEquals(htd.getFamilies().size() * countPerFamily, countOfRestoredEdits.get());

            // I can't close wal1.  Its been appropriated when we split.
            region3.close();
            wal3.close();
            return null;
        }
    });
}

From source file:com.streamsets.pipeline.stage.origin.hdfs.cluster.ClusterHdfsSource.java

private FileSystem getFileSystemForInitDestroy() throws IOException {
    try {
        return getUGI().doAs(new PrivilegedExceptionAction<FileSystem>() {
            @Override
            public FileSystem run() throws Exception {
                return FileSystem.get(new URI(conf.hdfsUri), hadoopConf);
            }
        });
    } catch (IOException ex) {
        throw ex;
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
}

From source file:org.apache.hadoop.fs.TestFileSystem.java

public void testCloseAllForUGI() throws Exception {
    final Configuration conf = new Configuration();
    conf.set("fs.cachedfile.impl", conf.get("fs.file.impl"));
    UserGroupInformation ugiA = UserGroupInformation.createRemoteUser("foo");
    FileSystem fsA = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
        public FileSystem run() throws Exception {
            return FileSystem.get(new URI("cachedfile://a"), conf);
        }
    });
    //Now we should get the cached filesystem
    FileSystem fsA1 = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
        public FileSystem run() throws Exception {
            return FileSystem.get(new URI("cachedfile://a"), conf);
        }
    });
    assertSame(fsA, fsA1);

    FileSystem.closeAllForUGI(ugiA);

    //Now we should get a different (newly created) filesystem
    fsA1 = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
        public FileSystem run() throws Exception {
            return FileSystem.get(new URI("cachedfile://a"), conf);
        }
    });
    assertNotSame(fsA, fsA1);
}

From source file:org.apache.hadoop.hbase.security.access.TestCellACLWithMultipleVersions.java

@Test
public void testCellPermissionsWithDeleteExactVersion() throws Exception {
    final byte[] TEST_ROW1 = Bytes.toBytes("r1");
    final byte[] TEST_Q1 = Bytes.toBytes("q1");
    final byte[] TEST_Q2 = Bytes.toBytes("q2");
    final byte[] ZERO = Bytes.toBytes(0L);

    final User user1 = User.createUserForTesting(conf, "user1", new String[0]);
    final User user2 = User.createUserForTesting(conf, "user2", new String[0]);

    verifyAllowed(new AccessTestAction() {
        @Override
        public Object run() throws Exception {
            HTable t = new HTable(conf, TEST_TABLE.getTableName());
            try {
                Map<String, Permission> permsU1andOwner = new HashMap<String, Permission>();
                permsU1andOwner.put(user1.getShortName(),
                        new Permission(Permission.Action.READ, Permission.Action.WRITE));
                permsU1andOwner.put(USER_OWNER.getShortName(),
                        new Permission(Permission.Action.READ, Permission.Action.WRITE));
                Map<String, Permission> permsU2andOwner = new HashMap<String, Permission>();
                permsU2andOwner.put(user2.getShortName(),
                        new Permission(Permission.Action.READ, Permission.Action.WRITE));
                permsU2andOwner.put(USER_OWNER.getShortName(),
                        new Permission(Permission.Action.READ, Permission.Action.WRITE));
                Put p = new Put(TEST_ROW1);
                p.add(TEST_FAMILY1, TEST_Q1, 123, ZERO);
                p.setACL(permsU1andOwner);
                t.put(p);
                p = new Put(TEST_ROW1);
                p.add(TEST_FAMILY1, TEST_Q2, 123, ZERO);
                p.setACL(permsU2andOwner);
                t.put(p);
                p = new Put(TEST_ROW1);
                p.add(TEST_FAMILY2, TEST_Q1, 123, ZERO);
                p.add(TEST_FAMILY2, TEST_Q2, 123, ZERO);
                p.setACL(permsU2andOwner);
                t.put(p);

                p = new Put(TEST_ROW1);
                p.add(TEST_FAMILY2, TEST_Q1, 125, ZERO);
                p.add(TEST_FAMILY2, TEST_Q2, 125, ZERO);
                p.setACL(permsU1andOwner);
                t.put(p);

                p = new Put(TEST_ROW1);
                p.add(TEST_FAMILY1, TEST_Q1, 127, ZERO);
                p.setACL(permsU2andOwner);
                t.put(p);
                p = new Put(TEST_ROW1);
                p.add(TEST_FAMILY1, TEST_Q2, 127, ZERO);
                p.setACL(permsU1andOwner);
                t.put(p);
                p = new Put(TEST_ROW1);
                p.add(TEST_FAMILY2, TEST_Q1, 129, ZERO);
                p.add(TEST_FAMILY2, TEST_Q2, 129, ZERO);
                p.setACL(permsU1andOwner);
                t.put(p);
            } finally {
                t.close();
            }
            return null;
        }
    }, USER_OWNER);

    // user1 should be allowed to delete TEST_ROW1 because it has write permission on both
    // versions of the cells
    user1.runAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            HTable t = new HTable(conf, TEST_TABLE.getTableName());
            try {
                Delete d = new Delete(TEST_ROW1);
                d.deleteColumn(TEST_FAMILY1, TEST_Q1, 123);
                d.deleteColumn(TEST_FAMILY1, TEST_Q2);
                d.deleteFamilyVersion(TEST_FAMILY2, 125);
                t.delete(d);
            } finally {
                t.close();
            }
            return null;
        }
    });

    user2.runAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            HTable t = new HTable(conf, TEST_TABLE.getTableName());
            try {
                Delete d = new Delete(TEST_ROW1, 127);
                d.deleteColumns(TEST_FAMILY1, TEST_Q1);
                d.deleteColumns(TEST_FAMILY1, TEST_Q2);
                d.deleteFamily(TEST_FAMILY2, 129);
                t.delete(d);
                fail("user2 can not do the delete");
            } catch (Exception e) {
                // Expected: user2 lacks write permission on some of the covered cell versions.
            } finally {
                t.close();
            }
            return null;
        }
    });
}