Example usage for org.apache.hadoop.security UserGroupInformation getCurrentUser

List of usage examples for org.apache.hadoop.security UserGroupInformation getCurrentUser

Introduction

On this page you can find example usages of org.apache.hadoop.security.UserGroupInformation#getCurrentUser.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation getCurrentUser() throws IOException 

Document

Return the current user, including any doAs in the current stack.
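
Below is a minimal, self-contained sketch of the call (the proxy user name "someUser" and the doAs action are illustrative assumptions, not part of the API):

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class GetCurrentUserExample {
    public static void main(String[] args) throws Exception {
        // Outside of any doAs block this returns the login user.
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        System.out.println("current user: " + ugi.getUserName());

        // Inside a doAs block, getCurrentUser() returns the user at the top of the doAs stack.
        UserGroupInformation proxy = UserGroupInformation.createProxyUser("someUser", // hypothetical user name
                UserGroupInformation.getLoginUser());
        proxy.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws IOException {
                System.out.println("inside doAs: " + UserGroupInformation.getCurrentUser().getUserName());
                return null;
            }
        });
    }
}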

Usage

From source file:com.sogou.dockeronyarn.service.DockerApplicationMaster_24.java

License:Apache License

/**
 * Main run function for the application master
 *
 * @throws YarnException
 * @throws IOException
 */
@SuppressWarnings({ "unchecked" })
public void run() throws YarnException, IOException {
    LOG.info("Starting ApplicationMaster");
    try {
        publishApplicationAttemptEvent(timelineClient, appAttemptID.toString(), DSEvent.DS_APP_ATTEMPT_START);
    } catch (Exception e) {
        LOG.error("App Attempt start event coud not be pulished for " + appAttemptID.toString(), e);
    }

    // Note: Credentials, Token, UserGroupInformation, DataOutputBuffer class
    // are marked as LimitedPrivate
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    // Now remove the AM->RM token so that containers cannot access it.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    LOG.info("Executing with tokens:");
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.info(token);
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    // Create appSubmitterUgi and add original tokens to it
    String appSubmitterUserName = System.getenv(ApplicationConstants.Environment.USER.name());
    appSubmitterUgi = UserGroupInformation.createRemoteUser(appSubmitterUserName);
    appSubmitterUgi.addCredentials(credentials);

    AMRMClientAsync.CallbackHandler allocListener = new RMCallbackHandler();
    amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, allocListener);
    amRMClient.init(conf);
    amRMClient.start();

    containerListener = createNMCallbackHandler();
    nmClientAsync = new NMClientAsyncImpl(containerListener);
    nmClientAsync.init(conf);
    nmClientAsync.start();

    // Setup local RPC Server to accept status requests directly from clients
    // TODO need to setup a protocol for client to be able to communicate to
    // the RPC server
    // TODO use the rpc port info to register with the RM for the client to
    // send requests to this app master

    // Register self with ResourceManager
    // This will start heartbeating to the RM
    appMasterHostname = NetUtils.getHostname();
    RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(appMasterHostname,
            appMasterRpcPort, appMasterTrackingUrl);
    // Dump out information about cluster capability as seen by the
    // resource manager
    int maxMem = response.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    int maxVCores = response.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max vcores capabililty of resources in this cluster " + maxVCores);

    // A resource ask cannot exceed the max.
    if (containerMemory > maxMem) {
        LOG.info("Container memory specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerMemory + ", max=" + maxMem);
        containerMemory = maxMem;
    }

    if (containerVirtualCores > maxVCores) {
        LOG.info("Container virtual cores specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerVirtualCores + ", max=" + maxVCores);
        containerVirtualCores = maxVCores;
    }

    List<Container> previousAMRunningContainers = response.getContainersFromPreviousAttempts();
    LOG.info(appAttemptID + " received " + previousAMRunningContainers.size()
            + " previous attempts' running containers on AM registration.");
    numAllocatedContainers.addAndGet(previousAMRunningContainers.size());

    int numTotalContainersToRequest = numTotalContainers - previousAMRunningContainers.size();
    // Setup ask for containers from RM
    // Send request for containers to RM
    // Until we get our fully allocated quota, we keep on polling RM for
    // containers
    // Keep looping until all the containers are launched and shell script
    // executed on them ( regardless of success/failure).
    for (int i = 0; i < numTotalContainersToRequest; ++i) {
        ContainerRequest containerAsk = setupContainerAskForRM();
        amRMClient.addContainerRequest(containerAsk);
    }
    numRequestedContainers.set(numTotalContainers);
    try {
        publishApplicationAttemptEvent(timelineClient, appAttemptID.toString(), DSEvent.DS_APP_ATTEMPT_END);
    } catch (Exception e) {
        LOG.error("App Attempt start event coud not be pulished for " + appAttemptID.toString(), e);
    }
    numRetryCount.set(0);
}
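
The allTokens buffer built above is what the application master normally hands to each launched container. A hedged sketch of that hand-off (localResources, containerEnv, commands and container are placeholders assumed to be prepared elsewhere in the class):

// Sketch only: pass the filtered credentials on to a container at launch time.
ContainerLaunchContext ctx = ContainerLaunchContext.newInstance(
        localResources,          // Map<String, LocalResource>, assumed prepared elsewhere
        containerEnv,            // Map<String, String> container environment
        commands,                // List<String> with the container command line
        null,                    // no auxiliary service data
        allTokens.duplicate(),   // the serialized tokens minus the AM->RM token
        null);                   // no application ACLs
nmClientAsync.startContainerAsync(container, ctx);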

From source file:com.splicemachine.db.impl.drda.DRDAConnThread.java

License:Open Source License

/**
 *  Parse Access Security
 *
 *  If the target server supports the SECMEC requested by the application requester
 *  then a single value is returned and it is identical to the SECMEC value
 *  in the ACCSEC command. If the target server does not support the SECMEC
 *  requested, then one or more values are returned and the application requester
 *  must choose one of these values for the security mechanism.
 *  We currently support
 *      - user id and password (default for JCC)
 *      - encrypted user id and password
 *      - strong password substitute (USRSSBPWD w/
 *                                    Derby network client only)
 *
 *  Instance variables
 *      SECMGRNM - security manager name - optional
 *      SECMEC   - security mechanism - required
 *      RDBNAM   - relational database name - optional
 *      SECTKN   - security token - optional (required if sec mech. needs it)
 *
 *  @return security check code - 0 if everything O.K.
 */
private int parseACCSEC() throws DRDAProtocolException {
    int securityCheckCode = 0;
    int securityMechanism = 0;
    byte[] secTokenIn = null;

    reader.markCollection();
    int codePoint = reader.getCodePoint();
    while (codePoint != -1) {
        switch (codePoint) {
        //optional
        case CodePoint.SECMGRNM:
            // this is defined to be 0 length
            if (reader.getDdmLength() != 0)
                badObjectLength(CodePoint.SECMGRNM);
            break;
        //required
        case CodePoint.SECMEC:
            checkLength(CodePoint.SECMEC, 2);
            securityMechanism = reader.readNetworkShort();
            if (SanityManager.DEBUG)
                trace("parseACCSEC - Security mechanism = " + securityMechanism);

            // if Property.DRDA_PROP_SECURITYMECHANISM has been set, then
            // network server only accepts connections which use that
            // security mechanism. No other types of connections 
            // are accepted.
            // Make check to see if this property has been set.
            // if set, and if the client requested security mechanism 
            // is not the same, then return a security check code 
            // that the server does not support/allow this security 
            // mechanism
            if ((server.getSecurityMechanism() != NetworkServerControlImpl.INVALID_OR_NOTSET_SECURITYMECHANISM)
                    && securityMechanism != server.getSecurityMechanism()) {
                securityCheckCode = CodePoint.SECCHKCD_NOTSUPPORTED;
                if (SanityManager.DEBUG) {
                    trace("parseACCSEC - SECCHKCD_NOTSUPPORTED [1] - " + securityMechanism + " <> "
                            + server.getSecurityMechanism() + "\n");
                }
            } else {
                // for plain text userid,password USRIDPWD, and USRIDONL
                // no need of decryptionManager
                if (securityMechanism != CodePoint.SECMEC_USRIDPWD
                        && securityMechanism != CodePoint.SECMEC_USRIDONL) {
                    if (securityMechanism == CodePoint.SECMEC_KERSEC) {

                        try {
                            user = UserGroupInformation.getCurrentUser();

                            Exception exception = user.doAs(new PrivilegedAction<Exception>() {
                                @Override
                                public Exception run() {
                                    try {
                                        // Get own Kerberos credentials for accepting connection
                                        GSSManager manager = GSSManager.getInstance();
                                        Oid krb5Mechanism = new Oid("1.2.840.113554.1.2.2");
                                        GSSCredential serverCreds = manager.createCredential(null,
                                                GSSCredential.DEFAULT_LIFETIME, krb5Mechanism,
                                                GSSCredential.ACCEPT_ONLY);

                                        /*
                                         * Create a GSSContext to receive the incoming request
                                         * from the client, using the server credentials
                                         * acquired above so that the underlying mechanism
                                         * can accept this connection.
                                         */
                                        gssContext = manager.createContext((GSSCredential) serverCreds);
                                    } catch (Exception e) {
                                        return e;
                                    }
                                    return null;
                                }
                            });

                            if (exception != null) {
                                throw exception;
                            }
                        } catch (Exception e) {
                            println2Log(null, session.drdaID, e.getMessage());
                            // Local security service non-retryable error.
                            securityCheckCode = CodePoint.SECCHKCD_0A;
                        }

                    }
                    // These are the only other mechanisms we understand
                    else if (((securityMechanism != CodePoint.SECMEC_EUSRIDPWD)
                            || (securityMechanism == CodePoint.SECMEC_EUSRIDPWD && !server.supportsEUSRIDPWD()))
                            && (securityMechanism != CodePoint.SECMEC_USRSSBPWD))
                    //securityCheckCode = CodePoint.SECCHKCD_NOTSUPPORTED;
                    {
                        securityCheckCode = CodePoint.SECCHKCD_NOTSUPPORTED;
                        if (SanityManager.DEBUG) {
                            trace("parseACCSEC - SECCHKCD_NOTSUPPORTED [2]\n");
                        }
                    } else {
                        // We delay the initialization and required
                        // processing for SECMEC_USRSSBPWD as we need
                        // to ensure the database is booted so that
                        // we can verify that the current auth scheme
                        // is set to BUILT-IN or NONE. For this we need
                        // to have the RDBNAM codepoint available.
                        //
                        // See validateSecMecUSRSSBPWD() call below
                        if (securityMechanism == CodePoint.SECMEC_USRSSBPWD)
                            break;

                        // SECMEC_EUSRIDPWD initialization
                        try {
                            if (decryptionManager == null)
                                decryptionManager = new DecryptionManager();
                            myPublicKey = decryptionManager.obtainPublicKey();
                        } catch (SQLException e) {
                            println2Log(null, session.drdaID, e.getMessage());
                            // Local security service non-retryable error.
                            securityCheckCode = CodePoint.SECCHKCD_0A;
                        }
                    }
                }
            }
            break;
        //optional (currently required for Derby - needed for
        //          DERBY-528 as well)
        case CodePoint.RDBNAM:
            String dbname = parseRDBNAM();
            Database d = session.getDatabase(dbname);
            if (d == null)
                initializeDatabase(dbname);
            else {
                // reset database for connection re-use 
                // DERBY-3596
                // If we are reusing resources for a new physical
                // connection, reset the database object. If the client
                // is in the process of creating a new logical
                // connection only, don't reset the database object.
                if (!deferredReset) {
                    d.reset();
                }
                database = d;
            }
            break;
        //optional - depending on security Mechanism 
        case CodePoint.SECTKN:
            secTokenIn = reader.readBytes();
            break;
        default:
            invalidCodePoint(codePoint);
        }
        codePoint = reader.getCodePoint();
    }

    // check for required CodePoint's
    if (securityMechanism == 0)
        missingCodePoint(CodePoint.SECMEC);

    if (database == null)
        initializeDatabase(null);
    database.securityMechanism = securityMechanism;
    database.secTokenIn = secTokenIn;

    // If security mechanism is SECMEC_USRSSBPWD, then ensure it can be
    // used for the database or system based on the client's connection
    // URL and its identity.
    if (securityCheckCode == 0 && (database.securityMechanism == CodePoint.SECMEC_USRSSBPWD)) {
        if (SanityManager.DEBUG)
            SanityManager.ASSERT((securityCheckCode == 0), "SECMEC_USRSSBPWD: securityCheckCode should not "
                    + "already be set, found it initialized with " + "a value of '" + securityCheckCode + "'.");
        securityCheckCode = validateSecMecUSRSSBPWD();
    }

    // need security token
    if (securityCheckCode == 0 && (database.securityMechanism == CodePoint.SECMEC_EUSRIDPWD
            || database.securityMechanism == CodePoint.SECMEC_USRSSBPWD) && database.secTokenIn == null)
        securityCheckCode = CodePoint.SECCHKCD_SECTKNMISSING_OR_INVALID;

    // shouldn't have security token
    if (securityCheckCode == 0 && (database.securityMechanism == CodePoint.SECMEC_USRIDPWD
            || database.securityMechanism == CodePoint.SECMEC_USRIDONL) && database.secTokenIn != null)
        securityCheckCode = CodePoint.SECCHKCD_SECTKNMISSING_OR_INVALID;

    if (SanityManager.DEBUG)
        trace("** ACCSECRD securityCheckCode is: " + securityCheckCode);

    // If the security check was successful set the session state to
    // security accessed.  Otherwise go back to attributes exchanged so we
    // require another ACCSEC
    if (securityCheckCode == 0)
        session.setState(session.SECACC);
    else
        session.setState(session.ATTEXC);

    return securityCheckCode;
}

From source file:com.splicemachine.yarn.test.BareYarnTest.java

License:Apache License

/**
 * All we really need to do here is to create a yarn client, configure it using the same
 * yarn-site.xml as was used by the server to start up.
 * @throws YarnException
 * @throws IOException
 */
@Test(timeout = 60000)
@Ignore("Broken by dependency change")
public void testAMRMClientMatchingFitInferredRack() throws YarnException, IOException {
    // create, submit new app
    ApplicationSubmissionContext appContext = yarnClient.createApplication().getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();
    // set the application name
    appContext.setApplicationName("Test");
    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    pri.setPriority(0);
    appContext.setPriority(pri);
    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue("default");
    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = BuilderUtils.newContainerLaunchContext(
            Collections.<String, LocalResource>emptyMap(), new HashMap<String, String>(),
            Arrays.asList("sleep", "100"), new HashMap<String, ByteBuffer>(), null,
            new HashMap<ApplicationAccessType, String>());
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(Resource.newInstance(1024, 1));
    // Create the request to send to the applications manager
    SubmitApplicationRequest appRequest = Records.newRecord(SubmitApplicationRequest.class);
    appRequest.setApplicationSubmissionContext(appContext);
    // Submit the application to the applications manager
    yarnClient.submitApplication(appContext);

    // wait for app to start
    RMAppAttempt appAttempt;
    while (true) {
        ApplicationReport appReport = yarnClient.getApplicationReport(appId);
        if (appReport.getYarnApplicationState() == YarnApplicationState.ACCEPTED) {
            ApplicationAttemptId attemptId = appReport.getCurrentApplicationAttemptId();
            appAttempt = yarnPlatform.getResourceManager().getRMContext().getRMApps()
                    .get(attemptId.getApplicationId()).getCurrentAppAttempt();
            while (true) {
                if (appAttempt.getAppAttemptState() == RMAppAttemptState.LAUNCHED) {
                    break;
                }
            }
            break;
        }
    }
    // Just dig into the ResourceManager and get the AMRMToken just for the sake
    // of testing.
    UserGroupInformation.setLoginUser(
            UserGroupInformation.createRemoteUser(UserGroupInformation.getCurrentUser().getUserName()));
    UserGroupInformation.getCurrentUser().addToken(appAttempt.getAMRMToken());
}
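
The test above assumes yarnClient has already been created and started against the mini cluster (typically in a @BeforeClass method). A minimal sketch of that setup, assuming conf is the same yarn-site.xml backed Configuration the server was started with:

// Sketch only: build and start the client used by the test.
YarnClient yarnClient = YarnClient.createYarnClient();
yarnClient.init(conf);   // conf assumed to point at the running mini cluster
yarnClient.start();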

From source file:com.srini.hadoopYarn.ApplicationMaster.java

License:Apache License

/**
 * Main run function for the application master
 *
 * @throws YarnException
 * @throws IOException
 */
@SuppressWarnings({ "unchecked" })
public boolean run() throws YarnException, IOException {
    LOG.info("Starting ApplicationMaster");

    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    // Now remove the AM->RM token so that containers cannot access it.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    AMRMClientAsync.CallbackHandler allocListener = new RMCallbackHandler();
    amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, allocListener);
    amRMClient.init(conf);
    amRMClient.start();

    containerListener = createNMCallbackHandler();
    nmClientAsync = new NMClientAsyncImpl(containerListener);
    nmClientAsync.init(conf);
    nmClientAsync.start();

    // Setup local RPC Server to accept status requests directly from clients
    // TODO need to setup a protocol for client to be able to communicate to
    // the RPC server
    // TODO use the rpc port info to register with the RM for the client to
    // send requests to this app master

    // Register self with ResourceManager
    // This will start heartbeating to the RM
    appMasterHostname = NetUtils.getHostname();
    RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(appMasterHostname,
            appMasterRpcPort, appMasterTrackingUrl);
    // Dump out information about cluster capability as seen by the
    // resource manager
    int maxMem = response.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    // A resource ask cannot exceed the max.
    if (containerMemory > maxMem) {
        LOG.info("Container memory specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerMemory + ", max=" + maxMem);
        containerMemory = maxMem;
    }

    // Setup ask for containers from RM
    // Send request for containers to RM
    // Until we get our fully allocated quota, we keep on polling RM for
    // containers
    // Keep looping until all the containers are launched and shell script
    // executed on them ( regardless of success/failure).
    for (int i = 0; i < numTotalContainers; ++i) {
        ContainerRequest containerAsk = setupContainerAskForRM();
        amRMClient.addContainerRequest(containerAsk);
    }
    numRequestedContainers.set(numTotalContainers);

    while (!done && (numCompletedContainers.get() != numTotalContainers)) {
        try {
            Thread.sleep(200);
        } catch (InterruptedException ex) {
        }
    }
    finish();

    return success;
}

From source file:com.tito.easyyarn.appmaster.ApplicationMaster.java

License:Apache License

private void extractTokens() {
    // Credentials, Token, UserGroupInformation, DataOutputBuffer
    Credentials credentials;
    try {
        credentials = UserGroupInformation.getCurrentUser().getCredentials();
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        // Now remove the AM->RM token so that containers cannot access it.
        Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
        LOG.info("Executing with tokens:");
        while (iter.hasNext()) {
            Token<?> token = iter.next();
            LOG.info(token);
            if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
                iter.remove();
            }
        }
        allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
    } catch (IOException e) {
        LOG.error("extractTokens error", e);
    }
}

From source file:com.trendmicro.hdfs.webdav.HDFSResource.java

License:Apache License

public void setProxyUser(final String user) throws IOException {
    if (user != null) {
        this.user = UserGroupInformation.createProxyUser(user, UserGroupInformation.getLoginUser());
    }
    if (this.user == null) {
        this.user = UserGroupInformation.getCurrentUser();
    }
}
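
A hedged usage sketch of the resulting UGI (the Configuration, the path, and the exception handling are illustrative or omitted details):

// Sketch only: perform the HDFS access as the proxied (or current) user.
FileStatus status = this.user.doAs(new PrivilegedExceptionAction<FileStatus>() {
    @Override
    public FileStatus run() throws IOException {
        FileSystem fs = FileSystem.get(conf);             // conf assumed available on this resource
        return fs.getFileStatus(new Path("/some/path"));  // illustrative path
    }
});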

From source file:com.trendmicro.hdfs.webdav.test.TestCopySimple.java

License:Apache License

@BeforeClass
public static void setup() throws Exception {
    Configuration conf = minicluster.getConfiguration();
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".groups",
            "users");
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".hosts",
            "localhost");
    conf.set("hadoop.webdav.authentication.type", "simple");
    conf.setBoolean("hadoop.webdav.authentication.simple.anonymous.allowed", true);

    minicluster.startMiniCluster(gatewayUser);
    LOG.info("Gateway started on port " + minicluster.getGatewayPort());

    FsPermission.setUMask(conf, new FsPermission((short) 0));

    FileSystem fs = minicluster.getTestFileSystem();
    Path path = new Path("/test");
    assertTrue(fs.mkdirs(path, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
    fs.setOwner(path, ownerUser.getShortUserName(), ownerUser.getGroupNames()[0]);

    ownerUser.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            FileSystem fs = minicluster.getTestFileSystem();
            assertTrue(fs.mkdirs(new Path("/test/rw"),
                    new FsPermission(FsAction.ALL, FsAction.WRITE_EXECUTE, FsAction.NONE)));
            assertTrue(fs.mkdirs(new Path("/test/ro"),
                    new FsPermission(FsAction.READ_EXECUTE, FsAction.NONE, FsAction.NONE)));
            assertTrue(fs.mkdirs(new Path("/test/public"),
                    new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
            FSDataOutputStream os = fs.create(new Path("/test/rw/file1"),
                    new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE), true, 4096, (short) 1, 65536,
                    null);
            assertNotNull(os);
            os.write(testData.getBytes());
            os.close();
            return null;
        }
    });
}

From source file:com.trendmicro.hdfs.webdav.test.TestDeleteSimple.java

License:Apache License

@BeforeClass
public static void setup() throws Exception {
    Configuration conf = minicluster.getConfiguration();
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".groups",
            "users");
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".hosts",
            "localhost");
    conf.set("hadoop.webdav.authentication.type", "simple");
    conf.setBoolean("hadoop.webdav.authentication.simple.anonymous.allowed", true);

    minicluster.startMiniCluster(gatewayUser);
    LOG.info("Gateway started on port " + minicluster.getGatewayPort());

    FsPermission.setUMask(conf, new FsPermission((short) 0));

    FileSystem fs = minicluster.getTestFileSystem();
    Path path = new Path("/test");
    assertTrue(fs.mkdirs(path, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
    fs.setOwner(path, ownerUser.getShortUserName(), ownerUser.getGroupNames()[0]);

    ownerUser.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            FileSystem fs = minicluster.getTestFileSystem();
            assertTrue(fs.mkdirs(new Path("/test/private"),
                    new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE)));
            assertTrue(fs.mkdirs(new Path("/test/public"),
                    new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
            FSDataOutputStream os = fs.create(new Path("/test/private/file1"),
                    new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE), true, 4096, (short) 1, 65536,
                    null);
            assertNotNull(os);
            os.write(testData.getBytes());
            os.close();
            os = fs.create(new Path("/test/private/file2"),
                    new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE), true, 4096, (short) 1, 65536,
                    null);
            assertNotNull(os);
            os.write(testData.getBytes());
            os.close();
            os = fs.create(new Path("/test/public/file3"),
                    new FsPermission(FsAction.ALL, FsAction.READ, FsAction.READ), true, 4096, (short) 1, 65536,
                    null);
            assertNotNull(os);
            os.write(testData.getBytes());
            os.close();
            os = fs.create(new Path("/test/public/file4"),
                    new FsPermission(FsAction.ALL, FsAction.READ, FsAction.READ), true, 4096, (short) 1, 65536,
                    null);
            assertNotNull(os);
            os.write(testData.getBytes());
            os.close();
            return null;
        }
    });
}

From source file:com.trendmicro.hdfs.webdav.test.TestGetSimple.java

License:Apache License

@BeforeClass
public static void setup() throws Exception {
    Configuration conf = minicluster.getConfiguration();
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".groups",
            "users");
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".hosts",
            "localhost");
    conf.set("hadoop.webdav.authentication.type", "simple");
    conf.setBoolean("hadoop.webdav.authentication.simple.anonymous.allowed", true);

    minicluster.startMiniCluster(gatewayUser);
    LOG.info("Gateway started on port " + minicluster.getGatewayPort());

    FsPermission.setUMask(conf, new FsPermission((short) 0));

    FileSystem fs = minicluster.getTestFileSystem();
    Path path = new Path("/test");
    assertTrue(fs.mkdirs(path, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
    fs.setOwner(path, ownerUser.getShortUserName(), ownerUser.getGroupNames()[0]);

    ownerUser.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            FileSystem fs = minicluster.getTestFileSystem();
            FSDataOutputStream os;
            os = fs.create(new Path("/test/pubdata"),
                    new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE), true, 4096, (short) 1, 65536,
                    null);
            assertNotNull(os);
            os.write(testPublicData.getBytes());
            os.close();
            os = fs.create(new Path("/test/privdata"),
                    new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE), true, 4096, (short) 1, 65536,
                    null);
            assertNotNull(os);
            os.write(testPrivateData.getBytes());
            os.close();
            return null;
        }
    });
}

From source file:com.trendmicro.hdfs.webdav.test.TestMkcolSimple.java

License:Apache License

@BeforeClass
public static void setup() throws Exception {
    Configuration conf = minicluster.getConfiguration();
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".groups",
            "users");
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".hosts",
            "localhost");
    conf.set("hadoop.webdav.authentication.type", "simple");
    conf.setBoolean("hadoop.webdav.authentication.simple.anonymous.allowed", true);

    minicluster.startMiniCluster(gatewayUser);
    LOG.info("Gateway started on port " + minicluster.getGatewayPort());

    FsPermission.setUMask(conf, new FsPermission((short) 0));

    FileSystem fs = minicluster.getTestFileSystem();
    Path path = new Path("/test");
    assertTrue(fs.mkdirs(path, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
    fs.setOwner(path, ownerUser.getShortUserName(), ownerUser.getGroupNames()[0]);

    ownerUser.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            FileSystem fs = minicluster.getTestFileSystem();
            assertTrue(fs.mkdirs(new Path("/test/private"),
                    new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE)));
            assertTrue(fs.mkdirs(new Path("/test/public"),
                    new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
            return null;
        }
    });
}