Example usage for org.apache.hadoop.security UserGroupInformation doAs

List of usage examples for org.apache.hadoop.security UserGroupInformation doAs

Introduction

On this page you can find example usages of org.apache.hadoop.security UserGroupInformation doAs.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public <T> T doAs(PrivilegedExceptionAction<T> action) throws IOException, InterruptedException 

Document

Run the given action as the user, potentially throwing an exception.
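
Before the real-world usages below, here is a minimal, self-contained sketch of the common pattern (not drawn from any of the projects listed on this page): log in from a keytab to obtain a UserGroupInformation, then run the privileged work inside a PrivilegedExceptionAction passed to doAs. The principal, keytab path, and HDFS path are placeholders.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsExample {
    public static void main(String[] args) throws Exception {
        final Configuration conf = new Configuration();
        // Placeholder principal and keytab; substitute real values for a Kerberos-secured cluster.
        UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
                "service/host@EXAMPLE.COM", "/etc/security/keytabs/service.keytab");
        // Everything inside run() executes as the logged-in principal.
        boolean exists = ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
            @Override
            public Boolean run() throws Exception {
                FileSystem fs = FileSystem.get(conf);
                return fs.exists(new Path("/tmp"));
            }
        });
        System.out.println("/tmp exists: " + exists);
    }
}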

Usage

From source file:org.apache.accumulo.test.replication.KerberosReplicationIT.java

License:Apache License

@Test
public void dataReplicatedToCorrectTable() throws Exception {
    // Login as the root user
    final UserGroupInformation ugi = UserGroupInformation
            .loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().toURI().toString());
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            log.info("testing {}", ugi);
            final KerberosToken token = new KerberosToken();
            final Connector primaryConn = primary.getConnector(rootUser.getPrincipal(), token);
            final Connector peerConn = peer.getConnector(rootUser.getPrincipal(), token);

            ClusterUser replicationUser = kdc.getClientPrincipal(0);

            // Create user for replication to the peer
            peerConn.securityOperations().createLocalUser(replicationUser.getPrincipal(), null);

            primaryConn.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + PEER_NAME,
                    replicationUser.getPrincipal());
            primaryConn.instanceOperations().setProperty(Property.REPLICATION_PEER_KEYTAB.getKey() + PEER_NAME,
                    replicationUser.getKeytab().getAbsolutePath());

            // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
            primaryConn.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + PEER_NAME,
                    ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
                            AccumuloReplicaSystem.buildConfiguration(peerConn.getInstance().getInstanceName(),
                                    peerConn.getInstance().getZooKeepers())));

            String primaryTable1 = "primary", peerTable1 = "peer";

            // Create tables
            primaryConn.tableOperations().create(primaryTable1);
            String masterTableId1 = primaryConn.tableOperations().tableIdMap().get(primaryTable1);
            Assert.assertNotNull(masterTableId1);

            peerConn.tableOperations().create(peerTable1);
            String peerTableId1 = peerConn.tableOperations().tableIdMap().get(peerTable1);
            Assert.assertNotNull(peerTableId1);

            // Grant write permission
            peerConn.securityOperations().grantTablePermission(replicationUser.getPrincipal(), peerTable1,
                    TablePermission.WRITE);

            // Replicate this table to the peerClusterName in a table with the peerTableId table id
            primaryConn.tableOperations().setProperty(primaryTable1, Property.TABLE_REPLICATION.getKey(),
                    "true");
            primaryConn.tableOperations().setProperty(primaryTable1,
                    Property.TABLE_REPLICATION_TARGET.getKey() + PEER_NAME, peerTableId1);

            // Write some data to table1
            BatchWriter bw = primaryConn.createBatchWriter(primaryTable1, new BatchWriterConfig());
            long masterTable1Records = 0l;
            for (int rows = 0; rows < 2500; rows++) {
                Mutation m = new Mutation(primaryTable1 + rows);
                for (int cols = 0; cols < 100; cols++) {
                    String value = Integer.toString(cols);
                    m.put(value, "", value);
                    masterTable1Records++;
                }
                bw.addMutation(m);
            }

            bw.close();

            log.info("Wrote all data to primary cluster");

            Set<String> filesFor1 = primaryConn.replicationOperations().referencedFiles(primaryTable1);

            // Restart the tserver to force a close on the WAL
            for (ProcessReference proc : primary.getProcesses().get(ServerType.TABLET_SERVER)) {
                primary.killProcess(ServerType.TABLET_SERVER, proc);
            }
            primary.exec(TabletServer.class);

            log.info("Restarted the tserver");

            // Read the data -- the tserver is back up and running and tablets are assigned
            Iterators.size(primaryConn.createScanner(primaryTable1, Authorizations.EMPTY).iterator());

            // Wait for both tables to be replicated
            log.info("Waiting for {} for {}", filesFor1, primaryTable1);
            primaryConn.replicationOperations().drain(primaryTable1, filesFor1);

            long countTable = 0l;
            for (Entry<Key, Value> entry : peerConn.createScanner(peerTable1, Authorizations.EMPTY)) {
                countTable++;
                Assert.assertTrue(
                        "Found unexpected key-value" + entry.getKey().toStringNoTruncate() + " "
                                + entry.getValue(),
                        entry.getKey().getRow().toString().startsWith(primaryTable1));
            }

            log.info("Found {} records in {}", countTable, peerTable1);
            Assert.assertEquals(masterTable1Records, countTable);

            return null;
        }
    });
}

From source file:org.apache.accumulo.tserver.replication.AccumuloReplicaSystem.java

License:Apache License

@Override
public Status replicate(final Path p, final Status status, final ReplicationTarget target,
        final ReplicaSystemHelper helper) {
    final Instance localInstance = HdfsZooInstance.getInstance();
    final AccumuloConfiguration localConf = new ServerConfigurationFactory(localInstance).getConfiguration();

    final String principal = getPrincipal(localConf, target);
    final File keytab;
    final String password;
    if (localConf.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED)) {
        String keytabPath = getKeytab(localConf, target);
        keytab = new File(keytabPath);
        if (!keytab.exists() || !keytab.isFile()) {
            log.error("{} is not a regular file. Cannot login to replicate", keytabPath);
            return status;
        }
        password = null;
    } else {
        keytab = null;
        password = getPassword(localConf, target);
    }

    if (null != keytab) {
        try {
            final UserGroupInformation accumuloUgi = UserGroupInformation.getCurrentUser();
            // Get a UGI with the principal + keytab
            UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal,
                    keytab.getAbsolutePath());

            // Run inside a doAs to avoid nuking the Tserver's user
            return ugi.doAs(new PrivilegedAction<Status>() {
                @Override
                public Status run() {
                    KerberosToken token;
                    try {
                        // Do *not* replace the current user
                        token = new KerberosToken(principal, keytab);
                    } catch (IOException e) {
                        log.error("Failed to create KerberosToken", e);
                        return status;
                    }
                    ClientContext peerContext = getContextForPeer(localConf, target, principal, token);
                    return _replicate(p, status, target, helper, localConf, peerContext, accumuloUgi);
                }
            });
        } catch (IOException e) {
            // Can't log in, can't replicate
            log.error("Failed to perform local login", e);
            return status;
        }
    } else {
        // Simple case: make a password token, context and then replicate
        PasswordToken token = new PasswordToken(password);
        ClientContext peerContext = getContextForPeer(localConf, target, principal, token);
        return _replicate(p, status, target, helper, localConf, peerContext, null);
    }
}

From source file:org.apache.accumulo.tserver.replication.AccumuloReplicaSystem.java

License:Apache License

protected Status replicateLogs(ClientContext peerContext, final HostAndPort peerTserver,
        final ReplicationTarget target, final Path p, final Status status, final long sizeLimit,
        final String remoteTableId, final TCredentials tcreds, final ReplicaSystemHelper helper,
        final UserGroupInformation accumuloUgi)
        throws TTransportException, AccumuloException, AccumuloSecurityException {

    log.debug("Replication WAL to peer tserver");
    final Set<Integer> tids;
    final DataInputStream input;
    Span span = Trace.start("Read WAL header");
    span.data("file", p.toString());
    try {
        input = getWalStream(p);
    } catch (LogHeaderIncompleteException e) {
        log.warn(
                "Could not read header from {}, assuming that there is no data present in the WAL, therefore replication is complete",
                p);
        Status newStatus;
        // Bump up the begin to the (infinite) end, trying to be accurate
        if (status.getInfiniteEnd()) {
            newStatus = Status.newBuilder(status).setBegin(Long.MAX_VALUE).build();
        } else {
            newStatus = Status.newBuilder(status).setBegin(status.getEnd()).build();
        }
        span = Trace.start("Update replication table");
        try {
            helper.recordNewStatus(p, newStatus, target);
        } catch (TableNotFoundException tnfe) {
            log.error("Tried to update status in replication table for {} as {}, but the table did not exist",
                    p, ProtobufUtil.toString(newStatus), e);
            throw new RuntimeException("Replication table did not exist, will retry", e);
        } finally {
            span.stop();
        }
        return newStatus;
    } catch (IOException e) {
        log.error("Could not create stream for WAL", e);
        // No data sent (bytes nor records) and no progress made
        return status;
    } finally {
        span.stop();
    }

    log.debug("Skipping unwanted data in WAL");
    span = Trace.start("Consume WAL prefix");
    span.data("file", p.toString());
    try {
        // We want to read all records in the WAL up to the "begin" offset contained in the Status message,
        // building a Set of tids from DEFINE_TABLET events which correspond to table ids for future mutations
        tids = consumeWalPrefix(target, input, p, status, sizeLimit);
    } catch (IOException e) {
        log.warn("Unexpected error consuming file.");
        return status;
    } finally {
        span.stop();
    }

    log.debug("Sending batches of data to peer tserver");

    Status lastStatus = status, currentStatus = status;
    final AtomicReference<Exception> exceptionRef = new AtomicReference<>();
    while (true) {
        // Set some trace info
        span = Trace.start("Replicate WAL batch");
        span.data("Batch size (bytes)", Long.toString(sizeLimit));
        span.data("File", p.toString());
        span.data("Peer instance name", peerContext.getInstance().getInstanceName());
        span.data("Peer tserver", peerTserver.toString());
        span.data("Remote table ID", remoteTableId);

        ReplicationStats replResult;
        try {
            // Read and send a batch of mutations
            replResult = ReplicationClient.executeServicerWithReturn(peerContext, peerTserver,
                    new WalClientExecReturn(target, input, p, currentStatus, sizeLimit, remoteTableId, tcreds,
                            tids));
        } catch (Exception e) {
            log.error("Caught exception replicating data to {} at {}",
                    peerContext.getInstance().getInstanceName(), peerTserver, e);
            throw e;
        } finally {
            span.stop();
        }

        // Catch the overflow
        long newBegin = currentStatus.getBegin() + replResult.entriesConsumed;
        if (newBegin < 0) {
            newBegin = Long.MAX_VALUE;
        }

        currentStatus = Status.newBuilder(currentStatus).setBegin(newBegin).build();

        log.debug("Sent batch for replication of {} to {}, with new Status {}", p, target,
                ProtobufUtil.toString(currentStatus));

        // If we got a different status
        if (!currentStatus.equals(lastStatus)) {
            span = Trace.start("Update replication table");
            try {
                if (null != accumuloUgi) {
                    final Status copy = currentStatus;
                    accumuloUgi.doAs(new PrivilegedAction<Void>() {
                        @Override
                        public Void run() {
                            try {
                                helper.recordNewStatus(p, copy, target);
                            } catch (Exception e) {
                                exceptionRef.set(e);
                            }
                            return null;
                        }
                    });
                    Exception e = exceptionRef.get();
                    if (null != e) {
                        if (e instanceof TableNotFoundException) {
                            throw (TableNotFoundException) e;
                        } else if (e instanceof AccumuloSecurityException) {
                            throw (AccumuloSecurityException) e;
                        } else if (e instanceof AccumuloException) {
                            throw (AccumuloException) e;
                        } else {
                            throw new RuntimeException("Received unexpected exception", e);
                        }
                    }
                } else {
                    helper.recordNewStatus(p, currentStatus, target);
                }
            } catch (TableNotFoundException e) {
                log.error(
                        "Tried to update status in replication table for {} as {}, but the table did not exist",
                        p, ProtobufUtil.toString(currentStatus), e);
                throw new RuntimeException("Replication table did not exist, will retry", e);
            } finally {
                span.stop();
            }

            log.debug("Recorded updated status for {}: {}", p, ProtobufUtil.toString(currentStatus));

            // If we don't have any more work, just quit
            if (!StatusUtil.isWorkRequired(currentStatus)) {
                return currentStatus;
            } else {
                // Otherwise, let it loop and replicate some more data
                lastStatus = currentStatus;
            }
        } else {
            log.debug("Did not replicate any new data for {} to {}, (state was {})", p, target,
                    ProtobufUtil.toString(lastStatus));

            // otherwise, we didn't actually replicate (likely because there was an error sending the data)
            // we can just not record any updates, and it will be picked up again by the work assigner
            return status;
        }
    }
}

From source file:org.apache.accumulo.tserver.TabletServer.java

License:Apache License

public static void main(String[] args) throws IOException {
    try {
        final String app = "tserver";
        Accumulo.setupLogging(app);
        SecurityUtil.serverLogin(SiteConfiguration.getInstance());
        ServerOpts opts = new ServerOpts();
        opts.parseArgs(app, args);
        String hostname = opts.getAddress();
        ServerConfigurationFactory conf = new ServerConfigurationFactory(HdfsZooInstance.getInstance());
        VolumeManager fs = VolumeManagerImpl.get();
        Accumulo.init(fs, conf, app);
        final TabletServer server = new TabletServer(conf, fs);
        server.config(hostname);
        DistributedTrace.enable(hostname, app, conf.getConfiguration());
        if (UserGroupInformation.isSecurityEnabled()) {
            UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
            loginUser.doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() {
                    server.run();
                    return null;
                }
            });
        } else {
            server.run();
        }
    } catch (Exception ex) {
        log.error("Uncaught exception in TabletServer.main, exiting", ex);
        System.exit(1);
    } finally {
        DistributedTrace.disable();
    }
}

From source file:org.apache.ambari.view.slider.SliderAppsViewControllerImpl.java

License:Apache License

private <T> T invokeHDFSClientRunnable(final HDFSClientRunnable<T> runnable,
        final Map<String, String> hadoopConfigs) throws IOException, InterruptedException {
    ClassLoader currentClassLoader = Thread.currentThread().getContextClassLoader();
    Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
    try {
        boolean securityEnabled = Boolean.valueOf(hadoopConfigs.get("security_enabled"));
        final HdfsConfiguration hdfsConfiguration = new HdfsConfiguration();
        for (Entry<String, String> entry : hadoopConfigs.entrySet()) {
            hdfsConfiguration.set(entry.getKey(), entry.getValue());
        }
        UserGroupInformation.setConfiguration(hdfsConfiguration);
        UserGroupInformation sliderUser;
        String loggedInUser = getUserToRunAs(hadoopConfigs);
        if (securityEnabled) {
            String viewPrincipal = getViewParameterValue(PARAM_VIEW_PRINCIPAL);
            String viewPrincipalKeytab = getViewParameterValue(PARAM_VIEW_PRINCIPAL_KEYTAB);
            UserGroupInformation ambariUser = UserGroupInformation
                    .loginUserFromKeytabAndReturnUGI(viewPrincipal, viewPrincipalKeytab);
            if (loggedInUser.equals(ambariUser.getShortUserName())) {
                // HDFS throws an exception when the caller tries to impersonate themselves.
                // User: admin@EXAMPLE.COM is not allowed to impersonate admin
                sliderUser = ambariUser;
            } else {
                sliderUser = UserGroupInformation.createProxyUser(loggedInUser, ambariUser);
            }
        } else {
            sliderUser = UserGroupInformation.getBestUGI(null, loggedInUser);
        }
        try {
            T value = sliderUser.doAs(new PrivilegedExceptionAction<T>() {
                @Override
                public T run() throws Exception {
                    String fsPath = hadoopConfigs.get("fs.defaultFS");
                    FileSystem fs = FileSystem.get(URI.create(fsPath), hdfsConfiguration);
                    try {
                        return runnable.run(fs);
                    } finally {
                        fs.close();
                    }
                }
            });
            return value;
        } catch (UndeclaredThrowableException e) {
            throw e;
        }
    } finally {
        Thread.currentThread().setContextClassLoader(currentClassLoader);
    }
}

From source file:org.apache.ambari.view.slider.SliderAppsViewControllerImpl.java

License:Apache License

private <T> T invokeSliderClientRunnable(final SliderClientContextRunnable<T> runnable)
        throws IOException, InterruptedException, YarnException {
    ClassLoader currentClassLoader = Thread.currentThread().getContextClassLoader();
    Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
    try {
        boolean securityEnabled = Boolean.valueOf(getHadoopConfigs().get("security_enabled"));
        UserGroupInformation.setConfiguration(getSliderClientConfiguration());
        UserGroupInformation sliderUser;
        String loggedInUser = getUserToRunAs();
        if (securityEnabled) {
            String viewPrincipal = getViewParameterValue(PARAM_VIEW_PRINCIPAL);
            String viewPrincipalKeytab = getViewParameterValue(PARAM_VIEW_PRINCIPAL_KEYTAB);
            UserGroupInformation ambariUser = UserGroupInformation
                    .loginUserFromKeytabAndReturnUGI(viewPrincipal, viewPrincipalKeytab);
            if (loggedInUser.equals(ambariUser.getShortUserName())) {
                // HDFS throws an exception when the caller tries to impersonate themselves.
                // User: admin@EXAMPLE.COM is not allowed to impersonate admin
                sliderUser = ambariUser;
            } else {
                sliderUser = UserGroupInformation.createProxyUser(loggedInUser, ambariUser);
            }
        } else {
            sliderUser = UserGroupInformation.getBestUGI(null, loggedInUser);
        }
        try {
            T value = sliderUser.doAs(new PrivilegedExceptionAction<T>() {
                @Override
                public T run() throws Exception {
                    final SliderClient sliderClient = createSliderClient();
                    try {
                        return runnable.run(sliderClient);
                    } finally {
                        destroySliderClient(sliderClient);
                    }
                }
            });
            return value;
        } catch (UndeclaredThrowableException e) {
            Throwable cause = e.getCause();
            if (cause instanceof YarnException) {
                YarnException ye = (YarnException) cause;
                throw ye;
            }
            throw e;
        }
    } finally {
        Thread.currentThread().setContextClassLoader(currentClassLoader);
    }
}

From source file:org.apache.apex.engine.security.TokenRenewer.java

License:Apache License

private long renewTokens(final boolean refresh, boolean checkOnly) throws IOException {
    logger.info("{}", checkOnly ? "Checking renewal" : (refresh ? "Refreshing tokens" : "Renewing tokens"));
    long expiryTime = System.currentTimeMillis() + (refresh ? tokenLifeTime : tokenRenewalInterval);

    final String tokenRenewer = UserGroupInformation.getCurrentUser().getUserName();
    logger.debug("Token renewer {}", tokenRenewer);

    File keyTabFile = null;
    try (FileSystem fs = FileSystem.newInstance(conf)) {
        String destinationDir = FileUtils.getTempDirectoryPath();
        keyTabFile = FSUtil.copyToLocalFileSystem(fs, destinationDir, destinationFile, hdfsKeyTabFile, conf);

        if (principal == null) {
            //principal = UserGroupInformation.getCurrentUser().getUserName();
            principal = UserGroupInformation.getLoginUser().getUserName();
        }
        logger.debug("Principal {}", principal);
        UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal,
                keyTabFile.getAbsolutePath());
        if (!checkOnly) {
            try {
                UserGroupInformation currUGI = UserGroupInformation.createProxyUser(tokenRenewer, ugi);
                currUGI.doAs(new PrivilegedExceptionAction<Object>() {
                    @Override
                    public Object run() throws Exception {

                        if (refresh) {
                            Credentials creds = new Credentials();
                            try (FileSystem fs1 = FileSystem.newInstance(conf)) {
                                logger.info("Refreshing fs tokens");
                                fs1.addDelegationTokens(tokenRenewer, creds);
                                logger.info("Refreshed tokens");
                            }
                            if (renewRMToken) {
                                try (YarnClient yarnClient = StramClientUtils.createYarnClient(conf)) {
                                    logger.info("Refreshing rm tokens");
                                    new StramClientUtils.ClientRMHelper(yarnClient, conf)
                                            .addRMDelegationToken(tokenRenewer, creds);
                                    logger.info("Refreshed tokens");
                                }
                            }
                            credentials.addAll(creds);
                        } else {
                            Collection<Token<? extends TokenIdentifier>> tokens = credentials.getAllTokens();
                            for (Token<? extends TokenIdentifier> token : tokens) {
                                logger.debug("Token {}", token);
                                if (token.getKind().equals(HDFS_TOKEN_KIND) || (renewRMToken
                                        && token.getKind().equals(RMDelegationTokenIdentifier.KIND_NAME))) {
                                    logger.info("Renewing token {}", token.getKind());
                                    token.renew(conf);
                                    logger.info("Renewed token");
                                }
                            }
                        }

                        return null;
                    }
                });
                UserGroupInformation.getCurrentUser().addCredentials(credentials);
            } catch (InterruptedException e) {
                logger.error("Error while renewing tokens ", e);
                expiryTime = System.currentTimeMillis();
            } catch (IOException e) {
                logger.error("Error while renewing tokens ", e);
                expiryTime = System.currentTimeMillis();
            }
        }
        if (logger.isDebugEnabled()) {
            logger.debug("number of tokens: {}", credentials.getAllTokens().size());
            Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
            while (iter.hasNext()) {
                Token<?> token = iter.next();
                logger.debug("updated token: {}", token);
            }
        }
    } finally {
        if (keyTabFile != null) {
            keyTabFile.delete();
        }
    }
    return expiryTime;
}

From source file:org.apache.atlas.security.SecureClientUtils.java

License:Apache License

public static URLConnectionClientHandler getClientConnectionHandler(DefaultClientConfig config,
        org.apache.commons.configuration.Configuration clientConfig, String doAsUser,
        final UserGroupInformation ugi) {
    config.getProperties().put(URLConnectionClientHandler.PROPERTY_HTTP_URL_CONNECTION_SET_METHOD_WORKAROUND,
            true);
    Configuration conf = new Configuration();
    conf.addResource(conf.get(SSLFactory.SSL_CLIENT_CONF_KEY, SecurityProperties.SSL_CLIENT_PROPERTIES));
    UserGroupInformation.setConfiguration(conf);
    final ConnectionConfigurator connConfigurator = newConnConfigurator(conf);
    String authType = "simple";
    if (clientConfig != null) {
        authType = clientConfig.getString("atlas.http.authentication.type", "simple");
    }
    Authenticator authenticator = new PseudoDelegationTokenAuthenticator();
    if (!authType.equals("simple")) {
        authenticator = new KerberosDelegationTokenAuthenticator();
    }
    authenticator.setConnectionConfigurator(connConfigurator);
    final DelegationTokenAuthenticator finalAuthenticator = (DelegationTokenAuthenticator) authenticator;
    final DelegationTokenAuthenticatedURL.Token token = new DelegationTokenAuthenticatedURL.Token();
    HttpURLConnectionFactory httpURLConnectionFactory = null;
    try {
        UserGroupInformation ugiToUse = ugi != null ? ugi : UserGroupInformation.getCurrentUser();
        final UserGroupInformation actualUgi = (ugiToUse
                .getAuthenticationMethod() == UserGroupInformation.AuthenticationMethod.PROXY)
                        ? ugiToUse.getRealUser()
                        : ugiToUse;
        LOG.info("Real User: {}, is from ticket cache? {}", actualUgi, actualUgi.isLoginTicketBased());
        if (StringUtils.isEmpty(doAsUser)) {
            doAsUser = actualUgi.getShortUserName();
        }
        LOG.info("doAsUser: {}", doAsUser);
        final String finalDoAsUser = doAsUser;
        httpURLConnectionFactory = new HttpURLConnectionFactory() {
            @Override
            public HttpURLConnection getHttpURLConnection(final URL url) throws IOException {
                try {
                    return actualUgi.doAs(new PrivilegedExceptionAction<HttpURLConnection>() {
                        @Override
                        public HttpURLConnection run() throws Exception {
                            try {
                                return new DelegationTokenAuthenticatedURL(finalAuthenticator, connConfigurator)
                                        .openConnection(url, token, finalDoAsUser);
                            } catch (Exception e) {
                                throw new IOException(e);
                            }
                        }
                    });
                } catch (Exception e) {
                    if (e instanceof IOException) {
                        throw (IOException) e;
                    } else {
                        throw new IOException(e);
                    }
                }
            }
        };
    } catch (IOException e) {
        LOG.warn("Error obtaining user", e);
    }

    return new URLConnectionClientHandler(httpURLConnectionFactory);
}

From source file:org.apache.atlas.web.security.SSLAndKerberosTest.java

License:Apache License

@BeforeClass
public void setUp() throws Exception {
    jksPath = new Path(Files.createTempDirectory("tempproviders").toString(), "test.jks");
    providerUrl = JavaKeyStoreProvider.SCHEME_NAME + "://file/" + jksPath.toUri();

    String persistDir = TestUtils.getTempDirectory();

    setupKDCAndPrincipals();
    setupCredentials();

    // client will actually only leverage subset of these properties
    final PropertiesConfiguration configuration = getSSLConfiguration(providerUrl);

    TestUtils.writeConfiguration(configuration,
            persistDir + File.separator + ApplicationProperties.APPLICATION_PROPERTIES);

    String confLocation = System.getProperty("atlas.conf");
    URL url;
    if (confLocation == null) {
        url = SSLAndKerberosTest.class.getResource("/" + ApplicationProperties.APPLICATION_PROPERTIES);
    } else {
        url = new File(confLocation, ApplicationProperties.APPLICATION_PROPERTIES).toURI().toURL();
    }
    configuration.load(url);
    configuration.setProperty(TLS_ENABLED, true);
    configuration.setProperty("atlas.http.authentication.enabled", "true");
    configuration.setProperty("atlas.http.authentication.type", "kerberos");
    configuration.setProperty("atlas.http.authentication.kerberos.principal",
            "HTTP/localhost@" + kdc.getRealm());
    configuration.setProperty("atlas.http.authentication.kerberos.keytab", httpKeytabFile.getAbsolutePath());
    configuration.setProperty("atlas.http.authentication.kerberos.name.rules",
            "RULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nDEFAULT");

    TestUtils.writeConfiguration(configuration, persistDir + File.separator + "atlas-application.properties");

    subject = loginTestUser();
    UserGroupInformation.loginUserFromSubject(subject);
    UserGroupInformation proxyUser = UserGroupInformation.createProxyUser("testUser",
            UserGroupInformation.getLoginUser());

    dgiCLient = proxyUser.doAs(new PrivilegedExceptionAction<AtlasClient>() {
        @Override
        public AtlasClient run() throws Exception {
            return new AtlasClient(DGI_URL) {
                @Override
                protected PropertiesConfiguration getClientProperties() {
                    return configuration;
                }
            };
        }
    });

    // save original setting
    originalConf = System.getProperty("atlas.conf");
    System.setProperty("atlas.conf", persistDir);
    secureEmbeddedServer = new TestSecureEmbeddedServer(21443, getWarPath()) {
        @Override
        public PropertiesConfiguration getConfiguration() {
            return configuration;
        }
    };
    secureEmbeddedServer.getServer().start();
}

From source file:org.apache.blur.command.ExportCommand.java

License:Apache License

@Override
public Long execute(final IndexContext context) throws IOException, InterruptedException {

    // get our blurQuery back
    blurQuery = mapper.readValue(blurQueryString, BlurQuery.class);

    final TableContext tableContext = context.getTableContext();
    final FieldManager fieldManager = tableContext.getFieldManager();
    final org.apache.blur.thrift.generated.Query simpleQuery = blurQuery.query;
    final boolean rowQuery = simpleQuery.rowQuery;
    final Term defaultPrimeDocTerm = tableContext.getDefaultPrimeDocTerm();
    // TODO: get filters working
    Filter queryFilter = null;
    // TODO: get columnFetch to work

    final ScoreType scoreType = ScoreType.CONSTANT;

    // have a query to run, setup file to output to:
    String shard = context.getShard().getShard();
    String uuid = blurQuery.uuid;
    final Path path = new Path(destUri, uuid + "-" + shard + ".json.gz");
    final byte[] newLine = new String("\n").getBytes();

    final AtomicLong exported = new AtomicLong(0);

    LOG.info("start shard: " + shard);

    UserGroupInformation remoteUser = UserGroupInformation.createRemoteUser(user);
    remoteUser.doAs(new PrivilegedExceptionAction<Long>() {

        public Long run() throws Exception {
            // setup query
            Query query;
            try {
                // query = parser.parse(queryStr);
                query = QueryParserUtil.parseQuery(simpleQuery.query, simpleQuery.rowQuery, fieldManager, null,
                        null, scoreType, tableContext);
            } catch (ParseException e) {
                throw new IOException("query could not be parsed correctly", e);
            }

            // setup storage with existing conf
            FileSystem fs = FileSystem.get(tableContext.getConfiguration());
            final OutputStream outputStream = new GZIPOutputStream(fs.create(path, true));

            IndexSearcherCloseable indexSearcher = context.getIndexSearcher();
            indexSearcher.search(query, new Collector() {

                private AtomicReader _reader;
                private OpenBitSet _primeDocBitSet;
                private Bits _liveDocs;

                @Override
                public void collect(int doc) throws IOException {
                    // doc equals primedoc in super query
                    Row row = null;

                    if (rowQuery) {
                        int nextPrimeDoc = _primeDocBitSet.nextSetBit(doc + 1);
                        for (int d = doc; d < nextPrimeDoc; d++) {
                            // was our document marked for deletion?
                            if (_liveDocs != null && !_liveDocs.get(d)) {
                                continue;
                            }
                            Document document = _reader.document(d);
                            BlurThriftRecord record = new BlurThriftRecord();
                            String rowId = RowDocumentUtil.readRecord(document, record);
                            row = new Row(rowId, record);
                        }
                    } else {
                        Document document = _reader.document(doc);
                        BlurThriftRecord record = new BlurThriftRecord();
                        String rowId = RowDocumentUtil.readRecord(document, record);
                        row = new Row(rowId, record);
                    }
                    // record has now been populated...
                    String json = mapper.writeValueAsString(row);
                    // LOG.info(json);
                    outputStream.write(json.getBytes());
                    outputStream.write(newLine);
                    exported.incrementAndGet();
                }

                @Override
                public void setNextReader(AtomicReaderContext context) throws IOException {
                    _reader = context.reader();
                    _liveDocs = _reader.getLiveDocs();
                    _primeDocBitSet = PrimeDocCache.getPrimeDocBitSet(defaultPrimeDocTerm, _reader);
                }

                @Override
                public void setScorer(Scorer scorer) throws IOException {

                }

                @Override
                public boolean acceptsDocsOutOfOrder() {
                    return false;
                }
            });

            outputStream.flush();
            outputStream.close();
            // unused
            return exported.get();
        }
    });

    LOG.info("complete shard: " + shard + " exported: " + exported.get());
    return exported.get();
}