Example usage for java.security.PrivilegedExceptionAction

Introduction

This page collects example usages of the java.security interface PrivilegedExceptionAction from open source projects.

Prototype

public interface PrivilegedExceptionAction<T> {
    T run() throws Exception;
}
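
The examples below all follow the same shape, so a minimal, self-contained sketch of the pattern is worth showing first: run() may throw any checked exception, AccessController.doPrivileged wraps that exception in a PrivilegedActionException, and the caller unwraps it with getException(). The PrivilegedRead class and its open method are illustrative names, not taken from any of the projects below.

import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

public class PrivilegedRead {

    public static FileInputStream open(final String path) throws FileNotFoundException {
        try {
            // Run the action with this class's privileges, regardless of the caller's.
            return AccessController.doPrivileged(new PrivilegedExceptionAction<FileInputStream>() {
                public FileInputStream run() throws FileNotFoundException {
                    return new FileInputStream(path);
                }
            });
        } catch (PrivilegedActionException e) {
            // getException() returns the checked exception thrown by run().
            throw (FileNotFoundException) e.getException();
        }
    }
}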

Usage

From source file:azkaban.jobtype.HadoopSecureHiveWrapper.java

public static void main(final String[] args) throws Exception {

    String propsFile = System.getenv(ProcessJob.JOB_PROP_ENV);
    Properties prop = new Properties();
    prop.load(new BufferedReader(new FileReader(propsFile)));

    hiveScript = prop.getProperty("hive.script");

    final Configuration conf = new Configuration();

    UserGroupInformation.setConfiguration(conf);
    securityEnabled = UserGroupInformation.isSecurityEnabled();

    if (shouldProxy(prop)) {
        UserGroupInformation proxyUser = null;
        String userToProxy = prop.getProperty("user.to.proxy");
        if (securityEnabled) {
            String filelocation = System.getenv(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION);
            if (filelocation == null) {
                throw new RuntimeException("hadoop token information not set.");
            }
            if (!new File(filelocation).exists()) {
                throw new RuntimeException("hadoop token file doesn't exist.");
            }

            logger.info("Found token file " + filelocation);

            logger.info("Setting " + HadoopSecurityManager.MAPREDUCE_JOB_CREDENTIALS_BINARY + " to "
                    + filelocation);
            System.setProperty(HadoopSecurityManager.MAPREDUCE_JOB_CREDENTIALS_BINARY, filelocation);

            UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
            logger.info("Current logged in user is " + loginUser.getUserName());

            logger.info("Creating proxy user.");
            proxyUser = UserGroupInformation.createProxyUser(userToProxy, loginUser);

            for (Token<?> token : loginUser.getTokens()) {
                proxyUser.addToken(token);
            }
        } else {
            proxyUser = UserGroupInformation.createRemoteUser(userToProxy);
        }

        logger.info("Proxied as user " + userToProxy);

        proxyUser.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                runHive(args);
                return null;
            }
        });

    } else {
        logger.info("Not proxying. ");
        runHive(args);
    }
}

From source file:azkaban.jobtype.HadoopSecureSparkWrapper.java

/**
 * Entry point: a Java wrapper around the spark-submit command.
 *
 * @param args the spark-submit arguments, built by HadoopSparkJob
 * @throws Exception
 */
public static void main(final String[] args) throws Exception {

    Properties jobProps = HadoopSecureWrapperUtils.loadAzkabanProps();
    HadoopConfigurationInjector.injectResources(new Props(null, jobProps));

    if (HadoopSecureWrapperUtils.shouldProxy(jobProps)) {
        String tokenFile = System.getenv(HADOOP_TOKEN_FILE_LOCATION);
        UserGroupInformation proxyUser = HadoopSecureWrapperUtils.setupProxyUser(jobProps, tokenFile, logger);
        proxyUser.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                runSpark(args);
                return null;
            }
        });
    } else {
        runSpark(args);
    }
}

From source file:org.apache.hadoop.mapred.Child.java

public static void main(String[] args) throws Throwable {
    LOG.debug("Child starting");

    final JobConf defaultConf = new JobConf();
    String host = args[0];
    int port = Integer.parseInt(args[1]);
    final InetSocketAddress address = NetUtils.makeSocketAddr(host, port);
    final TaskAttemptID firstTaskid = TaskAttemptID.forName(args[2]);
    final String logLocation = args[3];
    final int SLEEP_LONGER_COUNT = 5;
    int jvmIdInt = Integer.parseInt(args[4]);
    JVMId jvmId = new JVMId(firstTaskid.getJobID(), firstTaskid.isMap(), jvmIdInt);
    String prefix = firstTaskid.isMap() ? "MapTask" : "ReduceTask";

    cwd = System.getenv().get(TaskRunner.HADOOP_WORK_DIR);
    if (cwd == null) {
        throw new IOException("Environment variable " + TaskRunner.HADOOP_WORK_DIR + " is not set");
    }

    // file name is passed thru env
    String jobTokenFile = System.getenv().get(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION);
    Credentials credentials = TokenCache.loadTokens(jobTokenFile, defaultConf);
    LOG.debug("loading token. # keys =" + credentials.numberOfSecretKeys() + "; from file=" + jobTokenFile);

    Token<JobTokenIdentifier> jt = TokenCache.getJobToken(credentials);
    SecurityUtil.setTokenService(jt, address);
    UserGroupInformation current = UserGroupInformation.getCurrentUser();
    current.addToken(jt);

    UserGroupInformation taskOwner = UserGroupInformation.createRemoteUser(firstTaskid.getJobID().toString());
    taskOwner.addToken(jt);

    // Set the credentials
    defaultConf.setCredentials(credentials);

    final TaskUmbilicalProtocol umbilical = taskOwner
            .doAs(new PrivilegedExceptionAction<TaskUmbilicalProtocol>() {
                @Override
                public TaskUmbilicalProtocol run() throws Exception {
                    return (TaskUmbilicalProtocol) RPC.getProxy(TaskUmbilicalProtocol.class,
                            TaskUmbilicalProtocol.versionID, address, defaultConf);
                }
            });

    int numTasksToExecute = -1; //-1 signifies "no limit"
    int numTasksExecuted = 0;
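    // Make a final attempt to sync the task logs when the JVM exits.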
    Runtime.getRuntime().addShutdownHook(new Thread() {
        public void run() {
            try {
                if (taskid != null) {
                    TaskLog.syncLogs(logLocation, taskid, isCleanup, currentJobSegmented);
                }
            } catch (Throwable throwable) {
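                // Best-effort log sync at shutdown; failures here are deliberately ignored.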
            }
        }
    });
    Thread t = new Thread() {
        public void run() {
            //every so often wake up and syncLogs so that we can track
            //logs of the currently running task
            while (true) {
                try {
                    Thread.sleep(5000);
                    if (taskid != null) {
                        TaskLog.syncLogs(logLocation, taskid, isCleanup, currentJobSegmented);
                    }
                } catch (InterruptedException ie) {
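                    // Interrupted while sleeping; loop around and sync again.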
                } catch (IOException iee) {
                    LOG.error("Error in syncLogs: " + iee);
                    System.exit(-1);
                }
            }
        }
    };
    t.setName("Thread for syncLogs");
    t.setDaemon(true);
    t.start();

    String pid = "";
    if (!Shell.WINDOWS) {
        pid = System.getenv().get("JVM_PID");
    }
    JvmContext context = new JvmContext(jvmId, pid);
    int idleLoopCount = 0;
    Task task = null;

    UserGroupInformation childUGI = null;

    final JvmContext jvmContext = context;
    try {
        while (true) {
            taskid = null;
            currentJobSegmented = true;

            JvmTask myTask = umbilical.getTask(context);
            if (myTask.shouldDie()) {
                break;
            } else {
                if (myTask.getTask() == null) {
                    taskid = null;
                    currentJobSegmented = true;

                    if (++idleLoopCount >= SLEEP_LONGER_COUNT) {
                        //we sleep for a bigger interval when we don't receive
                        //tasks for a while
                        Thread.sleep(1500);
                    } else {
                        Thread.sleep(500);
                    }
                    continue;
                }
            }
            idleLoopCount = 0;
            task = myTask.getTask();
            task.setJvmContext(jvmContext);
            taskid = task.getTaskID();

            // Create the JobConf and determine if this job gets segmented task logs
            final JobConf job = new JobConf(task.getJobFile());
            currentJobSegmented = logIsSegmented(job);

            isCleanup = task.isTaskCleanupTask();
            // reset the statistics for the task
            FileSystem.clearStatistics();

            // Set credentials
            job.setCredentials(defaultConf.getCredentials());
            //forcefully turn off caching for localfs. All cached FileSystems
            //are closed during the JVM shutdown. We do certain
            //localfs operations in the shutdown hook, and we don't
            //want the localfs to be "closed"
            job.setBoolean("fs.file.impl.disable.cache", true);

            // set the jobTokenFile into task
            task.setJobTokenSecret(JobTokenSecretManager.createSecretKey(jt.getPassword()));

            // setup the child's mapred-local-dir. The child is now sandboxed and
            // can only see files down and under the attempt dir.
            TaskRunner.setupChildMapredLocalDirs(task, job);

            // setup the child's attempt directories
            localizeTask(task, job, logLocation);

            //setupWorkDir actually sets up the symlinks for the distributed
            //cache. After a task exits we wipe the workdir clean, and hence
            //the symlinks have to be rebuilt.
            TaskRunner.setupWorkDir(job, new File(cwd));

            //create the index file so that the log files 
            //are viewable immediately
            TaskLog.syncLogs(logLocation, taskid, isCleanup, logIsSegmented(job));

            numTasksToExecute = job.getNumTasksToExecutePerJvm();
            assert (numTasksToExecute != 0);

            task.setConf(job);

            // Initiate Java VM metrics
            initMetrics(prefix, jvmId.toString(), job.getSessionId());

            LOG.debug("Creating remote user to execute task: " + job.get("user.name"));
            childUGI = UserGroupInformation.createRemoteUser(job.get("user.name"));
            // Add tokens to new user so that it may execute its task correctly.
            for (Token<?> token : UserGroupInformation.getCurrentUser().getTokens()) {
                childUGI.addToken(token);
            }

            // Create a final reference to the task for the doAs block
            final Task taskFinal = task;
            childUGI.doAs(new PrivilegedExceptionAction<Object>() {
                @Override
                public Object run() throws Exception {
                    try {
                        // use job-specified working directory
                        FileSystem.get(job).setWorkingDirectory(job.getWorkingDirectory());
                        taskFinal.run(job, umbilical); // run the task
                    } finally {
                        TaskLog.syncLogs(logLocation, taskid, isCleanup, logIsSegmented(job));
                        TaskLogsTruncater trunc = new TaskLogsTruncater(defaultConf);
                        trunc.truncateLogs(new JVMInfo(
                                TaskLog.getAttemptDir(taskFinal.getTaskID(), taskFinal.isTaskCleanupTask()),
                                Arrays.asList(taskFinal)));
                    }

                    return null;
                }
            });
            if (numTasksToExecute > 0 && ++numTasksExecuted == numTasksToExecute) {
                break;
            }
        }
    } catch (FSError e) {
        LOG.fatal("FSError from child", e);
        umbilical.fsError(taskid, e.getMessage(), jvmContext);
    } catch (Exception exception) {
        LOG.warn("Error running child", exception);
        try {
            if (task != null) {
                // do cleanup for the task
                if (childUGI == null) {
                    task.taskCleanup(umbilical);
                } else {
                    final Task taskFinal = task;
                    childUGI.doAs(new PrivilegedExceptionAction<Object>() {
                        @Override
                        public Object run() throws Exception {
                            taskFinal.taskCleanup(umbilical);
                            return null;
                        }
                    });
                }
            }
        } catch (Exception e) {
            LOG.info("Error cleaning up", e);
        }
        // Report back any failures, for diagnostic purposes
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        exception.printStackTrace(new PrintStream(baos));
        if (taskid != null) {
            umbilical.reportDiagnosticInfo(taskid, baos.toString(), jvmContext);
        }
    } catch (Throwable throwable) {
        LOG.fatal("Error running child : " + StringUtils.stringifyException(throwable));
        if (taskid != null) {
            Throwable tCause = throwable.getCause();
            String cause = tCause == null ? throwable.getMessage() : StringUtils.stringifyException(tCause);
            umbilical.fatalError(taskid, cause, jvmContext);
        }
    } finally {
        RPC.stopProxy(umbilical);
        shutdownMetrics();
        // Shutting down log4j of the child-vm... 
        // This assumes that on return from Task.run() 
        // there is no more logging done.
        LogManager.shutdown();
    }
}

From source file:org.apache.hadoop.hdfs.tools.DelegationTokenFetcher.java

/**
 * Command-line interface.
 */
public static void main(final String[] args) throws Exception {
    final Configuration conf = new Configuration();
    Options fetcherOptions = new Options();
    fetcherOptions.addOption(WEBSERVICE, true, "HTTPS url to reach the NameNode at");
    fetcherOptions.addOption(CANCEL, false, "cancel the token");
    fetcherOptions.addOption(RENEW, false, "renew the token");
    GenericOptionsParser parser = new GenericOptionsParser(conf, fetcherOptions, args);
    CommandLine cmd = parser.getCommandLine();

    // get options
    final String webUrl = cmd.hasOption(WEBSERVICE) ? cmd.getOptionValue(WEBSERVICE) : null;
    final boolean cancel = cmd.hasOption(CANCEL);
    final boolean renew = cmd.hasOption(RENEW);
    String[] remaining = parser.getRemainingArgs();

    // check option validity
    if (cancel && renew) {
        System.err.println("ERROR: Only specify cancel or renew.");
        printUsage(System.err);
    }
    if (remaining.length != 1 || remaining[0].charAt(0) == '-') {
        System.err.println("ERROR: Must specify exactly one token file");
        printUsage(System.err);
    }
    // default to using the local file system
    FileSystem local = FileSystem.getLocal(conf);
    final Path tokenFile = new Path(local.getWorkingDirectory(), remaining[0]);

    // Login the current user
    final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    ugi.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
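            // Cancel, renew, or fetch a delegation token, depending on the command-line flags.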

            if (cancel) {
                for (Token<?> token : readTokens(tokenFile, conf)) {
                    if (token.isManaged()) {
                        token.cancel(conf);
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("Cancelled token for " + token.getService());
                        }
                    }
                }
            } else if (renew) {
                for (Token<?> token : readTokens(tokenFile, conf)) {
                    if (token.isManaged()) {
                        token.renew(conf);
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("Renewed token for " + token.getService());
                        }
                    }
                }
            } else {
                if (webUrl != null) {
                    getDTfromRemote(webUrl, null).writeTokenStorageFile(tokenFile, conf);
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Fetched token via http for " + webUrl);
                    }
                } else {
                    FileSystem fs = FileSystem.get(conf);
                    Token<?> token = fs.getDelegationToken(ugi.getShortUserName());
                    Credentials cred = new Credentials();
                    cred.addToken(token.getService(), token);
                    cred.writeTokenStorageFile(tokenFile, conf);
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Fetched token for " + fs.getUri() + " into " + tokenFile);
                    }
                }
            }
            return null;
        }
    });

}

From source file:alluxio.yarn.ApplicationMaster.java

/**
 * @param args Command line arguments to launch application master
 */
public static void main(String[] args) {
    Options options = new Options();
    options.addOption("num_workers", true, "Number of Alluxio workers to launch. Default 1");
    options.addOption("master_address", true, "(Required) Address to run Alluxio master");
    options.addOption("resource_path", true, "(Required) HDFS path containing the Application Master");

    try {
        LOG.info("Starting Application Master with args {}", Arrays.toString(args));
        final CommandLine cliParser = new GnuParser().parse(options, args);

        YarnConfiguration conf = new YarnConfiguration();
        UserGroupInformation.setConfiguration(conf);
        if (UserGroupInformation.isSecurityEnabled()) {
            String user = System.getenv("ALLUXIO_USER");
            UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
            for (Token<?> token : UserGroupInformation.getCurrentUser().getTokens()) {
                ugi.addToken(token);
            }
            LOG.info("UserGroupInformation: " + ugi);
            ugi.doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    runApplicationMaster(cliParser);
                    return null;
                }
            });
        } else {
            runApplicationMaster(cliParser);
        }
    } catch (Exception e) {
        LOG.error("Error running Application Master", e);
        System.exit(1);
    }
}

From source file:Main.java

static void execPrivileged(final String[] cmd_array) throws Exception {
    try {
        Process process = AccessController.doPrivileged(new PrivilegedExceptionAction<Process>() {
            public Process run() throws Exception {
                return Runtime.getRuntime().exec(cmd_array);
            }
        });
        // Close unused streams to make sure the child process won't hang
        process.getInputStream().close();
        process.getOutputStream().close();
        process.getErrorStream().close();
    } catch (PrivilegedActionException e) {
        throw (Exception) e.getCause();
    }
}

From source file:Main.java

/**
 * Gets the {@code sun.misc.Unsafe} instance, or {@code null} if not available on this platform.
 */
private static sun.misc.Unsafe getUnsafe() {
    sun.misc.Unsafe unsafe = null;
    try {
        unsafe = AccessController.doPrivileged(new PrivilegedExceptionAction<Unsafe>() {
            @Override
            public sun.misc.Unsafe run() throws Exception {
                Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;

                for (Field f : k.getDeclaredFields()) {
                    f.setAccessible(true);
                    Object x = f.get(null);
                    if (k.isInstance(x)) {
                        return k.cast(x);
                    }
                }
                // The sun.misc.Unsafe field does not exist.
                return null;
            }
        });
    } catch (Throwable e) {
        // Catch Throwable because Google AppEngine raises NoClassDefFoundError for Unsafe.
    }
    return unsafe;
}

From source file:com.paulwithers.bp106.DemoUtils.java

public static String convertObjectToString(final Object o) {
    String retVal_ = "";
    try {
        retVal_ = AccessController.doPrivileged(new PrivilegedExceptionAction<String>() {
            public String run() throws Exception {
                return ReflectionToStringBuilder.toString(o);
            }
        });
    } catch (AccessControlException e) {
        e.printStackTrace();
    } catch (PrivilegedActionException e) {
        e.printStackTrace();
    }
    return retVal_;
}

From source file:Main.java

/**
 * Gets the value of a static field.
 *
 * @param clazz from which to get the field value
 * @param name the name of the field
 * @return the value of the field.
 * @throws PrivilegedActionException if the field cannot be accessed
 */
static <T> T getStaticFieldValue(final Class<?> clazz, final String name) throws PrivilegedActionException {
    final PrivilegedExceptionAction<T> action = new PrivilegedExceptionAction<T>() {
        @SuppressWarnings("unchecked")
        public T run() throws Exception {
            Field field = clazz.getDeclaredField(name);
            field.setAccessible(true);
            return (T) field.get(null);
        }
    };

    return AccessController.doPrivileged(action);
}

From source file:Main.java

/**
 * Extracts a field from a class using reflection.
 *
 * @param clazz from which to get the field object
 * @param name the name of the field object
 * @return the field object.
 * @throws PrivilegedActionException if the field cannot be accessed
 */
static Field getField(final Class<?> clazz, final String name) throws PrivilegedActionException {
    final PrivilegedExceptionAction<Field> action = new PrivilegedExceptionAction<Field>() {
        public Field run() throws Exception {
            Field field = clazz.getDeclaredField(name);
            field.setAccessible(true);
            return field;
        }
    };

    return AccessController.doPrivileged(action);
}