Example usage for org.apache.hadoop.security UserGroupInformation getCurrentUser

Introduction

This page collects example usages of org.apache.hadoop.security.UserGroupInformation.getCurrentUser(), drawn from Apache Tez source files.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation getCurrentUser() throws IOException 

Document

Return the current user, including any doAs in the current stack.
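
Because getCurrentUser() honors doAs, the value it returns depends on where in the call stack it is invoked. A minimal standalone sketch of this behavior (not taken from the sources below; the user name "testuser" is illustrative):

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;

public class CurrentUserDemo {
    public static void main(String[] args) throws Exception {
        // Outside any doAs block: the process login user.
        System.out.println("login: " + UserGroupInformation.getCurrentUser().getShortUserName());

        // Inside doAs: getCurrentUser() reports the proxied user instead.
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser("testuser");
        ugi.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                System.out.println("doAs:  " + UserGroupInformation.getCurrentUser().getShortUserName());
                return null;
            }
        });
    }
}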

Usage

From source file: org.apache.tez.dag.history.ats.acls.ATSHistoryACLPolicyManager.java

License: Apache License

private void initializeTimelineClient() {
    if (this.conf == null) {
        throw new TezUncheckedException("ATSACLManager not configured");
    }
    if (timelineClient != null) {
        this.timelineClient.stop();
        this.timelineClient = null;
    }
    this.timelineClient = TimelineClient.createTimelineClient();
    this.timelineClient.init(this.conf);
    this.timelineClient.start();
    try {
        this.user = UserGroupInformation.getCurrentUser().getShortUserName();
    } catch (IOException e) {
        throw new TezUncheckedException("Unable to get Current User UGI", e);
    }
}
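
The examples on this page typically pair getCurrentUser() with getShortUserName(). For reference: getUserName() returns the full login name (with Kerberos, a principal such as user/host@REALM), while getShortUserName() applies the configured auth_to_local mapping to produce the local short name. A minimal sketch showing both accessors:

import org.apache.hadoop.security.UserGroupInformation;

public class UserNameSketch {
    public static void main(String[] args) throws Exception {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        // For a simple (non-Kerberos) login these usually print the same value;
        // with Kerberos, getUserName() may be a full principal.
        System.out.println("full:  " + ugi.getUserName());
        System.out.println("short: " + ugi.getShortUserName());
    }
}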

From source file: org.apache.tez.dag.history.ats.acls.ATSV15HistoryACLPolicyManager.java

License: Apache License

private void initializeTimelineClient() {
    if (this.conf == null) {
        throw new TezUncheckedException("ATSACLManager not configured");
    }
    if (timelineClient != null) {
        this.timelineClient.stop();
        this.timelineClient = null;
    }
    if (conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED,
            YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ENABLED)) {
        this.timelineClient = TimelineClient.createTimelineClient();
        this.timelineClient.init(this.conf);
        this.timelineClient.start();
    } else {
        this.timelineClient = null;
        if (conf.get(TezConfiguration.TEZ_HISTORY_LOGGING_SERVICE_CLASS, "")
                .equals(atsHistoryLoggingServiceClassName)) {
            LOG.warn(atsHistoryLoggingServiceClassName + " is disabled due to Timeline Service being disabled, "
                    + YarnConfiguration.TIMELINE_SERVICE_ENABLED + " set to false");
        }
    }
    try {
        this.user = UserGroupInformation.getCurrentUser().getShortUserName();
    } catch (IOException e) {
        throw new TezUncheckedException("Unable to get Current User UGI", e);
    }
}

From source file: org.apache.tez.dag.history.ats.acls.TestATSHistoryV15.java

License: Apache License

@BeforeClass
public static void setup() throws IOException {
    try {
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEST_ROOT_DIR);
        dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).racks(null).build();
        remoteFs = dfsCluster.getFileSystem();
    } catch (IOException io) {
        throw new RuntimeException("problem starting mini dfs cluster", io);
    }

    if (mrrTezCluster == null) {
        try {
            mrrTezCluster = new MiniTezClusterWithTimeline(TestATSHistoryV15.class.getName(), 1, 1, 1, true);
            Configuration conf = new Configuration();
            conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
            conf.set("fs.defaultFS", remoteFs.getUri().toString()); // use HDFS
            conf.setInt("yarn.nodemanager.delete.debug-delay-sec", 20000);
            atsActivePath = new Path("/tmp/ats/active/" + random.nextInt(100000));
            Path atsDonePath = new Path("/tmp/ats/done/" + random.nextInt(100000));
            conf.setDouble(YarnConfiguration.TIMELINE_SERVICE_VERSION, 1.5);

            remoteFs.mkdirs(atsActivePath);
            remoteFs.mkdirs(atsDonePath);

            conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
            conf.set(YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_ACTIVE_DIR,
                    remoteFs.resolvePath(atsActivePath).toString());
            conf.set(YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_DONE_DIR,
                    remoteFs.resolvePath(atsDonePath).toString());

            mrrTezCluster.init(conf);
            mrrTezCluster.start();
        } catch (Throwable e) {
            LOG.info("Failed to start Mini Tez Cluster", e);
        }
    }
    user = UserGroupInformation.getCurrentUser().getShortUserName();
    timelineAddress = mrrTezCluster.getConfig().get(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS);
    if (timelineAddress != null) {
        // Hack to handle bug in MiniYARNCluster handling of webapp address
        timelineAddress = timelineAddress.replace("0.0.0.0", "localhost");
    }
}

From source file: org.apache.tez.dag.history.ats.acls.TestATSHistoryWithACLs.java

License: Apache License

@BeforeClass
public static void setup() throws IOException {
    try {
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEST_ROOT_DIR);
        dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).racks(null).build();
        remoteFs = dfsCluster.getFileSystem();
    } catch (IOException io) {
        throw new RuntimeException("problem starting mini dfs cluster", io);
    }

    if (mrrTezCluster == null) {
        try {
            mrrTezCluster = new MiniTezClusterWithTimeline(TestATSHistoryWithACLs.class.getName(), 1, 1, 1,
                    true);
            Configuration conf = new Configuration();
            conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
            conf.set("fs.defaultFS", remoteFs.getUri().toString()); // use HDFS
            conf.setInt("yarn.nodemanager.delete.debug-delay-sec", 20000);
            mrrTezCluster.init(conf);
            mrrTezCluster.start();
        } catch (Throwable e) {
            LOG.info("Failed to start Mini Tez Cluster", e);
        }
    }
    user = UserGroupInformation.getCurrentUser().getShortUserName();
    timelineAddress = mrrTezCluster.getConfig().get(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS);
    if (timelineAddress != null) {
        // Hack to handle bug in MiniYARNCluster handling of webapp address
        timelineAddress = timelineAddress.replace("0.0.0.0", "localhost");
    }
}

From source file: org.apache.tez.mapreduce.client.ResourceMgrDelegate.java

License: Apache License

public String getStagingAreaDir() throws IOException, InterruptedException {
    //    Path path = new Path(MRJobConstants.JOB_SUBMIT_DIR);
    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    Path path = MRApps.getStagingAreaDir(conf, user);
    LOG.debug("getStagingAreaDir: dir=" + path);
    return path.toString();
}

From source file: org.apache.tez.mapreduce.client.YARNRunner.java

License: Apache License

@Override
public JobStatus getJobStatus(JobID jobID) throws IOException, InterruptedException {
    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    String jobFile = MRApps.getJobFile(conf, user, jobID);
    DAGStatus dagStatus;
    try {
        if (dagClient == null) {
            dagClient = MRTezClient.getDAGClient(TypeConverter.toYarn(jobID).getAppId(), tezConf, null);
        }
        dagStatus = dagClient.getDAGStatus(null);
        return new DAGJobStatus(dagClient.getApplicationReport(), dagStatus, jobFile);
    } catch (TezException e) {
        throw new IOException(e);
    }
}

From source file: org.apache.tez.mapreduce.committer.MROutputCommitter.java

License: Apache License

@Override
public void initialize() throws IOException {
    UserPayload userPayload = getContext().getOutputUserPayload();
    if (!userPayload.hasPayload()) {
        jobConf = new JobConf();
    } else {
        jobConf = new JobConf(TezUtils.createConfFromUserPayload(userPayload));
    }

    // Read all credentials into the credentials instance stored in JobConf.
    jobConf.getCredentials().mergeAll(UserGroupInformation.getCurrentUser().getCredentials());
    jobConf.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID, getContext().getDAGAttemptNumber());
    committer = getOutputCommitter(getContext());
    jobContext = getJobContextFromVertexContext(getContext());
    initialized = true;
}
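
The credential merge seen above reappears in MRInputAMSplitGenerator and MRInputBase below: copying the current user's tokens and secret keys into the JobConf's Credentials is how delegation tokens obtained at submission time become visible to code that later reads jobConf.getCredentials(). A minimal standalone sketch of the pattern:

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.security.UserGroupInformation;

public class CredentialMergeSketch {
    public static void main(String[] args) throws Exception {
        JobConf jobConf = new JobConf();
        // mergeAll adds every token and secret key held by the current UGI
        // without overwriting entries already present in the JobConf.
        jobConf.getCredentials().mergeAll(UserGroupInformation.getCurrentUser().getCredentials());
        System.out.println("tokens carried: " + jobConf.getCredentials().numberOfTokens());
    }
}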

From source file: org.apache.tez.mapreduce.common.MRInputAMSplitGenerator.java

License: Apache License

@Override
public List<Event> initialize() throws Exception {
    Stopwatch sw = null;
    if (LOG.isDebugEnabled()) {
        sw = new Stopwatch().start();
    }
    MRInputUserPayloadProto userPayloadProto = MRInputHelpers
            .parseMRInputPayload(getContext().getInputUserPayload());
    if (LOG.isDebugEnabled()) {
        sw.stop();
        LOG.debug("Time to parse MRInput payload into prot: " + sw.elapsedMillis());
    }
    if (LOG.isDebugEnabled()) {
        sw.reset().start();
    }
    Configuration conf = TezUtils.createConfFromByteString(userPayloadProto.getConfigurationBytes());

    sendSerializedEvents = conf.getBoolean(MRJobConfig.MR_TEZ_INPUT_INITIALIZER_SERIALIZE_EVENT_PAYLOAD,
            MRJobConfig.MR_TEZ_INPUT_INITIALIZER_SERIALIZE_EVENT_PAYLOAD_DEFAULT);
    LOG.info("Emitting serialized splits: " + sendSerializedEvents);
    if (LOG.isDebugEnabled()) {
        sw.stop();
        LOG.debug("Time converting ByteString to configuration: " + sw.elapsedMillis());
    }

    if (LOG.isDebugEnabled()) {
        sw.reset().start();
    }

    int totalResource = getContext().getTotalAvailableResource().getMemory();
    int taskResource = getContext().getVertexTaskResource().getMemory();
    float waves = conf.getFloat(TezMapReduceSplitsGrouper.TEZ_GROUPING_SPLIT_WAVES,
            TezMapReduceSplitsGrouper.TEZ_GROUPING_SPLIT_WAVES_DEFAULT);

    int numTasks = (int) ((totalResource * waves) / taskResource);

    LOG.info("Input " + getContext().getInputName() + " asking for " + numTasks + " tasks. Headroom: "
            + totalResource + " Task Resource: " + taskResource + " waves: " + waves);

    // Read all credentials into the credentials instance stored in JobConf.
    JobConf jobConf = new JobConf(conf);
    jobConf.getCredentials().mergeAll(UserGroupInformation.getCurrentUser().getCredentials());

    InputSplitInfoMem inputSplitInfo = null;
    boolean groupSplits = userPayloadProto.getGroupingEnabled();
    if (groupSplits) {
        LOG.info("Grouping input splits");
        inputSplitInfo = MRInputHelpers.generateInputSplitsToMem(jobConf, true, numTasks);
    } else {
        inputSplitInfo = MRInputHelpers.generateInputSplitsToMem(jobConf, false, 0);
    }
    if (LOG.isDebugEnabled()) {
        sw.stop();
        LOG.debug("Time to create splits to mem: " + sw.elapsedMillis());
    }

    List<Event> events = Lists.newArrayListWithCapacity(inputSplitInfo.getNumTasks() + 1);

    InputConfigureVertexTasksEvent configureVertexEvent = InputConfigureVertexTasksEvent.create(
            inputSplitInfo.getNumTasks(), VertexLocationHint.create(inputSplitInfo.getTaskLocationHints()),
            InputSpecUpdate.getDefaultSinglePhysicalInputSpecUpdate());
    events.add(configureVertexEvent);

    if (sendSerializedEvents) {
        MRSplitsProto splitsProto = inputSplitInfo.getSplitsProto();
        int count = 0;
        for (MRSplitProto mrSplit : splitsProto.getSplitsList()) {
            // Unnecessary array copy, can be avoided by using ByteBuffer instead of a raw array.
            InputDataInformationEvent diEvent = InputDataInformationEvent.createWithSerializedPayload(count++,
                    mrSplit.toByteString().asReadOnlyByteBuffer());
            events.add(diEvent);
        }
    } else {
        int count = 0;
        if (inputSplitInfo.holdsNewFormatSplits()) {
            for (org.apache.hadoop.mapreduce.InputSplit split : inputSplitInfo.getNewFormatSplits()) {
                InputDataInformationEvent diEvent = InputDataInformationEvent.createWithObjectPayload(count++,
                        split);
                events.add(diEvent);
            }
        } else {
            for (org.apache.hadoop.mapred.InputSplit split : inputSplitInfo.getOldFormatSplits()) {
                InputDataInformationEvent diEvent = InputDataInformationEvent.createWithObjectPayload(count++,
                        split);
                events.add(diEvent);
            }
        }
    }

    return events;
}

From source file: org.apache.tez.mapreduce.examples.UnionExample.java

License: Apache License

public boolean run(String inputPath, String outputPath, Configuration conf) throws Exception {
    System.out.println("Running UnionExample");
    // conf and UGI
    TezConfiguration tezConf;
    if (conf != null) {
        tezConf = new TezConfiguration(conf);
    } else {
        tezConf = new TezConfiguration();
    }
    UserGroupInformation.setConfiguration(tezConf);
    String user = UserGroupInformation.getCurrentUser().getShortUserName();

    // staging dir
    FileSystem fs = FileSystem.get(tezConf);
    String stagingDirStr = Path.SEPARATOR + "user" + Path.SEPARATOR + user + Path.SEPARATOR + ".staging"
            + Path.SEPARATOR + Long.toString(System.currentTimeMillis());
    Path stagingDir = new Path(stagingDirStr);
    tezConf.set(TezConfiguration.TEZ_AM_STAGING_DIR, stagingDirStr);
    stagingDir = fs.makeQualified(stagingDir);

    // No need to add jar containing this class as assumed to be part of
    // the tez jars.

    // TEZ-674 Obtain tokens based on the Input / Output paths. For now assuming staging dir
    // is the same filesystem as the one used for Input/Output.

    TezClient tezSession = TezClient.create("UnionExampleSession", tezConf);
    tezSession.start();

    DAGClient dagClient = null;

    try {
        if (fs.exists(new Path(outputPath))) {
            throw new FileAlreadyExistsException("Output directory " + outputPath + " already exists");
        }

        Map<String, LocalResource> localResources = new TreeMap<String, LocalResource>();

        DAG dag = createDAG(fs, tezConf, localResources, stagingDir, inputPath, outputPath);

        tezSession.waitTillReady();
        dagClient = tezSession.submitDAG(dag);

        // monitoring
        DAGStatus dagStatus = dagClient
                .waitForCompletionWithStatusUpdates(EnumSet.of(StatusGetOpts.GET_COUNTERS));
        if (dagStatus.getState() != DAGStatus.State.SUCCEEDED) {
            System.out.println("DAG diagnostics: " + dagStatus.getDiagnostics());
            return false;
        }
        return true;
    } finally {
        fs.delete(stagingDir, true);
        tezSession.stop();
    }
}

From source file: org.apache.tez.mapreduce.input.base.MRInputBase.java

License: Apache License

public List<Event> initialize() throws IOException {
    getContext().requestInitialMemory(0L, null); // mandatory call
    MRRuntimeProtos.MRInputUserPayloadProto mrUserPayload = MRInputHelpers
            .parseMRInputPayload(getContext().getUserPayload());
    boolean isGrouped = mrUserPayload.getGroupingEnabled();
    Preconditions.checkArgument(mrUserPayload.hasSplits() == false,
            "Split information not expected in " + this.getClass().getName());
    Configuration conf = TezUtils.createConfFromByteString(mrUserPayload.getConfigurationBytes());
    this.jobConf = new JobConf(conf);
    useNewApi = this.jobConf.getUseNewMapper();
    if (isGrouped) {
        if (useNewApi) {
            jobConf.set(MRJobConfig.INPUT_FORMAT_CLASS_ATTR,
                    org.apache.hadoop.mapreduce.split.TezGroupedSplitsInputFormat.class.getName());
        } else {
            jobConf.set("mapred.input.format.class",
                    org.apache.hadoop.mapred.split.TezGroupedSplitsInputFormat.class.getName());
        }
    }

    // Add tokens to the jobConf - in case they are accessed within the RR / IF
    jobConf.getCredentials().mergeAll(UserGroupInformation.getCurrentUser().getCredentials());

    TaskAttemptID taskAttemptId = new TaskAttemptID(
            new TaskID(Long.toString(getContext().getApplicationId().getClusterTimestamp()),
                    getContext().getApplicationId().getId(), TaskType.MAP, getContext().getTaskIndex()),
            getContext().getTaskAttemptNumber());

    jobConf.set(MRJobConfig.TASK_ATTEMPT_ID, taskAttemptId.toString());
    jobConf.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID, getContext().getDAGAttemptNumber());

    this.inputRecordCounter = getContext().getCounters().findCounter(TaskCounter.INPUT_RECORDS_PROCESSED);

    return null;
}