Example usage for java.security.PrivilegedExceptionAction

List of usage examples for java.security.PrivilegedExceptionAction

Introduction

On this page you can find example usages of java.security.PrivilegedExceptionAction collected from open source projects.

Prototype

public interface PrivilegedExceptionAction<T> {
    T run() throws Exception;
}

Usage
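
Before looking at the individual source files, here is a minimal, self-contained sketch of the common pattern (only the JDK is assumed; the class and the looked-up class name are illustrative): the action's run() method may declare checked exceptions, AccessController.doPrivileged wraps any such exception in a PrivilegedActionException, and the caller unwraps it with getException().

import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

public class PrivilegedExceptionActionSketch {
    public static void main(String[] args) {
        try {
            // run() may throw a checked exception; doPrivileged wraps it in PrivilegedActionException.
            Class<?> cls = AccessController.doPrivileged(new PrivilegedExceptionAction<Class<?>>() {
                public Class<?> run() throws ClassNotFoundException {
                    return Class.forName("java.util.ArrayList");
                }
            });
            System.out.println("Loaded " + cls.getName());
        } catch (PrivilegedActionException e) {
            // getException() returns the checked exception thrown by run().
            throw new RuntimeException(e.getException());
        }
    }
}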

From source file:org.apache.axis2.jaxws.runtime.description.marshal.impl.ArtifactProcessor.java

/**
 * Return the Method matching the method name or null
 * @param methodName String containing method name
 * @param cls Class of the class that declares the method
 *
 * @return Method or null
 */
private static Method getMethod(final String methodName, final Class cls) {
    // NOTE: This method must remain private because it uses AccessController
    Method method = null;
    try {
        method = (Method) AccessController.doPrivileged(new PrivilegedExceptionAction() {
            public Object run() {
                Method[] methods = cls.getMethods();
                if (methods != null) {
                    for (int i = 0; i < methods.length; i++) {
                        if (methods[i].getName().equals(methodName)) {
                            return methods[i];
                        }
                    }
                }
                return null;
            }
        });
    } catch (PrivilegedActionException e) {
        // run() declares no checked exceptions, so this should not occur; fall through and return null.
    }

    return method;
}

From source file:com.streamsets.pipeline.stage.destination.hdfs.HdfsTargetConfigBean.java

private boolean validateHadoopFS(Stage.Context context, List<Stage.ConfigIssue> issues) {
    hdfsConfiguration = getHadoopConfiguration(context, issues);

    boolean validHapoopFsUri = true;
    // if hdfsUri is empty, we'll use the default fs uri from hdfs config. no validation required.
    if (!hdfsUri.isEmpty()) {
        if (hdfsUri.contains("://")) {
            try {
                new URI(hdfsUri);
            } catch (Exception ex) {
                issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(), null, Errors.HADOOPFS_22, hdfsUri,
                        ex.toString(), ex));
                validHapoopFsUri = false;
            }

            // Configured URI takes precedence
            hdfsConfiguration.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, hdfsUri);
        } else {
            issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(),
                    HDFS_TARGET_CONFIG_BEAN_PREFIX + "hdfsUri", Errors.HADOOPFS_18, hdfsUri));
            validHapoopFsUri = false;
        }
    } else {
        // HDFS URI is not set; we expect it to be available in the config files
        hdfsUri = hdfsConfiguration.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY);
    }

    // We must have a value for the default FS, otherwise it's a clear misconfiguration
    if (hdfsUri == null || hdfsUri.isEmpty()) {
        issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(), null, Errors.HADOOPFS_49));
        validHapoopFsUri = false;
    }

    StringBuilder logMessage = new StringBuilder();
    try {
        // forcing UGI to initialize with the security settings from the stage
        UserGroupInformation.setConfiguration(hdfsConfiguration);
        Subject subject = Subject.getSubject(AccessController.getContext());
        if (UserGroupInformation.isSecurityEnabled()) {
            loginUgi = UserGroupInformation.getUGIFromSubject(subject);
        } else {
            UserGroupInformation.loginUserFromSubject(subject);
            loginUgi = UserGroupInformation.getLoginUser();
        }
        LOG.info("Subject = {}, Principals = {}, Login UGI = {}", subject,
                subject == null ? "null" : subject.getPrincipals(), loginUgi);
        if (hdfsKerberos) {
            logMessage.append("Using Kerberos");
            if (loginUgi.getAuthenticationMethod() != UserGroupInformation.AuthenticationMethod.KERBEROS) {
                issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(),
                        HDFS_TARGET_CONFIG_BEAN_PREFIX + "hdfsKerberos", Errors.HADOOPFS_00,
                        loginUgi.getAuthenticationMethod(),
                        UserGroupInformation.AuthenticationMethod.KERBEROS));
            }
        } else {
            logMessage.append("Using Simple");
            hdfsConfiguration.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
                    UserGroupInformation.AuthenticationMethod.SIMPLE.name());
        }
        if (validHapoopFsUri) {
            getUGI().doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    try (FileSystem fs = getFileSystemForInitDestroy()) { //to trigger the close
                    }
                    return null;
                }
            });
        }
    } catch (Exception ex) {
        LOG.info("Validation Error: " + Errors.HADOOPFS_01.getMessage(), hdfsUri, ex.toString(), ex);
        issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(), null, Errors.HADOOPFS_01, hdfsUri,
                String.valueOf(ex), ex));

        // We weren't able to connect to the cluster, so mark the URI as invalid
        validHapoopFsUri = false;
    }
    LOG.info("Authentication Config: " + logMessage);
    return validHapoopFsUri;
}

From source file:org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods.java

/** Handle HTTP DELETE request. */
@DELETE
@Path("{" + UriFsPathParam.NAME + ":.*}")
@Produces(MediaType.APPLICATION_JSON)
public Response delete(@Context final UserGroupInformation ugi,
        @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT) final DelegationParam delegation,
        @QueryParam(UserParam.NAME) @DefaultValue(UserParam.DEFAULT) final UserParam username,
        @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) final DoAsParam doAsUser,
        @PathParam(UriFsPathParam.NAME) final UriFsPathParam path,
        @QueryParam(DeleteOpParam.NAME) @DefaultValue(DeleteOpParam.DEFAULT) final DeleteOpParam op,
        @QueryParam(RecursiveParam.NAME) @DefaultValue(RecursiveParam.DEFAULT) final RecursiveParam recursive)
        throws IOException, InterruptedException {

    init(ugi, delegation, username, doAsUser, path, op, recursive);

    return ugi.doAs(new PrivilegedExceptionAction<Response>() {
        @Override
        public Response run() throws IOException {
            REMOTE_ADDRESS.set(request.getRemoteAddr());
            try {

                final NameNode namenode = (NameNode) context.getAttribute("name.node");
                final String fullpath = path.getAbsolutePath();

                switch (op.getValue()) {
                case DELETE: {
                    final boolean b = namenode.delete(fullpath, recursive.getValue());
                    final String js = JsonUtil.toJsonString("boolean", b);
                    return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
                }
                default:
                    throw new UnsupportedOperationException(op + " is not supported");
                }

            } finally {
                REMOTE_ADDRESS.set(null);
            }
        }
    });
}

From source file:org.apache.axis2.jaxws.runtime.description.marshal.impl.ArtifactProcessor.java

/** @return ClassLoader */
private static ClassLoader getContextClassLoader() {
    // NOTE: This method must remain private because it uses AccessController
    ClassLoader cl = null;
    try {
        cl = (ClassLoader) AccessController.doPrivileged(new PrivilegedExceptionAction() {
            public Object run() throws ClassNotFoundException {
                return Thread.currentThread().getContextClassLoader();
            }
        });
    } catch (PrivilegedActionException e) {
        if (log.isDebugEnabled()) {
            log.debug("Exception thrown from AccessController: " + e);
        }
        throw (RuntimeException) e.getException();
    }

    return cl;
}

From source file:org.apache.hadoop.hbase.security.visibility.TestVisibilityLabelsWithDeletes.java

@Test
public void testDeleteFamiliesWithoutAndWithVisibilityLabels() throws Exception {
    final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
    Admin hBaseAdmin = TEST_UTIL.getAdmin();
    HColumnDescriptor colDesc = new HColumnDescriptor(fam);
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(colDesc);
    hBaseAdmin.createTable(desc);
    try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
        Put put = new Put(row1);
        put.addColumn(fam, qual, value);
        put.setCellVisibility(new CellVisibility(CONFIDENTIAL));
        table.put(put);
        Delete d = new Delete(row1);
        // without visibility
        d.addFamily(fam);
        table.delete(d);
        PrivilegedExceptionAction<Void> scanAction = new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                try (Connection connection = ConnectionFactory.createConnection(conf);
                        Table table = connection.getTable(tableName)) {
                    Scan s = new Scan();
                    ResultScanner scanner = table.getScanner(s);
                    Result[] next = scanner.next(3);
                    assertEquals(next.length, 1);
                } catch (Throwable t) {
                    throw new IOException(t);
                }
                return null;
            }
        };
        SUPERUSER.runAs(scanAction);
        d = new Delete(row1);
        // with visibility
        d.setCellVisibility(new CellVisibility(CONFIDENTIAL));
        d.addFamily(fam);
        table.delete(d);
        scanAction = new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                try (Connection connection = ConnectionFactory.createConnection(conf);
                        Table table = connection.getTable(tableName)) {
                    Scan s = new Scan();
                    ResultScanner scanner = table.getScanner(s);
                    Result[] next = scanner.next(3);
                    assertEquals(next.length, 0);
                } catch (Throwable t) {
                    throw new IOException(t);
                }
                return null;
            }
        };
        SUPERUSER.runAs(scanAction);
    }
}

From source file:com.streamsets.pipeline.stage.destination.hdfs.HdfsTargetConfigBean.java

private boolean validateHadoopDir(final Stage.Context context, final String configName,
        final String configGroup, String dirPathTemplate, final List<Stage.ConfigIssue> issues) {
    final AtomicBoolean ok = new AtomicBoolean(true);
    if (!dirPathTemplate.startsWith("/")) {
        issues.add(context.createConfigIssue(configGroup, configName, Errors.HADOOPFS_40));
        ok.set(false);
    } else {
        dirPathTemplate = (dirPathTemplate.isEmpty()) ? "/" : dirPathTemplate;
        try {
            final Path dir = new Path(dirPathTemplate);
            final FileSystem fs = getFileSystemForInitDestroy();
            getUGI().doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    if (!fs.exists(dir)) {
                        try {
                            if (fs.mkdirs(dir)) {
                                ok.set(true);
                            } else {
                                issues.add(
                                        context.createConfigIssue(configGroup, configName, Errors.HADOOPFS_41));
                                ok.set(false);
                            }
                        } catch (IOException ex) {
                            issues.add(context.createConfigIssue(configGroup, configName, Errors.HADOOPFS_42,
                                    ex.toString()));
                            ok.set(false);
                        }
                    } else {
                        try {
                            Path dummy = new Path(dir, "_sdc-dummy-" + UUID.randomUUID().toString());
                            fs.create(dummy).close();
                            fs.delete(dummy, false);
                            ok.set(true);
                        } catch (IOException ex) {
                            issues.add(context.createConfigIssue(configGroup, configName, Errors.HADOOPFS_43,
                                    ex.toString()));
                            ok.set(false);
                        }
                    }
                    return null;
                }
            });
        } catch (Exception ex) {
            issues.add(context.createConfigIssue(configGroup, configName, Errors.HADOOPFS_44, ex.toString()));
            ok.set(false);
        }
    }
    return ok.get();
}

From source file:org.apache.axis2.jaxws.spi.ServiceDelegate.java

/** @return ClassLoader */
private static ClassLoader getClassLoader(final Class cls) {
    // NOTE: This method must remain private because it uses AccessController
    ClassLoader cl = null;
    try {
        cl = (ClassLoader) AccessController.doPrivileged(new PrivilegedExceptionAction() {
            public Object run() throws ClassNotFoundException {
                return cls.getClassLoader();
            }
        });
    } catch (PrivilegedActionException e) {
        if (log.isDebugEnabled()) {
            log.debug("Exception thrown from AccessController: " + e);
        }
        throw ExceptionFactory.makeWebServiceException(e.getException());
    }

    return cl;
}

From source file:org.apache.hadoop.hbase.security.access.TestCellACLWithMultipleVersions.java

@Test
public void testCellPermissionsForPutWithMultipleVersions() throws Exception {
    final byte[] TEST_ROW1 = Bytes.toBytes("r1");
    final byte[] TEST_Q1 = Bytes.toBytes("q1");
    final byte[] TEST_Q2 = Bytes.toBytes("q2");
    final byte[] ZERO = Bytes.toBytes(0L);

    final User user1 = User.createUserForTesting(conf, "user1", new String[0]);
    final User user2 = User.createUserForTesting(conf, "user2", new String[0]);

    verifyAllowed(new AccessTestAction() {
        @Override
        public Object run() throws Exception {
            HTable t = new HTable(conf, TEST_TABLE.getTableName());
            try {
                Map<String, Permission> permsU1andOwner = new HashMap<String, Permission>();
                permsU1andOwner.put(user1.getShortName(),
                        new Permission(Permission.Action.READ, Permission.Action.WRITE));
                permsU1andOwner.put(USER_OWNER.getShortName(),
                        new Permission(Permission.Action.READ, Permission.Action.WRITE));
                Map<String, Permission> permsU2andOwner = new HashMap<String, Permission>();
                permsU2andOwner.put(user2.getShortName(),
                        new Permission(Permission.Action.READ, Permission.Action.WRITE));
                permsU2andOwner.put(USER_OWNER.getShortName(),
                        new Permission(Permission.Action.READ, Permission.Action.WRITE));
                Put p = new Put(TEST_ROW1);
                p.add(TEST_FAMILY1, TEST_Q1, 123, ZERO);
                p.setACL(permsU1andOwner);
                t.put(p);
                p = new Put(TEST_ROW1);
                p.add(TEST_FAMILY1, TEST_Q2, 123, ZERO);
                p.setACL(permsU2andOwner);
                t.put(p);

                p = new Put(TEST_ROW1);
                p.add(TEST_FAMILY1, TEST_Q1, 127, ZERO);
                p.setACL(permsU2andOwner);
                t.put(p);
                p = new Put(TEST_ROW1);
                p.add(TEST_FAMILY1, TEST_Q2, 127, ZERO);
                p.setACL(permsU1andOwner);
                t.put(p);
            } finally {
                t.close();
            }
            return null;
        }
    }, USER_OWNER);

    // New Put with the TEST_Q1 column at TS=125; it covers the old cell at TS=123, on which user1
    // has RW permission. TEST_Q2 is written with the latest TS, so it covers the old cell at TS=127,
    // on which user1 also has RW permission.
    user1.runAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            HTable t = new HTable(conf, TEST_TABLE.getTableName());
            try {
                Put p = new Put(TEST_ROW1);
                p.add(TEST_FAMILY1, TEST_Q1, 125, ZERO);
                p.add(TEST_FAMILY1, TEST_Q2, ZERO);
                p.setACL(user2.getShortName(), new Permission(Permission.Action.READ, Permission.Action.WRITE));
                t.put(p);
            } finally {
                t.close();
            }
            return null;
        }
    });

    // Should be denied.
    user2.runAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            HTable t = new HTable(conf, TEST_TABLE.getTableName());
            try {
                Put p = new Put(TEST_ROW1);
                // column Q1 covers the version at TS=123, for which user2 does not have permission
                p.add(TEST_FAMILY1, TEST_Q1, 124, ZERO);
                p.add(TEST_FAMILY1, TEST_Q2, ZERO);
                t.put(p);
                fail();
            } catch (Exception e) {
                // Expected: the put is denied because user2 lacks permission on the covered cell versions.
            } finally {
                t.close();
            }
            return null;
        }
    });
}

From source file:org.apache.hadoop.mapred.JobClient.java

/**
 * Internal method for submitting jobs to the system.
 * @param job the configuration to submit
 * @return a proxy object for the running job
 * @throws FileNotFoundException
 * @throws ClassNotFoundException
 * @throws InterruptedException
 * @throws IOException
 */
public RunningJob submitJobInternal(final JobConf job)
        throws FileNotFoundException, ClassNotFoundException, InterruptedException, IOException {
    /*
     * configure the command line options correctly on the submitting dfs
     */
    return ugi.doAs(new PrivilegedExceptionAction<RunningJob>() {
        public RunningJob run()
                throws FileNotFoundException, ClassNotFoundException, InterruptedException, IOException {
            JobConf jobCopy = job;
            Path jobStagingArea = JobSubmissionFiles.getStagingDir(JobClient.this, jobCopy);
            JobID jobId = jobSubmitClient.getNewJobId();
            Path submitJobDir = new Path(jobStagingArea, jobId.toString());
            jobCopy.set("mapreduce.job.dir", submitJobDir.toString());
            JobStatus status = null;
            try {
                populateTokenCache(jobCopy, jobCopy.getCredentials());

                copyAndConfigureFiles(jobCopy, submitJobDir);

                // get delegation token for the dir
                TokenCache.obtainTokensForNamenodes(jobCopy.getCredentials(), new Path[] { submitJobDir },
                        jobCopy);

                Path submitJobFile = JobSubmissionFiles.getJobConfPath(submitJobDir);
                int reduces = jobCopy.getNumReduceTasks();
                InetAddress ip = InetAddress.getLocalHost();
                if (ip != null) {
                    job.setJobSubmitHostAddress(ip.getHostAddress());
                    job.setJobSubmitHostName(ip.getHostName());
                }
                JobContext context = new JobContext(jobCopy, jobId);

                // Check the output specification
                if (reduces == 0 ? jobCopy.getUseNewMapper() : jobCopy.getUseNewReducer()) {
                    org.apache.hadoop.mapreduce.OutputFormat<?, ?> output = ReflectionUtils
                            .newInstance(context.getOutputFormatClass(), jobCopy);
                    output.checkOutputSpecs(context);
                } else {
                    jobCopy.getOutputFormat().checkOutputSpecs(fs, jobCopy);
                }

                jobCopy = (JobConf) context.getConfiguration();

                // Create the splits for the job
                FileSystem fs = submitJobDir.getFileSystem(jobCopy);
                LOG.debug("Creating splits at " + fs.makeQualified(submitJobDir));
                int maps = writeSplits(context, submitJobDir);
                jobCopy.setNumMapTasks(maps);

                // write "queue admins of the queue to which job is being submitted"
                // to job file.
                String queue = jobCopy.getQueueName();
                AccessControlList acl = jobSubmitClient.getQueueAdmins(queue);
                jobCopy.set(QueueManager.toFullPropertyName(queue, QueueACL.ADMINISTER_JOBS.getAclName()),
                        acl.getACLString());

                // Write job file to JobTracker's fs        
                FSDataOutputStream out = FileSystem.create(fs, submitJobFile,
                        new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION));

                try {
                    jobCopy.writeXml(out);
                } finally {
                    out.close();
                }
                //
                // Now, actually submit the job (using the submit name)
                //
                printTokens(jobId, jobCopy.getCredentials());
                status = jobSubmitClient.submitJob(jobId, submitJobDir.toString(), jobCopy.getCredentials());
                JobProfile prof = jobSubmitClient.getJobProfile(jobId);
                if (status != null && prof != null) {
                    return new NetworkedJob(status, prof, jobSubmitClient);
                } else {
                    throw new IOException("Could not launch job");
                }
            } finally {
                if (status == null) {
                    LOG.info("Cleaning up the staging area " + submitJobDir);
                    if (fs != null && submitJobDir != null)
                        fs.delete(submitJobDir, true);
                }
            }
        }
    });
}

From source file:org.apache.hadoop.mapred.TaskTracker.java

/**
 * Do the real constructor work here.  It's in a separate method
 * so we can call it again and "recycle" the object after calling
 * close().
 */
synchronized void initialize() throws IOException, InterruptedException {
    this.fConf = new JobConf(originalConf);

    LOG.info("Starting tasktracker with owner as " + getMROwner().getShortUserName());

    localFs = FileSystem.getLocal(fConf);
    if (fConf.get("slave.host.name") != null) {
        this.localHostname = fConf.get("slave.host.name");
    }
    if (localHostname == null) {
        this.localHostname = DNS.getDefaultHost(fConf.get("mapred.tasktracker.dns.interface", "default"),
                fConf.get("mapred.tasktracker.dns.nameserver", "default"));
    }

    final String dirs = localStorage.getDirsString();
    fConf.setStrings(JobConf.MAPRED_LOCAL_DIR_PROPERTY, dirs);
    LOG.info("Good mapred local directories are: " + dirs);
    taskController.setConf(fConf);
    // Setup task controller so that deletion of user dirs happens properly
    taskController.setup(localDirAllocator, localStorage);
    server.setAttribute("conf", fConf);

    deleteUserDirectories(fConf);

    // NB: deleteLocalFiles uses the configured local dirs, but does not 
    // fail if a local directory has failed. 
    fConf.deleteLocalFiles(SUBDIR);
    final FsPermission ttdir = FsPermission.createImmutable((short) 0755);
    for (String s : localStorage.getDirs()) {
        localFs.mkdirs(new Path(s, SUBDIR), ttdir);
    }
    fConf.deleteLocalFiles(TT_PRIVATE_DIR);
    final FsPermission priv = FsPermission.createImmutable((short) 0700);
    for (String s : localStorage.getDirs()) {
        localFs.mkdirs(new Path(s, TT_PRIVATE_DIR), priv);
    }
    fConf.deleteLocalFiles(TT_LOG_TMP_DIR);
    final FsPermission pub = FsPermission.createImmutable((short) 0755);
    for (String s : localStorage.getDirs()) {
        localFs.mkdirs(new Path(s, TT_LOG_TMP_DIR), pub);
    }
    // Create userlogs directory under all good mapred-local-dirs
    for (String s : localStorage.getDirs()) {
        Path userLogsDir = new Path(s, TaskLog.USERLOGS_DIR_NAME);
        if (!localFs.exists(userLogsDir)) {
            localFs.mkdirs(userLogsDir, pub);
        }
    }
    // Clear out state tables
    this.tasks.clear();
    this.runningTasks = new LinkedHashMap<TaskAttemptID, TaskInProgress>();
    this.runningJobs = new TreeMap<JobID, RunningJob>();
    this.mapTotal = 0;
    this.reduceTotal = 0;
    this.acceptNewTasks = true;
    this.status = null;

    this.minSpaceStart = this.fConf.getLong("mapred.local.dir.minspacestart", 0L);
    this.minSpaceKill = this.fConf.getLong("mapred.local.dir.minspacekill", 0L);
    //tweak the probe sample size (make it a function of numCopiers)
    probe_sample_size = this.fConf.getInt("mapred.tasktracker.events.batchsize", 500);

    createInstrumentation();

    // bind address
    String address = NetUtils.getServerAddress(fConf, "mapred.task.tracker.report.bindAddress",
            "mapred.task.tracker.report.port", "mapred.task.tracker.report.address");
    InetSocketAddress socAddr = NetUtils.createSocketAddr(address);
    String bindAddress = socAddr.getHostName();
    int tmpPort = socAddr.getPort();

    this.jvmManager = new JvmManager(this);

    // Set service-level authorization security policy
    if (this.fConf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
        PolicyProvider policyProvider = (PolicyProvider) (ReflectionUtils
                .newInstance(this.fConf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
                        MapReducePolicyProvider.class, PolicyProvider.class), this.fConf));
        ServiceAuthorizationManager.refresh(fConf, policyProvider);
    }

    // RPC initialization
    int max = maxMapSlots > maxReduceSlots ? maxMapSlots : maxReduceSlots;
    //set the num handlers to max*2 since canCommit may wait for the duration
    //of a heartbeat RPC
    this.taskReportServer = RPC.getServer(this, bindAddress, tmpPort, 2 * max, false, this.fConf,
            this.jobTokenSecretManager);
    this.taskReportServer.start();

    // get the assigned address
    this.taskReportAddress = taskReportServer.getListenerAddress();
    this.fConf.set("mapred.task.tracker.report.address",
            taskReportAddress.getHostName() + ":" + taskReportAddress.getPort());
    LOG.info("TaskTracker up at: " + this.taskReportAddress);

    this.taskTrackerName = "tracker_" + localHostname + ":" + taskReportAddress;
    LOG.info("Starting tracker " + taskTrackerName);

    // Initialize DistributedCache
    this.distributedCacheManager = new TrackerDistributedCacheManager(this.fConf, taskController);
    this.distributedCacheManager.startCleanupThread();

    this.jobClient = (InterTrackerProtocol) UserGroupInformation.getLoginUser()
            .doAs(new PrivilegedExceptionAction<Object>() {
                public Object run() throws IOException {
                    return RPC.waitForProxy(InterTrackerProtocol.class, InterTrackerProtocol.versionID,
                            jobTrackAddr, fConf);
                }
            });
    this.justInited = true;
    this.running = true;
    // start the thread that will fetch map task completion events
    this.mapEventsFetcher = new MapEventsFetcherThread();
    mapEventsFetcher.setDaemon(true);
    mapEventsFetcher.setName("Map-events fetcher for all reduce tasks " + "on " + taskTrackerName);
    mapEventsFetcher.start();

    Class<? extends ResourceCalculatorPlugin> clazz = fConf.getClass(TT_RESOURCE_CALCULATOR_PLUGIN, null,
            ResourceCalculatorPlugin.class);
    resourceCalculatorPlugin = ResourceCalculatorPlugin.getResourceCalculatorPlugin(clazz, fConf);
    LOG.info(" Using ResourceCalculatorPlugin : " + resourceCalculatorPlugin);
    initializeMemoryManagement();

    getUserLogManager().clearOldUserLogs(fConf);

    setIndexCache(new IndexCache(this.fConf));

    mapLauncher = new TaskLauncher(TaskType.MAP, maxMapSlots);
    reduceLauncher = new TaskLauncher(TaskType.REDUCE, maxReduceSlots);
    mapLauncher.start();
    reduceLauncher.start();

    // create a localizer instance
    setLocalizer(new Localizer(localFs, localStorage.getDirs()));

    //Start up node health checker service.
    if (shouldStartHealthMonitor(this.fConf)) {
        startHealthMonitor(this.fConf);
    }

    // Start thread to monitor jetty bugs
    startJettyBugMonitor();

    oobHeartbeatOnTaskCompletion = fConf.getBoolean(TT_OUTOFBAND_HEARBEAT, false);
    oobHeartbeatDamper = fConf.getInt(TT_OUTOFBAND_HEARTBEAT_DAMPER, DEFAULT_OOB_HEARTBEAT_DAMPER);
}