Example usage for java.lang Thread setName

List of usage examples for java.lang Thread setName

Introduction

This page lists usage examples for java.lang.Thread.setName collected from open source projects.

Prototype

public final synchronized void setName(String name) 

Document

Changes the name of this thread to be equal to the argument name.
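
For quick reference, here is a minimal, self-contained sketch of the call before the longer real-world examples below (the thread and names are hypothetical, for illustration only):

public class SetNameExample {
    public static void main(String[] args) throws InterruptedException {
        // Name a worker thread before starting it so it is identifiable in logs and thread dumps.
        Thread worker = new Thread(() -> {
            // A thread may also rename itself via Thread.currentThread().
            Thread.currentThread().setName("worker-renamed");
            System.out.println("Running as: " + Thread.currentThread().getName());
        });
        worker.setName("worker-initial");
        worker.start();
        worker.join();
    }
}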

Usage

From source file:org.apache.hive.spark.client.SparkClientImpl.java

private Thread startDriver(final RpcServer rpcServer, final String clientId, final String secret)
        throws IOException {
    Runnable runnable;
    final String serverAddress = rpcServer.getAddress();
    final String serverPort = String.valueOf(rpcServer.getPort());

    if (conf.containsKey(SparkClientFactory.CONF_KEY_IN_PROCESS)) {
        // Mostly for testing things quickly. Do not do this in production.
        // When invoked in-process, the driver inherits the environment variables of the parent.
        LOG.warn("!!!! Running remote driver in-process. !!!!");
        runnable = new Runnable() {
            @Override
            public void run() {
                List<String> args = Lists.newArrayList();
                args.add("--remote-host");
                args.add(serverAddress);
                args.add("--remote-port");
                args.add(serverPort);
                args.add("--client-id");
                args.add(clientId);
                args.add("--secret");
                args.add(secret);

                for (Map.Entry<String, String> e : conf.entrySet()) {
                    args.add("--conf");
                    args.add(String.format("%s=%s", e.getKey(), conf.get(e.getKey())));
                }
                try {
                    RemoteDriver.main(args.toArray(new String[args.size()]));
                } catch (Exception e) {
                    LOG.error("Error running driver.", e);
                }
            }
        };
    } else {
        // If a Spark installation is provided, use the spark-submit script. Otherwise, call the
        // SparkSubmit class directly, which has some caveats (like having to provide a proper
        // version of Guava on the classpath depending on the deploy mode).
        String sparkHome = Strings.emptyToNull(conf.get(SPARK_HOME_KEY));
        if (sparkHome == null) {
            sparkHome = Strings.emptyToNull(System.getenv(SPARK_HOME_ENV));
        }
        if (sparkHome == null) {
            sparkHome = Strings.emptyToNull(System.getProperty(SPARK_HOME_KEY));
        }
        String sparkLogDir = conf.get("hive.spark.log.dir");
        if (sparkLogDir == null) {
            if (sparkHome == null) {
                sparkLogDir = "./target/";
            } else {
                sparkLogDir = sparkHome + "/logs/";
            }
        }

        String osxTestOpts = "";
        if (Strings.nullToEmpty(System.getProperty("os.name")).toLowerCase().contains("mac")) {
            osxTestOpts = Strings.nullToEmpty(System.getenv(OSX_TEST_OPTS));
        }

        String driverJavaOpts = Joiner.on(" ").skipNulls().join("-Dhive.spark.log.dir=" + sparkLogDir,
                osxTestOpts, conf.get(DRIVER_OPTS_KEY));
        String executorJavaOpts = Joiner.on(" ").skipNulls().join("-Dhive.spark.log.dir=" + sparkLogDir,
                osxTestOpts, conf.get(EXECUTOR_OPTS_KEY));

        // Create a file with all the job properties to be read by spark-submit. Change the
        // file's permissions so that only the owner can read it. This avoids having the
        // connection secret show up in the child process's command line.
        File properties = File.createTempFile("spark-submit.", ".properties");
        if (!properties.setReadable(false) || !properties.setReadable(true, true)) {
            throw new IOException("Cannot change permissions of job properties file.");
        }
        properties.deleteOnExit();

        Properties allProps = new Properties();
        // first load the defaults from spark-defaults.conf if available
        try {
            URL sparkDefaultsUrl = Thread.currentThread().getContextClassLoader()
                    .getResource("spark-defaults.conf");
            if (sparkDefaultsUrl != null) {
                LOG.info("Loading spark defaults: " + sparkDefaultsUrl);
                allProps.load(new ByteArrayInputStream(Resources.toByteArray(sparkDefaultsUrl)));
            }
        } catch (Exception e) {
            String msg = "Exception trying to load spark-defaults.conf: " + e;
            throw new IOException(msg, e);
        }
        // then load the SparkClientImpl config
        for (Map.Entry<String, String> e : conf.entrySet()) {
            allProps.put(e.getKey(), conf.get(e.getKey()));
        }
        allProps.put(SparkClientFactory.CONF_CLIENT_ID, clientId);
        allProps.put(SparkClientFactory.CONF_KEY_SECRET, secret);
        allProps.put(DRIVER_OPTS_KEY, driverJavaOpts);
        allProps.put(EXECUTOR_OPTS_KEY, executorJavaOpts);

        String isTesting = conf.get("spark.testing");
        if (isTesting != null && isTesting.equalsIgnoreCase("true")) {
            String hiveHadoopTestClasspath = Strings.nullToEmpty(System.getenv("HIVE_HADOOP_TEST_CLASSPATH"));
            if (!hiveHadoopTestClasspath.isEmpty()) {
                String extraDriverClasspath = Strings
                        .nullToEmpty((String) allProps.get(DRIVER_EXTRA_CLASSPATH));
                if (extraDriverClasspath.isEmpty()) {
                    allProps.put(DRIVER_EXTRA_CLASSPATH, hiveHadoopTestClasspath);
                } else {
                    extraDriverClasspath = extraDriverClasspath.endsWith(File.pathSeparator)
                            ? extraDriverClasspath
                            : extraDriverClasspath + File.pathSeparator;
                    allProps.put(DRIVER_EXTRA_CLASSPATH, extraDriverClasspath + hiveHadoopTestClasspath);
                }

                String extraExecutorClasspath = Strings
                        .nullToEmpty((String) allProps.get(EXECUTOR_EXTRA_CLASSPATH));
                if (extraExecutorClasspath.isEmpty()) {
                    allProps.put(EXECUTOR_EXTRA_CLASSPATH, hiveHadoopTestClasspath);
                } else {
                    extraExecutorClasspath = extraExecutorClasspath.endsWith(File.pathSeparator)
                            ? extraExecutorClasspath
                            : extraExecutorClasspath + File.pathSeparator;
                    allProps.put(EXECUTOR_EXTRA_CLASSPATH, extraExecutorClasspath + hiveHadoopTestClasspath);
                }
            }
        }

        Writer writer = new OutputStreamWriter(new FileOutputStream(properties), Charsets.UTF_8);
        try {
            allProps.store(writer, "Spark Context configuration");
        } finally {
            writer.close();
        }

        // Define how to pass options to the child process. If launching in client (or local)
        // mode, the driver options need to be passed directly on the command line. Otherwise,
        // SparkSubmit will take care of that for us.
        String master = conf.get("spark.master");
        Preconditions.checkArgument(master != null, "spark.master is not defined.");
        String deployMode = conf.get("spark.submit.deployMode");

        List<String> argv = Lists.newLinkedList();

        if (sparkHome != null) {
            argv.add(new File(sparkHome, "bin/spark-submit").getAbsolutePath());
        } else {
            LOG.info("No spark.home provided, calling SparkSubmit directly.");
            argv.add(new File(System.getProperty("java.home"), "bin/java").getAbsolutePath());

            if (master.startsWith("local") || master.startsWith("mesos")
                    || SparkClientUtilities.isYarnClientMode(master, deployMode)
                    || master.startsWith("spark")) {
                String mem = conf.get("spark.driver.memory");
                if (mem != null) {
                    argv.add("-Xms" + mem);
                    argv.add("-Xmx" + mem);
                }

                String cp = conf.get("spark.driver.extraClassPath");
                if (cp != null) {
                    argv.add("-classpath");
                    argv.add(cp);
                }

                String libPath = conf.get("spark.driver.extraLibPath");
                if (libPath != null) {
                    argv.add("-Djava.library.path=" + libPath);
                }

                String extra = conf.get(DRIVER_OPTS_KEY);
                if (extra != null) {
                    for (String opt : extra.split("[ ]")) {
                        if (!opt.trim().isEmpty()) {
                            argv.add(opt.trim());
                        }
                    }
                }
            }

            argv.add("org.apache.spark.deploy.SparkSubmit");
        }

        if (SparkClientUtilities.isYarnClusterMode(master, deployMode)) {
            String executorCores = conf.get("spark.executor.cores");
            if (executorCores != null) {
                argv.add("--executor-cores");
                argv.add(executorCores);
            }

            String executorMemory = conf.get("spark.executor.memory");
            if (executorMemory != null) {
                argv.add("--executor-memory");
                argv.add(executorMemory);
            }

            String numOfExecutors = conf.get("spark.executor.instances");
            if (numOfExecutors != null) {
                argv.add("--num-executors");
                argv.add(numOfExecutors);
            }
        }
        // The options --principal/--keytab do not work with --proxy-user in spark-submit.sh
        // (see HIVE-15485, SPARK-5493, SPARK-19143), so Hive can only support doAs or
        // delegation token renewal, but not both. Since doAs is the more common case, if both
        // are needed we choose to favor doAs. So when doAs is enabled, we use the kinit command;
        // otherwise, we pass the principal/keytab to Spark to support token renewal for
        // long-running applications.
        if ("kerberos".equals(hiveConf.get(HADOOP_SECURITY_AUTHENTICATION))) {
            String principal = SecurityUtil
                    .getServerPrincipal(hiveConf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL), "0.0.0.0");
            String keyTabFile = hiveConf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_KEYTAB);
            if (StringUtils.isNotBlank(principal) && StringUtils.isNotBlank(keyTabFile)) {
                if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS)) {
                    List<String> kinitArgv = Lists.newLinkedList();
                    kinitArgv.add("kinit");
                    kinitArgv.add(principal);
                    kinitArgv.add("-k");
                    kinitArgv.add("-t");
                    kinitArgv.add(keyTabFile + ";");
                    kinitArgv.addAll(argv);
                    argv = kinitArgv;
                } else {
                    // if doAs is not enabled, we pass the principal/keytab to spark-submit in order to
                    // support the possible delegation token renewal in Spark
                    argv.add("--principal");
                    argv.add(principal);
                    argv.add("--keytab");
                    argv.add(keyTabFile);
                }
            }
        }
        if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS)) {
            try {
                String currentUser = Utils.getUGI().getShortUserName();
                // do not do impersonation in CLI mode
                if (!currentUser.equals(System.getProperty("user.name"))) {
                    LOG.info("Attempting impersonation of " + currentUser);
                    argv.add("--proxy-user");
                    argv.add(currentUser);
                }
            } catch (Exception e) {
                String msg = "Cannot obtain username: " + e;
                throw new IllegalStateException(msg, e);
            }
        }

        argv.add("--properties-file");
        argv.add(properties.getAbsolutePath());
        argv.add("--class");
        argv.add(RemoteDriver.class.getName());

        String jar = "spark-internal";
        if (SparkContext.jarOfClass(this.getClass()).isDefined()) {
            jar = SparkContext.jarOfClass(this.getClass()).get();
        }
        argv.add(jar);

        argv.add("--remote-host");
        argv.add(serverAddress);
        argv.add("--remote-port");
        argv.add(serverPort);

        //hive.spark.* keys are passed down to the RemoteDriver via --conf,
        //as --properties-file contains the spark.* keys that are meant for SparkConf object.
        for (String hiveSparkConfKey : RpcConfiguration.HIVE_SPARK_RSC_CONFIGS) {
            String value = RpcConfiguration.getValue(hiveConf, hiveSparkConfKey);
            argv.add("--conf");
            argv.add(String.format("%s=%s", hiveSparkConfKey, value));
        }

        String cmd = Joiner.on(" ").join(argv);
        LOG.info("Running client driver with argv: {}", cmd);
        ProcessBuilder pb = new ProcessBuilder("sh", "-c", cmd);

        // Prevent hive configurations from being visible in Spark.
        pb.environment().remove("HIVE_HOME");
        pb.environment().remove("HIVE_CONF_DIR");
        // Add credential provider password to the child process's environment
        // In case of Spark the credential provider location is provided in the jobConf when the job is submitted
        String password = getSparkJobCredentialProviderPassword();
        if (password != null) {
            pb.environment().put(Constants.HADOOP_CREDENTIAL_PASSWORD_ENVVAR, password);
        }
        if (isTesting != null) {
            pb.environment().put("SPARK_TESTING", isTesting);
        }

        final Process child = pb.start();
        String threadName = Thread.currentThread().getName();
        final List<String> childErrorLog = Collections.synchronizedList(new ArrayList<String>());
        redirect("RemoteDriver-stdout-redir-" + threadName, new Redirector(child.getInputStream()));
        redirect("RemoteDriver-stderr-redir-" + threadName,
                new Redirector(child.getErrorStream(), childErrorLog));

        runnable = new Runnable() {
            @Override
            public void run() {
                try {
                    int exitCode = child.waitFor();
                    if (exitCode != 0) {
                        StringBuilder errStr = new StringBuilder();
                        synchronized (childErrorLog) {
                            Iterator<String> iter = childErrorLog.iterator();
                            while (iter.hasNext()) {
                                errStr.append(iter.next());
                                errStr.append('\n');
                            }
                        }

                        LOG.warn("Child process exited with code {}", exitCode);
                        rpcServer.cancelClient(clientId,
                                "Child process (spark-submit) exited before connecting back with error log "
                                        + errStr.toString());
                    }
                } catch (InterruptedException ie) {
                    LOG.warn(
                            "Thread waiting on the child process (spark-submit) is interrupted, killing the child process.");
                    rpcServer.cancelClient(clientId,
                            "Thread waiting on the child porcess (spark-submit) is interrupted");
                    Thread.interrupted();
                    child.destroy();
                } catch (Exception e) {
                    String errMsg = "Exception while waiting for child process (spark-submit)";
                    LOG.warn(errMsg, e);
                    rpcServer.cancelClient(clientId, errMsg);
                }
            }
        };
    }

    Thread thread = new Thread(runnable);
    thread.setDaemon(true);
    thread.setName("Driver");
    thread.start();
    return thread;
}

From source file:org.jasig.portal.events.aggr.PortalRawEventsAggregatorImpl.java

private EventProcessingResult doAggregateRawEventsInternal() {
    if (!this.clusterLockService.isLockOwner(AGGREGATION_LOCK_NAME)) {
        throw new IllegalStateException("The cluster lock " + AGGREGATION_LOCK_NAME
                + " must be owned by the current thread and server");
    }

    if (!this.portalEventDimensionPopulator.isCheckedDimensions()) {
        //First time aggregation has happened, run populateDimensions to ensure enough dimension data exists
        final boolean populatedDimensions = this.portalEventAggregationManager.populateDimensions();
        if (!populatedDimensions) {
            this.logger.warn(
                    "Aborting raw event aggregation, populateDimensions returned false so the state of date/time dimensions is unknown");
            return null;
        }
    }

    //Flush any dimension creation before aggregation
    final EntityManager entityManager = this.getEntityManager();
    entityManager.flush();
    entityManager.setFlushMode(FlushModeType.COMMIT);

    final IEventAggregatorStatus eventAggregatorStatus = eventAggregationManagementDao
            .getEventAggregatorStatus(ProcessingType.AGGREGATION, true);

    //Update status with current server name
    final String serverName = this.portalInfoProvider.getUniqueServerName();
    final String previousServerName = eventAggregatorStatus.getServerName();
    if (previousServerName != null && !serverName.equals(previousServerName)) {
        this.logger.debug("Last aggregation run on {} clearing all aggregation caches", previousServerName);
        final Session session = getEntityManager().unwrap(Session.class);
        final Cache cache = session.getSessionFactory().getCache();
        cache.evictEntityRegions();
    }

    eventAggregatorStatus.setServerName(serverName);

    //Calculate date range for aggregation
    DateTime lastAggregated = eventAggregatorStatus.getLastEventDate();
    if (lastAggregated == null) {
        lastAggregated = portalEventDao.getOldestPortalEventTimestamp();

        //No portal events to aggregate, skip aggregation
        if (lastAggregated == null) {
            return new EventProcessingResult(0, null, null, true);
        }

        //First time aggregation has run, initialize the CLEAN_UNCLOSED status to save catch-up time 
        final IEventAggregatorStatus cleanUnclosedStatus = eventAggregationManagementDao
                .getEventAggregatorStatus(ProcessingType.CLEAN_UNCLOSED, true);
        AggregationIntervalInfo oldestMinuteInterval = this.intervalHelper
                .getIntervalInfo(AggregationInterval.MINUTE, lastAggregated);
        cleanUnclosedStatus.setLastEventDate(oldestMinuteInterval.getStart().minusMinutes(1));
        eventAggregationManagementDao.updateEventAggregatorStatus(cleanUnclosedStatus);
    }

    final DateTime newestEventTime = DateTime.now().minus(this.aggregationDelay).secondOfMinute()
            .roundFloorCopy();

    final Thread currentThread = Thread.currentThread();
    final String currentName = currentThread.getName();
    final MutableInt events = new MutableInt();
    final MutableObject lastEventDate = new MutableObject(newestEventTime);

    boolean complete;
    try {
        currentThread.setName(currentName + "-" + lastAggregated + "_" + newestEventTime);

        logger.debug("Starting aggregation of events between {} (inc) and {} (exc)", lastAggregated,
                newestEventTime);

        //Do aggregation, capturing the start and end dates
        eventAggregatorStatus.setLastStart(DateTime.now());

        complete = portalEventDao.aggregatePortalEvents(lastAggregated, newestEventTime,
                this.eventAggregationBatchSize,
                new AggregateEventsHandler(events, lastEventDate, eventAggregatorStatus));

        eventAggregatorStatus.setLastEventDate((DateTime) lastEventDate.getValue());
        eventAggregatorStatus.setLastEnd(DateTime.now());
    } finally {
        currentThread.setName(currentName);
    }

    //Store the results of the aggregation
    eventAggregationManagementDao.updateEventAggregatorStatus(eventAggregatorStatus);

    complete = complete
            && (this.eventAggregationBatchSize <= 0 || events.intValue() < this.eventAggregationBatchSize);
    return new EventProcessingResult(events.intValue(), lastAggregated,
            eventAggregatorStatus.getLastEventDate(), complete);
}

From source file:net.bashtech.geobot.BotManager.java

public BotManager(String propertiesFile) {
    BotManager.setInstance(this);
    _propertiesFile = propertiesFile;
    channelList = new HashMap<String, Channel>();
    blockedChannelList = new HashSet<String>();
    admins = new HashSet<String>();
    modules = new HashSet<BotModule>();
    tagAdmins = new HashSet<String>();
    tagStaff = new HashSet<String>();
    emoteSet = new LinkedList<String>();
    globalBannedWords = new LinkedList<Pattern>();
    emoteSetMapping = new HashMap<String, Set<String>>();
    banPhraseLists = new HashMap<Integer, List<Pattern>>();

    loadGlobalProfile();

    if (useGUI) {
        gui = new BotGUI();
    }

    receiverBot = new ReceiverBot(server, port);
    List<String> outdatedChannels = new LinkedList<String>();
    for (Map.Entry<String, Channel> entry : channelList.entrySet()) {
        String channel = entry.getValue().getChannel();
        if (!JSONUtil.krakenOutdatedChannel(channel.substring(1))
                || receiverBot.getNick().equalsIgnoreCase(channel.substring(1))
                || entry.getValue().staticChannel) {
            log("BM: Joining channel " + channel);
            receiverBot.joinChannel(channel.toLowerCase(), true);
            try {
                // sleep() is a static method; call Thread.sleep directly rather than via an instance.
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        } else {
            outdatedChannels.add(channel);
        }

    }
    receiverBot.startPusher();
    receiverBot.startJoinCheck();
    log("BM: Done Joining Channels");

    // Start EventFeedReader
    if (useEventFeed) {
        Runnable task = new EventFeedReader();
        Thread worker = new Thread(task);
        worker.setName("Reader");
        worker.start();
    }

    // Remove outdatedChannels
    for (String channel : outdatedChannels) {
        log("BM: Removing channel: " + channel);
        this.removeChannel(channel);
    }

    // Start timer to check for bot disconnects
    Timer reconnectTimer = new Timer();
    reconnectTimer.scheduleAtFixedRate(new ReconnectTimer(channelList), 30 * 1000, 30 * 1000);

}

From source file:com.alfaariss.oa.sso.web.profile.logout.LogoutProfile.java

private LogoutState startListeners(ITGT tgt, ISession session, HttpServletRequest servletRequest)
        throws OAException {
    List<Thread> listThreads = new Vector<Thread>();
    //create threads
    LogoutState state = new LogoutState(_sessionFactory, session.getId());

    int iIndex = 0;
    for (ITGTListener listener : _tgtFactory.getListeners()) {
        iIndex++;

        StringBuffer sbRunnableName = new StringBuffer(session.getId());
        sbRunnableName.append("_");
        sbRunnableName.append(iIndex);
        LogoutRunnable runnable = new LogoutRunnable(listener, tgt, state, sbRunnableName.toString());
        Thread tLogout = new Thread(runnable);

        StringBuffer sbThreadname = new StringBuffer("Logout (");
        sbThreadname.append(sbRunnableName.toString());
        sbThreadname.append(") - ");
        sbThreadname.append(tLogout.getName());
        tLogout.setName(sbThreadname.toString());
        listThreads.add(tLogout);
    }

    session.persist();

    _eventLogger.info(new UserEventLogItem(session, servletRequest.getRemoteAddr(),
            UserEvent.USER_LOGOUT_IN_PROGRESS, this, null));

    //start threads
    for (Thread thread : listThreads) {
        thread.start();
        _logger.debug("Started: " + thread.getName());
    }

    _logger.debug("Logout threads started");

    return state;
}

From source file:org.apereo.portal.events.aggr.PortalRawEventsAggregatorImpl.java

private EventProcessingResult doAggregateRawEventsInternal() {
    if (!this.clusterLockService.isLockOwner(AGGREGATION_LOCK_NAME)) {
        throw new IllegalStateException("The cluster lock " + AGGREGATION_LOCK_NAME
                + " must be owned by the current thread and server");
    }

    if (!this.portalEventDimensionPopulator.isCheckedDimensions()) {
        //First time aggregation has happened, run populateDimensions to ensure enough dimension data exists
        final boolean populatedDimensions = this.portalEventAggregationManager.populateDimensions();
        if (!populatedDimensions) {
            this.logger.warn(
                    "Aborting raw event aggregation, populateDimensions returned false so the state of date/time dimensions is unknown");
            return null;
        }
    }

    //Flush any dimension creation before aggregation
    final EntityManager entityManager = this.getEntityManager();
    entityManager.flush();
    entityManager.setFlushMode(FlushModeType.COMMIT);

    final IEventAggregatorStatus eventAggregatorStatus = eventAggregationManagementDao
            .getEventAggregatorStatus(IEventAggregatorStatus.ProcessingType.AGGREGATION, true);

    //Update status with current server name
    final String serverName = this.portalInfoProvider.getUniqueServerName();
    final String previousServerName = eventAggregatorStatus.getServerName();
    if (previousServerName != null && !serverName.equals(previousServerName)) {
        this.logger.debug("Last aggregation run on {} clearing all aggregation caches", previousServerName);
        final Session session = getEntityManager().unwrap(Session.class);
        final Cache cache = session.getSessionFactory().getCache();
        cache.evictEntityRegions();
    }

    eventAggregatorStatus.setServerName(serverName);

    //Calculate date range for aggregation
    DateTime lastAggregated = eventAggregatorStatus.getLastEventDate();
    if (lastAggregated == null) {
        lastAggregated = portalEventDao.getOldestPortalEventTimestamp();

        //No portal events to aggregate, skip aggregation
        if (lastAggregated == null) {
            return new EventProcessingResult(0, null, null, true);
        }

        //First time aggregation has run, initialize the CLEAN_UNCLOSED status to save catch-up time 
        final IEventAggregatorStatus cleanUnclosedStatus = eventAggregationManagementDao
                .getEventAggregatorStatus(IEventAggregatorStatus.ProcessingType.CLEAN_UNCLOSED, true);
        AggregationIntervalInfo oldestMinuteInterval = this.intervalHelper
                .getIntervalInfo(AggregationInterval.MINUTE, lastAggregated);
        cleanUnclosedStatus.setLastEventDate(oldestMinuteInterval.getStart().minusMinutes(1));
        eventAggregationManagementDao.updateEventAggregatorStatus(cleanUnclosedStatus);
    }

    final DateTime newestEventTime = DateTime.now().minus(this.aggregationDelay).secondOfMinute()
            .roundFloorCopy();

    final Thread currentThread = Thread.currentThread();
    final String currentName = currentThread.getName();
    final MutableInt events = new MutableInt();
    final MutableObject lastEventDate = new MutableObject(newestEventTime);

    boolean complete;
    try {
        currentThread.setName(currentName + "-" + lastAggregated + "_" + newestEventTime);

        logger.debug("Starting aggregation of events between {} (inc) and {} (exc)", lastAggregated,
                newestEventTime);

        //Do aggregation, capturing the start and end dates
        eventAggregatorStatus.setLastStart(DateTime.now());

        complete = portalEventDao.aggregatePortalEvents(lastAggregated, newestEventTime,
                this.eventAggregationBatchSize,
                new AggregateEventsHandler(events, lastEventDate, eventAggregatorStatus));

        eventAggregatorStatus.setLastEventDate((DateTime) lastEventDate.getValue());
        eventAggregatorStatus.setLastEnd(DateTime.now());
    } finally {
        currentThread.setName(currentName);
    }

    //Store the results of the aggregation
    eventAggregationManagementDao.updateEventAggregatorStatus(eventAggregatorStatus);

    complete = complete
            && (this.eventAggregationBatchSize <= 0 || events.intValue() < this.eventAggregationBatchSize);
    return new EventProcessingResult(events.intValue(), lastAggregated,
            eventAggregatorStatus.getLastEventDate(), complete);
}

From source file:com.alibaba.wasp.fserver.FServer.java

/**
 * @param fs the FServer to run
 * @param name the name to give the new thread
 * @return the correctly named Thread the FServer is running in
 * @throws java.io.IOException
 */
public static Thread startFServer(final FServer fs, final String name) throws IOException {
    Thread t = new Thread(fs);
    t.setName(name);
    t.start();
    return t;
}

From source file:org.apache.hadoop.hbase.client.TestClientOperationInterrupt.java

@Test
public void testInterrupt50Percent() throws IOException, InterruptedException {
    final AtomicInteger noEx = new AtomicInteger(0);
    final AtomicInteger badEx = new AtomicInteger(0);
    final AtomicInteger noInt = new AtomicInteger(0);
    final AtomicInteger done = new AtomicInteger(0);
    List<Thread> threads = new ArrayList<Thread>();

    final int nbThread = 100;

    for (int i = 0; i < nbThread; i++) {
        Thread t = new Thread() {
            @Override
            public void run() {
                try {
                    HTable ht = new HTable(conf, tableName);
                    Result r = ht.get(new Get(row1));
                    noEx.incrementAndGet();
                } catch (IOException e) {
                    LOG.info("exception", e);
                    if (!(e instanceof InterruptedIOException) || (e instanceof SocketTimeoutException)) {
                        badEx.incrementAndGet();
                    } else {
                        if (Thread.currentThread().isInterrupted()) {
                            noInt.incrementAndGet();
                            LOG.info("The thread should NOT be with the 'interrupt' status.");
                        }
                    }
                } finally {
                    done.incrementAndGet();
                }
            }
        };
        t.setName("TestClientOperationInterrupt #" + i);
        threads.add(t);
        t.start();
    }

    for (int i = 0; i < nbThread / 2; i++) {
        threads.get(i).interrupt();
    }

    boolean stillAlive = true;
    while (stillAlive) {
        stillAlive = false;
        for (Thread t : threads) {
            if (t.isAlive()) {
                stillAlive = true;
            }
        }
        Threads.sleep(10);
    }

    Assert.assertFalse(Thread.currentThread().isInterrupted());

    Assert.assertTrue(" noEx: " + noEx.get() + ", badEx=" + badEx.get() + ", noInt=" + noInt.get(),
            noEx.get() == nbThread / 2 && badEx.get() == 0);

    // The problem here is that we need the server to free its handlers to handle all operations
    while (done.get() != nbThread) {
        Thread.sleep(1);
    }

    HTable ht = new HTable(conf, tableName);
    Result r = ht.get(new Get(row1));
    Assert.assertFalse(r.isEmpty());
}

From source file:org.jumpmind.symmetric.service.impl.DataLoaderService.java

public void start() {
    dataLoadWorkers = (ThreadPoolExecutor) Executors.newCachedThreadPool(new ThreadFactory() {
        final AtomicInteger threadNumber = new AtomicInteger(1);
        final String namePrefix = parameterService.getEngineName().toLowerCase() + "-data-load-worker-";

        public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setName(namePrefix + threadNumber.getAndIncrement());
            t.setDaemon(false);
            if (t.getPriority() != Thread.NORM_PRIORITY) {
                t.setPriority(Thread.NORM_PRIORITY);
            }
            return t;
        }
    });
}
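
A factory like the one above is typically passed to an Executors method so that every pooled thread gets a recognizable name. A minimal sketch of that usage (the name prefix here is hypothetical):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;

public class NamedPoolExample {
    public static void main(String[] args) {
        final AtomicInteger threadNumber = new AtomicInteger(1);
        ExecutorService pool = Executors.newCachedThreadPool(r -> {
            Thread t = new Thread(r);
            // Give each worker a stable, numbered name for logs and thread dumps.
            t.setName("data-load-worker-" + threadNumber.getAndIncrement());
            return t;
        });
        pool.submit(() -> System.out.println(Thread.currentThread().getName()));
        pool.shutdown();
    }
}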

From source file:org.apache.stratos.cloud.controller.internal.CloudControllerServiceComponent.java

protected void activate(final ComponentContext context) {
    if (log.isDebugEnabled()) {
        log.debug("Activating CloudControllerServiceComponent...");
    }
    try {
        executorService = StratosThreadPool.getExecutorService(THREAD_POOL_ID, THREAD_POOL_SIZE);
        scheduler = StratosThreadPool.getScheduledExecutorService(SCHEDULER_THREAD_POOL_ID,
                SCHEDULER_THREAD_POOL_SIZE);

        Runnable cloudControllerActivator = new Runnable() {
            @Override
            public void run() {
                try {
                    ComponentStartUpSynchronizer componentStartUpSynchronizer = ServiceReferenceHolder
                            .getInstance().getComponentStartUpSynchronizer();

                    // Register cloud controller service
                    BundleContext bundleContext = context.getBundleContext();
                    bundleContext.registerService(CloudControllerService.class.getName(),
                            new CloudControllerServiceImpl(), null);

                    if (CloudControllerContext.getInstance().isClustered()) {
                        Thread coordinatorElectorThread = new Thread() {
                            @Override
                            public void run() {
                                ServiceReferenceHolder.getInstance().getHazelcastInstance()
                                        .getLock(CLOUD_CONTROLLER_COORDINATOR_LOCK).lock();

                                String localMemberId = ServiceReferenceHolder.getInstance()
                                        .getHazelcastInstance().getCluster().getLocalMember().getUuid();
                                log.info("Elected member [" + localMemberId + "] "
                                        + "as the cloud controller coordinator of the cluster");

                                CloudControllerContext.getInstance().setCoordinator(true);
                                executeCoordinatorTasks();
                            }
                        };
                        coordinatorElectorThread.setName("Cloud controller coordinator elector thread");
                        executorService.submit(coordinatorElectorThread);
                    } else {
                        executeCoordinatorTasks();
                    }

                    componentStartUpSynchronizer.waitForAxisServiceActivation(Component.CloudController,
                            "CloudControllerService");
                    componentStartUpSynchronizer.setComponentStatus(Component.CloudController, true);
                    log.info("Cloud controller service component activated");
                } catch (Exception e) {
                    log.error("Could not activate cloud controller service component", e);
                }
            }
        };
        Thread cloudControllerActivatorThread = new Thread(cloudControllerActivator);
        cloudControllerActivatorThread.start();
    } catch (Exception e) {
        log.error("Could not activate cloud controller service component", e);
    }
}

From source file:org.dawnsci.commandserver.ui.view.ConsumerView.java

@Override
public void createPartControl(Composite content) {

    content.setLayout(new GridLayout(1, false));
    GridUtils.removeMargins(content);

    this.viewer = new TableViewer(content, SWT.FULL_SELECTION | SWT.MULTI | SWT.V_SCROLL | SWT.H_SCROLL);
    viewer.setUseHashlookup(true);
    viewer.getTable().setHeaderVisible(true);
    viewer.getControl().setLayoutData(new GridData(SWT.FILL, SWT.FILL, true, true));

    createColumns();
    viewer.setContentProvider(createContentProvider());

    consumers = new TreeMap<String, ConsumerBean>(Collections.reverseOrder());
    viewer.setInput(consumers);

    createActions();
    try {
        createTopicListener(getUri());
    } catch (Exception e) {
        logger.error("Cannot listen to topic of command server!", e);
    }

    final Thread job = new Thread(new Runnable() {
        @Override
        public void run() {

            while (!viewer.getTable().isDisposed()) {
                try {
                    Thread.sleep(Constants.NOTIFICATION_FREQUENCY);
                    if (viewer.getControl().isDisposed())
                        return;

                    viewer.getControl().getDisplay().syncExec(new Runnable() {
                        public void run() {
                            viewer.refresh();
                        }
                    });
                } catch (InterruptedException e) {
                    return;
                }
            }
        }
    });

    job.setPriority(Thread.MIN_PRIORITY);
    job.setDaemon(true);
    job.setName("Refresh consumer table");
    job.start();
}