Example usage for com.google.common.util.concurrent ThreadFactoryBuilder setNameFormat

List of usage examples for com.google.common.util.concurrent ThreadFactoryBuilder setNameFormat

Introduction

On this page you can find example usage for com.google.common.util.concurrent.ThreadFactoryBuilder.setNameFormat.

Prototype

public ThreadFactoryBuilder setNameFormat(String nameFormat) 

Document

Sets the naming format to use when naming threads (Thread#setName) which are created with this ThreadFactory.
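
For reference, here is a minimal, self-contained sketch of the pattern the examples below share. The class name, the "my-worker-%d" format, and the pool size are illustrative assumptions, not taken from any of the source files. The %d in the name format is replaced by a sequentially increasing index, so the factory produces threads named my-worker-0, my-worker-1, and so on.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class NamedThreadPoolExample {
    public static void main(String[] args) throws InterruptedException {
        // %d is replaced with a sequentially increasing index, so threads
        // created by this factory are named my-worker-0, my-worker-1, ...
        ThreadFactory factory = new ThreadFactoryBuilder()
                .setNameFormat("my-worker-%d")
                .setDaemon(true)
                .build();

        // The pool size of 2 is an arbitrary value for illustration.
        ExecutorService pool = Executors.newFixedThreadPool(2, factory);
        pool.submit(() -> System.out.println(Thread.currentThread().getName()));
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
    }
}

As the PubSubServer example shows, a single ThreadFactoryBuilder instance can also be reused: each call to build() captures whatever name format was most recently set, so calling setNameFormat again before the next build() yields a factory with a different naming scheme.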

Usage

From source file:org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.java

/**
 * Creates a replication manager and sets the watch on all the other registered region servers
 * @param replicationQueues the interface for manipulating replication queues
 * @param replicationPeers
 * @param replicationTracker
 * @param conf the configuration to use
 * @param stopper the stopper object for this region server
 * @param fs the file system to use
 * @param logDir the directory that contains all hlog directories of live RSs
 * @param oldLogDir the directory where old logs are archived
 * @param clusterId
 */
public ReplicationSourceManager(final ReplicationQueues replicationQueues,
        final ReplicationPeers replicationPeers, final ReplicationTracker replicationTracker,
        final Configuration conf, final Stoppable stopper, final FileSystem fs, final Path logDir,
        final Path oldLogDir, final UUID clusterId) {
    this.sources = new ArrayList<ReplicationSourceInterface>();
    this.replicationQueues = replicationQueues;
    this.replicationPeers = replicationPeers;
    this.replicationTracker = replicationTracker;
    this.stopper = stopper;
    this.hlogsById = new HashMap<String, SortedSet<String>>();
    this.oldsources = new ArrayList<ReplicationSourceInterface>();
    this.conf = conf;
    this.fs = fs;
    this.logDir = logDir;
    this.oldLogDir = oldLogDir;
    this.sleepBeforeFailover = conf.getLong("replication.sleep.before.failover", 2000);
    this.clusterId = clusterId;
    this.replicationTracker.registerListener(this);
    this.replicationPeers.getAllPeerIds();
    // It's preferable to failover 1 RS at a time, but with good zk servers
    // more could be processed at the same time.
    int nbWorkers = conf.getInt("replication.executor.workers", 1);
    // use a short 100ms sleep since this could be done inline with a RS startup
    // even if we fail, other region servers can take care of it
    this.executor = new ThreadPoolExecutor(nbWorkers, nbWorkers, 100, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>());
    ThreadFactoryBuilder tfb = new ThreadFactoryBuilder();
    tfb.setNameFormat("ReplicationExecutor-%d");
    this.executor.setThreadFactory(tfb.build());
    this.rand = new Random();
}

From source file:org.apache.hedwig.server.netty.PubSubServer.java

public void start() throws Exception {
    final SynchronousQueue<Either<Object, Exception>> queue = new SynchronousQueue<Either<Object, Exception>>();

    new Thread(tg, new Runnable() {
        @Override
        public void run() {
            try {
                // Since zk is needed by almost everyone, try to see if we
                // need that first
                ThreadFactoryBuilder tfb = new ThreadFactoryBuilder();
                scheduler = Executors.newSingleThreadScheduledExecutor(
                        tfb.setNameFormat("PubSubServerScheduler-%d").build());
                serverChannelFactory = new NioServerSocketChannelFactory(
                        Executors.newCachedThreadPool(tfb.setNameFormat("PubSub-Server-NIOBoss-%d").build()),
                        Executors.newCachedThreadPool(tfb.setNameFormat("PubSub-Server-NIOWorker-%d").build()));
                clientChannelFactory = new NioClientSocketChannelFactory(
                        Executors.newCachedThreadPool(tfb.setNameFormat("PubSub-Client-NIOBoss-%d").build()),
                        Executors.newCachedThreadPool(tfb.setNameFormat("PubSub-Client-NIOWorker-%d").build()));

                instantiateZookeeperClient();
                instantiateMetadataManagerFactory();
                tm = instantiateTopicManager();
                pm = instantiatePersistenceManager(tm);
                dm = new FIFODeliveryManager(tm, pm, conf);
                dm.start();

                sm = instantiateSubscriptionManager(tm, pm, dm);
                rm = instantiateRegionManager(pm, scheduler);
                sm.addListener(rm);

                allChannels = new DefaultChannelGroup("hedwig");
                // Initialize the Netty Handlers (used by the
                // UmbrellaHandler) once so they can be shared by
                // both the SSL and non-SSL channels.
                SubscriptionChannelManager subChannelMgr = new SubscriptionChannelManager();
                subChannelMgr.addSubChannelDisconnectedListener((SubChannelDisconnectedListener) dm);
                Map<OperationType, Handler> handlers = initializeNettyHandlers(tm, dm, pm, sm, subChannelMgr);
                // Initialize Netty for the regular non-SSL channels
                initializeNetty(null, handlers, subChannelMgr);
                if (conf.isSSLEnabled()) {
                    initializeNetty(new SslServerContextFactory(conf), handlers, subChannelMgr);
                }
                // register jmx
                registerJMX(subChannelMgr);
            } catch (Exception e) {
                ConcurrencyUtils.put(queue, Either.right(e));
                return;
            }

            ConcurrencyUtils.put(queue, Either.of(new Object(), (Exception) null));
        }

    }).start();

    Either<Object, Exception> either = ConcurrencyUtils.take(queue);
    if (either.left() == null) {
        throw either.right();
    }
}

From source file:org.apache.hadoop.hbase.index.mapreduce.IndexLoadIncrementalHFile.java

/**
 * Perform a bulk load of the given directory into the given pre-existing table. This method is
 * not threadsafe.
 * @param hfofDir the directory that was provided as the output path of a job using
 *          HFileOutputFormat
 * @param table the table to load into
 * @throws TableNotFoundException if table does not yet exist
 */
public void doBulkLoad(Path hfofDir, final HTable table) throws TableNotFoundException, IOException {
    final HConnection conn = table.getConnection();

    if (!conn.isTableAvailable(table.getTableName())) {
        throw new TableNotFoundException(
                "Table " + Bytes.toStringBinary(table.getTableName()) + "is not currently available.");
    }

    // initialize thread pools
    int nrThreads = cfg.getInt("hbase.loadincremental.threads.max", Runtime.getRuntime().availableProcessors());
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat("LoadIncrementalHFiles-%1$d");
    ExecutorService pool = new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(), builder.build());
    ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true);

    // LQI queue does not need to be threadsafe -- all operations on this queue
    // happen in this thread
    Deque<LoadQueueItem> queue = new LinkedList<LoadQueueItem>();
    try {
        discoverLoadQueue(queue, hfofDir);
        int count = 0;

        if (queue.isEmpty()) {
            LOG.warn("Bulk load operation did not find any files to load in " + "directory " + hfofDir.toUri()
                    + ".  Does it contain files in "
                    + "subdirectories that correspond to column family names?");
            return;
        }

        // Assumes that region splits can happen while this occurs.
        while (!queue.isEmpty()) {
            // need to reload split keys each iteration.
            final Pair<byte[][], byte[][]> startEndKeys = table.getStartEndKeys();
            if (count != 0) {
                LOG.info("Split occured while grouping HFiles, retry attempt " + +count + " with "
                        + queue.size() + " files remaining to group or split");
            }

            int maxRetries = cfg.getInt("hbase.bulkload.retries.number", 0);
            if (maxRetries != 0 && count >= maxRetries) {
                LOG.error("Retry attempted " + count + " times without completing, bailing out");
                return;
            }
            count++;

            // Using ByteBuffer for byte[] equality semantics
            Multimap<ByteBuffer, LoadQueueItem> regionGroups = groupOrSplitPhase(table, pool, queue,
                    startEndKeys);

            bulkLoadPhase(table, conn, pool, queue, regionGroups);

            // NOTE: The next iteration's split / group could happen in parallel to
            // atomic bulkloads assuming that there are splits and no merges, and
            // that we can atomically pull out the groups we want to retry.
        }

    } finally {
        pool.shutdown();
        if (queue != null && !queue.isEmpty()) {
            StringBuilder err = new StringBuilder();
            err.append("-------------------------------------------------\n");
            err.append("Bulk load aborted with some files not yet loaded:\n");
            err.append("-------------------------------------------------\n");
            for (LoadQueueItem q : queue) {
                err.append("  ").append(q.hfilePath).append('\n');
            }
            LOG.error(err);
        }
    }
}

From source file:org.apache.hadoop.hbase.regionserver.SplitTransactionImpl.java

/**
 * Creates reference files for the top and bottom halves of the split.
 * @param hstoreFilesToSplit map of store files to create half file references for.
 * @return the number of reference files that were created.
 * @throws IOException
 */
private Pair<Integer, Integer> splitStoreFiles(final Map<byte[], List<StoreFile>> hstoreFilesToSplit)
        throws IOException {
    if (hstoreFilesToSplit == null) {
        // Could be null because close didn't succeed -- for now consider it fatal
        throw new IOException("Close returned empty list of StoreFiles");
    }
    // The following code sets up a thread pool executor with as many slots as
    // there are files to split. It then fires up everything, waits for
    // completion and finally checks for any exception
    int nbFiles = 0;
    for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesToSplit.entrySet()) {
        nbFiles += entry.getValue().size();
    }
    if (nbFiles == 0) {
        // no files need to be split.
        return new Pair<Integer, Integer>(0, 0);
    }
    // Default max #threads to use is the smaller of table's configured number of blocking store
    // files or the available number of logical cores.
    int defMaxThreads = Math.min(
            parent.conf.getInt(HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT),
            Runtime.getRuntime().availableProcessors());
    // Max #threads is the smaller of the number of storefiles or the default max determined above.
    int maxThreads = Math.min(parent.conf.getInt(HConstants.REGION_SPLIT_THREADS_MAX, defMaxThreads), nbFiles);
    LOG.info("Preparing to split " + nbFiles + " storefiles for region " + this.parent + " using " + maxThreads
            + " threads");
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat("StoreFileSplitter-%1$d");
    ThreadFactory factory = builder.build();
    ThreadPoolExecutor threadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool(maxThreads, factory);
    List<Future<Pair<Path, Path>>> futures = new ArrayList<Future<Pair<Path, Path>>>(nbFiles);

    // Split each store file.
    for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesToSplit.entrySet()) {
        for (StoreFile sf : entry.getValue()) {
            StoreFileSplitter sfs = new StoreFileSplitter(entry.getKey(), sf);
            futures.add(threadPool.submit(sfs));
        }
    }
    // Shutdown the pool
    threadPool.shutdown();

    // Wait for all the tasks to finish
    try {
        boolean stillRunning = !threadPool.awaitTermination(this.fileSplitTimeout, TimeUnit.MILLISECONDS);
        if (stillRunning) {
            threadPool.shutdownNow();
            // wait for the thread to shutdown completely.
            while (!threadPool.isTerminated()) {
                Thread.sleep(50);
            }
            throw new IOException(
                    "Took too long to split the" + " files and create the references, aborting split");
        }
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }

    int created_a = 0;
    int created_b = 0;
    // Look for any exception
    for (Future<Pair<Path, Path>> future : futures) {
        try {
            Pair<Path, Path> p = future.get();
            created_a += p.getFirst() != null ? 1 : 0;
            created_b += p.getSecond() != null ? 1 : 0;
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        } catch (ExecutionException e) {
            throw new IOException(e);
        }
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Split storefiles for region " + this.parent + " Daughter A: " + created_a
                + " storefiles, Daughter B: " + created_b + " storefiles.");
    }
    return new Pair<Integer, Integer>(created_a, created_b);
}

From source file:org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.java

/**
 * Perform a bulk load of the given directory into the given
 * pre-existing table.  This method is not threadsafe.
 *
 * @param hfofDir the directory that was provided as the output path
 * of a job using HFileOutputFormat
 * @param table the table to load into
 * @throws TableNotFoundException if table does not yet exist
 */
@SuppressWarnings("deprecation")
public void doBulkLoad(Path hfofDir, final HTable table) throws TableNotFoundException, IOException {
    final HConnection conn = table.getConnection();

    if (!conn.isTableAvailable(table.getName())) {
        throw new TableNotFoundException(
                "Table " + Bytes.toStringBinary(table.getTableName()) + "is not currently available.");
    }

    // initialize thread pools
    int nrThreads = getConf().getInt("hbase.loadincremental.threads.max",
            Runtime.getRuntime().availableProcessors());
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat("LoadIncrementalHFiles-%1$d");
    ExecutorService pool = new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(), builder.build());
    ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true);

    // LQI queue does not need to be threadsafe -- all operations on this queue
    // happen in this thread
    Deque<LoadQueueItem> queue = new LinkedList<LoadQueueItem>();
    try {
        discoverLoadQueue(queue, hfofDir);
        // check whether there is invalid family name in HFiles to be bulkloaded
        Collection<HColumnDescriptor> families = table.getTableDescriptor().getFamilies();
        ArrayList<String> familyNames = new ArrayList<String>();
        for (HColumnDescriptor family : families) {
            familyNames.add(family.getNameAsString());
        }
        ArrayList<String> unmatchedFamilies = new ArrayList<String>();
        for (LoadQueueItem lqi : queue) {
            String familyNameInHFile = Bytes.toString(lqi.family);
            if (!familyNames.contains(familyNameInHFile)) {
                unmatchedFamilies.add(familyNameInHFile);
            }
        }
        if (unmatchedFamilies.size() > 0) {
            String msg = "Unmatched family names found: unmatched family names in HFiles to be bulkloaded: "
                    + unmatchedFamilies + "; valid family names of table "
                    + Bytes.toString(table.getTableName()) + " are: " + familyNames;
            LOG.error(msg);
            throw new IOException(msg);
        }
        int count = 0;

        if (queue.isEmpty()) {
            LOG.warn("Bulk load operation did not find any files to load in " + "directory " + hfofDir.toUri()
                    + ".  Does it contain files in "
                    + "subdirectories that correspond to column family names?");
            return;
        }

        //If using secure bulk load, get source delegation token, and
        //prepare staging directory and token
        if (userProvider.isHBaseSecurityEnabled()) {
            // fs is the source filesystem
            fsDelegationToken.acquireDelegationToken(fs);

            bulkToken = new SecureBulkLoadClient(table).prepareBulkLoad(table.getName());
        }

        // Assumes that region splits can happen while this occurs.
        while (!queue.isEmpty()) {
            // need to reload split keys each iteration.
            final Pair<byte[][], byte[][]> startEndKeys = table.getStartEndKeys();
            if (count != 0) {
                LOG.info("Split occured while grouping HFiles, retry attempt " + +count + " with "
                        + queue.size() + " files remaining to group or split");
            }

            int maxRetries = getConf().getInt("hbase.bulkload.retries.number", 0);
            if (maxRetries != 0 && count >= maxRetries) {
                LOG.error("Retry attempted " + count + " times without completing, bailing out");
                return;
            }
            count++;

            // Using ByteBuffer for byte[] equality semantics
            Multimap<ByteBuffer, LoadQueueItem> regionGroups = groupOrSplitPhase(table, pool, queue,
                    startEndKeys);

            if (!checkHFilesCountPerRegionPerFamily(regionGroups)) {
                // Error is logged inside checkHFilesCountPerRegionPerFamily.
                throw new IOException("Trying to load more than " + maxFilesPerRegionPerFamily
                        + " hfiles to one family of one region");
            }

            bulkLoadPhase(table, conn, pool, queue, regionGroups);

            // NOTE: The next iteration's split / group could happen in parallel to
            // atomic bulkloads assuming that there are splits and no merges, and
            // that we can atomically pull out the groups we want to retry.
        }

    } finally {
        if (userProvider.isHBaseSecurityEnabled()) {
            fsDelegationToken.releaseDelegationToken();

            if (bulkToken != null) {
                new SecureBulkLoadClient(table).cleanupBulkLoad(bulkToken);
            }
        }
        pool.shutdown();
        if (queue != null && !queue.isEmpty()) {
            StringBuilder err = new StringBuilder();
            err.append("-------------------------------------------------\n");
            err.append("Bulk load aborted with some files not yet loaded:\n");
            err.append("-------------------------------------------------\n");
            for (LoadQueueItem q : queue) {
                err.append("  ").append(q.hfilePath).append('\n');
            }
            LOG.error(err);
        }
    }

    if (queue != null && !queue.isEmpty()) {
        throw new RuntimeException(
                "Bulk load aborted with some files not yet loaded." + "Please check log for more details.");
    }
}

From source file:org.apache.hadoop.hbase.regionserver.IndexSplitTransaction.java

private void splitStoreFiles(final Map<byte[], List<StoreFile>> hstoreFilesToSplit) throws IOException {
    if (hstoreFilesToSplit == null) {
        // Could be null because close didn't succeed -- for now consider it fatal
        throw new IOException("Close returned empty list of StoreFiles");
    }
    // The following code sets up a thread pool executor with as many slots as
    // there are files to split. It then fires up everything, waits for
    // completion and finally checks for any exception
    int nbFiles = hstoreFilesToSplit.size();
    if (nbFiles == 0) {
        // no files need to be split.
        return;
    }
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat("StoreFileSplitter-%1$d");
    ThreadFactory factory = builder.build();
    ThreadPoolExecutor threadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool(nbFiles, factory);
    List<Future<Void>> futures = new ArrayList<Future<Void>>(nbFiles);

    // Split each store file.
    for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesToSplit.entrySet()) {
        for (StoreFile sf : entry.getValue()) {
            StoreFileSplitter sfs = new StoreFileSplitter(entry.getKey(), sf);
            futures.add(threadPool.submit(sfs));
        }
    }
    // Shutdown the pool
    threadPool.shutdown();

    // Wait for all the tasks to finish
    try {
        boolean stillRunning = !threadPool.awaitTermination(this.fileSplitTimeout, TimeUnit.MILLISECONDS);
        if (stillRunning) {
            threadPool.shutdownNow();
            // wait for the thread to shutdown completely.
            while (!threadPool.isTerminated()) {
                Thread.sleep(50);
            }
            throw new IOException(
                    "Took too long to split the" + " files and create the references, aborting split");
        }
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }

    // Look for any exception
    for (Future<Void> future : futures) {
        try {
            future.get();
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        } catch (ExecutionException e) {
            throw new IOException(e);
        }
    }
}

From source file:org.apache.bookkeeper.client.BookKeeper.java

/**
 * Constructor for use with the builder. Other constructors also use it.
 */
private BookKeeper(ClientConfiguration conf, ZooKeeper zkc, ClientSocketChannelFactory channelFactory,
        StatsLogger statsLogger, DNSToSwitchMapping dnsResolver, HashedWheelTimer requestTimer,
        FeatureProvider featureProvider) throws IOException, InterruptedException, KeeperException {
    this.conf = conf;

    // initialize zookeeper client
    if (zkc == null) {
        this.zk = ZooKeeperClient.newBuilder().connectString(conf.getZkServers())
                .sessionTimeoutMs(conf.getZkTimeout())
                .operationRetryPolicy(
                        new BoundExponentialBackoffRetryPolicy(conf.getZkTimeout(), conf.getZkTimeout(), 0))
                .statsLogger(statsLogger).build();
        this.ownZKHandle = true;
    } else {
        if (!zkc.getState().isConnected()) {
            LOG.error("Unconnected zookeeper handle passed to bookkeeper");
            throw KeeperException.create(KeeperException.Code.CONNECTIONLOSS);
        }
        this.zk = zkc;
        this.ownZKHandle = false;
    }

    // initialize channel factory
    if (null == channelFactory) {
        ThreadFactoryBuilder tfb = new ThreadFactoryBuilder();
        this.channelFactory = new NioClientSocketChannelFactory(
                Executors.newCachedThreadPool(tfb.setNameFormat("BookKeeper-NIOBoss-%d").build()),
                Executors.newCachedThreadPool(tfb.setNameFormat("BookKeeper-NIOWorker-%d").build()));
        this.ownChannelFactory = true;
    } else {
        this.channelFactory = channelFactory;
        this.ownChannelFactory = false;
    }

    if (null == requestTimer) {
        this.requestTimer = new HashedWheelTimer(
                new ThreadFactoryBuilder().setNameFormat("BookieClientTimer-%d").build(),
                conf.getTimeoutTimerTickDurationMs(), TimeUnit.MILLISECONDS, conf.getTimeoutTimerNumTicks());
        this.ownTimer = true;
    } else {
        this.requestTimer = requestTimer;
        this.ownTimer = false;
    }

    if (null == featureProvider) {
        this.featureProvider = SettableFeatureProvider.DISABLE_ALL;
    } else {
        this.featureProvider = featureProvider;
    }

    // initialize scheduler
    ThreadFactoryBuilder tfb = new ThreadFactoryBuilder().setNameFormat("BookKeeperClientScheduler-%d");
    this.scheduler = Executors.newSingleThreadScheduledExecutor(tfb.build());

    // initialize stats logger
    this.statsLogger = statsLogger.scope(BookKeeperClientStats.CLIENT_SCOPE);
    initOpLoggers(this.statsLogger);

    // initialize the ensemble placement
    this.placementPolicy = initializeEnsemblePlacementPolicy(conf, dnsResolver, this.requestTimer,
            this.featureProvider, this.statsLogger);

    // initialize main worker pool
    this.mainWorkerPool = OrderedSafeExecutor.newBuilder().name("BookKeeperClientWorker")
            .numThreads(conf.getNumWorkerThreads()).statsLogger(statsLogger)
            .traceTaskExecution(conf.getEnableTaskExecutionStats())
            .traceTaskWarnTimeMicroSec(conf.getTaskExecutionWarnTimeMicros()).build();

    // initialize bookie client
    this.bookieClient = new BookieClient(conf, this.channelFactory, this.mainWorkerPool, statsLogger);
    this.bookieWatcher = new BookieWatcher(conf, this.scheduler, this.placementPolicy, this);
    this.bookieWatcher.readBookiesBlocking();

    // initialize ledger manager
    this.ledgerManagerFactory = LedgerManagerFactory.newLedgerManagerFactory(conf, this.zk);
    this.ledgerManager = new CleanupLedgerManager(ledgerManagerFactory.newLedgerManager());
    this.ledgerIdGenerator = ledgerManagerFactory.newLedgerIdGenerator();

    scheduleBookieHealthCheckIfEnabled();
}

From source file:org.opendaylight.genius.alivenessmonitor.internal.AlivenessMonitor.java

private ThreadFactory getMonitoringThreadFactory(String threadNameFormat) {
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat(threadNameFormat);
    builder.setUncaughtExceptionHandler(
            (thread, ex) -> LOG.error("Received Uncaught Exception event in Thread: {}", thread.getName(), ex));
    return builder.build();
}

From source file:org.opendaylight.vpnservice.alivenessmonitor.internal.AlivenessMonitor.java

private ThreadFactory getMonitoringThreadFactory(String threadNameFormat) {
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat(threadNameFormat);
    builder.setUncaughtExceptionHandler(new UncaughtExceptionHandler() {
        @Override
        public void uncaughtException(Thread t, Throwable e) {
            LOG.error("Received Uncaught Exception event in Thread: {}", t.getName(), e);
        }
    });
    return builder.build();
}

From source file:org.apache.hadoop.hbase.thrift.ThriftServerRunner.java

ExecutorService createExecutor(BlockingQueue<Runnable> callQueue, int workerThreads) {
    ThreadFactoryBuilder tfb = new ThreadFactoryBuilder();
    tfb.setDaemon(true);
    tfb.setNameFormat("thrift-worker-%d");
    return new ThreadPoolExecutor(workerThreads, workerThreads, Long.MAX_VALUE, TimeUnit.SECONDS, callQueue,
            tfb.build());
}