Example usage for com.google.common.util.concurrent ThreadFactoryBuilder build

Introduction

On this page you can find example usage of com.google.common.util.concurrent.ThreadFactoryBuilder.build().

Prototype

public ThreadFactory build() 

Document

Returns a new thread factory using the options supplied during the building process.
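
A minimal, self-contained sketch of the call (the "worker-%d" name format and the task are illustrative, not taken from the examples below):

import java.util.concurrent.ThreadFactory;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class BuildExample {
    public static void main(String[] args) throws InterruptedException {
        ThreadFactory factory = new ThreadFactoryBuilder()
                .setNameFormat("worker-%d") // threads will be named worker-0, worker-1, ...
                .setDaemon(true)            // built threads will not keep the JVM alive
                .build();
        Thread t = factory.newThread(
                () -> System.out.println("running on " + Thread.currentThread().getName()));
        t.start();
        t.join();
    }
}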

Usage

From source file:org.lendingclub.reflex.aws.sqs.SQSAdapter.java

@SuppressWarnings("unchecked")
public synchronized <T extends SQSAdapter> T start() {
    if (running.get()) {
        logger.warn("already running");
        return (T) this;
    }

    Preconditions.checkArgument(sqs != null, "SQSClient must be set");
    if (urlSupplier == null && queueName != null) {
        urlSupplier = Suppliers.memoize(new SQSUrlSupplier(sqs, queueName));
    }

    if (urlSupplier == null) {
        throw new IllegalArgumentException("queueUrl or queueName must be set");
    }

    Runnable r = new Runnable() {

        public void run() {

            running.set(true);
            while (running.get()) {

                try {

                    if (isRunning()) {

                        ReceiveMessageRequest rmr = new ReceiveMessageRequest();
                        rmr.setWaitTimeSeconds(getWaitTimeSeconds());
                        rmr.setMaxNumberOfMessages(getMessagesPerRequest());
                        if (urlSupplier == null) {
                            throw new IllegalArgumentException("queueUrl or queueName must be set");
                        }

                        rmr.setQueueUrl(urlSupplier.get());
                        ReceiveMessageResult result = sqs.receiveMessage(rmr);
                        List<Message> list = result.getMessages();

                        if (list != null) {
                            for (Message message : list) {
                                try {
                                    messageReceiveCount.incrementAndGet();
                                    if (logger.isDebugEnabled()) {
                                        logger.debug("received: {}", message.getMessageId());
                                    }
                                    SQSMessage sqs = new SQSMessage();
                                    sqs.message = message;

                                    subject.onNext(sqs);
                                    if (autoDelete) {
                                        delete(message);
                                    }
                                    dispatchSuccessCount.incrementAndGet();
                                    resetFailureCount();
                                } catch (Exception e) {
                                    handleException(e);
                                }
                            }
                        }
                    } else {
                        logger.info("{} is paused", this);
                        Thread.sleep(10000);
                    }

                } catch (Throwable e) {
                    Exceptions.throwIfFatal(e);
                    handleException(e);
                }
            }
            logger.info("stopped");
        }
    };

    String threadNameFormat = String.format("%s-%s", "SQSAdapter",
            (Strings.isNullOrEmpty(name) ? Integer.toHexString(hashCode()) : name)) + "-%d";

    ThreadFactoryBuilder tfb = new ThreadFactoryBuilder().setDaemon(true).setNameFormat(threadNameFormat);

    Thread t = tfb.build().newThread(r);
    logger.info("starting thread: {}", t);
    t.start();

    return (T) this;
}

From source file:org.diqube.threads.ExecutorManager.java

/**
 * Create a new cached thread pool, see {@link Executors#newCachedThreadPool()}.
 *
 * @param nameFormat
 *          a {@link String#format(String, Object...)}-compatible format String, to which a unique integer (0, 1,
 *          etc.) will be supplied as the single parameter. This integer will be unique to the built instance of the
 *          ThreadFactory and will be assigned sequentially. For example, {@code "rpc-pool-%d"} will generate thread
 *          names like {@code "rpc-pool-0"}, {@code "rpc-pool-1"}, {@code "rpc-pool-2"}, etc.
 * @param uncaughtExceptionHandler
 *          This will be called in case any of the threads of the ExecutorService ends because an exception was
 *          thrown.
 * 
 * @return The new cached thread pool.
 */
public ExecutorService newCachedThreadPool(String nameFormat,
        UncaughtExceptionHandler uncaughtExceptionHandler) {
    ThreadFactoryBuilder threadFactoryBuilder = new ThreadFactoryBuilder();
    threadFactoryBuilder.setNameFormat(nameFormat);
    threadFactoryBuilder.setUncaughtExceptionHandler(uncaughtExceptionHandler);
    return Executors.newCachedThreadPool(threadFactoryBuilder.build());
}
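
A hypothetical caller of the method above ("manager" stands for an ExecutorManager instance; the handler body is illustrative). Note that the uncaught exception handler only fires for tasks passed to execute(); tasks passed to submit() have their exceptions captured in the returned Future instead.

// Hypothetical usage; "manager" is an ExecutorManager instance, names are illustrative.
ExecutorService rpcPool = manager.newCachedThreadPool("rpc-pool-%d",
        (thread, throwable) -> System.err.println("Thread " + thread.getName() + " died: " + throwable));
rpcPool.execute(() -> {
    // work that may throw; an uncaught exception here reaches the handler above
});
rpcPool.shutdown();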

From source file:org.wisdom.executors.ManagedExecutorServiceImpl.java

public ManagedExecutorServiceImpl(String name, ThreadType tu, long hungTime, int coreSize, int maxSize,
        long keepAlive, boolean allowCoreThreadTimeOut, int workQueueCapacity, int priority,
        List<ExecutionContextService> ecs) {

    super(name, hungTime, ecs);
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder().setDaemon(tu == ThreadType.DAEMON)
            .setNameFormat(name + "-%s").setPriority(priority)
            .setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
                @Override
                public void uncaughtException(Thread t, Throwable e) {
                    logger.error("Uncaught exception in thread '{}'", t.getName(), e);
                }
            });

    BlockingQueue<Runnable> queue = createWorkQueue(workQueueCapacity);
    final ThreadPoolExecutor executor = new ThreadPoolExecutor(coreSize, maxSize, keepAlive,
            TimeUnit.MILLISECONDS, queue, builder.build(), new RejectedExecutionHandler() {
                @Override
                public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
                    System.out.println("REJECTED EXECUTION : " + r);
                }
            });
    executor.allowCoreThreadTimeOut(allowCoreThreadTimeOut);
    setInternalPool(executor);
}

From source file:org.diqube.threads.ExecutorManager.java

/**
 * Create a new {@link ExecutorService} that creates threads as needed, but has a maximum number of threads.
 *
 * @param nameFormat
 *          a {@link String#format(String, Object...)}-compatible format String, to which a unique integer (0, 1,
 *          etc.) will be supplied as the single parameter. This integer will be unique to the built instance of the
 *          ThreadFactory and will be assigned sequentially. For example, {@code "rpc-pool-%d"} will generate thread
 *          names like {@code "rpc-pool-0"}, {@code "rpc-pool-1"}, {@code "rpc-pool-2"}, etc.
 * @param uncaughtExceptionHandler
 *          This will be called in case any of the threads of the ExecutorService ends because an exception was
 *          thrown.
 * @param maxPoolSize
 *          Maximum number of threads.
 * @return The new {@link ExecutorService}.
 */
public ExecutorService newCachedThreadPoolWithMax(String nameFormat,
        UncaughtExceptionHandler uncaughtExceptionHandler, int maxPoolSize) {
    ThreadFactoryBuilder threadFactoryBuilder = new ThreadFactoryBuilder();
    threadFactoryBuilder.setNameFormat(nameFormat);
    threadFactoryBuilder.setUncaughtExceptionHandler(uncaughtExceptionHandler);

    return new ThreadPoolExecutor(0, maxPoolSize, 10, TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
            threadFactoryBuilder.build());
}

From source file:org.diqube.threads.ExecutorManager.java

/**
 * Create a new thread pool with a fixed set of threads, see {@link Executors#newFixedThreadPool(int)}. The returned
 * {@link ExecutorService} should be used for executing a specific diql query, with having the correct
 * {@link QueryUuidThreadState} set.
 * 
 * <p>
 * All threads that are used by the returned {@link Executor} will be "bound" to the specific diql query: That means
 * that in case there is an uncaught exception thrown by one of those threads, the {@link QueryRegistry} will be
 * informed about this and a potentially installed exception handler for that query will be called.
 * 
 * <p>
 * In addition to that, the returned {@link Executor} will also be affected when any of the following methods are
 * called with the specific query ID:
 * 
 * <ul>
 * <li>{@link #findQueryUuidOfExecutorService(ExecutorService)}
 * <li>{@link #findAllExecutorServicesOfQueryExecution(UUID)}
 * <li>{@link #shutdownEverythingOfQueryExecution(UUID)}
 * <li>{@link #shutdownEverythingOfAllQueries()}
 * </ul>
 * 
 * <p>
 * The returned executor will be automatically terminated after {@link ConfigKey#QUERY_EXECUTION_TIMEOUT_SECONDS}
 * seconds.
 * 
 * @param numberOfThreads
 *          Number of threads the thread pool should have
 * @param nameFormat
 *          a {@link String#format(String, Object...)}-compatible format String, to which a unique integer (0, 1,
 *          etc.) will be supplied as the single parameter. This integer will be unique to the built instance of the
 *          ThreadFactory and will be assigned sequentially. For example, {@code "rpc-pool-%d"} will generate thread
 *          names like {@code "rpc-pool-0"}, {@code "rpc-pool-1"}, {@code "rpc-pool-2"}, etc.
 * @param queryUuid
 *          The UUID of the query to whose execution the returned {@link Executor} belongs. For a description of query
 *          UUID/executor UUID, see {@link QueryUuid} and ExecutablePlan.
 * @param executionUuid
 *          The UUID of the execution the returned {@link Executor} belongs to. For a description of query
 *          UUID/executor UUID, see {@link QueryUuid} and ExecutablePlan.
 * 
 * @return The new thread pool. Only an {@link Executor} is returned, not an {@link ExecutorService}, because
 *         ONLY the {@link Executor#execute(Runnable)} method must be used; only then does the exception forwarding
 *         described above work correctly. It does not work when methods such as
 *         {@link ExecutorService#submit(java.util.concurrent.Callable)} are called, because the ExecutorService
 *         won't forward the exception in that case, but encapsulates it in the corresponding {@link Future}.
 */
public synchronized Executor newQueryFixedThreadPoolWithTimeout(int numberOfThreads, String nameFormat,
        UUID queryUuid, UUID executionUuid) {
    ThreadFactoryBuilder baseThreadFactoryBuilder = new ThreadFactoryBuilder();
    baseThreadFactoryBuilder.setNameFormat(nameFormat);
    // Use our ThreadFactory as a facade in order to install our exception handling and enable the publication of the
    // query & execution UUID in QueryUuid when any thread of the query starts running.
    ThreadFactory threadFactory = new QueryThreadFactory(baseThreadFactoryBuilder.build(), queryUuid,
            executionUuid, queryRegistry);

    DiqubeFixedThreadPoolExecutor res = new DiqubeFixedThreadPoolExecutor(numberOfThreads, threadFactory,
            queryUuid, executionUuid);
    res.setThreadNameFormatForToString(nameFormat);
    synchronized (queryExecutors) {
        if (!queryExecutors.containsKey(queryUuid))
            queryExecutors.put(queryUuid, new HashMap<>());
        if (!queryExecutors.get(queryUuid).containsKey(executionUuid))
            queryExecutors.get(queryUuid).put(executionUuid, new ArrayList<>());
        queryExecutors.get(queryUuid).get(executionUuid).add(res);
    }

    timeoutThread.registerTimeout((System.nanoTime() / (long) 1e6) + (queryExecutionTimeoutSeconds * 1000),
            res);

    return res;
}
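
QueryThreadFactory itself is not shown on this page; in shape it is a decorator around the factory produced by build(). A minimal sketch of that decorator pattern (the class name and comments are illustrative, not the diqube implementation):

import java.util.concurrent.ThreadFactory;

// Illustrative decorator around a ThreadFactory built by ThreadFactoryBuilder.
class DecoratingThreadFactory implements ThreadFactory {
    private final ThreadFactory delegate;

    DecoratingThreadFactory(ThreadFactory delegate) {
        this.delegate = delegate;
    }

    @Override
    public Thread newThread(Runnable task) {
        // Wrap the task so per-thread context (e.g. query/execution UUIDs, exception
        // handling) can be installed before the real work runs.
        return delegate.newThread(() -> {
            // ... install context here ...
            task.run();
        });
    }
}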

From source file:org.apache.hadoop.hbase.index.mapreduce.IndexLoadIncrementalHFile.java

/**
 * Perform a bulk load of the given directory into the given pre-existing table. This method is
 * not threadsafe.
 * @param hfofDir the directory that was provided as the output path of a job using
 *          HFileOutputFormat
 * @param table the table to load into
 * @throws TableNotFoundException if table does not yet exist
 */
public void doBulkLoad(Path hfofDir, final HTable table) throws TableNotFoundException, IOException {
    final HConnection conn = table.getConnection();

    if (!conn.isTableAvailable(table.getTableName())) {
        throw new TableNotFoundException(
                "Table " + Bytes.toStringBinary(table.getTableName()) + "is not currently available.");
    }

    // initialize thread pools
    int nrThreads = cfg.getInt("hbase.loadincremental.threads.max", Runtime.getRuntime().availableProcessors());
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat("LoadIncrementalHFiles-%1$d");
    ExecutorService pool = new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(), builder.build());
    ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true);

    // LQI queue does not need to be threadsafe -- all operations on this queue
    // happen in this thread
    Deque<LoadQueueItem> queue = new LinkedList<LoadQueueItem>();
    try {
        discoverLoadQueue(queue, hfofDir);
        int count = 0;

        if (queue.isEmpty()) {
            LOG.warn("Bulk load operation did not find any files to load in " + "directory " + hfofDir.toUri()
                    + ".  Does it contain files in "
                    + "subdirectories that correspond to column family names?");
            return;
        }

        // Assumes that region splits can happen while this occurs.
        while (!queue.isEmpty()) {
            // need to reload split keys each iteration.
            final Pair<byte[][], byte[][]> startEndKeys = table.getStartEndKeys();
            if (count != 0) {
                LOG.info("Split occured while grouping HFiles, retry attempt " + +count + " with "
                        + queue.size() + " files remaining to group or split");
            }

            int maxRetries = cfg.getInt("hbase.bulkload.retries.number", 0);
            if (maxRetries != 0 && count >= maxRetries) {
                LOG.error("Retry attempted " + count + " times without completing, bailing out");
                return;
            }
            count++;

            // Using ByteBuffer for byte[] equality semantics
            Multimap<ByteBuffer, LoadQueueItem> regionGroups = groupOrSplitPhase(table, pool, queue,
                    startEndKeys);

            bulkLoadPhase(table, conn, pool, queue, regionGroups);

            // NOTE: The next iteration's split / group could happen in parallel to
            // atomic bulkloads assuming that there are splits and no merges, and
            // that we can atomically pull out the groups we want to retry.
        }

    } finally {
        pool.shutdown();
        if (queue != null && !queue.isEmpty()) {
            StringBuilder err = new StringBuilder();
            err.append("-------------------------------------------------\n");
            err.append("Bulk load aborted with some files not yet loaded:\n");
            err.append("-------------------------------------------------\n");
            for (LoadQueueItem q : queue) {
                err.append("  ").append(q.hfilePath).append('\n');
            }
            LOG.error(err);
        }
    }
}
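
The pool setup used above (a fixed-size pool with named threads whose core threads may time out when idle) can be condensed into a standalone sketch; the variable names are illustrative:

// Condensed sketch of the pool construction above.
int nrThreads = Runtime.getRuntime().availableProcessors();
ThreadFactory factory = new ThreadFactoryBuilder().setNameFormat("LoadIncrementalHFiles-%1$d").build();
ThreadPoolExecutor pool = new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS,
        new LinkedBlockingQueue<Runnable>(), factory);
pool.allowCoreThreadTimeOut(true); // idle core threads are released after the 60 s keep-alive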

From source file:org.apache.hadoop.hbase.regionserver.SplitTransactionImpl.java

/**
 * Creates reference files for the top and bottom halves of the split.
 * @param hstoreFilesToSplit map of store files to create half file references for.
 * @return the number of reference files that were created.
 * @throws IOException
 */
private Pair<Integer, Integer> splitStoreFiles(final Map<byte[], List<StoreFile>> hstoreFilesToSplit)
        throws IOException {
    if (hstoreFilesToSplit == null) {
        // Could be null because close didn't succeed -- for now consider it fatal
        throw new IOException("Close returned empty list of StoreFiles");
    }
    // The following code sets up a thread pool executor with as many slots as
    // there's files to split. It then fires up everything, waits for
    // completion and finally checks for any exception
    int nbFiles = 0;
    for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesToSplit.entrySet()) {
        nbFiles += entry.getValue().size();
    }
    if (nbFiles == 0) {
        // no file needs to be split.
        return new Pair<Integer, Integer>(0, 0);
    }
    // Default max #threads to use is the smaller of table's configured number of blocking store
    // files or the available number of logical cores.
    int defMaxThreads = Math.min(
            parent.conf.getInt(HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT),
            Runtime.getRuntime().availableProcessors());
    // Max #threads is the smaller of the number of storefiles or the default max determined above.
    int maxThreads = Math.min(parent.conf.getInt(HConstants.REGION_SPLIT_THREADS_MAX, defMaxThreads), nbFiles);
    LOG.info("Preparing to split " + nbFiles + " storefiles for region " + this.parent + " using " + maxThreads
            + " threads");
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat("StoreFileSplitter-%1$d");
    ThreadFactory factory = builder.build();
    ThreadPoolExecutor threadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool(maxThreads, factory);
    List<Future<Pair<Path, Path>>> futures = new ArrayList<Future<Pair<Path, Path>>>(nbFiles);

    // Split each store file.
    for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesToSplit.entrySet()) {
        for (StoreFile sf : entry.getValue()) {
            StoreFileSplitter sfs = new StoreFileSplitter(entry.getKey(), sf);
            futures.add(threadPool.submit(sfs));
        }
    }
    // Shutdown the pool
    threadPool.shutdown();

    // Wait for all the tasks to finish
    try {
        boolean stillRunning = !threadPool.awaitTermination(this.fileSplitTimeout, TimeUnit.MILLISECONDS);
        if (stillRunning) {
            threadPool.shutdownNow();
            // wait for the thread to shutdown completely.
            while (!threadPool.isTerminated()) {
                Thread.sleep(50);
            }
            throw new IOException(
                    "Took too long to split the" + " files and create the references, aborting split");
        }
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }

    int created_a = 0;
    int created_b = 0;
    // Look for any exception
    for (Future<Pair<Path, Path>> future : futures) {
        try {
            Pair<Path, Path> p = future.get();
            created_a += p.getFirst() != null ? 1 : 0;
            created_b += p.getSecond() != null ? 1 : 0;
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        } catch (ExecutionException e) {
            throw new IOException(e);
        }
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Split storefiles for region " + this.parent + " Daughter A: " + created_a
                + " storefiles, Daughter B: " + created_b + " storefiles.");
    }
    return new Pair<Integer, Integer>(created_a, created_b);
}
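
The shutdown sequence above (shutdown, bounded awaitTermination, shutdownNow on timeout) is the same idiom used by several examples on this page; a condensed sketch, with InterruptedException handling trimmed and "timeoutMillis" standing in for this.fileSplitTimeout:

threadPool.shutdown();                                       // stop accepting new tasks
if (!threadPool.awaitTermination(timeoutMillis, TimeUnit.MILLISECONDS)) {
    threadPool.shutdownNow();                                // interrupt tasks still running
    while (!threadPool.isTerminated()) {
        Thread.sleep(50);                                    // wait for workers to exit
    }
    throw new IOException("Took too long to split the files and create the references, aborting split");
}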

From source file:org.apache.hadoop.hbase.regionserver.IndexSplitTransaction.java

private void splitStoreFiles(final Map<byte[], List<StoreFile>> hstoreFilesToSplit) throws IOException {
    if (hstoreFilesToSplit == null) {
        // Could be null because close didn't succeed -- for now consider it fatal
        throw new IOException("Close returned empty list of StoreFiles");
    }
    // The following code sets up a thread pool executor with as many slots as
    // there's files to split. It then fires up everything, waits for
    // completion and finally checks for any exception
    int nbFiles = hstoreFilesToSplit.size();
    if (nbFiles == 0) {
        // no file needs to be split.
        return;
    }
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat("StoreFileSplitter-%1$d");
    ThreadFactory factory = builder.build();
    ThreadPoolExecutor threadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool(nbFiles, factory);
    List<Future<Void>> futures = new ArrayList<Future<Void>>(nbFiles);

    // Split each store file.
    for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesToSplit.entrySet()) {
        for (StoreFile sf : entry.getValue()) {
            StoreFileSplitter sfs = new StoreFileSplitter(entry.getKey(), sf);
            futures.add(threadPool.submit(sfs));
        }
    }
    // Shutdown the pool
    threadPool.shutdown();

    // Wait for all the tasks to finish
    try {
        boolean stillRunning = !threadPool.awaitTermination(this.fileSplitTimeout, TimeUnit.MILLISECONDS);
        if (stillRunning) {
            threadPool.shutdownNow();
            // wait for the thread to shutdown completely.
            while (!threadPool.isTerminated()) {
                Thread.sleep(50);
            }
            throw new IOException(
                    "Took too long to split the" + " files and create the references, aborting split");
        }
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }

    // Look for any exception
    for (Future<Void> future : futures) {
        try {
            future.get();
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        } catch (ExecutionException e) {
            throw new IOException(e);
        }
    }
}

From source file:org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler.java

@Override
protected void serviceStart() throws Exception {
    ThreadFactoryBuilder tfBuilder = new ThreadFactoryBuilder().setNameFormat("CommitterEvent Processor #%d");
    if (jobClassLoader != null) {
        // if the job classloader is enabled, we need to use the job classloader
        // as the thread context classloader (TCCL) of these threads in case the
        // committer needs to load another class via TCCL
        ThreadFactory backingTf = new ThreadFactory() {
            @Override
            public Thread newThread(Runnable r) {
                Thread thread = new Thread(r);
                thread.setContextClassLoader(jobClassLoader);
                return thread;
            }
        };
        tfBuilder.setThreadFactory(backingTf);
    }
    ThreadFactory tf = tfBuilder.build();
    launcherPool = new ThreadPoolExecutor(5, 5, 1, TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>(), tf);
    eventHandlingThread = new Thread(new Runnable() {
        @Override
        public void run() {
            CommitterEvent event = null;
            while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
                try {
                    event = eventQueue.take();
                } catch (InterruptedException e) {
                    if (!stopped.get()) {
                        LOG.error("Returning, interrupted : " + e);
                    }
                    return;
                }
                // the events from the queue are handled in parallel
                // using a thread pool
                launcherPool.execute(new EventProcessor(event));
            }
        }
    });
    eventHandlingThread.setName("CommitterEvent Handler");
    eventHandlingThread.start();
    super.serviceStart();
}
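
The key piece above is setThreadFactory: the builder only decorates whatever backing factory it is handed, so the backing factory can take care of concerns such as the thread context classloader. A minimal sketch of the same idea ("jobClassLoader" is a stand-in for whatever loader the tasks need):

ClassLoader jobClassLoader = Thread.currentThread().getContextClassLoader(); // stand-in
ThreadFactory tf = new ThreadFactoryBuilder()
        .setNameFormat("CommitterEvent Processor #%d")
        .setThreadFactory(r -> {
            Thread thread = new Thread(r);
            thread.setContextClassLoader(jobClassLoader);
            return thread;
        })
        .build();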

From source file:org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.java

/**
 * Perform a bulk load of the given directory into the given
 * pre-existing table.  This method is not threadsafe.
 *
 * @param hfofDir the directory that was provided as the output path
 * of a job using HFileOutputFormat
 * @param table the table to load into
 * @throws TableNotFoundException if table does not yet exist
 */
@SuppressWarnings("deprecation")
public void doBulkLoad(Path hfofDir, final HTable table) throws TableNotFoundException, IOException {
    final HConnection conn = table.getConnection();

    if (!conn.isTableAvailable(table.getName())) {
        throw new TableNotFoundException(
                "Table " + Bytes.toStringBinary(table.getTableName()) + "is not currently available.");
    }

    // initialize thread pools
    int nrThreads = getConf().getInt("hbase.loadincremental.threads.max",
            Runtime.getRuntime().availableProcessors());
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat("LoadIncrementalHFiles-%1$d");
    ExecutorService pool = new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(), builder.build());
    ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true);

    // LQI queue does not need to be threadsafe -- all operations on this queue
    // happen in this thread
    Deque<LoadQueueItem> queue = new LinkedList<LoadQueueItem>();
    try {
        discoverLoadQueue(queue, hfofDir);
        // check whether there is invalid family name in HFiles to be bulkloaded
        Collection<HColumnDescriptor> families = table.getTableDescriptor().getFamilies();
        ArrayList<String> familyNames = new ArrayList<String>();
        for (HColumnDescriptor family : families) {
            familyNames.add(family.getNameAsString());
        }
        ArrayList<String> unmatchedFamilies = new ArrayList<String>();
        for (LoadQueueItem lqi : queue) {
            String familyNameInHFile = Bytes.toString(lqi.family);
            if (!familyNames.contains(familyNameInHFile)) {
                unmatchedFamilies.add(familyNameInHFile);
            }
        }
        if (unmatchedFamilies.size() > 0) {
            String msg = "Unmatched family names found: unmatched family names in HFiles to be bulkloaded: "
                    + unmatchedFamilies + "; valid family names of table "
                    + Bytes.toString(table.getTableName()) + " are: " + familyNames;
            LOG.error(msg);
            throw new IOException(msg);
        }
        int count = 0;

        if (queue.isEmpty()) {
            LOG.warn("Bulk load operation did not find any files to load in " + "directory " + hfofDir.toUri()
                    + ".  Does it contain files in "
                    + "subdirectories that correspond to column family names?");
            return;
        }

        //If using secure bulk load, get source delegation token, and
        //prepare staging directory and token
        if (userProvider.isHBaseSecurityEnabled()) {
            // fs is the source filesystem
            fsDelegationToken.acquireDelegationToken(fs);

            bulkToken = new SecureBulkLoadClient(table).prepareBulkLoad(table.getName());
        }

        // Assumes that region splits can happen while this occurs.
        while (!queue.isEmpty()) {
            // need to reload split keys each iteration.
            final Pair<byte[][], byte[][]> startEndKeys = table.getStartEndKeys();
            if (count != 0) {
                LOG.info("Split occured while grouping HFiles, retry attempt " + +count + " with "
                        + queue.size() + " files remaining to group or split");
            }

            int maxRetries = getConf().getInt("hbase.bulkload.retries.number", 0);
            if (maxRetries != 0 && count >= maxRetries) {
                LOG.error("Retry attempted " + count + " times without completing, bailing out");
                return;
            }
            count++;

            // Using ByteBuffer for byte[] equality semantics
            Multimap<ByteBuffer, LoadQueueItem> regionGroups = groupOrSplitPhase(table, pool, queue,
                    startEndKeys);

            if (!checkHFilesCountPerRegionPerFamily(regionGroups)) {
                // Error is logged inside checkHFilesCountPerRegionPerFamily.
                throw new IOException("Trying to load more than " + maxFilesPerRegionPerFamily
                        + " hfiles to one family of one region");
            }

            bulkLoadPhase(table, conn, pool, queue, regionGroups);

            // NOTE: The next iteration's split / group could happen in parallel to
            // atomic bulkloads assuming that there are splits and no merges, and
            // that we can atomically pull out the groups we want to retry.
        }

    } finally {
        if (userProvider.isHBaseSecurityEnabled()) {
            fsDelegationToken.releaseDelegationToken();

            if (bulkToken != null) {
                new SecureBulkLoadClient(table).cleanupBulkLoad(bulkToken);
            }
        }
        pool.shutdown();
        if (queue != null && !queue.isEmpty()) {
            StringBuilder err = new StringBuilder();
            err.append("-------------------------------------------------\n");
            err.append("Bulk load aborted with some files not yet loaded:\n");
            err.append("-------------------------------------------------\n");
            for (LoadQueueItem q : queue) {
                err.append("  ").append(q.hfilePath).append('\n');
            }
            LOG.error(err);
        }
    }

    if (queue != null && !queue.isEmpty()) {
        throw new RuntimeException(
                "Bulk load aborted with some files not yet loaded." + "Please check log for more details.");
    }
}