Example usage for java.util.concurrent ExecutorService shutdownNow

List of usage examples for java.util.concurrent ExecutorService shutdownNow

Introduction

On this page you can find example usages of java.util.concurrent ExecutorService shutdownNow.

Prototype

List<Runnable> shutdownNow();

Document

Attempts to stop all actively executing tasks, halts the processing of waiting tasks, and returns a list of the tasks that were awaiting execution.
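
Before the project examples below, here is a minimal sketch of the common two-phase shutdown idiom described in the ExecutorService Javadoc: call shutdown() first, give running tasks a chance to finish, and fall back to shutdownNow() only if they do not complete in time. The class name, pool size, and timeouts are illustrative and not taken from any of the projects quoted below.

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownNowSketch {

    // Two-phase shutdown: stop accepting new tasks, wait, then force-cancel stragglers.
    static void shutdownAndAwaitTermination(ExecutorService pool) {
        pool.shutdown(); // disable submission of new tasks
        try {
            // Give already-submitted tasks a chance to complete.
            if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                // shutdownNow() interrupts running tasks and returns the tasks
                // that were still waiting to run; here we just report how many were dropped.
                List<Runnable> dropped = pool.shutdownNow();
                System.err.println("Forcing shutdown, dropped " + dropped.size() + " queued tasks");
                if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                    System.err.println("Pool did not terminate");
                }
            }
        } catch (InterruptedException e) {
            // (Re-)cancel if the current thread was interrupted while waiting.
            pool.shutdownNow();
            Thread.currentThread().interrupt(); // preserve the interrupt status
        }
    }

    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        pool.submit(() -> System.out.println("task running on " + Thread.currentThread().getName()));
        shutdownAndAwaitTermination(pool);
    }
}

The examples that follow show how real projects apply this idiom: some use shutdownNow() only as a timeout or interruption fallback, others call it unconditionally during teardown.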

Usage

From source file:com.trellmor.berrymotes.sync.EmoteDownloader.java

public void start(SyncResult syncResult) {
    Log.info("EmoteDownload started");

    this.updateNetworkInfo();

    mSyncResult = syncResult;

    if (!mIsConnected) {
        Log.error("Network not available");
        syncResult.stats.numIoExceptions++;
        return;
    }

    // Registers BroadcastReceiver to track network connection changes.
    IntentFilter filter = new IntentFilter(ConnectivityManager.CONNECTIVITY_ACTION);
    NetworkReceiver receiver = new NetworkReceiver();
    mContext.registerReceiver(receiver, filter);

    ExecutorService executor = Executors.newFixedThreadPool(THREAD_COUNT);

    mHttpClient = AndroidHttpClient.newInstance(USER_AGENT);
    try {
        String[] subreddits = getSubreddits();

        for (String subreddit : subreddits) {
            if (mSubreddits.isChecked(subreddit)) {
                Runnable subredditEmoteDownloader = new SubredditEmoteDownloader(mContext, this, subreddit);
                executor.execute(subredditEmoteDownloader);
            } else {
                // Delete this subreddit
                deleteSubreddit(subreddit, mContentResolver);
                // Reset last download date
                SharedPreferences.Editor settings = PreferenceManager.getDefaultSharedPreferences(mContext)
                        .edit();
                settings.remove(SettingsActivity.KEY_SYNC_LAST_MODIFIED + subreddit);
                settings.commit();
            }
        }
        executor.shutdown();
        executor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS);
    } catch (URISyntaxException e) {
        Log.error("Emotes URL is malformed", e);
        synchronized (mSyncResult) {
            mSyncResult.stats.numParseExceptions++;
            if (mSyncResult.delayUntil < 60 * 60)
                mSyncResult.delayUntil = 60 * 60;
        }
        return;
    } catch (IOException e) {
        Log.error("Error reading from network: " + e.getMessage(), e);
        synchronized (mSyncResult) {
            mSyncResult.stats.numIoExceptions++;
            if (mSyncResult.delayUntil < 30 * 60)
                mSyncResult.delayUntil = 30 * 60;
        }
        return;
    } catch (InterruptedException e) {
        synchronized (mSyncResult) {
            syncResult.moreRecordsToGet = true;
        }

        Log.info("Sync interrupted");

        executor.shutdownNow();
        try {
            executor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS);
        } catch (InterruptedException e2) {
            // Already shutting down; fall through and restore the interrupt flag below.
        }

        Thread.currentThread().interrupt();
    } finally {
        Log.info("Deleted emotes: " + Long.toString(mSyncResult.stats.numDeletes));
        Log.info("Added emotes: " + Long.toString(mSyncResult.stats.numInserts));

        // Unregisters BroadcastReceiver at the end
        mContext.unregisterReceiver(receiver);

        mHttpClient.close();
    }

    Log.info("EmoteDownload finished");
}

From source file:org.apache.hadoop.hbase.master.assignment.SplitTableRegionProcedure.java

/**
 * Create Split directory
 * @param env MasterProcedureEnv
 * @throws IOException
 */
private Pair<Integer, Integer> splitStoreFiles(final MasterProcedureEnv env, final HRegionFileSystem regionFs)
        throws IOException {
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    final Configuration conf = env.getMasterConfiguration();
    // The following code sets up a thread pool executor with as many slots as
    // there are files to split. It then fires up everything, waits for
    // completion, and finally checks for any exceptions.
    //
    // Note: splitStoreFiles creates daughter region dirs under the parent splits dir
    // Nothing to unroll here if failure -- re-run createSplitsDir will
    // clean this up.
    int nbFiles = 0;
    final Map<String, Collection<StoreFileInfo>> files = new HashMap<String, Collection<StoreFileInfo>>(
            regionFs.getFamilies().size());
    for (String family : regionFs.getFamilies()) {
        Collection<StoreFileInfo> sfis = regionFs.getStoreFiles(family);
        if (sfis == null)
            continue;
        Collection<StoreFileInfo> filteredSfis = null;
        for (StoreFileInfo sfi : sfis) {
            // Filter. There is a lag cleaning up compacted reference files. They get cleared
            // after a delay in case outstanding Scanners still have references. Because of this,
            // the listing of the Store content may have straggler reference files. Skip these.
            // It should be safe to skip references at this point because we checked above with
            // the region if it thinks it is splittable and if we are here, it thinks it is
            // splittable.
            if (sfi.isReference()) {
                LOG.info("Skipping split of " + sfi + "; presuming ready for archiving.");
                continue;
            }
            if (filteredSfis == null) {
                filteredSfis = new ArrayList<StoreFileInfo>(sfis.size());
                files.put(family, filteredSfis);
            }
            filteredSfis.add(sfi);
            nbFiles++;
        }
    }
    if (nbFiles == 0) {
        // no file needs to be split.
        return new Pair<Integer, Integer>(0, 0);
    }
    // Max #threads is the smaller of the number of storefiles or the default max determined above.
    int maxThreads = Math.min(
            conf.getInt(HConstants.REGION_SPLIT_THREADS_MAX,
                    conf.getInt(HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT)),
            nbFiles);
    LOG.info("pid=" + getProcId() + " splitting " + nbFiles + " storefiles, region="
            + getParentRegion().getShortNameToLog() + ", threads=" + maxThreads);
    final ExecutorService threadPool = Executors.newFixedThreadPool(maxThreads,
            Threads.getNamedThreadFactory("StoreFileSplitter-%1$d"));
    final List<Future<Pair<Path, Path>>> futures = new ArrayList<Future<Pair<Path, Path>>>(nbFiles);

    // Split each store file.
    final TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
    for (Map.Entry<String, Collection<StoreFileInfo>> e : files.entrySet()) {
        byte[] familyName = Bytes.toBytes(e.getKey());
        final ColumnFamilyDescriptor hcd = htd.getColumnFamily(familyName);
        final Collection<StoreFileInfo> storeFiles = e.getValue();
        if (storeFiles != null && storeFiles.size() > 0) {
            final CacheConfig cacheConf = new CacheConfig(conf, hcd);
            for (StoreFileInfo storeFileInfo : storeFiles) {
                StoreFileSplitter sfs = new StoreFileSplitter(regionFs, familyName, new HStoreFile(
                        mfs.getFileSystem(), storeFileInfo, conf, cacheConf, hcd.getBloomFilterType(), true));
                futures.add(threadPool.submit(sfs));
            }
        }
    }
    // Shutdown the pool
    threadPool.shutdown();

    // Wait for all the tasks to finish
    long fileSplitTimeout = conf.getLong("hbase.master.fileSplitTimeout", 30000);
    try {
        boolean stillRunning = !threadPool.awaitTermination(fileSplitTimeout, TimeUnit.MILLISECONDS);
        if (stillRunning) {
            threadPool.shutdownNow();
            // wait for the threads to shut down completely.
            while (!threadPool.isTerminated()) {
                Thread.sleep(50);
            }
            throw new IOException(
                    "Took too long to split the" + " files and create the references, aborting split");
        }
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }

    int daughterA = 0;
    int daughterB = 0;
    // Look for any exception
    for (Future<Pair<Path, Path>> future : futures) {
        try {
            Pair<Path, Path> p = future.get();
            daughterA += p.getFirst() != null ? 1 : 0;
            daughterB += p.getSecond() != null ? 1 : 0;
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        } catch (ExecutionException e) {
            throw new IOException(e);
        }
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("pid=" + getProcId() + " split storefiles for region " + getParentRegion().getShortNameToLog()
                + " Daughter A: " + daughterA + " storefiles, Daughter B: " + daughterB + " storefiles.");
    }
    return new Pair<Integer, Integer>(daughterA, daughterB);
}

From source file:com.linkedin.pinot.tools.perf.QueryRunner.java

/**
 * Use multiple threads to run queries as fast as possible.
 * <p>Use a concurrent linked queue to buffer the queries to be sent. Use the main thread to insert queries into the
 * queue whenever the queue length is low, and start <code>numThreads</code> worker threads to fetch queries from the
 * queue and send them.
 * <p>The main thread is responsible for collecting and logging the statistic information periodically.
 * <p>Queries are picked sequentially from the query file.
 * <p>Query runner will stop when all queries in the query file have been executed the configured number of times.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numTimesToRunQueries number of times to run all queries in the query file, 0 means infinite times.
 * @param numThreads number of threads sending queries.
 * @param reportIntervalMs report interval in milliseconds.
 * @param numIntervalsToReportAndClearStatistics number of report intervals to report detailed statistics and clear
 *                                               them, 0 means never.
 * @throws Exception
 */
public static void multiThreadedQueryRunner(PerfBenchmarkDriverConf conf, String queryFile,
        int numTimesToRunQueries, int numThreads, int reportIntervalMs,
        int numIntervalsToReportAndClearStatistics) throws Exception {
    List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }

    PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
    AtomicInteger numQueriesExecuted = new AtomicInteger(0);
    AtomicLong totalBrokerTime = new AtomicLong(0L);
    AtomicLong totalClientTime = new AtomicLong(0L);
    List<Statistics> statisticsList = Collections.singletonList(new Statistics(CLIENT_TIME_STATISTICS));

    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Worker(driver, queryQueue, numQueriesExecuted, totalBrokerTime,
                totalClientTime, statisticsList));
    }
    executorService.shutdown();

    long startTime = System.currentTimeMillis();
    long reportStartTime = startTime;
    int numReportIntervals = 0;
    int numTimesExecuted = 0;
    while (numTimesToRunQueries == 0 || numTimesExecuted < numTimesToRunQueries) {
        if (executorService.isTerminated()) {
            LOGGER.error("All threads got exception and already dead.");
            return;
        }

        for (String query : queries) {
            queryQueue.add(query);

            // Keep 20 queries inside the query queue.
            while (queryQueue.size() == 20) {
                Thread.sleep(1);

                long currentTime = System.currentTimeMillis();
                if (currentTime - reportStartTime >= reportIntervalMs) {
                    long timePassed = currentTime - startTime;
                    int numQueriesExecutedInt = numQueriesExecuted.get();
                    LOGGER.info(
                            "Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, Average Broker Time: {}ms, "
                                    + "Average Client Time: {}ms.",
                            timePassed, numQueriesExecutedInt,
                            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                            totalBrokerTime.get() / (double) numQueriesExecutedInt,
                            totalClientTime.get() / (double) numQueriesExecutedInt);
                    reportStartTime = currentTime;
                    numReportIntervals++;

                    if ((numIntervalsToReportAndClearStatistics != 0)
                            && (numReportIntervals == numIntervalsToReportAndClearStatistics)) {
                        numReportIntervals = 0;
                        startTime = currentTime;
                        reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                                statisticsList);
                    }
                }
            }
        }
        numTimesExecuted++;
    }

    // Wait for all queries to finish executing.
    while (queryQueue.size() != 0) {
        Thread.sleep(1);
    }
    executorService.shutdownNow();
    while (!executorService.isTerminated()) {
        Thread.sleep(1);
    }

    long timePassed = System.currentTimeMillis() - startTime;
    int numQueriesExecutedInt = numQueriesExecuted.get();
    LOGGER.info("--------------------------------------------------------------------------------");
    LOGGER.info("FINAL REPORT:");
    LOGGER.info(
            "Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, Average Broker Time: {}ms, "
                    + "Average Client Time: {}ms.",
            timePassed, numQueriesExecutedInt,
            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
            totalBrokerTime.get() / (double) numQueriesExecutedInt,
            totalClientTime.get() / (double) numQueriesExecutedInt);
    for (Statistics statistics : statisticsList) {
        statistics.report();
    }
}

From source file:org.apache.phoenix.execute.UpsertSelectOverlappingBatchesIT.java

/**
 * Tests that splitting a region is not blocked indefinitely by UPSERT SELECT load
 */
@Test
public void testSplitDuringUpsertSelect() throws Exception {
    int numUpsertSelectRunners = 4;
    ExecutorService exec = Executors.newFixedThreadPool(numUpsertSelectRunners);
    try (Connection conn = driver.connect(url, props)) {
        final UpsertSelectRunner upsertSelectRunner = new UpsertSelectRunner(dataTable, 0, 105, 1);
        // keep running slow upsert selects
        SlowBatchRegionObserver.SLOW_MUTATE = true;
        for (int i = 0; i < numUpsertSelectRunners; i++) {
            exec.submit(new UpsertSelectLooper(upsertSelectRunner));
            Thread.sleep(300);
        }

        // keep trying to split the region
        final HBaseTestingUtility utility = getUtility();
        final Admin admin = utility.getAdmin();
        final TableName dataTN = TableName.valueOf(dataTable);
        assertEquals(1, utility.getHBaseCluster().getRegions(dataTN).size());
        utility.waitFor(60000L, 1000, new Waiter.Predicate<Exception>() {
            @Override
            public boolean evaluate() throws Exception {
                try {
                    List<RegionInfo> regions = admin.getRegions(dataTN);
                    if (regions.size() > 1) {
                        logger.info("Found region was split");
                        return true;
                    }
                    if (regions.size() == 0) {
                        // This happens when region in transition or closed
                        logger.info("No region returned");
                        return false;
                    }
                    RegionInfo hRegion = regions.get(0);
                    logger.info("Attempting to split region");
                    admin.splitRegionAsync(hRegion.getRegionName(), Bytes.toBytes(2));
                    return false;
                } catch (NotServingRegionException | DoNotRetryRegionException re) {
                    // during split
                    return false;
                }
            }
        });
    } finally {
        SlowBatchRegionObserver.SLOW_MUTATE = false;
        exec.shutdownNow();
        exec.awaitTermination(60, TimeUnit.SECONDS);
    }
}

From source file:org.openspaces.admin.internal.admin.DefaultAdmin.java

@Override
public void close() {
    if (!closeStarted.compareAndSet(false, true)) {
        if (lifecycleLogger.isDebugEnabled()) {
            lifecycleLogger.debug(
                    "Not closing admin, since close() has already been called. hashCode()=" + this.hashCode());
        }
        return;
    }
    if (lifecycleLogger.isDebugEnabled()) {
        lifecycleLogger.debug("Closing admin. hashCode=" + this.hashCode());
    }
    discoveryService.stop();
    if (scheduledProcessingUnitMonitorFuture != null) {
        scheduledProcessingUnitMonitorFuture.cancel(true);
        scheduledProcessingUnitMonitorFuture = null;
    }
    if (scheduledAgentProcessessMonitorFuture != null) {
        scheduledAgentProcessessMonitorFuture.cancel(true);
        scheduledAgentProcessessMonitorFuture = null;
    }

    scheduledExecutorService.shutdownNow();
    for (ExecutorService executorService : eventsExecutorServices) {
        executorService.shutdownNow();
    }

    longRunningExecutorService.shutdownNow();

    closeEnded.set(true);

    if (lifecycleLogger.isDebugEnabled()) {
        lifecycleLogger.debug("Admin closed. hashCode=" + this.hashCode());
    }
}

From source file:org.apache.hadoop.raid.RaidNode.java

public boolean startSmokeTest(boolean wait) throws Exception {
    Runnable worker = this.blockIntegrityMonitor.getCorruptionMonitor();
    if (worker == null || !(worker instanceof CorruptionWorker)) {
        throw new IOException("CorruptionWorker is not found");
    }
    if (!(this instanceof DistRaidNode)) {
        throw new IOException("Current Raid daemon is not DistRaidNode");
    }
    if (!(this.blockIntegrityMonitor instanceof DistBlockIntegrityMonitor)) {
        throw new IOException("Current BlockFix daemon is not DistBlockIntegrityMonitor");
    }
    SmokeTestThread.LOG.info("[SMOKETEST] Start Raid Smoke Test");
    long startTime = System.currentTimeMillis();
    SmokeTestThread str = new SmokeTestThread(this);
    ExecutorService executor = Executors.newSingleThreadExecutor();
    Future<Boolean> future = executor.submit(str);
    boolean result = false;
    if (wait) {
        try {
            result = future.get(1200, TimeUnit.SECONDS);
        } catch (Throwable exp) {
            SmokeTestThread.LOG.info("[SMOKETEST] Get Exception ", exp);
        } finally {
            executor.shutdownNow();
            SmokeTestThread.LOG.info("[SMOKETEST] Finish Raid Smoke Test (" + (result ? "succeed" : "fail")
                    + ") using " + (System.currentTimeMillis() - startTime) + "ms");
            if (str.ioe != null) {
                throw str.ioe;
            }
        }
    }
    return result;
}

From source file:org.apache.phoenix.monitoring.PhoenixMetricsIT.java

@Test
public void testGetConnectionsWithDifferentJDBCParamsConcurrently() throws Exception {
    DriverManager.registerDriver(PhoenixDriver.INSTANCE);
    ExecutorService exec = Executors.newFixedThreadPool(4);
    // establish url and quorum. Need to use PhoenixDriver and not PhoenixTestDriver
    String zkQuorum = "localhost:" + getUtility().getZkCluster().getClientPort();
    String baseUrl = PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkQuorum;
    int numConnections = 20;
    List<Callable<Connection>> callables = new ArrayList<>(numConnections);
    List<Future<Connection>> futures = new ArrayList<>(numConnections);
    try {
        GLOBAL_HCONNECTIONS_COUNTER.getMetric().reset();
        GLOBAL_QUERY_SERVICES_COUNTER.getMetric().reset();
        assertEquals(0, GLOBAL_HCONNECTIONS_COUNTER.getMetric().getValue());
        assertEquals(0, GLOBAL_QUERY_SERVICES_COUNTER.getMetric().getValue());
        for (int i = 1; i <= numConnections; i++) {
            String customUrl = baseUrl + ':' + CUSTOM_URL_STRING + '=' + i;
            Callable<Connection> c = new GetConnectionCallable(customUrl + ";");
            callables.add(c);
            futures.add(exec.submit(c));
        }
        for (int i = 0; i < futures.size(); i++) {
            futures.get(i).get();
        }
        assertEquals(numConnections, GLOBAL_HCONNECTIONS_COUNTER.getMetric().getValue());
        assertEquals(numConnections, GLOBAL_QUERY_SERVICES_COUNTER.getMetric().getValue());
    } finally {
        exec.shutdownNow();
        for (int i = 0; i < futures.size(); i++) {
            try {
                Connection c = futures.get(i).get();
                // close the query services instance because we created a lot of HConnections.
                c.unwrap(PhoenixConnection.class).getQueryServices().close();
                c.close();
            } catch (Exception ignore) {
            }
        }
    }
}

From source file:org.apache.hadoop.hbase.util.RegionMover.java

private void loadRegions(Admin admin, String hostname, int port, List<HRegionInfo> regionsToMove, boolean ack)
        throws Exception {
    String server = null;
    List<HRegionInfo> movedRegions = Collections.synchronizedList(new ArrayList<HRegionInfo>());
    int maxWaitInSeconds = admin.getConfiguration().getInt(SERVERSTART_WAIT_MAX_KEY,
            DEFAULT_SERVERSTART_WAIT_MAX);
    long maxWait = EnvironmentEdgeManager.currentTime() + maxWaitInSeconds * 1000;
    while ((EnvironmentEdgeManager.currentTime() < maxWait) && (server == null)) {
        try {
            ArrayList<String> regionServers = getServers(admin);
            // Remove the host Region server from target Region Servers list
            server = stripServer(regionServers, hostname, port);
            if (server != null) {
                break;
            }
        } catch (IOException e) {
            LOG.warn("Could not get list of region servers", e);
        } catch (Exception e) {
            LOG.info("hostname=" + hostname + " is not up yet, waiting");
        }
        try {
            Thread.sleep(500);
        } catch (InterruptedException e) {
            LOG.error("Interrupted while waiting for " + hostname + " to be up.Quitting now", e);
            throw e;
        }
    }
    if (server == null) {
        LOG.error("Host:" + hostname + " is not up.Giving up.");
        throw new Exception("Host to load regions not online");
    }
    LOG.info("Moving " + regionsToMove.size() + " regions to " + server + " using " + this.maxthreads
            + " threads.Ack mode:" + this.ack);
    ExecutorService moveRegionsPool = Executors.newFixedThreadPool(this.maxthreads);
    List<Future<Boolean>> taskList = new ArrayList<Future<Boolean>>();
    int counter = 0;
    while (counter < regionsToMove.size()) {
        HRegionInfo region = regionsToMove.get(counter);
        String currentServer = getServerNameForRegion(admin, region);
        if (currentServer == null) {
            LOG.warn("Could not get server for Region:" + region.getEncodedName() + " moving on");
            counter++;
            continue;
        } else if (server.equals(currentServer)) {
            LOG.info("Region " + region.getRegionNameAsString() + "already on target server=" + server);
            counter++;
            continue;
        }
        if (ack) {
            Future<Boolean> task = moveRegionsPool
                    .submit(new MoveWithAck(admin, region, currentServer, server, movedRegions));
            taskList.add(task);
        } else {
            Future<Boolean> task = moveRegionsPool
                    .submit(new MoveWithoutAck(admin, region, currentServer, server, movedRegions));
            taskList.add(task);
        }
        counter++;
    }
    moveRegionsPool.shutdown();
    long timeoutInSeconds = regionsToMove.size()
            * admin.getConfiguration().getInt(MOVE_WAIT_MAX_KEY, DEFAULT_MOVE_WAIT_MAX);
    try {
        if (!moveRegionsPool.awaitTermination(timeoutInSeconds, TimeUnit.SECONDS)) {
            moveRegionsPool.shutdownNow();
        }
    } catch (InterruptedException e) {
        moveRegionsPool.shutdownNow();
        Thread.currentThread().interrupt();
    }
    for (Future<Boolean> future : taskList) {
        try {
            // if even after shutdownNow threads are stuck we wait for 5 secs max
            if (!future.get(5, TimeUnit.SECONDS)) {
                LOG.error("Was Not able to move region....Exiting Now");
                throw new Exception("Could not move region Exception");
            }
        } catch (InterruptedException e) {
            LOG.error("Interrupted while waiting for Thread to Complete " + e.getMessage(), e);
            throw e;
        } catch (ExecutionException e) {
            LOG.error("Got Exception From Thread While moving region " + e.getMessage(), e);
            throw e;
        } catch (CancellationException e) {
            LOG.error(
                    "Thread for moving region cancelled. Timeout for cancellation:" + timeoutInSeconds + "secs",
                    e);
            throw e;
        }
    }
}

From source file:org.apache.druid.segment.realtime.firehose.EventReceiverFirehoseTest.java

@Test
public void testMultipleThreads()
        throws InterruptedException, IOException, TimeoutException, ExecutionException {
    EasyMock.expect(req.getAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED)).andReturn(null).anyTimes();
    EasyMock.expect(req.getAttribute(AuthConfig.DRUID_ALLOW_UNSECURED_PATH)).andReturn(null).anyTimes();
    EasyMock.expect(req.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT))
            .andReturn(AllowAllAuthenticator.ALLOW_ALL_RESULT).anyTimes();
    req.setAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED, true);
    EasyMock.expectLastCall().anyTimes();

    EasyMock.expect(req.getContentType()).andReturn("application/json").times(2 * NUM_EVENTS);
    EasyMock.expect(req.getHeader("X-Firehose-Producer-Id")).andReturn(null).times(2 * NUM_EVENTS);
    EasyMock.replay(req);

    final ExecutorService executorService = Execs.singleThreaded("single_thread");
    final Future future = executorService.submit(new Callable<Boolean>() {
        @Override
        public Boolean call() throws Exception {
            for (int i = 0; i < NUM_EVENTS; ++i) {
                final InputStream inputStream = IOUtils.toInputStream(inputRow, StandardCharsets.UTF_8);
                firehose.addAll(inputStream, req);
                inputStream.close();
            }
            return true;
        }
    });

    for (int i = 0; i < NUM_EVENTS; ++i) {
        final InputStream inputStream = IOUtils.toInputStream(inputRow, StandardCharsets.UTF_8);
        firehose.addAll(inputStream, req);
        inputStream.close();
    }

    future.get(10, TimeUnit.SECONDS);

    EasyMock.verify(req);

    final Iterable<Map.Entry<String, EventReceiverFirehoseMetric>> metrics = register.getMetrics();
    Assert.assertEquals(1, Iterables.size(metrics));

    final Map.Entry<String, EventReceiverFirehoseMetric> entry = Iterables.getLast(metrics);

    Assert.assertEquals(SERVICE_NAME, entry.getKey());
    Assert.assertEquals(CAPACITY, entry.getValue().getCapacity());
    Assert.assertEquals(CAPACITY, firehose.getCapacity());
    Assert.assertEquals(2 * NUM_EVENTS, entry.getValue().getCurrentBufferSize());
    Assert.assertEquals(2 * NUM_EVENTS, firehose.getCurrentBufferSize());

    for (int i = 2 * NUM_EVENTS - 1; i >= 0; --i) {
        Assert.assertTrue(firehose.hasMore());
        Assert.assertNotNull(firehose.nextRow());
        Assert.assertEquals(i, firehose.getCurrentBufferSize());
    }

    Assert.assertEquals(CAPACITY, entry.getValue().getCapacity());
    Assert.assertEquals(CAPACITY, firehose.getCapacity());
    Assert.assertEquals(0, entry.getValue().getCurrentBufferSize());
    Assert.assertEquals(0, firehose.getCurrentBufferSize());

    firehose.close();
    Assert.assertFalse(firehose.hasMore());
    Assert.assertEquals(0, Iterables.size(register.getMetrics()));

    executorService.shutdownNow();
}

From source file:org.jboss.tools.discovery.core.internal.connectors.xpl.RemoteExternalBundleDiscoveryStrategy.java

protected Map<File, Entry> loadRegistry(File storageDirectory, IProgressMonitor monitor) throws CoreException {

    // new SubProgressMonitor(monitor, ticksTenPercent * 3);

    final int totalTicks = 100000;
    final int ticksTenPercent = totalTicks / 10;

    monitor.beginTask("Remote discovery", totalTicks);

    Directory directory;

    try {
        final Directory[] temp = new Directory[1];
        final URI uri = new URI(directoryUrl);
        WebUtil.readResource(uri, new TextContentProcessor() {
            public void process(Reader reader) throws IOException {
                DirectoryParser parser = new DirectoryParser();
                parser.setBaseUri(uri);
                temp[0] = parser.parse(reader);
            }
        }, new SubProgressMonitor(monitor, ticksTenPercent));
        directory = temp[0];
        if (directory == null) {
            throw new IllegalStateException();
        }
    } catch (UnknownHostException e) {
        throw new CoreException(new Status(IStatus.ERROR, DiscoveryCore.ID_PLUGIN,
                NLS.bind(
                        "Cannot access {0}: unknown host: please check your Internet connection and try again.",
                        e.getMessage()),
                e));
    } catch (IOException e) {
        throw new CoreException(new Status(IStatus.ERROR, DiscoveryCore.ID_PLUGIN,
                "IO failure: cannot load discovery directory", e));
    } catch (URISyntaxException e) {
        throw new CoreException(new Status(IStatus.ERROR, DiscoveryCore.ID_PLUGIN,
                "IO failure: cannot load discovery directory", e));
    }
    if (monitor.isCanceled()) {
        return null;
    }
    if (directory.getEntries().isEmpty()) {
        throw new CoreException(
                new Status(IStatus.ERROR, DiscoveryCore.ID_PLUGIN, "Discovery directory is empty"));
    }

    Map<File, Directory.Entry> bundleFileToDirectoryEntry = new HashMap<File, Directory.Entry>();

    ExecutorService executorService = createExecutorService(directory.getEntries().size());
    try {
        List<Future<DownloadBundleJob>> futures = new ArrayList<Future<DownloadBundleJob>>();
        // submit jobs
        for (Directory.Entry entry : directory.getEntries()) {
            futures.add(executorService.submit(new DownloadBundleJob(entry, storageDirectory, monitor)));
        }
        int futureSize = ticksTenPercent * 4 / directory.getEntries().size();
        // collect job results
        for (Future<DownloadBundleJob> job : futures) {
            try {
                DownloadBundleJob bundleJob;
                for (;;) {
                    try {
                        bundleJob = job.get(1L, TimeUnit.SECONDS);
                        break;
                    } catch (TimeoutException e) {
                        if (monitor.isCanceled()) {
                            return null;
                        }
                    }
                }
                if (bundleJob.file != null) {
                    bundleFileToDirectoryEntry.put(bundleJob.file, bundleJob.entry);
                }
                monitor.worked(futureSize);
            } catch (ExecutionException e) {
                Throwable cause = e.getCause();
                if (cause instanceof OperationCanceledException) {
                    monitor.setCanceled(true);
                    return null;
                }
                IStatus status;
                if (cause instanceof CoreException) {
                    status = ((CoreException) cause).getStatus();
                } else {
                    status = new Status(IStatus.ERROR, DiscoveryCore.ID_PLUGIN, "Unexpected error", cause);
                }
                // log errors but continue on
                StatusHandler.log(status);
            } catch (InterruptedException e) {
                monitor.setCanceled(true);
                return null;
            }
        }
    } finally {
        executorService.shutdownNow();
    }
    return bundleFileToDirectoryEntry;
}