Example usage for java.util.concurrent ExecutorService shutdownNow

List of usage examples for java.util.concurrent ExecutorService shutdownNow

Introduction

On this page you can find example usage of java.util.concurrent ExecutorService shutdownNow.

Prototype

List<Runnable> shutdownNow();

Document

Attempts to stop all actively executing tasks, halts the processing of waiting tasks, and returns a list of the tasks that were awaiting execution.
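
Before the usage examples, here is a minimal sketch of the common pattern: cancel running work with shutdownNow(), inspect the returned list of tasks that never started, and then wait briefly with awaitTermination(). This sketch is not taken from any of the sources below; the pool size, task count, and timeout are illustrative assumptions.

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownNowSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        for (int i = 0; i < 10; i++) {
            pool.submit(() -> {
                try {
                    Thread.sleep(1_000); // simulated work that responds to interruption
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt(); // restore the interrupt flag and exit
                }
            });
        }

        // Interrupt running tasks and get back the tasks that were still queued.
        List<Runnable> neverStarted = pool.shutdownNow();
        System.out.println(neverStarted.size() + " queued tasks never started");

        // shutdownNow() does not wait for running tasks; give them a moment to stop.
        if (!pool.awaitTermination(5, TimeUnit.SECONDS)) {
            System.err.println("Pool did not terminate in time");
        }
    }
}

As the examples below show, shutdownNow() is typically placed in a finally block, or called as soon as the awaited result arrives, so that the pool's threads do not keep running after the work is done.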

Usage

From source file:org.apache.gobblin.ingestion.google.webmaster.GoogleWebmasterDataFetcherImpl.java

/**
 * @return the size of all pages data set
 */
private int getPagesSize(final String startDate, final String endDate, final String country,
        final List<Dimension> requestedDimensions, final List<ApiDimensionFilter> apiDimensionFilters)
        throws IOException {
    final ExecutorService es = Executors.newCachedThreadPool(ExecutorsUtils
            .newDaemonThreadFactory(Optional.of(log), Optional.of(this.getClass().getSimpleName())));

    int startRow = 0;
    long groupSize = Math.max(1, Math.round(API_REQUESTS_PER_SECOND));
    List<Future<Integer>> results = new ArrayList<>((int) groupSize);

    while (true) {
        for (int i = 0; i < groupSize; ++i) {
            final int start = startRow;
            startRow += GoogleWebmasterClient.API_ROW_LIMIT;

            Future<Integer> submit = es.submit(new Callable<Integer>() {
                @Override
                public Integer call() {
                    log.info(String.format("Getting page size from %s...", start));
                    String interruptedMsg = String.format(
                            "Interrupted while trying to get the size of all pages for %s. Current start row is %d.",
                            country, start);
                    while (true) {
                        try {
                            LIMITER.acquirePermits(1);
                        } catch (InterruptedException e) {
                            log.error("RateBasedLimiter: " + interruptedMsg, e);
                            return -1;
                        }

                        if (Thread.interrupted()) {
                            log.error(interruptedMsg);
                            return -1;
                        }

                        try {
                            List<String> pages = _client.getPages(_siteProperty, startDate, endDate, country,
                                    GoogleWebmasterClient.API_ROW_LIMIT, requestedDimensions,
                                    apiDimensionFilters, start);
                            if (pages.size() < GoogleWebmasterClient.API_ROW_LIMIT) {
                                return pages.size() + start; //Figured out the size
                            } else {
                                return -1;
                            }
                        } catch (IOException e) {
                            log.info(String.format("Getting page size from %s failed. Retrying...", start));
                        }
                    }
                }
            });
            results.add(submit);
        }
        //Check the results group in order. The first non-negative count indicates the size of total pages.
        for (Future<Integer> result : results) {
            try {
                Integer integer = result.get(GET_PAGE_SIZE_TIME_OUT, TimeUnit.MINUTES);
                if (integer >= 0) {
                    es.shutdownNow();
                    return integer;
                }
            } catch (InterruptedException | ExecutionException e) {
                throw new RuntimeException(e);
            } catch (TimeoutException e) {
                throw new RuntimeException(String.format(
                        "Exceeding the timeout of %d minutes while getting the total size of all pages.",
                        GET_PAGE_SIZE_TIME_OUT), e);
            }
        }
        results.clear();
    }
}

From source file:org.apache.flink.runtime.blob.BlobServerPutTest.java

/**
 * [FLINK-6020]
 * Tests that concurrent put operations will only upload the file once to the {@link BlobStore}
 * and that the files are not corrupt at any time.
 *
 * @param jobId
 *       job ID to use (or <tt>null</tt> if job-unrelated)
 * @param blobType
 *       whether the BLOB should become permanent or transient
 */
private void testConcurrentPutOperations(@Nullable final JobID jobId, final BlobKey.BlobType blobType)
        throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    config.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());

    BlobStore blobStore = mock(BlobStore.class);
    int concurrentPutOperations = 2;
    int dataSize = 1024;

    final CountDownLatch countDownLatch = new CountDownLatch(concurrentPutOperations);
    final byte[] data = new byte[dataSize];

    ArrayList<CompletableFuture<BlobKey>> allFutures = new ArrayList<>(concurrentPutOperations);

    ExecutorService executor = Executors.newFixedThreadPool(concurrentPutOperations);

    try (final BlobServer server = new BlobServer(config, blobStore)) {

        server.start();

        for (int i = 0; i < concurrentPutOperations; i++) {
            CompletableFuture<BlobKey> putFuture = CompletableFuture.supplyAsync(() -> {
                try {
                    BlockingInputStream inputStream = new BlockingInputStream(countDownLatch, data);
                    BlobKey uploadedKey = put(server, jobId, inputStream, blobType);
                    // check the uploaded file's contents (concurrently)
                    verifyContents(server, jobId, uploadedKey, data);
                    return uploadedKey;
                } catch (IOException e) {
                    throw new CompletionException(new FlinkException("Could not upload blob.", e));
                }
            }, executor);

            allFutures.add(putFuture);
        }

        FutureUtils.ConjunctFuture<Collection<BlobKey>> conjunctFuture = FutureUtils.combineAll(allFutures);

        // wait until all operations have completed and check that no exception was thrown
        Collection<BlobKey> blobKeys = conjunctFuture.get();

        Iterator<BlobKey> blobKeyIterator = blobKeys.iterator();

        assertTrue(blobKeyIterator.hasNext());

        BlobKey blobKey = blobKeyIterator.next();

        // make sure that all blob keys are the same
        while (blobKeyIterator.hasNext()) {
            verifyKeyDifferentHashEquals(blobKey, blobKeyIterator.next());
        }

        // check the uploaded file's contents
        verifyContents(server, jobId, blobKey, data);

        // check that we only uploaded the file once to the blob store
        if (blobType == PERMANENT_BLOB) {
            verify(blobStore, times(1)).put(any(File.class), eq(jobId), eq(blobKey));
        } else {
            // can't really verify much in the other cases other than that the put operations should
            // work and not corrupt files
            verify(blobStore, times(0)).put(any(File.class), eq(jobId), eq(blobKey));
        }
    } finally {
        executor.shutdownNow();
    }
}

From source file:com.ottogroup.bi.spqr.pipeline.component.queue.chronicle.DefaultStreamingMessageQueueTest.java

/**
 * Inserts a configurable number of messages into a {@link Chronicle} and measures the
 * duration it takes to read the content from it using the {@link DefaultStreamingMessageQueue} implementation
 */
//   @Test
public void testNext_performanceTest() throws Exception {

    Properties props = new Properties();
    props.put(DefaultStreamingMessageQueue.CFG_CHRONICLE_QUEUE_DELETE_ON_EXIT, "true");
    props.put(DefaultStreamingMessageQueue.CFG_CHRONICLE_QUEUE_PATH, System.getProperty("java.io.tmpdir"));
    final DefaultStreamingMessageQueue inbox = new DefaultStreamingMessageQueue();
    inbox.setId("testNext_performanceTest");
    inbox.initialize(props);

    final StreamingMessageQueueProducer producer = inbox.getProducer();
    final StreamingMessageQueueConsumer consumer = inbox.getConsumer();

    final CountDownLatch latch = new CountDownLatch(numberOfMessagesPerfTest);

    ExecutorService svc = Executors.newCachedThreadPool();

    Future<Integer> producerDurationFuture = svc.submit(new Callable<Integer>() {

        public Integer call() {
            StreamingDataMessage object = new StreamingDataMessage(new byte[] { 01, 2, 3, 4, 5, 6, 7, 9 },
                    System.currentTimeMillis());
            long s1 = System.nanoTime();
            for (int i = 0; i < numberOfMessagesPerfTest; i++) {
                producer.insert(object);
            }
            long s2 = System.nanoTime();
            return (int) (s2 - s1);
        }
    });

    Future<Integer> durationFuture = svc.submit(new Callable<Integer>() {
        public Integer call() {
            StreamingDataMessage msg = null;
            long start = System.nanoTime();
            while (true) {
                msg = consumer.next();
                if (msg != null) {
                    latch.countDown();
                    if (latch.getCount() == 0)
                        break;
                } else {
                    LockSupport.parkNanos(1);
                }

            }
            long end = System.nanoTime();
            return (int) (end - start);
        }
    });

    try {
        Assert.assertTrue("Failed to receive expected number of messages", latch.await(10, TimeUnit.SECONDS));
    } catch (InterruptedException e) {
        Assert.fail("Failed to receive expected number of messages");
    }

    int producerDuration = producerDurationFuture.get();
    int duration = durationFuture.get();

    double messagesPerNano = ((double) numberOfMessagesPerfTest / (double) duration);
    double messagesPerNanoRounded = (double) Math.round(messagesPerNano * 10000) / 10000;

    double messagesPerMilli = messagesPerNano * 1000000;
    messagesPerMilli = (double) Math.round(messagesPerMilli * 100) / 100;

    long messagesPerSecondTmps = Math.round(messagesPerNano * 1000000 * 1000);
    double messagesPerSecond = (double) Math.round(messagesPerSecondTmps);

    double nanosPerMessage = ((double) duration / (double) numberOfMessagesPerfTest);
    nanosPerMessage = (double) Math.round(nanosPerMessage * 100) / 100;

    logger.info("message count: " + numberOfMessagesPerfTest);
    logger.info(
            "message producing: " + producerDuration + "ns, " + TimeUnit.NANOSECONDS.toMillis(producerDuration)
                    + "ms, " + TimeUnit.NANOSECONDS.toSeconds(producerDuration) + "s");
    logger.info("message consumption: " + duration + "ns, " + TimeUnit.NANOSECONDS.toMillis(duration) + "ms, "
            + TimeUnit.NANOSECONDS.toSeconds(duration) + "s");
    logger.info("message throughput: " + messagesPerNanoRounded + " msgs/ns, " + messagesPerMilli + " msgs/ms, "
            + messagesPerSecond + " msgs/s");

    svc.shutdownNow();
}

From source file:com.sjc.cc.instance.service.impl.VmServiceImpl.java

/**
 * Asynchronously queries the status of the job identified by jobid on the given platform.
 * 
 * @param jobid the asynchronous job identifier
 * @param platformId the target platform identifier
 * @return the job status
 * @throws InterruptedException
 * @throws ExecutionException
 */
public String asynQueryVmStatus(String jobid, String platformId)
        throws InterruptedException, ExecutionException {
    VmCallable task = new VmCallable(jobid, platformId);
    ExecutorService es = Executors.newFixedThreadPool(1);
    Future<String> future = es.submit(task);
    String jobStatus = future.get();
    es.shutdownNow();
    return jobStatus;
}

From source file:org.apache.hadoop.yarn.server.nodemanager.TestNodeStatusUpdater.java

@Test
public void testConcurrentAccessToSystemCredentials() {
    final Map<ApplicationId, ByteBuffer> testCredentials = new HashMap<>();
    ByteBuffer byteBuffer = ByteBuffer.wrap(new byte[300]);
    ApplicationId applicationId = ApplicationId.newInstance(123456, 120);
    testCredentials.put(applicationId, byteBuffer);

    final List<Throwable> exceptions = Collections.synchronizedList(new ArrayList<Throwable>());

    final int NUM_THREADS = 10;
    final CountDownLatch allDone = new CountDownLatch(NUM_THREADS);
    final ExecutorService threadPool = Executors.newFixedThreadPool(NUM_THREADS);

    final AtomicBoolean stop = new AtomicBoolean(false);

    try {
        for (int i = 0; i < NUM_THREADS; i++) {
            threadPool.submit(new Runnable() {
                @Override
                public void run() {
                    try {
                        for (int i = 0; i < 100 && !stop.get(); i++) {
                            NodeHeartbeatResponse nodeHeartBeatResponse = newNodeHeartbeatResponse(0,
                                    NodeAction.NORMAL, null, null, null, null, 0);
                            nodeHeartBeatResponse.setSystemCredentialsForApps(testCredentials);
                            NodeHeartbeatResponseProto proto = ((NodeHeartbeatResponsePBImpl) nodeHeartBeatResponse)
                                    .getProto();
                            Assert.assertNotNull(proto);
                        }
                    } catch (Throwable t) {
                        exceptions.add(t);
                        stop.set(true);
                    } finally {
                        allDone.countDown();
                    }
                }
            });
        }

        int testTimeout = 2;
        Assert.assertTrue("Timeout waiting for more than " + testTimeout + " " + "seconds",
                allDone.await(testTimeout, TimeUnit.SECONDS));
    } catch (InterruptedException ie) {
        exceptions.add(ie);
    } finally {
        threadPool.shutdownNow();
    }
    Assert.assertTrue("Test failed with exception(s)" + exceptions, exceptions.isEmpty());
}

From source file:org.nuxeo.connect.tools.report.client.ReportConnector.java

<A> void connect(Consumer consumer) throws IOException, InterruptedException, ExecutionException {
    ExecutorService executor = Executors.newCachedThreadPool(new ThreadFactory() {

        @Override
        public Thread newThread(Runnable target) {
            Thread thread = new Thread(target, "connect-report");
            thread.setDaemon(true);
            return thread;
        }
    });
    try {
        for (ReportServer server : new Discovery()) {
            try (ServerSocket callback = new ServerSocket(0)) {
                final Future<?> consumed = executor.submit(new Runnable() {

                    @Override
                    public void run() {

                        String name = Thread.currentThread().getName();
                        Thread.currentThread().setName("connect-report-consumer-" + server);
                        try (InputStream source = callback.accept().getInputStream()) {
                            consumer.consume(Json.createParser(source));
                        } catch (IOException | JsonParsingException cause) {
                            throw new AssertionError("Cannot consume connect report", cause);
                        } finally {
                            Thread.currentThread().setName(name);
                        }
                        LogFactory.getLog(ReportConnector.class).info("Consumed " + server);
                    }
                });
                final Future<?> served = executor.submit(new Runnable() {

                    @Override
                    public void run() {
                        String name = Thread.currentThread().getName();
                        Thread.currentThread().setName("connect-report-server-" + server);
                        InetSocketAddress address = (InetSocketAddress) callback.getLocalSocketAddress();
                        try {
                            server.run(address.getHostName(), address.getPort());
                        } catch (IOException cause) {
                            throw new AssertionError("Cannot run connect report", cause);
                        } finally {
                            Thread.currentThread().setName(name);
                        }
                    }

                });
                ExecutionException consumerError = null;
                try {
                    consumed.get();
                } catch (ExecutionException cause) {
                    consumerError = cause;
                }
                try {
                    served.get();
                } catch (ExecutionException cause) {
                    if (consumerError != null) {
                        consumerError.addSuppressed(cause);
                        throw consumerError;
                    }
                    throw cause;
                }
            }
        }
    } finally {
        executor.shutdownNow();
    }
}

From source file:org.apache.flink.runtime.blob.BlobCacheGetTest.java

/**
 * [FLINK-6020] Tests that concurrent get operations don't concurrently access the BlobStore to
 * download a blob.
 *
 * @param jobId
 *       job ID to use (or <tt>null</tt> if job-unrelated)
 * @param blobType
 *       whether the BLOB should become permanent or transient
 * @param cacheAccessesHAStore
 *       whether the cache has access to the {@link BlobServer}'s HA store or not
 */
private void testConcurrentGetOperations(final JobID jobId, final BlobKey.BlobType blobType,
        final boolean cacheAccessesHAStore) throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    config.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());

    final BlobStore blobStoreServer = mock(BlobStore.class);
    final BlobStore blobStoreCache = mock(BlobStore.class);

    final int numberConcurrentGetOperations = 3;
    final List<CompletableFuture<File>> getOperations = new ArrayList<>(numberConcurrentGetOperations);

    final byte[] data = { 1, 2, 3, 4, 99, 42 };

    final ExecutorService executor = Executors.newFixedThreadPool(numberConcurrentGetOperations);

    try (final BlobServer server = new BlobServer(config, blobStoreServer);
            final BlobCacheService cache = new BlobCacheService(config,
                    cacheAccessesHAStore ? blobStoreServer : blobStoreCache,
                    new InetSocketAddress("localhost", server.getPort()))) {

        server.start();

        // upload data first
        final BlobKey blobKey = put(server, jobId, data, blobType);

        // now try accessing it concurrently (only HA mode will be able to retrieve it from HA store!)
        for (int i = 0; i < numberConcurrentGetOperations; i++) {
            CompletableFuture<File> getOperation = CompletableFuture.supplyAsync(() -> {
                try {
                    File file = get(cache, jobId, blobKey);
                    // check that we have read the right data
                    validateGetAndClose(new FileInputStream(file), data);
                    return file;
                } catch (IOException e) {
                    throw new CompletionException(
                            new FlinkException("Could not read blob for key " + blobKey + '.', e));
                }
            }, executor);

            getOperations.add(getOperation);
        }

        FutureUtils.ConjunctFuture<Collection<File>> filesFuture = FutureUtils.combineAll(getOperations);

        if (blobType == PERMANENT_BLOB) {
            // wait until all operations have completed and check that no exception was thrown
            filesFuture.get();
        } else {
            // wait for all futures to complete (do not abort on expected exceptions) and check
            // that at least one succeeded
            int completedSuccessfully = 0;
            for (CompletableFuture<File> op : getOperations) {
                try {
                    op.get();
                    ++completedSuccessfully;
                } catch (Throwable t) {
                    // transient BLOBs get deleted upon first access; only one request will
                    // succeed while all others fail with an IOException caused by a
                    // FileNotFoundException
                    if (!(ExceptionUtils.getRootCause(t) instanceof FileNotFoundException)) {
                        // unexpected failure: rethrow
                        org.apache.flink.util.ExceptionUtils.rethrowIOException(t);
                    }
                }
            }
            // multiple clients may have accessed the BLOB successfully before it was
            // deleted, but always at least one:
            assertThat(completedSuccessfully, greaterThanOrEqualTo(1));
        }
    } finally {
        executor.shutdownNow();
    }
}

From source file:ubic.gemma.loader.expression.geo.GeoFamilyParser.java

@Override
public void parse(InputStream is) throws IOException {
    if (is == null) {
        throw new IOException("Inputstream was null");
    }

    if (is.available() == 0) {
        throw new IOException("No bytes to read from the input stream.");
    }

    final BufferedReader dis = new BufferedReader(new InputStreamReader(is));

    log.debug("Parsing....");

    final ExecutorService executor = Executors.newSingleThreadExecutor();

    FutureTask<Exception> future = new FutureTask<Exception>(new Callable<Exception>() {
        @Override
        public Exception call() {
            try {
                return doParse(dis);
            } catch (Exception e) {
                log.error(e, e);
                return e;
            }

        }
    });

    executor.execute(future);
    executor.shutdown();

    while (!future.isDone() && !future.isCancelled()) {
        try {
            TimeUnit.SECONDS.sleep(5L);
        } catch (InterruptedException e) {
            // probably cancelled.
            return;
        }
        log.info(parsedLines + " lines parsed.");
    }

    try {
        Exception e = future.get();
        if (e != null) {
            log.error(e.getMessage());
            throw new RuntimeException(e.getCause());
        }
    } catch (ExecutionException e) {
        throw new RuntimeException("Parse failed", e.getCause());
    } catch (java.util.concurrent.CancellationException e) {
        throw new RuntimeException("Parse was cancelled", e.getCause());
    } catch (InterruptedException e) {
        throw new RuntimeException("Parse was interrupted", e.getCause());
    }

    executor.shutdownNow();

    assert future.isDone();
    // assert executor.isTerminated();

    log.info("Done parsing.");
}

From source file:org.apache.cassandra.hadoop.AbstractColumnFamilyInputFormat.java

public List<InputSplit> getSplits(JobContext context) throws IOException {
    Configuration conf = HadoopCompat.getConfiguration(context);

    validateConfiguration(conf);

    // canonical ranges and nodes holding replicas
    List<TokenRange> masterRangeNodes = getRangeMap(conf);

    keyspace = ConfigHelper.getInputKeyspace(conf);
    cfName = ConfigHelper.getInputColumnFamily(conf);
    partitioner = ConfigHelper.getInputPartitioner(conf);
    logger.debug("partitioner is " + partitioner);

    // canonical ranges, split into pieces, fetching the splits in parallel
    ExecutorService executor = new ThreadPoolExecutor(0, 128, 60L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>());
    List<InputSplit> splits = new ArrayList<InputSplit>();

    try {
        List<Future<List<InputSplit>>> splitfutures = new ArrayList<Future<List<InputSplit>>>();
        KeyRange jobKeyRange = ConfigHelper.getInputKeyRange(conf);
        Range<Token> jobRange = null;
        if (jobKeyRange != null) {
            if (jobKeyRange.start_key != null) {
                if (!partitioner.preservesOrder())
                    throw new UnsupportedOperationException(
                            "KeyRange based on keys can only be used with a order preserving paritioner");
                if (jobKeyRange.start_token != null)
                    throw new IllegalArgumentException("only start_key supported");
                if (jobKeyRange.end_token != null)
                    throw new IllegalArgumentException("only start_key supported");
                jobRange = new Range<>(partitioner.getToken(jobKeyRange.start_key),
                        partitioner.getToken(jobKeyRange.end_key), partitioner);
            } else if (jobKeyRange.start_token != null) {
                jobRange = new Range<>(partitioner.getTokenFactory().fromString(jobKeyRange.start_token),
                        partitioner.getTokenFactory().fromString(jobKeyRange.end_token), partitioner);
            } else {
                logger.warn("ignoring jobKeyRange specified without start_key or start_token");
            }
        }

        for (TokenRange range : masterRangeNodes) {
            if (jobRange == null) {
                // for each range, pick a live owner and ask it to compute bite-sized splits
                splitfutures.add(executor.submit(new SplitCallable(range, conf)));
            } else {
                Range<Token> dhtRange = new Range<Token>(
                        partitioner.getTokenFactory().fromString(range.start_token),
                        partitioner.getTokenFactory().fromString(range.end_token), partitioner);

                if (dhtRange.intersects(jobRange)) {
                    for (Range<Token> intersection : dhtRange.intersectionWith(jobRange)) {
                        range.start_token = partitioner.getTokenFactory().toString(intersection.left);
                        range.end_token = partitioner.getTokenFactory().toString(intersection.right);
                        // for each range, pick a live owner and ask it to compute bite-sized splits
                        splitfutures.add(executor.submit(new SplitCallable(range, conf)));
                    }
                }
            }
        }

        // wait until we have all the results back
        for (Future<List<InputSplit>> futureInputSplits : splitfutures) {
            try {
                splits.addAll(futureInputSplits.get());
            } catch (Exception e) {
                throw new IOException("Could not get input splits", e);
            }
        }
    } finally {
        executor.shutdownNow();
    }

    assert splits.size() > 0;
    Collections.shuffle(splits, new Random(System.nanoTime()));
    return splits;
}

From source file:ubic.gemma.core.loader.expression.geo.GeoFamilyParser.java

@Override
public void parse(InputStream is) throws IOException {
    if (is == null) {
        throw new IOException("Inputstream was null");
    }

    if (is.available() == 0) {
        throw new IOException("No bytes to read from the input stream.");
    }

    try (final BufferedReader dis = new BufferedReader(new InputStreamReader(is))) {

        GeoFamilyParser.log.debug("Parsing....");

        final ExecutorService executor = Executors.newSingleThreadExecutor();

        FutureTask<Exception> future = new FutureTask<>(new Callable<Exception>() {
            @Override
            public Exception call() {
                try {
                    GeoFamilyParser.this.doParse(dis);
                    dis.close();
                    return null;
                } catch (Exception e) {
                    GeoFamilyParser.log.error(e, e);
                    return e;
                }
            }
        });

        executor.execute(future);
        executor.shutdown();

        while (!future.isDone() && !future.isCancelled()) {
            try {
                TimeUnit.SECONDS.sleep(5L);
            } catch (InterruptedException e) {
                // probably cancelled.
                dis.close();
                return;
            }
            GeoFamilyParser.log.info(parsedLines + " lines parsed.");
        }

        try {
            Exception e = future.get();
            if (e != null) {
                GeoFamilyParser.log.error(e.getMessage());
                throw new RuntimeException(e.getCause());
            }
        } catch (ExecutionException e) {
            throw new RuntimeException("Parse failed", e.getCause());
        } catch (java.util.concurrent.CancellationException e) {
            throw new RuntimeException("Parse was cancelled", e.getCause());
        } catch (InterruptedException e) {
            throw new RuntimeException("Parse was interrupted", e.getCause());
        }

        executor.shutdownNow();

        assert future.isDone();
        // assert executor.isTerminated();

        GeoFamilyParser.log.info("Done parsing.");
    }
}