Example usage for com.google.common.util.concurrent ListenableFuture get

Introduction

On this page you can find example usages of com.google.common.util.concurrent.ListenableFuture.get(long timeout, TimeUnit unit).

Prototype

V get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException;

Documentation

Waits if necessary for at most the given time for the computation to complete, and then retrieves its result, if available.
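
Before the project examples below, here is a minimal, self-contained sketch of the typical pattern: submit work to a ListeningExecutorService, bound the wait with get(timeout, unit), and cancel the future if the timeout elapses. The executor setup and the task body are illustrative assumptions, not code taken from any of the projects listed under Usage.

import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class ListenableFutureGetExample {
    public static void main(String[] args) throws InterruptedException {
        // Decorate a plain executor so that submit() returns a ListenableFuture.
        final ListeningExecutorService executor = MoreExecutors
                .listeningDecorator(Executors.newSingleThreadExecutor());
        final ListenableFuture<String> future = executor.submit(new Callable<String>() {
            @Override
            public String call() throws Exception {
                Thread.sleep(500); // simulated work
                return "done";
            }
        });
        try {
            // Wait at most one second for the computation to complete.
            System.out.println(future.get(1, TimeUnit.SECONDS));
        } catch (TimeoutException e) {
            // The result was not available in time; cancel the computation.
            future.cancel(true);
        } catch (ExecutionException e) {
            // The task itself failed; the cause carries the original exception.
            throw new RuntimeException(e.getCause());
        } finally {
            executor.shutdownNow();
        }
    }
}

Passing cancel(true) also interrupts the running task, which is why most of the examples on this page follow a timed get() with future.cancel(true).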

Usage

From source file:io.druid.server.lookup.cache.LookupCoordinatorManager.java
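Updates lookup configuration on every node in parallel and bounds the combined wait with Futures.allAsList(futures).get(updateAllTimeout, MILLISECONDS), cancelling the remaining work on timeout.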

void updateNodes(Collection<URL> urls, final Map<String, Map<String, Object>> knownLookups)
        throws IOException, InterruptedException, ExecutionException {
    if (knownLookups == null) {
        LOG.debug("No config for lookups found");
        return;
    }
    if (knownLookups.isEmpty()) {
        LOG.debug("No known lookups. Skipping update");
        return;
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Updating %d lookups on %d nodes", knownLookups.size(), urls.size());
    }
    final List<ListenableFuture<?>> futures = new ArrayList<>(urls.size());
    for (final URL url : urls) {
        futures.add(executorService.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    updateAllOnHost(url, knownLookups);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    LOG.warn("Update on [%s] interrupted", url);
                    throw Throwables.propagate(e);
                } catch (IOException | ExecutionException e) {
                    // Don't raise as ExecutionException. Just log and continue
                    LOG.makeAlert(e, "Error submitting to [%s]", url).emit();
                }
            }
        }));
    }
    final ListenableFuture allFuture = Futures.allAsList(futures);
    try {
        allFuture.get(lookupCoordinatorManagerConfig.getUpdateAllTimeout().getMillis(), TimeUnit.MILLISECONDS);
    } catch (TimeoutException e) {
        LOG.warn("Timeout in updating hosts! Attempting to cancel");
        // This should cause Interrupted exceptions on the offending ones
        allFuture.cancel(true);
    }
}

From source file:io.druid.server.lookup.cache.LookupCoordinatorManager.java
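Deletes the given lookups from every host in a tier in parallel; on timeout, the combined future is cancelled and an ExecutionException is thrown.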

void deleteAllOnTier(final String tier, final Collection<String> dropLookups)
        throws ExecutionException, InterruptedException, IOException {
    if (dropLookups.isEmpty()) {
        LOG.debug("Nothing to drop");
        return;
    }
    final Collection<URL> urls = getAllHostsAnnounceEndpoint(tier);
    final List<ListenableFuture<?>> futures = new ArrayList<>(urls.size());
    for (final URL url : urls) {
        futures.add(executorService.submit(new Runnable() {
            @Override
            public void run() {
                for (final String drop : dropLookups) {
                    final URL lookupURL;
                    try {
                        lookupURL = new URL(url.getProtocol(), url.getHost(), url.getPort(),
                                String.format("%s/%s", url.getFile(), drop));
                    } catch (MalformedURLException e) {
                        throw new ISE(e, "Error creating url for [%s]/[%s]", url, drop);
                    }
                    try {
                        deleteOnHost(lookupURL);
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                        LOG.warn("Delete [%s] interrupted", lookupURL);
                        throw Throwables.propagate(e);
                    } catch (IOException | ExecutionException e) {
                        // Don't raise as ExecutionException. Just log and continue
                        LOG.makeAlert(e, "Error deleting [%s]", lookupURL).emit();
                    }
                }
            }
        }));
    }
    final ListenableFuture allFuture = Futures.allAsList(futures);
    try {
        allFuture.get(lookupCoordinatorManagerConfig.getUpdateAllTimeout().getMillis(), TimeUnit.MILLISECONDS);
    } catch (TimeoutException e) {
        // This should cause Interrupted exceptions on the offending ones
        allFuture.cancel(true);
        throw new ExecutionException("Timeout in updating hosts! Attempting to cancel", e);
    }
}

From source file:org.apache.druid.query.groupby.epinephelinae.GroupByMergingQueryRunnerV2.java
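Waits for a group-by merge to finish, choosing between the timed and untimed get() based on the query configuration, and cancels the future while translating failures into QueryInterruptedException.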

private void waitForFutureCompletion(GroupByQuery query, ListenableFuture<List<AggregateResult>> future,
        boolean hasTimeout, long timeout) {
    try {
        if (queryWatcher != null) {
            queryWatcher.registerQuery(query, future);
        }

        if (hasTimeout && timeout <= 0) {
            throw new TimeoutException();
        }

        final List<AggregateResult> results = hasTimeout ? future.get(timeout, TimeUnit.MILLISECONDS)
                : future.get();

        for (AggregateResult result : results) {
            if (!result.isOk()) {
                future.cancel(true);
                throw new ResourceLimitExceededException(result.getReason());
            }
        }
    } catch (InterruptedException e) {
        log.warn(e, "Query interrupted, cancelling pending results, query id [%s]", query.getId());
        future.cancel(true);
        throw new QueryInterruptedException(e);
    } catch (CancellationException e) {
        throw new QueryInterruptedException(e);
    } catch (TimeoutException e) {
        log.info("Query timeout, cancelling pending results for query id [%s]", query.getId());
        future.cancel(true);
        throw new QueryInterruptedException(e);
    } catch (ExecutionException e) {
        throw Throwables.propagate(e.getCause());
    }
}

From source file:org.jclouds.s3.blobstore.strategy.internal.ParallelMultipartUploadStrategy.java
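Runs a parallel S3 multipart upload; the single-part fallback waits on the plain putBlob with futureETag.get(maxTime, SECONDS) when a time limit is configured.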

@Override
public ListenableFuture<String> execute(final String container, final Blob blob, final PutOptions options) {
    return executor.submit(new Callable<String>() {
        @Override
        public String call() throws Exception {
            String key = blob.getMetadata().getName();
            Payload payload = blob.getPayload();
            MultipartUploadSlicingAlgorithm algorithm = new MultipartUploadSlicingAlgorithm();
            algorithm.calculateChunkSize(payload.getContentMetadata().getContentLength());
            int parts = algorithm.getParts();
            long chunkSize = algorithm.getChunkSize();
            long remaining = algorithm.getRemaining();
            if (parts > 0) {
                final S3Client client = blobstore.getContext().unwrapApi(S3Client.class);
                String uploadId = null;
                final Map<Integer, ListenableFuture<String>> futureParts = new ConcurrentHashMap<Integer, ListenableFuture<String>>();
                final Map<Integer, Exception> errorMap = Maps.newHashMap();
                AtomicInteger errors = new AtomicInteger(0);
                int maxRetries = Math.max(minRetries, parts * maxPercentRetries / 100);
                int effectiveParts = remaining > 0 ? parts + 1 : parts;
                try {
                    uploadId = client.initiateMultipartUpload(container,
                            ObjectMetadataBuilder.create().key(key).build()); // TODO md5
                    logger.debug(String.format(
                            "initiated multipart upload of %s to container %s"
                                    + " with uploadId %s consisting from %s part (possible max. retries: %d)",
                            key, container, uploadId, effectiveParts, maxRetries));
                    // we need a bounded-blocking queue to control the amount of parallel jobs
                    ArrayBlockingQueue<Integer> activeParts = new ArrayBlockingQueue<Integer>(parallelDegree);
                    Queue<Part> toRetry = new ConcurrentLinkedQueue<Part>();
                    SortedMap<Integer, String> etags = new ConcurrentSkipListMap<Integer, String>();
                    CountDownLatch latch = new CountDownLatch(effectiveParts);
                    int part;
                    while ((part = algorithm.getNextPart()) <= parts) {
                        Integer partKey = Integer.valueOf(part);
                        activeParts.put(partKey);
                        prepareUploadPart(container, key, uploadId, partKey, payload,
                                algorithm.getNextChunkOffset(), chunkSize, etags, activeParts, futureParts,
                                errors, maxRetries, errorMap, toRetry, latch);
                    }
                    if (remaining > 0) {
                        Integer partKey = Integer.valueOf(part);
                        activeParts.put(partKey);
                        prepareUploadPart(container, key, uploadId, partKey, payload,
                                algorithm.getNextChunkOffset(), remaining, etags, activeParts, futureParts,
                                errors, maxRetries, errorMap, toRetry, latch);
                    }
                    latch.await();
                    // handling retries
                    while (errors.get() <= maxRetries && !toRetry.isEmpty()) {
                        int atOnce = Math.min(Math.min(toRetry.size(), errors.get()), parallelDegree);
                        CountDownLatch retryLatch = new CountDownLatch(atOnce);
                        for (int i = 0; i < atOnce; i++) {
                            Part failedPart = toRetry.poll();
                            Integer partKey = Integer.valueOf(failedPart.getPart());
                            activeParts.put(partKey);
                            prepareUploadPart(container, key, uploadId, partKey, payload,
                                    failedPart.getOffset(), failedPart.getSize(), etags, activeParts,
                                    futureParts, errors, maxRetries, errorMap, toRetry, retryLatch);
                        }
                        retryLatch.await();
                    }
                    if (errors.get() > maxRetries) {
                        throw new BlobRuntimeException(String.format(
                                "Too many failed parts: %s while multipart upload of %s to container %s with uploadId %s",
                                errors.get(), key, container, uploadId));
                    }
                    String eTag = client.completeMultipartUpload(container, key, uploadId, etags);
                    logger.debug(String.format(
                            "multipart upload of %s to container %s with uploadId %s"
                                    + " successfully finished with %s retries",
                            key, container, uploadId, errors.get()));
                    return eTag;
                } catch (Exception ex) {
                    RuntimeException rtex = Throwables2.getFirstThrowableOfType(ex, RuntimeException.class);
                    if (rtex == null) {
                        rtex = new RuntimeException(ex);
                    }
                    for (Map.Entry<Integer, ListenableFuture<String>> entry : futureParts.entrySet()) {
                        entry.getValue().cancel(false);
                    }
                    if (uploadId != null) {
                        client.abortMultipartUpload(container, key, uploadId);
                    }
                    throw rtex;
                }
            } else {
                // Issue 936: don't just call putBlob, as that will see options=multiPart and
                // recursively call this execute method again; instead mark as not multipart
                // because it can all fit in one go.
                final PutOptions nonMultipartOptions = PutOptions.Builder.multipart(false);
                ListenableFuture<String> futureETag = executor.submit(new Callable<String>() {
                    @Override
                    public String call() throws Exception {
                        return blobstore.putBlob(container, blob, nonMultipartOptions);
                    }
                });
                return maxTime != null ? futureETag.get(maxTime, TimeUnit.SECONDS) : futureETag.get();
            }
        }
    });
}

From source file:de.blizzy.documentr.search.PageIndex.java
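Submits query parsing and per-hit retrieval as background tasks, bounding both waits with get(INTERACTIVE_TIMEOUT, SECONDS) and cancelling the futures in finally blocks.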

private SearchResult findPages(String searchText, int page, Authentication authentication,
        IndexSearcher searcher) throws ParseException, IOException, TimeoutException {

    Future<Query> queryFuture = taskExecutor.submit(new ParseQueryTask(searchText, analyzer));
    Bits visibleDocIds = getVisibleDocIds(searcher, authentication);

    Query query;
    try {
        query = queryFuture.get(INTERACTIVE_TIMEOUT, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    } catch (ExecutionException e) {
        Throwable cause = e.getCause();
        if (cause instanceof ParseException) {
            throw (ParseException) cause;
        } else {
            throw Util.toRuntimeException(cause);
        }
    } finally {
        queryFuture.cancel(false);
    }
    TopDocs docs = searcher.search(query, new PagePermissionFilter(visibleDocIds), HITS_PER_PAGE * page);

    int start = HITS_PER_PAGE * (page - 1);
    int end = Math.min(HITS_PER_PAGE * page, docs.scoreDocs.length);
    IndexReader reader = searcher.getIndexReader();
    List<ListenableFuture<SearchHit>> hitFutures = Lists.newArrayList();
    for (int i = start; i < end; i++) {
        ListenableFuture<SearchHit> hitFuture = taskExecutor
                .submit(new GetSearchHitTask(query, reader, docs.scoreDocs[i].doc, analyzer));
        hitFutures.add(hitFuture);
    }

    try {
        ListenableFuture<List<SearchHit>> allHitsFuture = Futures.allAsList(hitFutures);
        List<SearchHit> hits = allHitsFuture.get(INTERACTIVE_TIMEOUT, TimeUnit.SECONDS);
        return new SearchResult(hits, docs.totalHits, HITS_PER_PAGE);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    } catch (ExecutionException e) {
        Throwable cause = e.getCause();
        if (cause instanceof IOException) {
            throw (IOException) cause;
        } else {
            throw Util.toRuntimeException(cause);
        }
    } finally {
        for (ListenableFuture<SearchHit> hitFuture : hitFutures) {
            hitFuture.cancel(false);
        }
    }
}

From source file:io.druid.indexing.common.task.AppenderatorDriverRealtimeIndexTask.java
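Waits for all pending segment handoffs, using the timed get() only when a positive timeout is configured.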

private void waitForSegmentPublishAndHandoff(long timeout)
        throws InterruptedException, ExecutionException, TimeoutException {
    if (!pendingHandoffs.isEmpty()) {
        ListenableFuture<?> allHandoffs = Futures.allAsList(pendingHandoffs);
        log.info("Waiting for handoffs");

        if (timeout > 0) {
            allHandoffs.get(timeout, TimeUnit.MILLISECONDS);
        } else {
            allHandoffs.get();
        }
    }
}

From source file:co.cask.cdap.cli.command.GetStreamStatsCommand.java
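Runs an exploratory Hive query over stream events and waits at most one minute for the results future before computing column statistics.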

@Override
public void perform(Arguments arguments, PrintStream output) throws Exception {
    long currentTime = System.currentTimeMillis();

    Id.Stream streamId = Id.Stream.from(cliConfig.getCurrentNamespace(),
            arguments.get(ArgumentName.STREAM.toString()));
    // limit limit to [1, MAX_LIMIT]
    int limit = Math.max(1,
            Math.min(MAX_LIMIT, arguments.getInt(ArgumentName.LIMIT.toString(), DEFAULT_LIMIT)));
    long startTime = getTimestamp(arguments.get(ArgumentName.START_TIME.toString(), "min"), currentTime);
    long endTime = getTimestamp(arguments.get(ArgumentName.END_TIME.toString(), "max"), currentTime);

    // hack to validate streamId
    StreamProperties config = streamClient.getConfig(streamId);
    if (config.getFormat().getName().equals("text")) {
        output.printf("No schema found for stream '%s'", streamId.getId());
        output.println();
        return;
    }

    // build processorMap: Hive column name -> StatsProcessor
    Map<String, Set<StatsProcessor>> processorMap = Maps.newHashMap();
    Schema streamSchema = config.getFormat().getSchema();
    for (Schema.Field field : streamSchema.getFields()) {
        Schema fieldSchema = field.getSchema();
        String hiveColumnName = cdapSchemaColumName2HiveColumnName(streamId, field.getName());
        processorMap.put(hiveColumnName,
                getProcessorsForType(fieldSchema.getType(), fieldSchema.getUnionSchemas()));
    }

    // get a list of stream events and calculates various statistics about the events
    String timestampCol = getTimestampHiveColumn(streamId);
    ListenableFuture<ExploreExecutionResult> resultsFuture = queryClient.execute(streamId.getNamespace(),
            "SELECT * FROM " + getHiveTableName(streamId) + " WHERE " + timestampCol + " BETWEEN " + startTime
                    + " AND " + endTime + " LIMIT " + limit);
    ExploreExecutionResult results = resultsFuture.get(1, TimeUnit.MINUTES);
    List<ColumnDesc> schema = results.getResultSchema();

    // apply StatsProcessors to every element in every row
    int rows = 0;
    while (results.hasNext()) {
        rows++;
        QueryResult row = results.next();
        for (int i = 0; i < row.getColumns().size(); i++) {
            Object column = row.getColumns().get(i);
            ColumnDesc columnDesc = schema.get(i);
            String columnName = columnDesc.getName();
            if (isUserHiveColumn(streamId, columnName)) {
                Set<StatsProcessor> processors = processorMap.get(columnName);
                if (processors != null) {
                    for (StatsProcessor processor : processors) {
                        processor.process(column);
                    }
                }
            }
        }
    }

    // print report
    for (ColumnDesc columnDesc : schema) {
        if (isUserHiveColumn(streamId, columnDesc.getName())) {
            String truncatedColumnName = getTruncatedColumnName(streamId, columnDesc.getName());
            output.printf("column: %s, type: %s", truncatedColumnName, columnDesc.getType());
            output.println();
            Set<StatsProcessor> processors = processorMap.get(columnDesc.getName());
            if (processors != null && !processors.isEmpty()) {
                for (StatsProcessor processor : processors) {
                    processor.printReport(output);
                }
                output.println();
            } else {
                output.println("No statistics available");
                output.println();
            }
        }
    }

    output.printf("Analyzed %d Stream events in the time range [%d, %d]...", rows, startTime, endTime);
    output.println();
    output.println();
}

From source file:com.github.jarlakxen.embedphantomjs.executor.PhantomJSFileExecutor.java
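Executes a PhantomJS script; a separate watchdog task uses the timed get() to cancel the action and destroy the process if it does not finish within the configured timeout.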

public ListenableFuture<String> execute(final File sourceFile, final String... args) {
    final String cmd = this.phantomReference.getBinaryPath() + " " + sourceFile.getAbsolutePath() + " "
            + StringUtils.join(args, " ");
    try {
        final Process process = Runtime.getRuntime().exec(cmd);
        LOGGER.info("Command to execute: " + cmd);

        final ListenableFuture<String> action = processExecutorService.submit(new Callable<String>() {
            @Override
            public String call() throws Exception {
                LOGGER.info("Command to execute: " + cmd);
                String output = IOUtils.toString(process.getInputStream());
                process.waitFor();
                LOGGER.debug("Command " + cmd + " output:" + output);
                return output;
            }
        });

        timeoutExecutorService.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    action.get(executionTimeout.getTimeout(), executionTimeout.getUnit());
                } catch (Exception e) {
                    action.cancel(false);
                    process.destroy();
                }
            }
        });

        return action;

    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}

From source file:co.cask.cdap.internal.app.runtime.adapter.AdapterService.java
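Configures an application template in memory and waits up to two minutes for the ConfigResponse future before parsing the resulting specification.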

private ApplicationTemplateInfo getTemplateInfo(File jarFile)
        throws InterruptedException, ExecutionException, TimeoutException, IOException {
    ApplicationTemplateInfo existing = fileToTemplateMap.get().get(jarFile.getAbsoluteFile());
    HashCode fileHash = Files.hash(jarFile, Hashing.md5());
    // if the file is the same, just return
    if (existing != null && fileHash.equals(existing.getFileHash())) {
        return existing;
    }

    // instantiate the template application and call configure() on it to determine its specification
    InMemoryConfigurator configurator = new InMemoryConfigurator(
            new LocalLocationFactory().create(jarFile.toURI()), null);
    ListenableFuture<ConfigResponse> result = configurator.config();
    ConfigResponse response = result.get(2, TimeUnit.MINUTES);
    InputSupplier<? extends Reader> configSupplier = response.get();
    if (response.getExitCode() != 0 || configSupplier == null) {
        throw new IllegalArgumentException("Failed to get template info");
    }
    ApplicationSpecification spec;
    try (Reader configReader = configSupplier.getInput()) {
        spec = GSON.fromJson(configReader, ApplicationSpecification.class);
    }

    // verify that the name is ok
    Id.Application.from(Constants.DEFAULT_NAMESPACE_ID, spec.getName());

    // determine the program type of the template
    ProgramType programType;
    int numWorkflows = spec.getWorkflows().size();
    int numWorkers = spec.getWorkers().size();
    if (numWorkers == 0 && numWorkflows == 1) {
        programType = ProgramType.WORKFLOW;
    } else if (numWorkers == 1 && numWorkflows == 0) {
        programType = ProgramType.WORKER;
    } else {
        throw new IllegalArgumentException(
                "An application template must contain exactly one worker or one workflow.");
    }

    return new ApplicationTemplateInfo(jarFile, spec.getName(), spec.getDescription(), programType, fileHash);
}

From source file:org.apache.druid.query.AsyncQueryRunner.java
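Runs the query on a background executor and defers the blocking get() into a LazySequence, applying the query timeout only when one is present in the query context.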

@Override
public Sequence<T> run(final QueryPlus<T> queryPlus, final Map<String, Object> responseContext) {
    final Query<T> query = queryPlus.getQuery();
    final int priority = QueryContexts.getPriority(query);
    final QueryPlus<T> threadSafeQueryPlus = queryPlus.withoutThreadUnsafeState();
    final ListenableFuture<Sequence<T>> future = executor
            .submit(new AbstractPrioritizedCallable<Sequence<T>>(priority) {
                @Override
                public Sequence<T> call() {
                    // Note: it is assumed that baseRunner does most of the work eagerly on the call to the
                    // run() method and that the resulting sequence accumulate/yield is fast.
                    return baseRunner.run(threadSafeQueryPlus, responseContext);
                }
            });
    queryWatcher.registerQuery(query, future);

    return new LazySequence<>(new Supplier<Sequence<T>>() {
        @Override
        public Sequence<T> get() {
            try {
                if (QueryContexts.hasTimeout(query)) {
                    return future.get(QueryContexts.getTimeout(query), TimeUnit.MILLISECONDS);
                } else {
                    return future.get();
                }
            } catch (ExecutionException | InterruptedException | TimeoutException ex) {
                throw Throwables.propagate(ex);
            }
        }
    });
}