Example usage for com.google.common.util.concurrent ListenableFuture get

Introduction

This page collects example usages of com.google.common.util.concurrent.ListenableFuture.get(long, TimeUnit) drawn from open-source projects.

Prototype

V get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException;

Document

Waits if necessary for at most the given time for the computation to complete, and then retrieves its result, if available.
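
Before the real-world excerpts, here is a minimal, self-contained sketch of the call. The class name, executor setup, and task body are illustrative, not taken from the examples below:

import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

public class TimedGetExample {
    public static void main(String[] args) {
        // Decorating a plain executor makes submit() return a ListenableFuture.
        ListeningExecutorService executor = MoreExecutors
                .listeningDecorator(Executors.newSingleThreadExecutor());
        ListenableFuture<String> future = executor.submit(() -> "done");
        try {
            // Wait at most 500 ms for the computation, then retrieve the result.
            String result = future.get(500, TimeUnit.MILLISECONDS);
            System.out.println(result);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt flag
        } catch (ExecutionException e) {
            // the computation itself failed; inspect e.getCause()
        } catch (TimeoutException e) {
            future.cancel(true); // give up on a computation that is too slow
        } finally {
            executor.shutdown();
        }
    }
}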

Usage

From source file:org.onosproject.incubator.rpc.grpc.LinkProviderServiceClientProxy.java

@Override
public void linksVanished(DeviceId deviceId) {
    checkValidity();

    LinkProviderServiceRpcFutureStub newStub = LinkProviderServiceRpcGrpc.newFutureStub(channel);
    ListenableFuture<Void> future = newStub.linkVanished(vanishMsg(provider().id(), deviceId));

    try {
        // No need to wait for the result; this call just checks that the server responded
        future.get(500, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        log.error("linksVanished({}) failed", deviceId, e);
        invalidate();
        Thread.currentThread().interrupt();
    } catch (ExecutionException | TimeoutException e) {
        log.error("linksVanished({}) failed", deviceId, e);
        invalidate();
    }
}

From source file:org.onosproject.incubator.rpc.grpc.LinkProviderServiceClientProxy.java

@Override
public void linksVanished(ConnectPoint connectPoint) {
    checkValidity();

    LinkProviderServiceRpcFutureStub newStub = LinkProviderServiceRpcGrpc.newFutureStub(channel);
    ListenableFuture<Void> future = newStub.linkVanished(vanishMsg(provider().id(), connectPoint));

    try {
        // No need to wait for the result; this call just checks that the server responded
        future.get(500, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        log.error("linksVanished({}) failed", connectPoint, e);
        invalidate();
        Thread.currentThread().interrupt();
    } catch (ExecutionException | TimeoutException e) {
        log.error("linksVanished({}) failed", connectPoint, e);
        invalidate();
    }
}

From source file:org.onosproject.incubator.rpc.grpc.LinkProviderServiceClientProxy.java

@Override
public void linkDetected(LinkDescription linkDescription) {
    checkValidity();

    LinkProviderServiceRpcFutureStub newStub = LinkProviderServiceRpcGrpc.newFutureStub(channel);
    ListenableFuture<Void> future = newStub.linkDetected(detectMsg(provider().id(), linkDescription));

    try {
        // No need to wait for the result; this call just checks that the server responded
        future.get(500, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        log.error("linkDetected({}) failed", linkDescription, e);
        invalidate();
        Thread.currentThread().interrupt();
    } catch (ExecutionException | TimeoutException e) {
        log.error("linkDetected({}) failed", linkDescription, e);
        invalidate();
    }
}

From source file:org.onosproject.incubator.rpc.grpc.LinkProviderServiceClientProxy.java

@Override
public void linkVanished(LinkDescription linkDescription) {
    checkValidity();

    LinkProviderServiceRpcFutureStub newStub = LinkProviderServiceRpcGrpc.newFutureStub(channel);
    ListenableFuture<Void> future = newStub.linkVanished(vanishMsg(provider().id(), linkDescription));

    try {
        // No need to wait for the result; this call just checks that the server responded
        future.get(500, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        log.error("linkVanished({}) failed", linkDescription, e);
        invalidate();
        Thread.currentThread().interrupt();
    } catch (ExecutionException | TimeoutException e) {
        log.error("linkVanished({}) failed", linkDescription, e);
        invalidate();
    }
}

From source file:eu.eubrazilcc.lvl.service.io.ImportPublicationsTask.java

/**
 * Imports publications from external databases into the application's database.
 */
private Callable<Integer> importPublicationsTask() {
    return new Callable<Integer>() {
        @Override
        public Integer call() throws Exception {
            LOGGER.info("Importing new publications from: " + DATABASES);
            int count = 0;
            final File tmpDir = createTmpDir();
            try (final EntrezHelper entrez = EntrezHelper.create()) {
                final List<ListenableFuture<Integer>> subTasks = newArrayList();
                for (final String db : DATABASES) {
                    if (PUBMED.equals(db)) {
                        subTasks.addAll(importPubMedSubTasks(entrez, tmpDir));
                    } else {
                        throw new IllegalArgumentException("Unsupported database: " + db);
                    }
                }
                final ListenableFuture<List<Integer>> globalTask = successfulAsList(subTasks);
                final List<Integer> results = globalTask.get(TIMEOUT_MINUTES, MINUTES);
                for (final Integer result : results) {
                    if (result != null) {
                        count += result;
                    } else {
                        setHasErrors(true);
                        setStatus("Error while importing publications: not all publications were imported");
                    }
                }
            } catch (InterruptedException ie) {
                // not an error: log a warning and propagate the interruption
                LOGGER.warn("Publication import was interrupted, exiting");
                throw ie;
            } catch (Exception e) {
                setHasErrors(true);
                setStatus("Uncaught error while importing publications: not all publications were imported");
                LOGGER.error("Uncaught error while importing publications", e);
            } finally {
                deleteQuietly(tmpDir);
            }
            final String msg = count + " new publications were imported from: " + on(", ").join(DATABASES);
            if (!hasErrors()) {
                setStatus(msg);
                LOGGER.info(msg);
            } else {
                LOGGER.warn(msg + " - errors reported");
            }
            NOTIFICATION_MANAGER
                    .broadcast(Notification.builder().scope(DATA_CURATOR_ROLE).message(msg).build());
            // unregister this task before returning the result to the execution service
            TASK_STORAGE.remove(getUuid());
            return count;
        }
    };
}
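
The import task above (like the similar sequence-import task later on this page) aggregates its subtask futures with Guava's Futures.successfulAsList before a single timed get; the combined future does not fail when a subtask fails, but yields null in that subtask's slot, which is why the loop checks each result for null. A minimal sketch of the pattern, with a placeholder class name and placeholder task bodies:

import static com.google.common.util.concurrent.Futures.successfulAsList;

import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

public class SuccessfulAsListExample {
    public static void main(String[] args) throws Exception {
        ListeningExecutorService executor = MoreExecutors
                .listeningDecorator(Executors.newFixedThreadPool(2));
        ListenableFuture<Integer> ok = executor.submit(() -> 10);
        Callable<Integer> boom = () -> {
            throw new IllegalStateException("subtask failed");
        };
        ListenableFuture<Integer> failing = executor.submit(boom);
        // successfulAsList tolerates failed inputs: they show up as null
        // entries instead of failing the combined future.
        List<Integer> results = successfulAsList(ok, failing).get(1, TimeUnit.MINUTES);
        int count = 0;
        for (Integer result : results) {
            if (result != null) {
                count += result;
            }
        }
        System.out.println(count); // 10: the failed subtask was skipped
        executor.shutdown();
    }
}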

From source file:c5db.replication.InRamSim.java

public void start(Collection<Long> initialPeers)
        throws ExecutionException, InterruptedException, TimeoutException {
    createAndStartReplicators(initialPeers);

    rpcFiber.start();

    // bootstrap ALL the replicators, collect their futures and skip the null ones.
    List<ListenableFuture<Void>> futures = replicators.values().stream()
            .map(repl -> repl.bootstrapQuorum(peerIds)).filter(future -> future != null)
            .collect(Collectors.toList());

    for (ListenableFuture<Void> aFuture : futures) {
        LOG.debug("Waiting for bootstrap");
        aFuture.get(4, TimeUnit.SECONDS);
    }

    //    pickAReplicator().bootstrapQuorum(peerIds).get(4, TimeUnit.SECONDS);
}

From source file:org.jclouds.blobstore.strategy.internal.DeleteAllKeysInList.java

public void execute(final String containerName, ListContainerOptions options) {
    String message = options.getDir() != null
            ? String.format("clearing path %s/%s", containerName, options.getDir())
            : String.format("clearing container %s", containerName);
    options = options.clone();
    if (options.isRecursive())
        message += " recursively";
    logger.debug(message);
    Map<StorageMetadata, Exception> exceptions = Maps.newHashMap();
    for (int numErrors = 0; numErrors < maxErrors;) {
        // fetch partial directory listing
        PageSet<? extends StorageMetadata> listing;
        ListenableFuture<PageSet<? extends StorageMetadata>> listFuture = connection.list(containerName,
                options);
        try {
            listing = listFuture.get(maxTime, TimeUnit.MILLISECONDS);
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
            break;
        } catch (ExecutionException ee) {
            ++numErrors;
            if (numErrors == maxErrors) {
                throw propagate(ee.getCause());
            }
            retryHandler.imposeBackoffExponentialDelay(numErrors, message);
            continue;
        } catch (TimeoutException te) {
            ++numErrors;
            if (numErrors == maxErrors) {
                throw propagate(te);
            }
            retryHandler.imposeBackoffExponentialDelay(numErrors, message);
            continue;
        } finally {
            listFuture.cancel(true);
        }

        // recurse on subdirectories
        if (options.isRecursive()) {
            for (StorageMetadata md : listing) {
                String fullPath = parentIsFolder(options, md) ? options.getDir() + "/" + md.getName()
                        : md.getName();
                switch (md.getType()) {
                case BLOB:
                    break;
                case FOLDER:
                case RELATIVE_PATH:
                    if (options.isRecursive() && !fullPath.equals(options.getDir())) {
                        execute(containerName, options.clone().inDirectory(fullPath));
                    }
                    break;
                case CONTAINER:
                    throw new IllegalArgumentException("Container type not supported");
                }
            }
        }

        // remove blobs and now-empty subdirectories
        Map<StorageMetadata, ListenableFuture<?>> responses = Maps.newHashMap();
        for (StorageMetadata md : listing) {
            String fullPath = parentIsFolder(options, md) ? options.getDir() + "/" + md.getName()
                    : md.getName();
            switch (md.getType()) {
            case BLOB:
                responses.put(md, connection.removeBlob(containerName, fullPath));
                break;
            case FOLDER:
                if (options.isRecursive()) {
                    responses.put(md, connection.deleteDirectory(containerName, fullPath));
                }
                break;
            case RELATIVE_PATH:
                if (options.isRecursive()) {
                    responses.put(md, connection.deleteDirectory(containerName, md.getName()));
                }
                break;
            case CONTAINER:
                throw new IllegalArgumentException("Container type not supported");
            }
        }

        try {
            exceptions = awaitCompletion(responses, userExecutor, maxTime, logger, message);
        } catch (TimeoutException te) {
            ++numErrors;
            if (numErrors == maxErrors) {
                throw propagate(te);
            }
            retryHandler.imposeBackoffExponentialDelay(numErrors, message);
            continue;
        } finally {
            for (ListenableFuture<?> future : responses.values()) {
                future.cancel(true);
            }
        }

        if (!exceptions.isEmpty()) {
            ++numErrors;
            if (numErrors == maxErrors) {
                break;
            }
            retryHandler.imposeBackoffExponentialDelay(numErrors, message);
            continue;
        }

        String marker = listing.getNextMarker();
        if (marker == null) {
            break;
        }
        logger.debug("%s with marker %s", message, marker);
        options = options.afterMarker(marker);

        // Reset numErrors if we execute a successful iteration.  This ensures
        // that we only try an unsuccessful operation maxErrors times but
        // allow progress with directories containing many blobs in the face
        // of some failures.
        numErrors = 0;
    }
    if (!exceptions.isEmpty())
        throw new BlobRuntimeException(String.format("error %s: %s", message, exceptions));
}

From source file:org.jclouds.openstack.swift.blobstore.strategy.internal.ParallelMultipartUploadStrategy.java

@Override
public ListenableFuture<String> execute(final String container, final Blob blob, final PutOptions options,
        final BlobToObject blob2Object) {
    return ioExecutor.submit(new Callable<String>() {
        @Override
        public String call() throws Exception {
            String key = blob.getMetadata().getName();
            Payload payload = blob.getPayload();
            MultipartUploadSlicingAlgorithm algorithm = new MultipartUploadSlicingAlgorithm();
            algorithm.calculateChunkSize(payload.getContentMetadata().getContentLength());
            int parts = algorithm.getParts();
            long chunkSize = algorithm.getChunkSize();
            long remaining = algorithm.getRemaining();
            if (parts > 0) {
                CommonSwiftClient client = ablobstore.getContext().unwrap(SwiftApiMetadata.CONTEXT_TOKEN)
                        .getApi();
                final Map<Integer, ListenableFuture<String>> futureParts = new ConcurrentHashMap<Integer, ListenableFuture<String>>();
                final Map<Integer, Exception> errorMap = Maps.newHashMap();
                AtomicInteger errors = new AtomicInteger(0);
                int maxRetries = Math.max(minRetries, parts * maxPercentRetries / 100);
                int effectiveParts = remaining > 0 ? parts + 1 : parts;
                try {
                    logger.debug(String.format(
                            "initiated multipart upload of %s to container %s"
                                    + " consisting from %s part (possible max. retries: %d)",
                            key, container, effectiveParts, maxRetries));
                    // we need a bounded blocking queue to control the number of parallel jobs
                    ArrayBlockingQueue<Integer> activeParts = new ArrayBlockingQueue<Integer>(parallelDegree);
                    Queue<Part> toRetry = new ConcurrentLinkedQueue<Part>();
                    SortedMap<Integer, String> etags = new ConcurrentSkipListMap<Integer, String>();
                    CountDownLatch latch = new CountDownLatch(effectiveParts);
                    int part;
                    while ((part = algorithm.getNextPart()) <= parts) {
                        Integer partKey = Integer.valueOf(part);
                        activeParts.put(partKey);

                        prepareUploadPart(container, blob, key, partKey, payload,
                                algorithm.getNextChunkOffset(), chunkSize, etags, activeParts, futureParts,
                                errors, maxRetries, errorMap, toRetry, latch, blob2Object);
                    }
                    if (remaining > 0) {
                        Integer partKey = Integer.valueOf(part);
                        activeParts.put(partKey);
                        prepareUploadPart(container, blob, key, partKey, payload,
                                algorithm.getNextChunkOffset(), remaining, etags, activeParts, futureParts,
                                errors, maxRetries, errorMap, toRetry, latch, blob2Object);
                    }
                    latch.await();
                    // handling retries
                    while (errors.get() <= maxRetries && toRetry.size() > 0) {
                        int atOnce = Math.min(Math.min(toRetry.size(), errors.get()), parallelDegree);
                        CountDownLatch retryLatch = new CountDownLatch(atOnce);
                        for (int i = 0; i < atOnce; i++) {
                            Part failedPart = toRetry.poll();
                            Integer partKey = Integer.valueOf(failedPart.getPart());
                            activeParts.put(partKey);
                            prepareUploadPart(container, blob, key, partKey, payload, failedPart.getOffset(),
                                    failedPart.getSize(), etags, activeParts, futureParts, errors, maxRetries,
                                    errorMap, toRetry, retryLatch, blob2Object);
                        }
                        retryLatch.await();
                    }
                    if (errors.get() > maxRetries) {
                        throw new BlobRuntimeException(String.format(
                                "Too many failed parts: %s while multipart upload of %s to container %s",
                                errors.get(), key, container));
                    }

                    String eTag = client.putObjectManifest(container, key);
                    logger.debug(String.format(
                            "multipart upload of %s to container %s" + " successfully finished with %s retries",
                            key, container, errors.get()));
                    return eTag;
                } catch (Exception ex) {
                    RuntimeException rtex = Throwables2.getFirstThrowableOfType(ex, RuntimeException.class);
                    if (rtex == null) {
                        rtex = new RuntimeException(ex);
                    }
                    for (Map.Entry<Integer, ListenableFuture<String>> entry : futureParts.entrySet()) {
                        entry.getValue().cancel(false);
                    }
                    /*
                    if (uploadId != null) {
                        client.abortMultipartUpload(container, key, uploadId);
                    } */
                    throw rtex;
                }
            } else {
                ListenableFuture<String> futureETag = ablobstore.putBlob(container, blob, options);
                return maxTime != null ? futureETag.get(maxTime, TimeUnit.SECONDS) : futureETag.get();
            }
        }
    });
}

From source file:org.jclouds.aws.s3.blobstore.strategy.internal.ParallelMultipartUploadStrategy.java

@Override
public ListenableFuture<String> execute(final String container, final Blob blob, final PutOptions options) {
    return ioExecutor.submit(new Callable<String>() {
        @Override
        public String call() throws Exception {
            String key = blob.getMetadata().getName();
            Payload payload = blob.getPayload();
            MultipartUploadSlicingAlgorithm algorithm = new MultipartUploadSlicingAlgorithm();
            algorithm.calculateChunkSize(payload.getContentMetadata().getContentLength());
            int parts = algorithm.getParts();
            long chunkSize = algorithm.getChunkSize();
            long remaining = algorithm.getRemaining();
            if (parts > 0) {
                AWSS3Client client = ablobstore.getContext().unwrap(AWSS3ApiMetadata.CONTEXT_TOKEN).getApi();
                String uploadId = null;
                final Map<Integer, ListenableFuture<String>> futureParts = new ConcurrentHashMap<Integer, ListenableFuture<String>>();
                final Map<Integer, Exception> errorMap = Maps.newHashMap();
                AtomicInteger errors = new AtomicInteger(0);
                int maxRetries = Math.max(minRetries, parts * maxPercentRetries / 100);
                int effectiveParts = remaining > 0 ? parts + 1 : parts;
                try {
                    uploadId = client.initiateMultipartUpload(container,
                            ObjectMetadataBuilder.create().key(key).build()); // TODO md5
                    logger.debug(String.format(
                            "initiated multipart upload of %s to container %s"
                                    + " with uploadId %s consisting from %s part (possible max. retries: %d)",
                            key, container, uploadId, effectiveParts, maxRetries));
                    // we need a bounded blocking queue to control the number of parallel jobs
                    ArrayBlockingQueue<Integer> activeParts = new ArrayBlockingQueue<Integer>(parallelDegree);
                    Queue<Part> toRetry = new ConcurrentLinkedQueue<Part>();
                    SortedMap<Integer, String> etags = new ConcurrentSkipListMap<Integer, String>();
                    CountDownLatch latch = new CountDownLatch(effectiveParts);
                    int part;
                    while ((part = algorithm.getNextPart()) <= parts) {
                        Integer partKey = Integer.valueOf(part);
                        activeParts.put(partKey);
                        prepareUploadPart(container, key, uploadId, partKey, payload,
                                algorithm.getNextChunkOffset(), chunkSize, etags, activeParts, futureParts,
                                errors, maxRetries, errorMap, toRetry, latch);
                    }
                    if (remaining > 0) {
                        Integer partKey = Integer.valueOf(part);
                        activeParts.put(partKey);
                        prepareUploadPart(container, key, uploadId, partKey, payload,
                                algorithm.getNextChunkOffset(), remaining, etags, activeParts, futureParts,
                                errors, maxRetries, errorMap, toRetry, latch);
                    }
                    latch.await();
                    // handling retries
                    while (errors.get() <= maxRetries && toRetry.size() > 0) {
                        int atOnce = Math.min(Math.min(toRetry.size(), errors.get()), parallelDegree);
                        CountDownLatch retryLatch = new CountDownLatch(atOnce);
                        for (int i = 0; i < atOnce; i++) {
                            Part failedPart = toRetry.poll();
                            Integer partKey = Integer.valueOf(failedPart.getPart());
                            activeParts.put(partKey);
                            prepareUploadPart(container, key, uploadId, partKey, payload,
                                    failedPart.getOffset(), failedPart.getSize(), etags, activeParts,
                                    futureParts, errors, maxRetries, errorMap, toRetry, retryLatch);
                        }
                        retryLatch.await();
                    }
                    if (errors.get() > maxRetries) {
                        throw new BlobRuntimeException(String.format(
                                "Too many failed parts: %s while multipart upload of %s to container %s with uploadId %s",
                                errors.get(), key, container, uploadId));
                    }
                    String eTag = client.completeMultipartUpload(container, key, uploadId, etags);
                    logger.debug(String.format(
                            "multipart upload of %s to container %s with uploadId %s"
                                    + " successfully finished with %s retries",
                            key, container, uploadId, errors.get()));
                    return eTag;
                } catch (Exception ex) {
                    RuntimeException rtex = Throwables2.getFirstThrowableOfType(ex, RuntimeException.class);
                    if (rtex == null) {
                        rtex = new RuntimeException(ex);
                    }
                    for (Map.Entry<Integer, ListenableFuture<String>> entry : futureParts.entrySet()) {
                        entry.getValue().cancel(false);
                    }
                    if (uploadId != null) {
                        client.abortMultipartUpload(container, key, uploadId);
                    }
                    throw rtex;
                }
            } else {
                // Issue 936: don't just call putBlob, as that will see options=multiPart and 
                // recursively call this execute method again; instead mark as not multipart
                // because it can all fit in one go.
                PutOptions nonMultipartOptions = PutOptions.Builder.multipart(false);
                ListenableFuture<String> futureETag = ablobstore.putBlob(container, blob, nonMultipartOptions);
                return maxTime != null ? futureETag.get(maxTime, TimeUnit.SECONDS) : futureETag.get();
            }
        }
    });
}

From source file:eu.eubrazilcc.lvl.service.io.ImportSequencesTask.java

/**
 * Imports sequences from external databases into the application's database.
 */
private Callable<Integer> importSequencesTask() {
    return new Callable<Integer>() {
        @Override
        public Integer call() throws Exception {
            LOGGER.info("Importing new sequences from: " + DATABASES);
            int count = 0;
            final File tmpDir = createTmpDir();
            try (final EntrezHelper entrez = EntrezHelper.create()) {
                final List<ListenableFuture<Integer>> subTasks = newArrayList();
                for (final String db : DATABASES) {
                    if (GENBANK.equals(db)) {
                        subTasks.addAll(importGenBankSubTasks(entrez, tmpDir));
                    } else {
                        throw new IllegalArgumentException("Unsupported database: " + db);
                    }
                }
                final ListenableFuture<List<Integer>> globalTask = successfulAsList(subTasks);
                final List<Integer> results = globalTask.get(TIMEOUT_MINUTES, MINUTES);
                for (final Integer result : results) {
                    if (result != null) {
                        count += result;
                    } else {
                        setHasErrors(true);
                        setStatus("Error while importing sequences: not all sequences were imported");
                    }
                }
            } catch (InterruptedException ie) {
                // not an error: log a warning and propagate the interruption
                LOGGER.warn("Sequence import was interrupted, exiting");
                throw ie;
            } catch (Exception e) {
                setHasErrors(true);
                setStatus("Uncaught error while importing sequences: not all sequences were imported");
                LOGGER.error("Uncaught error while importing sequences", e);
            } finally {
                deleteQuietly(tmpDir);
            }
            final String msg = count + " new sequences were imported from: " + on(", ").join(DATABASES);
            if (!hasErrors()) {
                setStatus(msg);
                LOGGER.info(msg);
            } else {
                LOGGER.warn(msg + " - errors reported");
            }
            NOTIFICATION_MANAGER
                    .broadcast(Notification.builder().scope(DATA_CURATOR_ROLE).message(msg).build());
            // schedule publication import
            final ImportPublicationsTask importPublicationsTask = ImportPublicationsTask.builder()
                    .filter(NewReferenceFilter.builder().build()).pmids(pmids).build();
            importPublicationsTaskId = importPublicationsTask.getUuid();
            TASK_RUNNER.execute(importPublicationsTask);
            TASK_STORAGE.add(importPublicationsTask);
            // unregister this task before returning the result to the execution service
            TASK_STORAGE.remove(getUuid());
            return count;
        }
    };
}