Example usage for com.google.common.util.concurrent Futures transform

Introduction

On this page you can find example usages of com.google.common.util.concurrent.Futures.transform.

Prototype

public static <I, O> ListenableFuture<O> transform(ListenableFuture<I> input,
        Function<? super I, ? extends O> function) 

Document

Returns a new ListenableFuture whose result is the product of applying the given Function to the result of the given Future.
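
For orientation, the minimal sketch below shows the basic shape of a call to this two-argument overload. It is a hypothetical example rather than code from any of the projects listed under Usage; the class and variable names (TransformExample, lengthFuture, messageFuture) are illustrative only, and note that newer Guava releases require passing an Executor as a third argument (or using transformAsync when the function itself returns a future).

import com.google.common.base.Function;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.concurrent.Callable;
import java.util.concurrent.Executors;

public class TransformExample {
    public static void main(String[] args) throws Exception {
        // Wrap a plain executor so that submitted tasks return ListenableFutures.
        ListeningExecutorService pool = MoreExecutors
                .listeningDecorator(Executors.newSingleThreadExecutor());

        // The input future eventually yields an Integer.
        Callable<Integer> task = () -> "hello".length();
        ListenableFuture<Integer> lengthFuture = pool.submit(task);

        // transform applies the Function to the input's result once it completes;
        // if the input fails, the returned future fails with the same exception.
        ListenableFuture<String> messageFuture = Futures.transform(lengthFuture,
                new Function<Integer, String>() {
                    @Override
                    public String apply(Integer length) {
                        return "length=" + length;
                    }
                });

        System.out.println(messageFuture.get()); // prints "length=5"
        pool.shutdown();
    }
}

The examples below follow the same pattern, typically applying an anonymous Function or a lambda to the result of a datastore read or an RPC call.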

Usage

From source file:org.thingsboard.server.dao.alarm.BaseAlarmService.java

@Override
public ListenableFuture<TimePageData<AlarmInfo>> findAlarms(TenantId tenantId, AlarmQuery query) {
    ListenableFuture<List<AlarmInfo>> alarms = alarmDao.findAlarms(tenantId, query);
    if (query.getFetchOriginator() != null && query.getFetchOriginator().booleanValue()) {
        alarms = Futures.transformAsync(alarms, input -> {
            List<ListenableFuture<AlarmInfo>> alarmFutures = new ArrayList<>(input.size());
            for (AlarmInfo alarmInfo : input) {
                alarmFutures.add(Futures.transform(
                        entityService.fetchEntityNameAsync(tenantId, alarmInfo.getOriginator()),
                        originatorName -> {
                            if (originatorName == null) {
                                originatorName = "Deleted";
                            }
                            alarmInfo.setOriginatorName(originatorName);
                            return alarmInfo;
                        }));
            }
            return Futures.successfulAsList(alarmFutures);
        });
    }
    return Futures.transform(alarms, new Function<List<AlarmInfo>, TimePageData<AlarmInfo>>() {
        @Nullable
        @Override
        public TimePageData<AlarmInfo> apply(@Nullable List<AlarmInfo> alarms) {
            return new TimePageData<>(alarms, query.getPageLink());
        }
    });
}

From source file:com.metamx.rdiclient.RdiClientImpl.java

private ListenableFuture<HttpResponseStatus> retryingPost(final RequestBuilder request, final int attempt,
        final int maxRetries) {
    final SettableFuture<HttpResponseStatus> retVal = SettableFuture.create();
    final ListenableFuture<HttpResponseStatus> response = Futures.transform(
            request.go(new StatusResponseHandler(Charsets.UTF_8)),
            new AsyncFunction<StatusResponseHolder, HttpResponseStatus>() {
                @Override
                public ListenableFuture<HttpResponseStatus> apply(StatusResponseHolder result)
                        throws Exception {
                    // Throw an RdiHttpResponseException in case of unexpected HTTP status codes.
                    if (result.getStatus().getCode() / 100 == 2) {
                        return Futures.immediateFuture(result.getStatus());
                    } else {
                        return Futures.immediateFailedFuture(new RdiHttpResponseException(result));
                    }
                }
            });
    Futures.addCallback(response, new FutureCallback<HttpResponseStatus>() {
        @Override
        public void onSuccess(HttpResponseStatus result) {
            retVal.set(result);
        }

        @Override
        public void onFailure(Throwable e) {
            final boolean shouldRetry;
            if (maxRetries <= 0) {
                shouldRetry = false;
            } else if (e instanceof IOException || e instanceof ChannelException) {
                shouldRetry = true;
            } else if (e instanceof RdiHttpResponseException) {
                final int statusCode = ((RdiHttpResponseException) e).getStatusCode();
                shouldRetry = statusCode / 100 == 5 || (statusCode / 100 == 4 && statusCode != 400);
            } else {
                shouldRetry = false;
            }

            if (shouldRetry) {
                final long sleepMillis = retryDuration(attempt);
                log.warn(e, "Failed try #%d, retrying in %,dms (%,d tries left).", attempt + 1, sleepMillis,
                        maxRetries);
                retryExecutor.schedule(new Runnable() {
                    @Override
                    public void run() {
                        final ListenableFuture<HttpResponseStatus> nextTry = retryingPost(request, attempt + 1,
                                maxRetries - 1);
                        Futures.addCallback(nextTry, new FutureCallback<HttpResponseStatus>() {
                            @Override
                            public void onSuccess(HttpResponseStatus result2) {
                                retVal.set(result2);
                            }

                            @Override
                            public void onFailure(Throwable e2) {
                                retVal.setException(e2);
                            }
                        });
                    }
                }, sleepMillis, TimeUnit.MILLISECONDS);
            } else if (e instanceof RdiException || e instanceof Error) {
                retVal.setException(e);
            } else {
                retVal.setException(new RdiException(String
                        .format("Got exception when posting events to urlString[%s].", config.getRdiUrl()), e));
            }
        }
    });

    return retVal;
}

From source file:org.opendaylight.controller.sample.toaster.provider.OpendaylightToaster.java

private void checkStatusAndMakeToast(final MakeToastInput input,
        final SettableFuture<RpcResult<Void>> futureResult, final int tries) {

    // Read the ToasterStatus and, if currently Up, try to write the status to Down.
    // If that succeeds, then we essentially have an exclusive lock and can proceed
    // to make toast.

    final ReadWriteTransaction tx = dataProvider.newReadWriteTransaction();
    ListenableFuture<Optional<Toaster>> readFuture = tx.read(OPERATIONAL, TOASTER_IID);

    final ListenableFuture<Void> commitFuture = Futures.transform(readFuture,
            (AsyncFunction<Optional<Toaster>, Void>) toasterData -> {

                ToasterStatus toasterStatus = ToasterStatus.Up;
                if (toasterData.isPresent()) {
                    toasterStatus = toasterData.get().getToasterStatus();
                }

                LOG.debug("Read toaster status: {}", toasterStatus);

                if (toasterStatus == ToasterStatus.Up) {

                    if (outOfBread()) {
                        LOG.debug("Toaster is out of bread");

                        return Futures.immediateFailedCheckedFuture(
                                new TransactionCommitFailedException("", makeToasterOutOfBreadError()));
                    }

                    LOG.debug("Setting Toaster status to Down");

                    // We're not currently making toast - try to update the status to Down
                    // to indicate we're going to make toast. This acts as a lock to prevent
                    // concurrent toasting.
                    tx.put(OPERATIONAL, TOASTER_IID, buildToaster(ToasterStatus.Down));
                    return tx.submit();
                }

                LOG.debug("Oops - already making toast!");

                // Return an error since we are already making toast. This will get
                // propagated to the commitFuture below which will interpret the null
                // TransactionStatus in the RpcResult as an error condition.
                return Futures.immediateFailedCheckedFuture(
                        new TransactionCommitFailedException("", makeToasterInUseError()));
            });

    Futures.addCallback(commitFuture, new FutureCallback<Void>() {
        @Override
        public void onSuccess(final Void result) {
            // OK to make toast
            currentMakeToastTask.set(executor.submit(new MakeToastTask(input, futureResult)));
        }

        @Override
        public void onFailure(final Throwable ex) {
            if (ex instanceof OptimisticLockFailedException) {

                // Another thread is likely trying to make toast simultaneously and updated the
                // status before us. Try reading the status again - if another make toast is
                // now in progress, we should get ToasterStatus.Down and fail.

                if ((tries - 1) > 0) {
                    LOG.debug("Got OptimisticLockFailedException - trying again");

                    checkStatusAndMakeToast(input, futureResult, tries - 1);
                } else {
                    futureResult.set(RpcResultBuilder.<Void>failed()
                            .withError(ErrorType.APPLICATION, ex.getMessage()).build());
                }

            } else {

                LOG.debug("Failed to commit Toaster status", ex);

                // Probably already making toast.
                futureResult.set(RpcResultBuilder.<Void>failed()
                        .withRpcErrors(((TransactionCommitFailedException) ex).getErrorList()).build());
            }
        }
    });
}

From source file:org.opendaylight.groupbasedpolicy.renderer.ofoverlay.PolicyManager.java

private ListenableFuture<Void> deleteTableIfExists(final ReadWriteTransaction rwTx,
        final InstanceIdentifier<Table> tablePath) {
    return Futures.transform(rwTx.read(LogicalDatastoreType.CONFIGURATION, tablePath),
            new Function<Optional<Table>, Void>() {

                @Override
                public Void apply(Optional<Table> optTable) {
                    if (optTable.isPresent()) {
                        rwTx.delete(LogicalDatastoreType.CONFIGURATION, tablePath);
                    }
                    return null;
                }
            });
}

From source file:org.opendaylight.ovsdb.lib.impl.OvsdbClientImpl.java

private void populateSchema(final List<String> dbNames, final Map<String, DatabaseSchema> schema,
        final SettableFuture<Map<String, DatabaseSchema>> sfuture) {

    if (dbNames == null || dbNames.isEmpty()) {
        return;
    }

    Futures.transform(rpc.get_schema(Lists.newArrayList(dbNames.get(0))),
            new com.google.common.base.Function<JsonNode, Void>() {
                @Override
                public Void apply(JsonNode jsonNode) {
                    try {
                        schema.put(dbNames.get(0), DatabaseSchema.fromJson(dbNames.get(0), jsonNode));
                        if (dbNames.size() > 1 && !sfuture.isCancelled()) {
                            // More databases remain: fetch their schemas recursively.
                            populateSchema(dbNames.subList(1, dbNames.size()), schema, sfuture);
                        } else if (dbNames.size() == 1) {
                            // Last database fetched: complete the future with the full map.
                            sfuture.set(schema);
                        }
                    } catch (Exception e) {
                        sfuture.setException(e);
                    }
                    return null;
                }
            });
}

From source file:zipkin.storage.elasticsearch.ElasticsearchSpanStore.java

@Override
public ListenableFuture<List<DependencyLink>> getDependencies(long endMillis, @Nullable Long lookback) {
    long beginMillis = lookback != null ? endMillis - lookback : 0;
    // We just return all dependencies in the days that fall within endTs and lookback as
    // dependency links themselves don't have timestamps.
    Set<String> indices = indexNameFormatter.indexNamePatternsForRange(beginMillis, endMillis);
    return Futures.transform(client.findDependencies(indices.toArray(new String[0])),
            new Function<List<DependencyLink>, List<DependencyLink>>() {
                @Override
                public List<DependencyLink> apply(List<DependencyLink> input) {
                    return input == null ? Collections.<DependencyLink>emptyList()
                            : DependencyLinker.merge(input);
                }
            });
}

From source file:org.opendaylight.netconf.sal.connect.netconf.util.NetconfBaseOps.java

private ListenableFuture<Optional<NormalizedNode<?, ?>>> extractData(
        final Optional<YangInstanceIdentifier> path, final ListenableFuture<DOMRpcResult> configRunning) {
    return Futures.transform(configRunning, new Function<DOMRpcResult, Optional<NormalizedNode<?, ?>>>() {
        @Override
        public Optional<NormalizedNode<?, ?>> apply(final DOMRpcResult result) {
            Preconditions.checkArgument(result.getErrors().isEmpty(), "Unable to read data: %s, errors: %s",
                    path, result.getErrors());
            final DataContainerChild<? extends YangInstanceIdentifier.PathArgument, ?> dataNode = ((ContainerNode) result
                    .getResult()).getChild(
                            NetconfMessageTransformUtil.toId(NetconfMessageTransformUtil.NETCONF_DATA_QNAME))
                            .get();
            return transformer.selectFromDataStructure(dataNode, path.get());
        }
    });
}

From source file:me.j360.trace.storage.elasticsearch.ElasticsearchSpanStore.java

@Override
public ListenableFuture<List<String>> getServiceNames() {
    SearchRequestBuilder elasticRequest = client.prepareSearch(indexNameFormatter.catchAll())
            .setTypes(ElasticsearchConstants.SPAN).setQuery(matchAllQuery()).setSize(0)
            .addAggregation(AggregationBuilders.terms("annotationServiceName_agg")
                    .field("annotations.endpoint.serviceName").size(0))
            .addAggregation(AggregationBuilders.nested("binaryAnnotations_agg").path("binaryAnnotations")
                    .subAggregation(AggregationBuilders.terms("binaryAnnotationsServiceName_agg")
                            .field("binaryAnnotations.endpoint.serviceName").size(0)));

    return Futures.transform(ElasticFutures.toGuava(elasticRequest.execute()),
            ConvertServiceNamesResponse.INSTANCE);
}

From source file:org.opendaylight.oven.impl.OvenProvider.java

/**
 * Read the OvenStatus and, if currently Waiting, try to write the status to Preheating. 
 * If that succeeds, then we can proceed to cook the food. 
 *
 * @param input
 * @param futureResult
 * @param tries
 */
private void checkStatusAndCookFood(final CookFoodInput input,
        final SettableFuture<RpcResult<Void>> futureResult, final int tries) {
    /*
     * We create a ReadWriteTransaction by using the databroker. Then, we
     * read the status of the oven with getOvenStatus() using the
     * databroker again. Once we have the status, we analyze it and then
     * databroker submit function is called to effectively change the oven
     * status. This all affects the MD-SAL tree, more specifically the part
     * of the tree that contain the oven (the nodes).
     */
    LOG.info("In checkStatusAndCookFood()");
    final ReadWriteTransaction tx = db.newReadWriteTransaction();
    final ListenableFuture<Optional<OvenParams>> readFuture = tx.read(LogicalDatastoreType.OPERATIONAL,
            OVEN_IID);
    final ListenableFuture<Void> commitFuture = Futures.transform(readFuture,
            new AsyncFunction<Optional<OvenParams>, Void>() {

                @Override
                public ListenableFuture<Void> apply(Optional<OvenParams> ovenParamsData) throws Exception {
                    if (ovenParamsData.isPresent()) {
                        status = ovenParamsData.get().getOvenStatus();
                    } else {
                        throw new Exception("Error reading OvenParams.status data from the store.");
                    }
                    LOG.info("Read oven status: {}", status);

                    if (status == OvenStatus.Waiting) {
                        //Check if numberOfMealAvailable is not 0, if yes Notify outOfStock
                        if (numberOfMealAvailable.get() == 0) {
                            LOG.info("No more meal availble to cook");
                            notificationProvider.publish(new KitchenOutOfFoodBuilder().build());
                            return Futures.immediateFailedCheckedFuture(
                                    new TransactionCommitFailedException("", cookNoMoreMealError()));
                        }

                        LOG.info("Setting Camera status to Preheating");
                        // We're not currently cooking food - we try to update the status to On
                        // to indicate we're going to cook food. This acts as a lock to prevent
                        // concurrent cooking.
                        tx.put(LogicalDatastoreType.OPERATIONAL, OVEN_IID,
                                buildOvenParams(OvenStatus.Preheating));
                        return tx.submit();
                    }
                    LOG.info("The oven is actually on use, cancel actual program before.");
                    // Return an error since we are already cooking food. This will get
                    // propagated to the commitFuture below which will interpret the null
                    // TransactionStatus in the RpcResult as an error condition.
                    return Futures.immediateFailedCheckedFuture(
                            new TransactionCommitFailedException("", cookOvenInUseError()));
                }

                private RpcError cookNoMoreMealError() {
                    return RpcResultBuilder.newError(ErrorType.APPLICATION, "resource-denied",
                            "No more food available to cook", "out-of-stock", null, null);
                }
            });
    Futures.addCallback(commitFuture, new FutureCallback<Void>() {
        @Override
        public void onFailure(Throwable t) {
            if (t instanceof OptimisticLockFailedException) {
                // Another thread is likely trying to cook food simultaneously and updated the
                // status before us. Try reading the status again - if another cookFood is
                // now in progress, we should get OvenStatus.Waiting and fail.
                if ((tries - 1) > 0) {
                    LOG.info("Got OptimisticLockFailedException - trying again");
                    checkStatusAndCookFood(input, futureResult, tries - 1);
                } else {
                    futureResult.set(RpcResultBuilder.<Void>failed()
                            .withError(ErrorType.APPLICATION, t.getMessage()).build());
                }
            } else {
                LOG.info("Failed to commit Oven status", t);
                // Probably already cooking.
                futureResult.set(RpcResultBuilder.<Void>failed()
                        .withRpcErrors(((TransactionCommitFailedException) t).getErrorList()).build());
            }
        }

        @Override
        public void onSuccess(Void result) {
            // OK to cook
            currentCookingMealTask.set(executor.submit(new CookMealTask(input, futureResult)));

        }

    });
}

From source file:com.netflix.metacat.main.services.search.ElasticSearchRefresh.java

@SuppressWarnings("checkstyle:methodname")
private ListenableFuture<Void> _processPartitions(final List<QualifiedName> qNames) {
    final List<QualifiedName> excludeQualifiedNames = config.getElasticSearchRefreshExcludeQualifiedNames();
    final List<String> tables = elasticSearchUtil.getTableIdsByCatalogs(ElasticSearchDoc.Type.table.name(),
            qNames, excludeQualifiedNames);
    final List<ListenableFuture<ListenableFuture<Void>>> futures = tables.stream()
            .map(s -> service.submit(() -> {
                final QualifiedName tableName = QualifiedName.fromString(s, false);
                final List<ListenableFuture<Void>> indexFutures = Lists.newArrayList();
                int offset = 0;
                int count;
                final Sort sort;
                if ("s3".equals(tableName.getCatalogName()) || "aegisthus".equals(tableName.getCatalogName())) {
                    sort = new Sort("id", SortOrder.ASC);
                } else {
                    sort = new Sort("part_id", SortOrder.ASC);
                }
                final Pageable pageable = new Pageable(10000, offset);
                do {
                    final List<PartitionDto> partitionDtos = partitionService.list(tableName, sort, pageable,
                            true, true, new GetPartitionsRequestDto(null, null, true, true));
                    count = partitionDtos.size();
                    if (!partitionDtos.isEmpty()) {
                        final List<List<PartitionDto>> partitionedPartitionDtos = Lists.partition(partitionDtos,
                                1000);
                        partitionedPartitionDtos.forEach(subPartitionsDtos -> indexFutures
                                .add(indexPartitionDtos(tableName, subPartitionsDtos)));
                        offset = offset + count;
                        pageable.setOffset(offset);
                    }
                } while (count == 10000);
                return Futures.transform(Futures.successfulAsList(indexFutures),
                        Functions.constant((Void) null));
            })).collect(Collectors.toList());
    final ListenableFuture<Void> processPartitionsFuture = Futures
            .transformAsync(Futures.successfulAsList(futures), input -> {
                final List<ListenableFuture<Void>> inputFuturesWithoutNulls = input.stream().filter(NOT_NULL)
                        .collect(Collectors.toList());
                return Futures.transform(Futures.successfulAsList(inputFuturesWithoutNulls),
                        Functions.constant(null));
            });
    return Futures.transformAsync(processPartitionsFuture, input -> {
        elasticSearchUtil.refresh();
        final List<ListenableFuture<Void>> cleanUpFutures = tables.stream()
                .map(s -> service.submit(
                        () -> partitionsCleanUp(QualifiedName.fromString(s, false), excludeQualifiedNames)))
                .collect(Collectors.toList());
        return Futures.transform(Futures.successfulAsList(cleanUpFutures), Functions.constant(null));
    });
}