Example usage for com.google.common.util.concurrent Futures allAsList

Introduction

On this page you can find example usages of com.google.common.util.concurrent Futures.allAsList.

Prototype

@Beta
@CheckReturnValue
public static <V> ListenableFuture<List<V>> allAsList(
        Iterable<? extends ListenableFuture<? extends V>> futures) 

Document

Creates a new ListenableFuture whose value is a list containing the values of all its input futures, if all succeed.
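For orientation before the real-world examples below, here is a minimal, self-contained sketch (class and variable names are hypothetical, not taken from any of the projects below). The combined future completes, in input order, once every input succeeds; if any input fails or is cancelled, the returned future fails or is cancelled accordingly.

import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.List;
import java.util.concurrent.Executors;

public class AllAsListExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical demo executor wrapping a plain JDK thread pool
        final ListeningExecutorService executor = MoreExecutors
                .listeningDecorator(Executors.newFixedThreadPool(2));

        // Two independent asynchronous computations
        final ListenableFuture<Integer> first = executor.submit(() -> 1);
        final ListenableFuture<Integer> second = executor.submit(() -> 2);

        // Completes with [1, 2] (input order) once both inputs succeed;
        // fails immediately if either input fails
        final ListenableFuture<List<Integer>> all = Futures
                .allAsList(ImmutableList.of(first, second));
        System.out.println(all.get());

        executor.shutdown();
    }
}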

Usage

From source file: com.b2international.index.es.EsDocumentWriter.java

@Override
public void commit() throws IOException {
    if (indexOperations.isEmpty() && deleteOperations.isEmpty() && updateOperations.isEmpty()) {
        return;
    }

    final Set<DocumentMapping> mappingsToRefresh = Collections.synchronizedSet(newHashSet());
    final EsClient client = admin.client();
    // apply bulk updates first
    final ListeningExecutorService executor;
    if (updateOperations.size() > 1) {
        executor = MoreExecutors
                .listeningDecorator(Executors.newFixedThreadPool(Math.min(4, updateOperations.size())));
    } else {
        executor = MoreExecutors.newDirectExecutorService();
    }
    final List<ListenableFuture<?>> updateFutures = newArrayList();
    for (BulkUpdate<?> update : updateOperations) {
        updateFutures.add(executor.submit(() -> bulkUpdate(client, update, mappingsToRefresh)));
    }
    try {
        executor.shutdown();
        Futures.allAsList(updateFutures).get();
        executor.awaitTermination(10, TimeUnit.SECONDS);
    } catch (InterruptedException | ExecutionException e) {
        throw new IndexException("Couldn't execute bulk updates", e);
    }

    // then bulk indexes/deletes
    if (!indexOperations.isEmpty() || !deleteOperations.isEmpty()) {
        final BulkProcessor processor = client.bulk(new BulkProcessor.Listener() {
            @Override
            public void beforeBulk(long executionId, BulkRequest request) {
                admin.log().debug("Sending bulk request {}", request.numberOfActions());
            }

            @Override
            public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
                admin.log().error("Failed bulk request", failure);
            }

            @Override
            public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
                admin.log().debug("Successfully processed bulk request ({}) in {}.", request.numberOfActions(),
                        response.getTook());
                if (response.hasFailures()) {
                    for (BulkItemResponse itemResponse : response.getItems()) {
                        checkState(!itemResponse.isFailed(), "Failed to commit bulk request in index '%s', %s",
                                admin.name(), itemResponse.getFailureMessage());
                    }
                }
            }
        }).setConcurrentRequests(getConcurrencyLevel()).setBulkActions(10_000)
                .setBulkSize(new ByteSizeValue(10L, ByteSizeUnit.MB)).build();

        for (Class<?> type : ImmutableSet.copyOf(indexOperations.rowKeySet())) {
            final Map<String, Object> indexOperationsForType = indexOperations.row(type);

            final DocumentMapping mapping = admin.mappings().getMapping(type);
            final String typeString = mapping.typeAsString();
            final String typeIndex = admin.getTypeIndex(mapping);

            mappingsToRefresh.add(mapping);

            for (Entry<String, Object> entry : Iterables.consumingIterable(indexOperationsForType.entrySet())) {
                final String id = entry.getKey();
                if (!deleteOperations.containsValue(id)) {
                    final Object obj = entry.getValue();
                    final Set<String> hashedFields = mapping.getHashedFields();
                    final byte[] _source;

                    if (!hashedFields.isEmpty()) {
                        final ObjectNode objNode = mapper.valueToTree(obj);
                        final ObjectNode hashedNode = mapper.createObjectNode();

                        // Preserve property order, share references with objNode
                        for (String hashedField : hashedFields) {
                            JsonNode value = objNode.get(hashedField);
                            if (value != null && !value.isNull()) {
                                hashedNode.set(hashedField, value);
                            }
                        }

                        final byte[] hashedBytes = mapper.writeValueAsBytes(hashedNode);
                        final HashCode hashCode = Hashing.sha1().hashBytes(hashedBytes);

                        // Inject the result as an extra field into the to-be-indexed JSON content
                        objNode.put(DocumentMapping._HASH, hashCode.toString());
                        _source = mapper.writeValueAsBytes(objNode);

                    } else {
                        _source = mapper.writeValueAsBytes(obj);
                    }

                    processor.add(new IndexRequest(typeIndex, typeString, id).opType(OpType.INDEX)
                            .source(_source, XContentType.JSON));
                }
            }

            for (String id : deleteOperations.removeAll(type)) {
                processor.add(new DeleteRequest(typeIndex, typeString, id));
            }

            // Flush processor between index boundaries
            processor.flush();
        }

        // Remaining delete operations can be executed on their own
        for (Class<?> type : ImmutableSet.copyOf(deleteOperations.keySet())) {
            final DocumentMapping mapping = admin.mappings().getMapping(type);
            final String typeString = mapping.typeAsString();
            final String typeIndex = admin.getTypeIndex(mapping);

            mappingsToRefresh.add(mapping);

            for (String id : deleteOperations.removeAll(type)) {
                processor.add(new DeleteRequest(typeIndex, typeString, id));
            }

            // Flush processor between index boundaries
            processor.flush();
        }

        try {
            processor.awaitClose(5, TimeUnit.MINUTES);
        } catch (InterruptedException e) {
            throw new IndexException("Interrupted bulk processing part of the commit", e);
        }
    }

    // refresh the index if there were only updates
    admin.refresh(mappingsToRefresh);
}

From source file: org.thingsboard.server.dao.timeseries.BaseTimeseriesService.java

@Override
public ListenableFuture<List<Void>> save(TenantId tenantId, EntityId entityId, TsKvEntry tsKvEntry) {
    validate(entityId);
    if (tsKvEntry == null) {
        throw new IncorrectParameterException("Key value entry can't be null");
    }
    List<ListenableFuture<Void>> futures = Lists.newArrayListWithExpectedSize(INSERTS_PER_ENTRY);
    saveAndRegisterFutures(tenantId, futures, entityId, tsKvEntry, 0L);
    return Futures.allAsList(futures);
}

From source file: com.yahoo.yqlplus.engine.internal.generate.ProgramInvocation.java

public final GambitRuntime getRuntime(TaskContext context) {
    final ListeningExecutorService tasks = ((ScopedTracingExecutor) this.tasks).createSubExecutor(context);
    return new GambitRuntime() {
        @Override
        public ListenableFuture<List<Object>> scatter(List<Callable<Object>> targets) {
            List<ListenableFuture<Object>> resultList = Lists.newArrayListWithExpectedSize(targets.size());
            for (Callable<Object> out : targets) {
                resultList.add(fork(out));
            }
            return Futures.allAsList(resultList);
        }

        @Override
        public ListenableFuture<List<Object>> scatterAsync(List<Callable<ListenableFuture<Object>>> targets) {
            List<ListenableFuture<Object>> resultList = Lists.newArrayListWithExpectedSize(targets.size());
            for (Callable<ListenableFuture<Object>> out : targets) {
                resultList.add(forkAsync(out));
            }
            return Futures.allAsList(resultList);
        }

        @Override
        public ListenableFuture<Object> fork(Callable<Object> target) {
            return tasks.submit(target);
        }

        @Override
        public ListenableFuture<Object> forkAsync(Callable<ListenableFuture<Object>> target) {
            return Futures.dereference(tasks.submit(target));
        }
    };
}

From source file: org.opendaylight.openflowplugin.impl.services.SalMetersBatchServiceImpl.java

@Override
public Future<RpcResult<UpdateMetersBatchOutput>> updateMetersBatch(final UpdateMetersBatchInput input) {
    final List<BatchUpdateMeters> batchUpdateMeters = input.getBatchUpdateMeters();
    LOG.trace("Updating meters @ {} : {}", PathUtil.extractNodeId(input.getNode()), batchUpdateMeters.size());

    final ArrayList<ListenableFuture<RpcResult<UpdateMeterOutput>>> resultsLot = new ArrayList<>();
    for (BatchUpdateMeters batchMeter : batchUpdateMeters) {
        final UpdateMeterInput updateMeterInput = new UpdateMeterInputBuilder(input)
                .setOriginalMeter(new OriginalMeterBuilder(batchMeter.getOriginalBatchedMeter()).build())
                .setUpdatedMeter(new UpdatedMeterBuilder(batchMeter.getUpdatedBatchedMeter()).build())
                .setMeterRef(createMeterRef(input.getNode(), batchMeter)).setNode(input.getNode()).build();
        resultsLot.add(JdkFutureAdapters.listenInPoolThread(salMeterService.updateMeter(updateMeterInput)));
    }

    final Iterable<Meter> meters = Iterables.transform(batchUpdateMeters,
            new Function<BatchUpdateMeters, Meter>() {
                @Nullable
                @Override
                public Meter apply(@Nullable final BatchUpdateMeters input) {
                    return input.getUpdatedBatchedMeter();
                }
            });

    final ListenableFuture<RpcResult<List<BatchFailedMetersOutput>>> commonResult = Futures.transform(
            Futures.allAsList(resultsLot),
            MeterUtil.<UpdateMeterOutput>createCumulativeFunction(meters, batchUpdateMeters.size()));

    ListenableFuture<RpcResult<UpdateMetersBatchOutput>> updateMetersBulkFuture = Futures
            .transform(commonResult, MeterUtil.METER_UPDATE_TRANSFORM);

    if (input.isBarrierAfter()) {
        updateMetersBulkFuture = BarrierUtil.chainBarrier(updateMetersBulkFuture, input.getNode(),
                transactionService, MeterUtil.METER_UPDATE_COMPOSING_TRANSFORM);
    }

    return updateMetersBulkFuture;
}

From source file: org.opendaylight.openflowplugin.impl.services.SalGroupsBatchServiceImpl.java

@Override
public Future<RpcResult<UpdateGroupsBatchOutput>> updateGroupsBatch(final UpdateGroupsBatchInput input) {
    final List<BatchUpdateGroups> batchUpdateGroups = input.getBatchUpdateGroups();
    LOG.trace("Updating groups @ {} : {}", PathUtil.extractNodeId(input.getNode()), batchUpdateGroups.size());

    final ArrayList<ListenableFuture<RpcResult<UpdateGroupOutput>>> resultsLot = new ArrayList<>();
    for (BatchUpdateGroups batchGroup : batchUpdateGroups) {
        final UpdateGroupInput updateGroupInput = new UpdateGroupInputBuilder(input)
                .setOriginalGroup(new OriginalGroupBuilder(batchGroup.getOriginalBatchedGroup()).build())
                .setUpdatedGroup(new UpdatedGroupBuilder(batchGroup.getUpdatedBatchedGroup()).build())
                .setGroupRef(createGroupRef(input.getNode(), batchGroup)).setNode(input.getNode()).build();
        resultsLot.add(JdkFutureAdapters.listenInPoolThread(salGroupService.updateGroup(updateGroupInput)));
    }

    final Iterable<Group> groups = Iterables.transform(batchUpdateGroups,
            new Function<BatchUpdateGroups, Group>() {
                @Nullable
                @Override
                public Group apply(@Nullable final BatchUpdateGroups input) {
                    return input.getUpdatedBatchedGroup();
                }
            });

    final ListenableFuture<RpcResult<List<BatchFailedGroupsOutput>>> commonResult = Futures.transform(
            Futures.allAsList(resultsLot),
            GroupUtil.<UpdateGroupOutput>createCumulatingFunction(groups, batchUpdateGroups.size()));

    ListenableFuture<RpcResult<UpdateGroupsBatchOutput>> updateGroupsBulkFuture = Futures
            .transform(commonResult, GroupUtil.GROUP_UPDATE_TRANSFORM);

    if (input.isBarrierAfter()) {
        updateGroupsBulkFuture = BarrierUtil.chainBarrier(updateGroupsBulkFuture, input.getNode(),
                transactionService, GroupUtil.GROUP_UPDATE_COMPOSING_TRANSFORM);
    }

    return updateGroupsBulkFuture;
}

From source file: org.thingsboard.server.dao.timeseries.CassandraBaseTimeseriesDao.java

@Override
public ListenableFuture<List<TsKvEntry>> findAllAsync(TenantId tenantId, EntityId entityId,
        List<ReadTsKvQuery> queries) {
    List<ListenableFuture<List<TsKvEntry>>> futures = queries.stream()
            .map(query -> findAllAsync(tenantId, entityId, query)).collect(Collectors.toList());
    return Futures.transform(Futures.allAsList(futures),
            new Function<List<List<TsKvEntry>>, List<TsKvEntry>>() {
                @Nullable
                @Override
                public List<TsKvEntry> apply(@Nullable List<List<TsKvEntry>> results) {
                    if (results == null || results.isEmpty()) {
                        return null;
                    }
                    return results.stream().flatMap(List::stream).collect(Collectors.toList());
                }
            }, readResultsProcessingExecutor);
}

From source file: org.opendaylight.distributed.tx.it.provider.datawriter.DtxNetconfAsyncWriter.java

/**
 * Asynchronously writes configuration to a NetConf device using the distributed-tx API.
 */
@Override
public void writeData() {
    int putsPerTx = input.getPutsPerTx();
    int counter = 0;
    List<ListenableFuture<Void>> putFutures = new ArrayList<ListenableFuture<Void>>(putsPerTx);
    List<NodeId> nodeIdList = new ArrayList<>(this.nodeIdSet);
    Set<InstanceIdentifier<?>> txIidSet = new HashSet<>();
    NodeId nodeId = nodeIdList.get(0);
    InstanceIdentifier<Node> msNodeId = NETCONF_TOPO_IID.child(Node.class, new NodeKey(nodeId));
    InterfaceName ifName = nodeIfList.get(nodeId).get(0);

    if (input.getOperation() == OperationType.DELETE) {
        //Build subInterfaces for delete operation
        configInterface();
    }

    txIidSet.add(msNodeId);
    dtx = dTxProvider.newTx(txIidSet);
    startTime = System.nanoTime();
    for (int i = 1; i <= input.getLoop(); i++) {
        KeyedInstanceIdentifier<InterfaceConfiguration, InterfaceConfigurationKey> specificInterfaceCfgIid = netconfIid
                .child(InterfaceConfiguration.class, new InterfaceConfigurationKey(
                        new InterfaceActive(DTXITConstants.INTERFACE_ACTIVE), ifName));

        InterfaceConfigurationBuilder interfaceConfigurationBuilder = new InterfaceConfigurationBuilder();
        interfaceConfigurationBuilder.setInterfaceName(ifName);
        interfaceConfigurationBuilder
                .setDescription(DTXITConstants.TEST_DESCRIPTION + input.getOperation() + i);
        interfaceConfigurationBuilder.setActive(new InterfaceActive(DTXITConstants.INTERFACE_ACTIVE));
        InterfaceConfiguration config = interfaceConfigurationBuilder.build();

        CheckedFuture<Void, DTxException> writeFuture = null;
        if (input.getOperation() == OperationType.PUT) {
            //Put configuration to the same interface
            writeFuture = dtx.putAndRollbackOnFailure(DTXLogicalTXProviderType.NETCONF_TX_PROVIDER,
                    LogicalDatastoreType.CONFIGURATION, specificInterfaceCfgIid, config, msNodeId);
        } else if (input.getOperation() == OperationType.MERGE) {
            //Merge configuration to the same interface
            writeFuture = dtx.mergeAndRollbackOnFailure(DTXLogicalTXProviderType.NETCONF_TX_PROVIDER,
                    LogicalDatastoreType.CONFIGURATION, specificInterfaceCfgIid, config, msNodeId);
        } else {
            //Delete subInterfaces
            InterfaceName subIfName = new InterfaceName(DTXITConstants.INTERFACE_NAME_PREFIX + i);
            KeyedInstanceIdentifier<InterfaceConfiguration, InterfaceConfigurationKey> subSpecificInterfaceCfgIid = netconfIid
                    .child(InterfaceConfiguration.class, new InterfaceConfigurationKey(
                            new InterfaceActive(DTXITConstants.INTERFACE_ACTIVE), subIfName));
            writeFuture = dtx.deleteAndRollbackOnFailure(DTXLogicalTXProviderType.NETCONF_TX_PROVIDER,
                    LogicalDatastoreType.CONFIGURATION, subSpecificInterfaceCfgIid, msNodeId);
        }
        putFutures.add(writeFuture);
        counter++;

        if (counter == putsPerTx) {
            ListenableFuture<Void> aggregatePutFuture = Futures.transform(Futures.allAsList(putFutures),
                    new Function<List<Void>, Void>() {
                        @Nullable
                        @Override
                        public Void apply(@Nullable List<Void> voids) {
                            return null;
                        }
                    });
            try {
                aggregatePutFuture.get();
                CheckedFuture<Void, TransactionCommitFailedException> submitFuture = dtx.submit();
                try {
                    submitFuture.checkedGet();
                    txSucceed++;
                } catch (TransactionCommitFailedException e) {
                    txError++;
                }
            } catch (Exception e) {
                txError++;
                dtx.cancel();
            }

            counter = 0;
            dtx = dTxProvider.newTx(txIidSet);
            putFutures = new ArrayList<ListenableFuture<Void>>(putsPerTx);
        }
    }

    ListenableFuture<Void> aggregatePutFuture = Futures.transform(Futures.allAsList(putFutures),
            new Function<List<Void>, Void>() {
                @Nullable
                @Override
                public Void apply(@Nullable List<Void> voids) {
                    return null;
                }
            });

    try {
        aggregatePutFuture.get();
        CheckedFuture<Void, TransactionCommitFailedException> restSubmitFuture = dtx.submit();
        try {
            restSubmitFuture.checkedGet();
            txSucceed++;
        } catch (Exception e) {
            txError++;
        }
    } catch (Exception e) {
        txError++;
    }
    endTime = System.nanoTime();
}

From source file: org.opendaylight.controller.cluster.datastore.TransactionProxy.java

private CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> readAllData() {
    final Set<String> allShardNames = txContextFactory.getActorContext().getConfiguration().getAllShardNames();
    final Collection<CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException>> futures = new ArrayList<>(
            allShardNames.size());

    for (String shardName : allShardNames) {
        futures.add(singleShardRead(shardName, YangInstanceIdentifier.EMPTY));
    }

    final ListenableFuture<List<Optional<NormalizedNode<?, ?>>>> listFuture = Futures.allAsList(futures);
    final ListenableFuture<Optional<NormalizedNode<?, ?>>> aggregateFuture;

    aggregateFuture = Futures.transform(listFuture,
            new Function<List<Optional<NormalizedNode<?, ?>>>, Optional<NormalizedNode<?, ?>>>() {
                @Override
                public Optional<NormalizedNode<?, ?>> apply(final List<Optional<NormalizedNode<?, ?>>> input) {
                    try {
                        return NormalizedNodeAggregator.aggregate(YangInstanceIdentifier.EMPTY, input,
                                txContextFactory.getActorContext().getSchemaContext(),
                                txContextFactory.getActorContext().getDatastoreContext().getLogicalStoreType());
                    } catch (DataValidationFailedException e) {
                        throw new IllegalArgumentException("Failed to aggregate", e);
                    }
                }
            });

    return MappingCheckedFuture.create(aggregateFuture, ReadFailedException.MAPPER);
}

From source file: com.facebook.presto.spiller.GenericPartitioningSpiller.java

private synchronized ListenableFuture<?> flush(Predicate<PageBuilder> flushCondition) {
    requireNonNull(flushCondition, "flushCondition is null");
    ImmutableList.Builder<ListenableFuture<?>> futures = ImmutableList.builder();

    for (int partition = 0; partition < spillers.length; partition++) {
        PageBuilder pageBuilder = pageBuilders[partition];
        if (flushCondition.test(pageBuilder)) {
            futures.add(flush(partition));
        }
    }

    return Futures.allAsList(futures.build());
}

From source file: org.thingsboard.server.dao.sql.timeseries.JpaTimeseriesDao.java

private ListenableFuture<List<TsKvEntry>> findAllAsync(TenantId tenantId, EntityId entityId,
        ReadTsKvQuery query) {
    if (query.getAggregation() == Aggregation.NONE) {
        return findAllAsyncWithLimit(entityId, query);
    } else {
        long stepTs = query.getStartTs();
        List<ListenableFuture<Optional<TsKvEntry>>> futures = new ArrayList<>();
        while (stepTs < query.getEndTs()) {
            long startTs = stepTs;
            long endTs = stepTs + query.getInterval();
            long ts = startTs + (endTs - startTs) / 2;
            futures.add(findAndAggregateAsync(entityId, query.getKey(), startTs, endTs, ts,
                    query.getAggregation()));
            stepTs = endTs;
        }
        ListenableFuture<List<Optional<TsKvEntry>>> future = Futures.allAsList(futures);
        return Futures.transform(future, new Function<List<Optional<TsKvEntry>>, List<TsKvEntry>>() {
            @Nullable
            @Override
            public List<TsKvEntry> apply(@Nullable List<Optional<TsKvEntry>> results) {
                if (results == null || results.isEmpty()) {
                    return null;
                }
                return results.stream().filter(Optional::isPresent).map(Optional::get)
                        .collect(Collectors.toList());
            }
        }, service);
    }
}