Example usage for com.google.common.util.concurrent Futures transform

Introduction

On this page you can find example usages of the com.google.common.util.concurrent Futures.transform method, collected from open-source projects.

Prototype

public static <I, O> ListenableFuture<O> transform(ListenableFuture<I> input,
        Function<? super I, ? extends O> function) 

Document

Returns a new ListenableFuture whose result is the product of applying the given Function to the result of the given Future.
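
Before the project examples below, here is a minimal, self-contained sketch of the two-argument overload shown in the prototype above. The class and method names are invented for illustration, and note that newer Guava releases replace this overload with a variant that takes an explicit Executor.

import com.google.common.base.Function;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;

public class TransformSketch {

    /**
     * Derives a ListenableFuture<Integer> holding the length of the string
     * produced by the input future. The Function runs once the input future
     * completes successfully; a failure of the input propagates unchanged.
     */
    public static ListenableFuture<Integer> lengthOf(ListenableFuture<String> words) {
        return Futures.transform(words, new Function<String, Integer>() {
            @Override
            public Integer apply(String input) {
                return input.length();
            }
        });
    }
}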

Usage

From source file:me.grapebaba.hyperledger.fabric.MemberServiceImpl.java

@Override
public ListenableFuture<Enrollment> enroll(EnrollmentRequest enrollmentRequest) {
    Preconditions.checkNotNull(enrollmentRequest.getEnrollmentID());
    Preconditions.checkNotNull(enrollmentRequest.getEnrollmentSecret());

    final KeyPair signingKeyPair = crypto.ecdsaKeyGen();
    PublicKey signingPublicKey = signingKeyPair.getPublic();

    final KeyPair encryptionKeyPair = crypto.ecdsaKeyGen();
    PublicKey encryptionPublicKey = encryptionKeyPair.getPublic();

    final Timestamp timestamp = Timestamp.newBuilder().setSeconds(System.currentTimeMillis() / 1000).setNanos(0)
            .build();
    final Ca.Identity id = Ca.Identity.newBuilder().setId(enrollmentRequest.getEnrollmentID()).build();
    final Ca.Token tok = Ca.Token.newBuilder()
            .setTok(ByteString.copyFrom(enrollmentRequest.getEnrollmentSecret(), Charset.defaultCharset()))
            .build();
    final Ca.PublicKey signingPubKey = Ca.PublicKey.newBuilder().setType(Ca.CryptoType.ECDSA)
            .setKey(ByteString.copyFrom(signingPublicKey.getEncoded())).build();
    final Ca.PublicKey encryptionPubKey = Ca.PublicKey.newBuilder().setType(Ca.CryptoType.ECDSA)
            .setKey(ByteString.copyFrom(encryptionPublicKey.getEncoded())).build();
    final Ca.ECertCreateReq eCertCreateReq = Ca.ECertCreateReq.newBuilder().setId(id).setTok(tok)
            .setTs(timestamp).setSign(signingPubKey).setEnc(encryptionPubKey).buildPartial();

    ListenableFuture<ByteString> updatedTokenFuture = Futures.transform(
            ecapStub.createCertificatePair(eCertCreateReq), new Function<Ca.ECertCreateResp, ByteString>() {
                @Nullable
                @Override
                public ByteString apply(@Nullable Ca.ECertCreateResp input) {
                    return crypto.eciesDecrypt(encryptionKeyPair.getPrivate(), input.getTok().getTok());
                }
            });

    ListenableFuture<Ca.ECertCreateResp> eCertCreateResp = Futures.transformAsync(updatedTokenFuture,
            new AsyncFunction<ByteString, Ca.ECertCreateResp>() {
                @Override
                public ListenableFuture<Ca.ECertCreateResp> apply(@Nullable ByteString input) throws Exception {
                    final Ca.Token tok = Ca.Token.newBuilder().setTok(input).build();
                    ByteString origin = eCertCreateReq.toBuilder().setTok(tok).buildPartial().toByteString();
                    BigInteger[] sig = crypto.ecdsaSign(signingKeyPair.getPrivate(), origin);
                    Ca.Signature signature = Ca.Signature.newBuilder()
                            .setR(ByteString.copyFrom(BigIntegers.asUnsignedByteArray(sig[0])))
                            .setS(ByteString.copyFrom(BigIntegers.asUnsignedByteArray(sig[1])))
                            .setType(Ca.CryptoType.ECDSA).build();

                    return ecapStub.createCertificatePair(eCertCreateReq.toBuilder().setSig(signature).build());
                }
            });

    return Futures.transform(eCertCreateResp, new Function<Ca.ECertCreateResp, Enrollment>() {
        @Nullable
        @Override
        public Enrollment apply(@Nullable Ca.ECertCreateResp input) {
            return Enrollment.newBuilder().withKey(signingKeyPair.getPrivate())
                    .withCert(input.getCerts().getSign()).withChainKey(input.getPkchain()).build();
        }
    });
}

From source file:io.v.x.jni.test.fortune.FortuneServerImpl.java

@Override
public ListenableFuture<Void> glob(VContext context, ServerCall call, String pattern,
        final ServerSendStream<GlobReply> stream) {
    final GlobReply.Entry entry = new GlobReply.Entry(
            new MountEntry("helloworld", ImmutableList.<MountedServer>of(), false, false));
    final GlobReply.Error error = new GlobReply.Error(
            new GlobError("Hello, world!", new VException("Some error")));
    return Futures.transform(stream.send(entry), new AsyncFunction<Void, Void>() {
        @Override
        public ListenableFuture<Void> apply(Void input) throws Exception {
            return stream.send(error);
        }
    });
}

From source file:org.onosproject.store.flowext.impl.DefaultFlowRuleExtRouter.java

/**
 * apply the sub batch of flow extension rules.
 *
 * @param batchOperation batch of flow rules.
 *                       A batch can contain flow rules for a single device only.
 * @return Future response indicating success/failure of the batch operation
 * all the way down to the device.
 */
@Override
public Future<FlowExtCompletedOperation> applySubBatch(FlowRuleBatchRequest batchOperation) {
    // TODO Auto-generated method stub
    if (batchOperation.ops().isEmpty()) {
        return Futures.immediateFuture(
                new FlowExtCompletedOperation(batchOperation.batchId(), true, Collections.emptySet()));
    }
    // get the deviceId all the collection belongs to
    DeviceId deviceId = getBatchDeviceId(batchOperation.ops());

    if (deviceId == null) {
        log.error("This Batch exists more than two deviceId");
        return null;
    }
    ReplicaInfo replicaInfo = replicaInfoManager.getReplicaInfoFor(deviceId);

    if (replicaInfo.master().get().equals(clusterService.getLocalNode().id())) {
        return applyBatchInternal(batchOperation);
    }

    log.trace("Forwarding storeBatch to {}, which is the primary (master) for device {}",
            replicaInfo.master().orNull(), deviceId);

    ClusterMessage message = new ClusterMessage(clusterService.getLocalNode().id(), APPLY_EXTEND_FLOWS,
            SERIALIZER.encode(batchOperation));

    try {
        ListenableFuture<byte[]> responseFuture = clusterCommunicator.sendAndReceive(message,
                replicaInfo.master().get());
        // here should add another decode process
        return Futures.transform(responseFuture, new DecodeTo<FlowExtCompletedOperation>(SERIALIZER));
    } catch (IOException e) {
        return Futures.immediateFailedFuture(e);
    }
}

From source file:io.crate.action.sql.DDLStatementDispatcher.java

private ListenableFuture<Long> wrapRowCountFuture(ListenableFuture<?> wrappedFuture, final Long rowCount) {
    return Futures.transform(wrappedFuture, new Function<Object, Long>() {
        @Nullable
        @Override
        public Long apply(@Nullable Object input) {
            return rowCount;
        }
    });
}

From source file:org.opendaylight.controller.sal.connect.netconf.sal.tx.NetconfDeviceWriteOnlyTx.java

@Override
public CheckedFuture<Void, TransactionCommitFailedException> submit() {
    final ListenableFuture<Void> commmitFutureAsVoid = Futures.transform(commit(),
            new Function<RpcResult<TransactionStatus>, Void>() {
                @Override
                public Void apply(final RpcResult<TransactionStatus> input) {
                    return null;
                }
            });

    return Futures.makeChecked(commmitFutureAsVoid,
            new Function<Exception, TransactionCommitFailedException>() {
                @Override
                public TransactionCommitFailedException apply(final Exception input) {
                    return new TransactionCommitFailedException(
                            "Submit of transaction " + getIdentifier() + " failed", input);
                }
            });
}

From source file:org.opendaylight.openflowplugin.impl.services.SalFlowsBatchServiceImpl.java

@Override
public Future<RpcResult<UpdateFlowsBatchOutput>> updateFlowsBatch(final UpdateFlowsBatchInput input) {
    LOG.trace("Updating flows @ {} : {}", PathUtil.extractNodeId(input.getNode()),
            input.getBatchUpdateFlows().size());
    final ArrayList<ListenableFuture<RpcResult<UpdateFlowOutput>>> resultsLot = new ArrayList<>();
    for (BatchUpdateFlows batchFlow : input.getBatchUpdateFlows()) {
        final UpdateFlowInput updateFlowInput = new UpdateFlowInputBuilder(input)
                .setOriginalFlow(new OriginalFlowBuilder(batchFlow.getOriginalBatchedFlow()).build())
                .setUpdatedFlow(new UpdatedFlowBuilder(batchFlow.getUpdatedBatchedFlow()).build())
                .setFlowRef(createFlowRef(input.getNode(), batchFlow)).setNode(input.getNode()).build();
        resultsLot.add(JdkFutureAdapters.listenInPoolThread(salFlowService.updateFlow(updateFlowInput)));
    }

    final ListenableFuture<RpcResult<List<BatchFailedFlowsOutput>>> commonResult = Futures.transform(
            Futures.successfulAsList(resultsLot),
            FlowUtil.<UpdateFlowOutput>createCumulatingFunction(input.getBatchUpdateFlows()));

    ListenableFuture<RpcResult<UpdateFlowsBatchOutput>> updateFlowsBulkFuture = Futures.transform(commonResult,
            FlowUtil.FLOW_UPDATE_TRANSFORM);

    if (input.isBarrierAfter()) {
        updateFlowsBulkFuture = BarrierUtil.chainBarrier(updateFlowsBulkFuture, input.getNode(),
                transactionService, FlowUtil.FLOW_UPDATE_COMPOSING_TRANSFORM);
    }

    return updateFlowsBulkFuture;
}

From source file:org.kiji.schema.impl.cassandra.CassandraKijiResult.java

/**
 * Query Cassandra for a Kiji qualified-column or column-family in a Kiji row. The result is a
 * future containing an iterator over the result cells.
 *
 * @param tableURI The table URI.
 * @param entityId The entity ID of the row in the Kiji table.
 * @param columnRequest The requested column.
 * @param dataRequest The data request defining the request options.
 * @param layout The table's layout.
 * @param translator A column name translator for the table.
 * @param decoderProvider A decoder provider for the table.
 * @param admin The Cassandra connection to use for querying.
 * @param <T> The value type of the column.
 * @return A future containing an iterator of cells in the column.
 */
public static <T> ListenableFuture<Iterator<KijiCell<T>>> getColumn(final KijiURI tableURI,
        final EntityId entityId, final Column columnRequest, final KijiDataRequest dataRequest,
        final KijiTableLayout layout, final CassandraColumnNameTranslator translator,
        final CellDecoderProvider decoderProvider, final CassandraAdmin admin) {
    final KijiColumnName column = columnRequest.getColumnName();
    final CassandraColumnName cassandraColumn;
    try {
        cassandraColumn = translator.toCassandraColumnName(column);
    } catch (NoSuchColumnException e) {
        throw new IllegalArgumentException(String.format("No such column '%s' in table %s.", column, tableURI));
    }

    final ColumnId localityGroupId = layout.getFamilyMap().get(column.getFamily()).getLocalityGroup().getId();
    final CassandraTableName table = CassandraTableName.getLocalityGroupTableName(tableURI, localityGroupId);

    if (column.isFullyQualified()) {

        final Statement statement = CQLUtils.getQualifiedColumnGetStatement(layout, table, entityId,
                cassandraColumn, dataRequest, columnRequest);

        return Futures.transform(admin.executeAsync(statement),
                RowDecoders.<T>getQualifiedColumnDecoderFunction(column, decoderProvider));
    } else {

        if (columnRequest.getMaxVersions() != 0) {
            LOG.warn(
                    "Cassandra Kiji can not efficiently get a column family with max versions"
                            + " (column family: {}, max version: {}). Filtering versions on the client.",
                    column, columnRequest.getMaxVersions());
        }

        if (dataRequest.getMaxTimestamp() != Long.MAX_VALUE
                || dataRequest.getMinTimestamp() != Long.MIN_VALUE) {
            LOG.warn(
                    "Cassandra Kiji can not efficiently restrict a timestamp on a column family: "
                            + " (column family: {}, data request: {}). Filtering timestamps on the client.",
                    column, dataRequest);
        }

        final Statement statement = CQLUtils.getColumnFamilyGetStatement(layout, table, entityId,
                cassandraColumn, columnRequest);

        return Futures.transform(admin.executeAsync(statement), RowDecoders.<T>getColumnFamilyDecoderFunction(
                table, column, columnRequest, dataRequest, layout, translator, decoderProvider));
    }
}

From source file:io.mandrel.transport.thrift.nifty.ThriftClientManager.java

public <T, C extends NiftyClientChannel> ListenableFuture<T> createClient(
        final NiftyClientConnector<C> connector, final Class<T> type, @Nullable final Duration connectTimeout,
        @Nullable final Duration receiveTimeout, @Nullable final Duration readTimeout,
        @Nullable final Duration writeTimeout, final int maxFrameSize, @Nullable final String clientName,
        final List<? extends ThriftClientEventHandler> eventHandlers, @Nullable HostAndPort socksProxy) {
    checkNotNull(connector, "connector is null");
    checkNotNull(type, "type is null");
    checkNotNull(eventHandlers, "eventHandlers is null");

    final ListenableFuture<C> connectFuture = createChannel(connector, connectTimeout, receiveTimeout,
            readTimeout, writeTimeout, maxFrameSize, socksProxy);

    ListenableFuture<T> clientFuture = Futures.transform(connectFuture, new Function<C, T>() {
        @Nullable
        @Override
        public T apply(@NotNull C channel) {
            String name = Strings.isNullOrEmpty(clientName) ? connector.toString() : clientName;

            try {
                return createClient(channel, type, name, eventHandlers);
            } catch (Throwable t) {
                // The channel was created successfully, but client creation failed so the
                // channel must be closed now
                channel.close();
                throw t;
            }
        }
    });

    return clientFuture;
}

From source file:org.opendaylight.openflowplugin.impl.services.SalMetersBatchServiceImpl.java

@Override
public Future<RpcResult<RemoveMetersBatchOutput>> removeMetersBatch(final RemoveMetersBatchInput input) {
    LOG.trace("Removing meters @ {} : {}", PathUtil.extractNodeId(input.getNode()),
            input.getBatchRemoveMeters().size());
    final ArrayList<ListenableFuture<RpcResult<RemoveMeterOutput>>> resultsLot = new ArrayList<>();
    for (BatchRemoveMeters addMeter : input.getBatchRemoveMeters()) {
        final RemoveMeterInput removeMeterInput = new RemoveMeterInputBuilder(addMeter)
                .setMeterRef(createMeterRef(input.getNode(), addMeter)).setNode(input.getNode()).build();
        resultsLot.add(JdkFutureAdapters.listenInPoolThread(salMeterService.removeMeter(removeMeterInput)));
    }

    final ListenableFuture<RpcResult<List<BatchFailedMetersOutput>>> commonResult = Futures.transform(
            Futures.allAsList(resultsLot),
            MeterUtil.<RemoveMeterOutput>createCumulativeFunction(input.getBatchRemoveMeters()));

    ListenableFuture<RpcResult<RemoveMetersBatchOutput>> removeMetersBulkFuture = Futures
            .transform(commonResult, MeterUtil.METER_REMOVE_TRANSFORM);

    if (input.isBarrierAfter()) {
        removeMetersBulkFuture = BarrierUtil.chainBarrier(removeMetersBulkFuture, input.getNode(),
                transactionService, MeterUtil.METER_REMOVE_COMPOSING_TRANSFORM);
    }

    return removeMetersBulkFuture;
}

From source file:org.opendaylight.openflowplugin.impl.services.SalGroupsBatchServiceImpl.java

@Override
public Future<RpcResult<RemoveGroupsBatchOutput>> removeGroupsBatch(final RemoveGroupsBatchInput input) {
    LOG.trace("Removing groups @ {} : {}", PathUtil.extractNodeId(input.getNode()),
            input.getBatchRemoveGroups().size());
    final ArrayList<ListenableFuture<RpcResult<RemoveGroupOutput>>> resultsLot = new ArrayList<>();
    for (BatchRemoveGroups addGroup : input.getBatchRemoveGroups()) {
        final RemoveGroupInput removeGroupInput = new RemoveGroupInputBuilder(addGroup)
                .setGroupRef(createGroupRef(input.getNode(), addGroup)).setNode(input.getNode()).build();
        resultsLot.add(JdkFutureAdapters.listenInPoolThread(salGroupService.removeGroup(removeGroupInput)));
    }

    final ListenableFuture<RpcResult<List<BatchFailedGroupsOutput>>> commonResult = Futures.transform(
            Futures.allAsList(resultsLot),
            GroupUtil.<RemoveGroupOutput>createCumulatingFunction(input.getBatchRemoveGroups()));

    ListenableFuture<RpcResult<RemoveGroupsBatchOutput>> removeGroupsBulkFuture = Futures
            .transform(commonResult, GroupUtil.GROUP_REMOVE_TRANSFORM);

    if (input.isBarrierAfter()) {
        removeGroupsBulkFuture = BarrierUtil.chainBarrier(removeGroupsBulkFuture, input.getNode(),
                transactionService, GroupUtil.GROUP_REMOVE_COMPOSING_TRANSFORM);
    }

    return removeGroupsBulkFuture;
}