Example usage for com.google.common.util.concurrent Futures immediateFuture

Introduction

This page collects example usages of com.google.common.util.concurrent.Futures#immediateFuture drawn from open-source projects.

Prototype

@CheckReturnValue
public static <V> ListenableFuture<V> immediateFuture(@Nullable V value) 

Document

Creates a ListenableFuture which has its value set immediately upon construction.
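For orientation before the real-world examples below, here is a minimal sketch of the contract (the class name and values are illustrative, not taken from any project): the returned future is complete at construction time, so get() returns without blocking.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.concurrent.ExecutionException;

public class ImmediateFutureDemo {
    public static void main(String[] args) throws ExecutionException, InterruptedException {
        // The future is already in the "done" state; no executor or task is involved.
        ListenableFuture<String> done = Futures.immediateFuture("hello");
        System.out.println(done.isDone()); // true
        System.out.println(done.get());    // "hello", returned without blocking
    }
}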

Usage

From source file:com.google.gapid.server.Client.java
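Here immediateFuture bridges a synchronous check into an asynchronous chain: inside transformAsync, throwIfError either throws or returns the capture path, which is then wrapped in an already-completed future.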

public ListenableFuture<Path.Capture> importCapture(byte[] data) {
    LOG.log(FINE, "RPC->importCapture(<{0} bytes>)", data.length);
    return Futures.transformAsync(
            client.importCapture(ImportCaptureRequest.newBuilder().setData(ByteString.copyFrom(data)).build()),
            in -> Futures.immediateFuture(throwIfError(in.getCapture(), in.getError())));
}

From source file:com.google.api.server.spi.config.datastore.testing.FakeAsyncMemcacheService.java
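A test fake that performs the memcache write synchronously and returns Futures.immediateFuture(null) to satisfy the asynchronous Future&lt;Void&gt; signature.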

@Override
public Future<Void> putAll(Map<?, ?> values) {
    memcacheService.putAll(values);
    return Futures.immediateFuture(null);
}

From source file:com.vsct.strowgr.monitoring.gui.cassandra.CassandraClient.java
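Paging through a Cassandra result set: once the last page has been consumed, the accumulated multimap is returned via immediateFuture; otherwise the function recurses over the next fetched page.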

private static AsyncFunction<ResultSet, Multimap<Key, Value>> iterate(Multimap<Key, Value> result) {
    return new AsyncFunction<ResultSet, Multimap<Key, Value>>() {
        @Override
        public ListenableFuture<Multimap<Key, Value>> apply(ResultSet rs) throws Exception {
            int remaining = rs.getAvailableWithoutFetching();

            for (Row row : rs) {
                Date d = row.get(0, Date.class);
                String name = row.get(1, String.class);
                String correlationId = row.get(2, UUID.class).toString();

                Key k = new Key();
                k.correlationId = correlationId;
                Value v = new Value();
                v.name = name;
                v.timestamp = d;

                if (showPayload) {
                    v.payload = row.get(3, String.class);
                }

                result.put(k, v);

                messageProcessed.incrementAndGet();
                if (--remaining == 0)
                    break;
            }

            boolean wasLastPage = rs.getExecutionInfo().getPagingState() == null;
            if (wasLastPage) {
                return Futures.immediateFuture(result);
            } else {
                ListenableFuture<ResultSet> future = rs.fetchMoreResults();
                return Futures.transform(future, iterate(result));
            }

        }
    };
}

From source file:org.openecomp.sdnc.datachange.DataChangeProvider.java
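An OpenDaylight RPC implementation: the service logic runs synchronously, so both the early exit for invalid input and the normal completion path wrap their RpcResult in Futures.immediateFuture.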

@Override
public Future<RpcResult<DataChangeNotificationOutput>> dataChangeNotification(
        DataChangeNotificationInput input) {
    final String SVC_OPERATION = "data-change-notification";

    Properties parms = new Properties();
    DataChangeNotificationOutputBuilder serviceDataBuilder = new DataChangeNotificationOutputBuilder();

    log.info(SVC_OPERATION + " called.");

    if (input == null || input.getAaiEventId() == null) {
        log.debug("exiting " + SVC_OPERATION + " because of invalid input");
        serviceDataBuilder.setDataChangeResponseCode("403");
        RpcResult<DataChangeNotificationOutput> rpcResult = RpcResultBuilder
                .<DataChangeNotificationOutput>status(true).withResult(serviceDataBuilder.build()).build();
        return Futures.immediateFuture(rpcResult);
    }

    // add input to parms
    log.info("Adding INPUT data for " + SVC_OPERATION + " input: " + input);
    DataChangeNotificationInputBuilder inputBuilder = new DataChangeNotificationInputBuilder(input);
    MdsalHelper.toProperties(parms, inputBuilder.build());

    // Call SLI sync method
    // Get SvcLogicService reference

    DataChangeClient svcLogicClient = new DataChangeClient();
    Properties respProps = null;

    try {
        if (svcLogicClient.hasGraph("DataChange", SVC_OPERATION, null, "sync")) {
            try {
                respProps = svcLogicClient.execute("DataChange", SVC_OPERATION, null, "sync",
                        serviceDataBuilder, parms);
            } catch (Exception e) {
                log.error("Caught exception executing service logic for " + SVC_OPERATION, e);
                serviceDataBuilder.setDataChangeResponseCode("500");
            }
        } else {
            log.error("No service logic active for DataChange: '" + SVC_OPERATION + "'");
            serviceDataBuilder.setDataChangeResponseCode("503");
        }
    } catch (Exception e) {
        log.error("Caught exception looking for service logic", e);
        serviceDataBuilder.setDataChangeResponseCode("500");
    }

    String errorCode = serviceDataBuilder.getDataChangeResponseCode();

    if (errorCode != null && errorCode.length() != 0 && !(errorCode.equals("0") || errorCode.equals("200"))) {
        log.error("Returned FAILED for " + SVC_OPERATION + " error code: '" + errorCode + "'");
    } else {
        log.info("Returned SUCCESS for " + SVC_OPERATION + " ");
    }

    RpcResult<DataChangeNotificationOutput> rpcResult = RpcResultBuilder
            .<DataChangeNotificationOutput>status(true).withResult(serviceDataBuilder.build()).build();
    // return the RPC result (failures are conveyed via the response code)
    return Futures.immediateFuture(rpcResult);
}

From source file:gobblin.runtime.StreamModelTaskRunner.java
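In this streaming task runner, Futures.immediateFuture(null) serves as a placeholder completion future when registering each fork, since record consumption is driven by the reactive stream rather than by a submitted task.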

protected void run() throws Exception {
    // Get the fork operator. By default IdentityForkOperator is used with a single branch.
    ForkOperator forkOperator = closer.register(this.taskContext.getForkOperator());

    RecordStreamWithMetadata<?, ?> stream = this.extractor.recordStream(this.shutdownRequested);
    ConnectableFlowable connectableStream = stream.getRecordStream().publish();
    stream = stream.withRecordStream(connectableStream);

    stream = stream.mapRecords(r -> {
        this.task.onRecordExtract();
        return r;
    });
    if (this.task.isStreamingTask()) {

        // Start watermark manager and tracker
        if (this.watermarkTracker.isPresent()) {
            this.watermarkTracker.get().start();
        }
        this.watermarkManager.get().start();

        ((StreamingExtractor) this.taskContext.getRawSourceExtractor()).start(this.watermarkStorage.get());

        stream = stream.mapRecords(r -> {
            AcknowledgableWatermark ackableWatermark = new AcknowledgableWatermark(r.getWatermark());
            if (watermarkTracker.isPresent()) {
                watermarkTracker.get().track(ackableWatermark);
            }
            r.addCallBack(ackableWatermark);
            return r;
        });
    }
    if (this.converter instanceof MultiConverter) {
        // if multiconverter, unpack it
        for (Converter cverter : ((MultiConverter) this.converter).getConverters()) {
            stream = cverter.processStream(stream, this.taskState);
        }
    } else {
        stream = this.converter.processStream(stream, this.taskState);
    }
    stream = this.rowChecker.processStream(stream, this.taskState);

    Forker.ForkedStream<?, ?> forkedStreams = new Forker().forkStream(stream, forkOperator, this.taskState);

    boolean isForkAsync = !this.task.areSingleBranchTasksSynchronous(this.taskContext)
            || forkedStreams.getForkedStreams().size() > 1;
    int bufferSize = this.taskState.getPropAsInt(ConfigurationKeys.FORK_RECORD_QUEUE_CAPACITY_KEY,
            ConfigurationKeys.DEFAULT_FORK_RECORD_QUEUE_CAPACITY);

    for (int fidx = 0; fidx < forkedStreams.getForkedStreams().size(); fidx++) {
        RecordStreamWithMetadata<?, ?> forkedStream = forkedStreams.getForkedStreams().get(fidx);
        if (forkedStream != null) {
            if (isForkAsync) {
                forkedStream = forkedStream.mapStream(f -> f
                        .observeOn(Schedulers.from(this.taskExecutor.getForkExecutor()), false, bufferSize));
            }
            Fork fork = new Fork(this.taskContext, forkedStream.getSchema(),
                    forkedStreams.getForkedStreams().size(), fidx, this.taskMode);
            fork.consumeRecordStream(forkedStream);
            this.forks.put(Optional.of(fork), Optional.of(Futures.immediateFuture(null)));
            this.task.configureStreamingFork(fork, this.watermarkingStrategy);
        }
    }

    connectableStream.connect();

    if (!ExponentialBackoff.awaitCondition()
            .callable(() -> this.forks.keySet().stream().map(Optional::get).allMatch(Fork::isDone))
            .initialDelay(1000L).maxDelay(1000L).maxWait(TimeUnit.MINUTES.toMillis(60)).await()) {
        throw new TimeoutException("Forks did not finish within the specified timeout.");
    }
}

From source file:org.opendaylight.sfc.ovs.provider.SfcOvsRpc.java

/**
 * This method writes a new OVS Bridge into the OVSDB Config DataStore. This write event triggers
 * creation of the OVS Bridge in the running Open vSwitch instance identified by the OVS Node
 * ip:port locator.
 *
 * <p>
 * @param input RPC input including an OVS Bridge name and the parent OVS Node ip:port locator
 * @return RPC output: true if the write to the OVSDB Config DataStore was successful, otherwise false.
 */
@Override
public Future<RpcResult<CreateOvsBridgeOutput>> createOvsBridge(CreateOvsBridgeInput input) {
    Preconditions.checkNotNull(input, "create-ovs-bridge RPC input must not be null!");
    Preconditions.checkNotNull(input.getOvsNode(),
            "create-ovs-bridge RPC input container ovs-node must not be null!");

    RpcResultBuilder<CreateOvsBridgeOutput> rpcResultBuilder;
    NodeId nodeId = null;

    OvsNode ovsNode = input.getOvsNode();

    //create parent OVS Node InstanceIdentifier (based on ip:port locator)
    if (ovsNode.getPort() != null && ovsNode.getIp() != null) {
        nodeId = new NodeId(OVSDB_NODE_PREFIX + ovsNode.getIp().getIpv4Address().getValue() + ":"
                + ovsNode.getPort().getValue());

        //create parent OVS Node InstanceIdentifier (based on ip)
    } else if (ovsNode.getIp() != null) {
        IpAddress ipAddress = new IpAddress(ovsNode.getIp().getValue());
        Node node = SfcOvsUtil.getManagerNodeByIp(ipAddress, executor);
        if (node != null) {
            nodeId = node.getNodeId();
        }
    }

    if (nodeId != null) {
        InstanceIdentifier<Node> nodeIID = SfcOvsUtil.buildOvsdbNodeIID(nodeId);

        //build OVS Bridge
        // TODO: separate into a function as it will grow in the future (including DP locators, etc.)
        OvsdbBridgeAugmentationBuilder ovsdbBridgeBuilder = new OvsdbBridgeAugmentationBuilder();
        ovsdbBridgeBuilder.setBridgeName(new OvsdbBridgeName(input.getName()));
        ovsdbBridgeBuilder.setManagedBy(new OvsdbNodeRef(nodeIID));

        Object[] methodParams = { ovsdbBridgeBuilder.build() };
        SfcOvsDataStoreAPI sfcOvsDataStoreAPI = new SfcOvsDataStoreAPI(
                SfcOvsDataStoreAPI.Method.PUT_OVSDB_BRIDGE, methodParams);

        if ((boolean) SfcOvsUtil.submitCallable(sfcOvsDataStoreAPI, executor)) {
            rpcResultBuilder = RpcResultBuilder
                    .success(new CreateOvsBridgeOutputBuilder().setResult(true).build());
        } else {
            String message = "Error writing OVS Bridge: '" + input.getName()
                    + "' into OVSDB Configuration DataStore.";
            rpcResultBuilder = RpcResultBuilder.<CreateOvsBridgeOutput>failed()
                    .withError(RpcError.ErrorType.APPLICATION, message);
        }

    } else {
        String message = "Error writing OVS Bridge: '" + input.getName()
                + "' into OVSDB Configuration DataStore (cannot determine parent NodeId).";
        rpcResultBuilder = RpcResultBuilder.<CreateOvsBridgeOutput>failed()
                .withError(RpcError.ErrorType.APPLICATION, message);
    }

    return Futures.immediateFuture(rpcResultBuilder.build());
}

From source file:org.opendaylight.ipsec.impl.IPsecImpl.java
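Another synchronous OpenDaylight RPC: the connection is validated and buffered in-line, and the resulting RpcResult is handed back through immediateFuture.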

@Override
public Future<RpcResult<ConnAddOutput>> connAdd(ConnAddInput input) {

    IPsecConnection connection = new IPsecConnection(input.getKeyexchange(), input.getIke(), input.getAh(),
            input.getEsp(), input.getAuthby(), input.getLeft(), input.getRight(), input.getLeftid(),
            input.getRightid(), input.getLeftcert(), input.getRightcert(), input.getLeftsubnet(),
            input.getRightsubnet(), input.getLeftfirewall(), input.getRightfirewall(), input.getAuto());

    ConnAddOutputBuilder builder = new ConnAddOutputBuilder();
    if (input.getName() == null || input.getName().equals("")) {
        builder.setResult("name cannot be empty");
    } else {
        if (input.getConnectionType() == null || input.getConnectionType().equals("")) {
            builder.setResult("connectionType cannot be empty");
        } else if (input.getConnectionType().equals("active")) {
            LOG.info("active connection: " + input.getName());
            if (IPsecConnectionBuffer.getActiveByName(input.getName()) != null) {
                builder.setResult("conn already exist");
            } else {
                IPsecConnectionBuffer.addActive(input.getName(), connection);
                builder.setResult("success");
            }
        } else if (input.getConnectionType().equals("passive")) {
            LOG.info("passive connection: " + input.getName());
            IPsecConnectionBuffer.addPassive(input.getName(), connection);
            builder.setResult("success");
        } else {
            builder.setResult("connection-type can only be active or passive");
        }
    }
    RpcResult<ConnAddOutput> rpcResult = Rpcs.<ConnAddOutput>getRpcResult(true, builder.build(),
            Collections.<RpcError>emptySet());
    return Futures.immediateFuture(rpcResult);
}

From source file:org.apache.fluo.core.impl.SharedBatchWriter.java
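An empty mutation collection is short-circuited with Futures.immediateFuture(null); non-empty batches get a ListenableFutureTask that is completed later by the writer.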

ListenableFuture<Void> writeMutationsAsyncFuture(Collection<Mutation> ml) {
    if (ml.size() == 0) {
        return Futures.immediateFuture(null);
    }

    ListenableFutureTask<Void> lf = ListenableFutureTask.create(DO_NOTHING, null);
    try {
        MutationBatch mb = new MutationBatch(ml, lf);
        mutQueue.put(mb);
        return lf;
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}

From source file:org.opendaylight.bgpcep.pcep.topology.provider.TopologyProgramming.java
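Scheduling the LSP update instruction completes synchronously, so the built RpcResult is returned as an immediate future.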

@Override
public ListenableFuture<RpcResult<SubmitUpdateLspOutput>> submitUpdateLsp(final SubmitUpdateLspInput input) {
    Preconditions.checkArgument(input.getNode() != null);
    Preconditions.checkArgument(input.getName() != null);

    final SubmitUpdateLspOutputBuilder b = new SubmitUpdateLspOutputBuilder();
    b.setResult(AbstractInstructionExecutor.schedule(scheduler, new AbstractInstructionExecutor(input) {
        @Override
        protected ListenableFuture<OperationResult> invokeOperation() {
            return TopologyProgramming.this.manager.updateLsp(input);
        }
    }));

    final RpcResult<SubmitUpdateLspOutput> res = SuccessfulRpcResult.create(b.build());
    return Futures.immediateFuture(res);
}

From source file:org.apache.jackrabbit.oak.plugins.document.BatchCommit.java
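Each update's result document is computed eagerly in memory, so every queued result is an immediate future of the document state before that update.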

void populateResults(NodeDocument before) {
    DocumentStore store = queue.getStore();
    for (UpdateOp op : ops) {
        results.add(Futures.immediateFuture(before));
        NodeDocument after = new NodeDocument(store);
        before.deepCopy(after);
        UpdateUtils.applyChanges(after, op);
        before = after;
    }
}