Example usage for com.google.common.util.concurrent Futures transform

List of usage examples for com.google.common.util.concurrent Futures transform

Introduction

On this page you can find example usages of com.google.common.util.concurrent Futures transform.

Prototype

public static <I, O> ListenableFuture<O> transform(ListenableFuture<I> input,
        Function<? super I, ? extends O> function) 

Document

Returns a new ListenableFuture whose result is the product of applying the given Function to the result of the given Future.
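
A minimal, self-contained sketch of the call follows. It is not taken from any of the projects listed under Usage; it uses the three-argument overload with an explicit Executor, which current Guava releases require, while the two-argument overload in the Prototype above (available in older Guava versions) runs the function on a direct executor.

import com.google.common.base.Function;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;

public class TransformSketch {
    public static void main(String[] args) throws Exception {
        // An already-completed input future keeps the sketch self-contained.
        ListenableFuture<Integer> input = Futures.immediateFuture(21);

        // transform applies a synchronous Function to the input's result and
        // returns a new ListenableFuture holding the function's return value.
        ListenableFuture<String> output = Futures.transform(input, new Function<Integer, String>() {
            @Override
            public String apply(Integer value) {
                return "doubled: " + (value * 2);
            }
        }, MoreExecutors.directExecutor());

        System.out.println(output.get()); // prints "doubled: 42"
    }
}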

Usage

From source file:org.robotninjas.barge.log.RaftLogBase.java

public ListenableFuture<SnapshotInfo> performSnapshot() throws RaftException {
    SnapshotInfo snapshotInfo;
    try {
        Snapshotter snapshotter;
        synchronized (this) {
            ListenableFuture<Snapshotter> snapshotterFuture = stateMachine.prepareSnapshot(currentTerm,
                    lastApplied);
            snapshotter = snapshotterFuture.get();
        }

        snapshotInfo = snapshotter.finishSnapshot();
    } catch (Exception e) {
        LOGGER.error("Error during snapshot", e);
        throw RaftException.propagate(e);
    }

    LOGGER.info("Wrote snapshot {}", snapshotInfo);

    ListenableFuture<Object> entryFuture = proposeEntry(Entry.newBuilder().setSnapshot(snapshotInfo));

    return Futures.transform(entryFuture, G8.fn((result) -> {
        //      if (!Objects.equal(result, Boolean.TRUE))
        //        throw new IllegalStateException();
        this.lastSnapshot = snapshotInfo;
        return snapshotInfo;
    }));
}

From source file:zipkin.storage.cassandra3.CassandraSpanStore.java

/**
 * This fans out into a number of requests. The returned future will fail if any of the
 * inputs fail.
 *
 * <p>When {@link QueryRequest#serviceName service name} is unset, service names will be
 * fetched eagerly, implying an additional query.
 *
 * <p>The duration query is the most expensive query in cassandra, as it turns into 1 request per
 * hour of {@link QueryRequest#lookback lookback}. Because many times lookback is set to a day,
 * this means 24 requests to the backend!
 *
 * <p>See https://github.com/openzipkin/zipkin-java/issues/200
 */
@Override
public ListenableFuture<List<List<Span>>> getTraces(final QueryRequest request) {
    // Over fetch on indexes as they don't return distinct (trace id, timestamp) rows.
    final int traceIndexFetchSize = request.limit * indexFetchMultiplier;
    ListenableFuture<Map<TraceIdUDT, Long>> traceIdToTimestamp = getTraceIdsByServiceNames(request);
    List<String> annotationKeys = CassandraUtil.annotationKeys(request);
    ListenableFuture<Collection<TraceIdUDT>> traceIds;
    if (annotationKeys.isEmpty()) {
        // Simplest case is when there is no annotation query. Limit is valid since there's no AND
        // query that could reduce the results returned to less than the limit.
        traceIds = Futures.transform(traceIdToTimestamp, CassandraUtil.traceIdsSortedByDescTimestamp());
    } else {
        // While a valid port of the scala cassandra span store (from zipkin 1.35), there is a fault.
        // each annotation key is an intersection, meaning we likely return < traceIndexFetchSize.
        List<ListenableFuture<Map<TraceIdUDT, Long>>> futureKeySetsToIntersect = new ArrayList<>();
        futureKeySetsToIntersect.add(traceIdToTimestamp);
        for (String annotationKey : annotationKeys) {
            futureKeySetsToIntersect.add(getTraceIdsByAnnotation(annotationKey, request.endTs, request.lookback,
                    traceIndexFetchSize));
        }
        // We achieve the AND goal, by intersecting each of the key sets.
        traceIds = Futures.transform(allAsList(futureKeySetsToIntersect), CassandraUtil.intersectKeySets());
        // @xxx the sorting by timestamp desc is broken here^
    }
    return transform(traceIds, new AsyncFunction<Collection<TraceIdUDT>, List<List<Span>>>() {
        @Override
        public ListenableFuture<List<List<Span>>> apply(Collection<TraceIdUDT> traceIds) {
            ImmutableSet<TraceIdUDT> set = ImmutableSet
                    .copyOf(Iterators.limit(traceIds.iterator(), request.limit));
            return transform(getSpansByTraceIds(set, maxTraceCols),
                    new Function<List<Span>, List<List<Span>>>() {
                        @Override
                        public List<List<Span>> apply(List<Span> input) {
                            return GroupByTraceId.apply(input, strictTraceId, true);
                        }
                    });
        }

        @Override
        public String toString() {
            return "getSpansByTraceIds";
        }
    });
}
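
CassandraUtil.traceIdsSortedByDescTimestamp() and CassandraUtil.intersectKeySets() are not reproduced on this page. The sketch below only guesses at the shape of the intersection transform: it shows how allAsList plus Futures.transform can AND several asynchronous key-set lookups by intersecting their results; the class name and key types are placeholders, not Zipkin's.

import com.google.common.base.Function;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.List;
import java.util.Set;

public class IntersectKeySetsSketch {

    /** ANDs several asynchronous key-set lookups by intersecting their results. */
    static ListenableFuture<Set<Long>> intersectAll(List<ListenableFuture<Set<Long>>> lookups) {
        return Futures.transform(Futures.allAsList(lookups), new Function<List<Set<Long>>, Set<Long>>() {
            @Override
            public Set<Long> apply(List<Set<Long>> keySets) {
                // Fold the key sets together with set intersection.
                Set<Long> result = null;
                for (Set<Long> keySet : keySets) {
                    result = (result == null) ? keySet : Sets.intersection(result, keySet);
                }
                return result == null ? ImmutableSet.<Long>of() : ImmutableSet.copyOf(result);
            }
        }, MoreExecutors.directExecutor());
    }
}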

From source file:org.opendaylight.netvirt.natservice.internal.VpnFloatingIpHandler.java

@Override
public void onAddFloatingIp(final BigInteger dpnId, final String routerId, final Uuid networkId,
        final String interfaceName, final InternalToExternalPortMap mapping) {
    String externalIp = mapping.getExternalIp();
    String internalIp = mapping.getInternalIp();
    Uuid floatingIpId = mapping.getExternalId();
    Uuid subnetId = NatUtil.getFloatingIpPortSubnetIdFromFloatingIpId(dataBroker, floatingIpId);
    String floatingIpPortMacAddress = NatUtil.getFloatingIpPortMacFromFloatingIpId(dataBroker, floatingIpId);
    Optional<Subnets> externalSubnet = NatUtil.getOptionalExternalSubnets(dataBroker, subnetId);
    final String vpnName = externalSubnet.isPresent() ? subnetId.getValue()
            : NatUtil.getAssociatedVPN(dataBroker, networkId, LOG);
    final String subnetVpnName = externalSubnet.isPresent() ? subnetId.getValue() : null;
    if (vpnName == null) {
        LOG.info("No VPN associated with ext nw {} to handle add floating ip configuration {} in router {}",
                networkId, externalIp, routerId);
        return;
    }
    String rd = NatUtil.getVpnRd(dataBroker, vpnName);
    String nextHopIp = NatUtil.getEndpointIpAddressForDPN(dataBroker, dpnId);
    LOG.debug("Nexthop ip for prefix {} is {}", externalIp, nextHopIp);
    WriteTransaction writeTx = dataBroker.newWriteOnlyTransaction();
    ProviderTypes provType = NatEvpnUtil.getExtNwProvTypeFromRouterName(dataBroker, routerId);
    if (provType == null) {
        return;
    }
    if (provType == ProviderTypes.VXLAN) {
        Uuid floatingIpInterface = NatEvpnUtil.getFloatingIpInterfaceIdFromFloatingIpId(dataBroker,
                floatingIpId);
        evpnDnatFlowProgrammer.onAddFloatingIp(dpnId, routerId, vpnName, internalIp, externalIp, networkId,
                interfaceName, floatingIpInterface.getValue(), floatingIpPortMacAddress, rd, nextHopIp,
                writeTx);
        if (writeTx != null) {
            writeTx.submit();
        }
        return;
    }

    if (nvpnManager.getEnforceOpenstackSemanticsConfig()) {
        NatOverVxlanUtil.validateAndCreateVxlanVniPool(dataBroker, nvpnManager, idManager,
                NatConstants.ODL_VNI_POOL_NAME);
    }

    GenerateVpnLabelInput labelInput = new GenerateVpnLabelInputBuilder().setVpnName(vpnName)
            .setIpPrefix(externalIp).build();
    Future<RpcResult<GenerateVpnLabelOutput>> labelFuture = vpnService.generateVpnLabel(labelInput);

    ListenableFuture<RpcResult<Void>> future = Futures.transform(
            JdkFutureAdapters.listenInPoolThread(labelFuture),
            (AsyncFunction<RpcResult<GenerateVpnLabelOutput>, RpcResult<Void>>) result -> {
                if (result.isSuccessful()) {
                    GenerateVpnLabelOutput output = result.getResult();
                    long label = output.getLabel();
                    LOG.debug("Generated label {} for prefix {}", label, externalIp);
                    floatingIPListener.updateOperationalDS(routerId, interfaceName, label, internalIp,
                            externalIp);
                    //Inform BGP
                    long l3vni = 0;
                    if (nvpnManager.getEnforceOpenstackSemanticsConfig()) {
                        l3vni = NatOverVxlanUtil.getInternetVpnVni(idManager, vpnName, l3vni).longValue();
                    }
                    NatUtil.addPrefixToBGP(dataBroker, bgpManager, fibManager, vpnName, rd, subnetId,
                            externalIp + "/32", nextHopIp, networkId.getValue(), floatingIpPortMacAddress,
                            label, l3vni, LOG, RouteOrigin.STATIC, dpnId);

                    List<Instruction> instructions = new ArrayList<>();
                    List<ActionInfo> actionsInfos = new ArrayList<>();
                    actionsInfos.add(new ActionNxResubmit(NwConstants.PDNAT_TABLE));
                    instructions.add(new InstructionApplyActions(actionsInfos).buildInstruction(0));
                    makeTunnelTableEntry(vpnName, dpnId, label, instructions);

                    //Install custom FIB routes
                    List<ActionInfo> actionInfoFib = new ArrayList<>();
                    List<Instruction> customInstructions = new ArrayList<>();
                    actionInfoFib.add(
                            new ActionSetFieldEthernetDestination(new MacAddress(floatingIpPortMacAddress)));
                    customInstructions.add(new InstructionApplyActions(actionInfoFib).buildInstruction(0));
                    customInstructions
                            .add(new InstructionGotoTable(NwConstants.PDNAT_TABLE).buildInstruction(1));

                    makeLFibTableEntry(dpnId, label, NwConstants.PDNAT_TABLE);
                    CreateFibEntryInput input = new CreateFibEntryInputBuilder().setVpnName(vpnName)
                            .setSourceDpid(dpnId).setInstruction(customInstructions)
                            .setIpAddress(externalIp + "/32").setServiceId(label).build();
                    //Future<RpcResult<java.lang.Void>> createFibEntry(CreateFibEntryInput input);
                    Future<RpcResult<Void>> future1 = fibService.createFibEntry(input);
                    LOG.debug("Add Floating Ip {} , found associated to fixed port {}", externalIp,
                            interfaceName);
                    if (floatingIpPortMacAddress != null) {
                        String networkVpnName = NatUtil.getAssociatedVPN(dataBroker, networkId, LOG);
                        vpnManager.setupSubnetMacIntoVpnInstance(networkVpnName, subnetVpnName,
                                floatingIpPortMacAddress, dpnId, writeTx, NwConstants.ADD_FLOW);
                        vpnManager.setupArpResponderFlowsToExternalNetworkIps(routerId,
                                Collections.singleton(externalIp), floatingIpPortMacAddress, dpnId, networkId,
                                writeTx, NwConstants.ADD_FLOW);
                        writeTx.submit();
                    }
                    return JdkFutureAdapters.listenInPoolThread(future1);
                } else {
                    String errMsg = String.format("Could not retrieve the label for prefix %s in VPN %s, %s",
                            externalIp, vpnName, result.getErrors());
                    LOG.error(errMsg);
                    return Futures.immediateFailedFuture(new RuntimeException(errMsg));
                }
            });

    Futures.addCallback(future, new FutureCallback<RpcResult<Void>>() {

        @Override
        public void onFailure(Throwable error) {
            LOG.error("Error in generate label or fib install process", error);
        }

        @Override
        public void onSuccess(RpcResult<Void> result) {
            if (result.isSuccessful()) {
                LOG.info("Successfully installed custom FIB routes for prefix {}", externalIp);
            } else {
                LOG.error("Error in rpc call to create custom Fib entries for prefix {} in DPN {}, {}",
                        externalIp, dpnId, result.getErrors());
            }
        }
    });

    // Handle GARP transmission
    final IpAddress externalAddress = IpAddressBuilder.getDefaultInstance(externalIp);
    sendGarpOnInterface(dpnId, networkId, externalAddress, floatingIpPortMacAddress);

}

From source file:org.thingsboard.server.dao.entityview.EntityViewServiceImpl.java

@Override
public ListenableFuture<List<EntityView>> findEntityViewsByQuery(TenantId tenantId,
        EntityViewSearchQuery query) {
    ListenableFuture<List<EntityRelation>> relations = relationService.findByQuery(tenantId,
            query.toEntitySearchQuery());
    ListenableFuture<List<EntityView>> entityViews = Futures.transformAsync(relations, r -> {
        EntitySearchDirection direction = query.toEntitySearchQuery().getParameters().getDirection();
        List<ListenableFuture<EntityView>> futures = new ArrayList<>();
        for (EntityRelation relation : r) {
            EntityId entityId = direction == EntitySearchDirection.FROM ? relation.getTo() : relation.getFrom();
            if (entityId.getEntityType() == EntityType.ENTITY_VIEW) {
                futures.add(findEntityViewByIdAsync(tenantId, new EntityViewId(entityId.getId())));
            }
        }
        return Futures.successfulAsList(futures);
    });

    entityViews = Futures.transform(entityViews, new Function<List<EntityView>, List<EntityView>>() {
        @Nullable
        @Override
        public List<EntityView> apply(@Nullable List<EntityView> entityViewList) {
            return entityViewList == null ? Collections.emptyList()
                    : entityViewList.stream()
                            .filter(entityView -> query.getEntityViewTypes().contains(entityView.getType()))
                            .collect(Collectors.toList());
        }
    });

    return entityViews;
}
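
The method above chains Futures.transformAsync (for a step whose mapping itself returns a future) with Futures.transform (for a synchronous filtering step). The following sketch illustrates that distinction in isolation; the lookup used here is made up for the example and is not part of the ThingsBoard API.

import com.google.common.base.Function;
import com.google.common.util.concurrent.AsyncFunction;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;

public class TransformVsTransformAsyncSketch {

    // Stand-in for an asynchronous lookup; immediateFuture keeps the sketch self-contained.
    static ListenableFuture<String> lookupName(long id) {
        return Futures.immediateFuture("name-" + id);
    }

    public static void main(String[] args) throws Exception {
        ListenableFuture<Long> idFuture = Futures.immediateFuture(7L);

        // transformAsync: the mapping step itself returns a ListenableFuture.
        ListenableFuture<String> nameFuture = Futures.transformAsync(idFuture,
                new AsyncFunction<Long, String>() {
                    @Override
                    public ListenableFuture<String> apply(Long id) {
                        return lookupName(id);
                    }
                }, MoreExecutors.directExecutor());

        // transform: the mapping step is a plain synchronous Function.
        ListenableFuture<Integer> lengthFuture = Futures.transform(nameFuture,
                new Function<String, Integer>() {
                    @Override
                    public Integer apply(String name) {
                        return name.length();
                    }
                }, MoreExecutors.directExecutor());

        System.out.println(lengthFuture.get()); // prints 6, the length of "name-7"
    }
}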

From source file:org.opendaylight.sfc.provider.SfcProviderRpc.java

@Override
public Future<RpcResult<Void>> putServiceFunction(PutServiceFunctionInput input) {
    printTraceStart(LOG);
    LOG.info("\n####### Input: " + input);

    if (dataBroker == null) {
        return Futures.immediateFuture(
                RpcResultBuilder.<Void>failed().withError(ErrorType.APPLICATION, "No data provider.").build());
    }

    // Data Plane Locator
    List<SfDataPlaneLocator> sfDataPlaneLocatorList = input.getSfDataPlaneLocator();

    ServiceFunctionBuilder sfbuilder = new ServiceFunctionBuilder();
    ServiceFunctionKey sfkey = new ServiceFunctionKey(input.getName());
    ServiceFunction sf = sfbuilder.setName(input.getName()).setType(input.getType()).setKey(sfkey)
            .setIpMgmtAddress(input.getIpMgmtAddress()).setSfDataPlaneLocator(sfDataPlaneLocatorList).build();

    InstanceIdentifier<ServiceFunction> sfEntryIID = InstanceIdentifier.builder(ServiceFunctions.class)
            .child(ServiceFunction.class, sf.getKey()).build();

    WriteTransaction writeTx = dataBroker.newWriteOnlyTransaction();
    writeTx.merge(LogicalDatastoreType.CONFIGURATION, sfEntryIID, sf, true);
    printTraceStop(LOG);
    return Futures.transform(writeTx.submit(),
            (Function<Void, RpcResult<Void>>) input1 -> RpcResultBuilder.<Void>success().build());
}

From source file:com.google.cloud.bigtable.grpc.v2.BigtableDataGrpcClient.java

@Override
public ListenableFuture<List<Row>> readRowsAsync(ReadRowsRequest request) {
    return Futures.transform(getStreamingFuture(request, readRowsAsync), ROW_TRANSFORMER);
}

From source file:org.apache.cassandra.repair.RepairRunnable.java

protected void runMayThrow() throws Exception {
    final TraceState traceState;

    final String tag = "repair:" + cmd;

    final AtomicInteger progress = new AtomicInteger();
    final int totalProgress = 3 + options.getRanges().size(); // calculate neighbors, validation, prepare for repair + number of ranges to repair

    String[] columnFamilies = options.getColumnFamilies()
            .toArray(new String[options.getColumnFamilies().size()]);
    Iterable<ColumnFamilyStore> validColumnFamilies = storageService.getValidColumnFamilies(false, false,
            keyspace, columnFamilies);

    final long startTime = System.currentTimeMillis();
    String message = String.format("Starting repair command #%d, repairing keyspace %s with %s", cmd, keyspace,
            options);
    logger.info(message);
    fireProgressEvent(tag, new ProgressEvent(ProgressEventType.START, 0, 100, message));
    if (options.isTraced()) {
        StringBuilder cfsb = new StringBuilder();
        for (ColumnFamilyStore cfs : validColumnFamilies)
            cfsb.append(", ").append(cfs.keyspace.getName()).append(".").append(cfs.name);

        UUID sessionId = Tracing.instance.newSession(Tracing.TraceType.REPAIR);
        traceState = Tracing.instance.begin("repair",
                ImmutableMap.of("keyspace", keyspace, "columnFamilies", cfsb.substring(2)));
        Tracing.traceRepair(message);
        traceState.enableActivityNotification(tag);
        for (ProgressListener listener : listeners)
            traceState.addProgressListener(listener);
        Thread queryThread = createQueryThread(cmd, sessionId);
        queryThread.setName("RepairTracePolling");
        queryThread.start();
    } else {
        traceState = null;
    }

    final Set<InetAddress> allNeighbors = new HashSet<>();
    Map<Range, Set<InetAddress>> rangeToNeighbors = new HashMap<>();
    try {
        for (Range<Token> range : options.getRanges()) {
            Set<InetAddress> neighbors = ActiveRepairService.getNeighbors(keyspace, range,
                    options.getDataCenters(), options.getHosts());
            rangeToNeighbors.put(range, neighbors);
            allNeighbors.addAll(neighbors);
        }
        progress.incrementAndGet();
    } catch (IllegalArgumentException e) {
        logger.error("Repair failed:", e);
        fireErrorAndComplete(tag, progress.get(), totalProgress, e.getMessage());
        return;
    }

    // Validate columnfamilies
    List<ColumnFamilyStore> columnFamilyStores = new ArrayList<>();
    try {
        Iterables.addAll(columnFamilyStores, validColumnFamilies);
        progress.incrementAndGet();
    } catch (IllegalArgumentException e) {
        fireErrorAndComplete(tag, progress.get(), totalProgress, e.getMessage());
        return;
    }

    String[] cfnames = new String[columnFamilyStores.size()];
    for (int i = 0; i < columnFamilyStores.size(); i++) {
        cfnames[i] = columnFamilyStores.get(i).name;
    }

    final UUID parentSession = UUIDGen.getTimeUUID();
    SystemDistributedKeyspace.startParentRepair(parentSession, keyspace, cfnames, options.getRanges());
    long repairedAt;
    try {
        ActiveRepairService.instance.prepareForRepair(parentSession, allNeighbors, options, columnFamilyStores);
        repairedAt = ActiveRepairService.instance.getParentRepairSession(parentSession).getRepairedAt();
        progress.incrementAndGet();
    } catch (Throwable t) {
        SystemDistributedKeyspace.failParentRepair(parentSession, t);
        fireErrorAndComplete(tag, progress.get(), totalProgress, t.getMessage());
        return;
    }

    // Set up RepairJob executor for this repair command.
    final ListeningExecutorService executor = MoreExecutors.listeningDecorator(
            new JMXConfigurableThreadPoolExecutor(options.getJobThreads(), Integer.MAX_VALUE, TimeUnit.SECONDS,
                    new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("Repair#" + cmd), "internal"));

    List<ListenableFuture<RepairSessionResult>> futures = new ArrayList<>(options.getRanges().size());
    for (Range<Token> range : options.getRanges()) {
        final RepairSession session = ActiveRepairService.instance.submitRepairSession(parentSession, range,
                keyspace, options.getParallelism(), rangeToNeighbors.get(range), repairedAt, executor, cfnames);
        if (session == null)
            continue;
        // After the repair session completes, notify the client of its result
        Futures.addCallback(session, new FutureCallback<RepairSessionResult>() {
            public void onSuccess(RepairSessionResult result) {
                String message = String.format("Repair session %s for range %s finished", session.getId(),
                        session.getRange().toString());
                logger.info(message);
                fireProgressEvent(tag, new ProgressEvent(ProgressEventType.PROGRESS, progress.incrementAndGet(),
                        totalProgress, message));
            }

            public void onFailure(Throwable t) {
                String message = String.format("Repair session %s for range %s failed with error %s",
                        session.getId(), session.getRange().toString(), t.getMessage());
                logger.error(message, t);
                fireProgressEvent(tag, new ProgressEvent(ProgressEventType.PROGRESS, progress.incrementAndGet(),
                        totalProgress, message));
            }
        });
        futures.add(session);
    }

    // After all repair sessions complete (successful or not),
    // run anticompaction if necessary and send a finish notice back to the client
    final Collection<Range<Token>> successfulRanges = new ArrayList<>();
    final AtomicBoolean hasFailure = new AtomicBoolean();
    final ListenableFuture<List<RepairSessionResult>> allSessions = Futures.successfulAsList(futures);
    ListenableFuture anticompactionResult = Futures.transform(allSessions,
            new AsyncFunction<List<RepairSessionResult>, Object>() {
                @SuppressWarnings("unchecked")
                public ListenableFuture apply(List<RepairSessionResult> results) throws Exception {
                    // filter out null(=failed) results and get successful ranges
                    for (RepairSessionResult sessionResult : results) {
                        if (sessionResult != null) {
                            successfulRanges.add(sessionResult.range);
                        } else {
                            hasFailure.compareAndSet(false, true);
                        }
                    }
                    return ActiveRepairService.instance.finishParentSession(parentSession, allNeighbors,
                            successfulRanges);
                }
            });
    Futures.addCallback(anticompactionResult, new FutureCallback<Object>() {
        public void onSuccess(Object result) {
            SystemDistributedKeyspace.successfulParentRepair(parentSession, successfulRanges);
            if (hasFailure.get()) {
                fireProgressEvent(tag, new ProgressEvent(ProgressEventType.ERROR, progress.get(), totalProgress,
                        "Some repair failed"));
            } else {
                fireProgressEvent(tag, new ProgressEvent(ProgressEventType.SUCCESS, progress.get(),
                        totalProgress, "Repair completed successfully"));
            }
            repairComplete();
        }

        public void onFailure(Throwable t) {
            fireProgressEvent(tag,
                    new ProgressEvent(ProgressEventType.ERROR, progress.get(), totalProgress, t.getMessage()));
            SystemDistributedKeyspace.failParentRepair(parentSession, t);
            repairComplete();
        }

        private void repairComplete() {
            String duration = DurationFormatUtils.formatDurationWords(System.currentTimeMillis() - startTime,
                    true, true);
            String message = String.format("Repair command #%d finished in %s", cmd, duration);
            fireProgressEvent(tag,
                    new ProgressEvent(ProgressEventType.COMPLETE, progress.get(), totalProgress, message));
            logger.info(message);
            if (options.isTraced() && traceState != null) {
                for (ProgressListener listener : listeners)
                    traceState.removeProgressListener(listener);
                // Because DebuggableThreadPoolExecutor#afterExecute and this callback
                // run in a nondeterministic order (within the same thread), the
                // TraceState may have been nulled out at this point. The TraceState
                // should be traceState, so just set it without bothering to check if it
                // actually was nulled out.
                Tracing.instance.set(traceState);
                Tracing.traceRepair(message);
                Tracing.instance.stopSession();
            }
            executor.shutdownNow();
        }
    });
}

From source file:org.opendaylight.camera.impl.CameraProvider.java

/**
 * Read the CameraStatus and, if currently Off, try to write the status to
 * On. If that succeeds, then we essentially have an exclusive lock and can
 * proceed to click the photo.
 *
 * @param input
 * @param futureResult
 * @param tries
 */
private void checkStatusandClickPhoto(final ClickPhotoInput input,
        final SettableFuture<RpcResult<Void>> futureResult, final int tries) {
    /*
     * We create a ReadWriteTransaction by using the databroker. Then, we
     * read the status of the camera with getCameraStatus() using the
     * databroker again. Once we have the status, we analyze it and then
     * databroker submit function is called to effectively change the camera
     * status. This all affects the MD-SAL tree, more specifically the part
     * of the tree that contain the camera (the nodes).
     */
    LOG.info("In checkStatusandClickPhoto()");
    final ReadWriteTransaction tx = db.newReadWriteTransaction();
    ListenableFuture<Optional<CameraParams>> readFuture = tx.read(LogicalDatastoreType.OPERATIONAL, CAMERA_IID);

    final ListenableFuture<Void> commitFuture = Futures.transform(readFuture,
            new AsyncFunction<Optional<CameraParams>, Void>() {

                @SuppressWarnings("deprecation")
                @Override
                public ListenableFuture<Void> apply(Optional<CameraParams> cameraParamsData) throws Exception {
                    // TODO Auto-generated method stub
                    if (cameraParamsData.isPresent()) {
                        status = cameraParamsData.get().getCameraStatus();
                    } else {
                        throw new Exception("Error reading CameraParams data from the store.");
                    }
                    LOG.info("Read camera status: {}", status);
                    if (status == CameraStatus.Off) {
                        // If numberOfPhotosAvailable is 0, publish an out-of-photos notification
                        if (numberOfPhotosAvailable.get() == 0) {
                            LOG.info("No more photos available for clicking");
                            notificationProvider.publish(new CameraOutOfPhotosBuilder().build());
                            return Futures.immediateFailedCheckedFuture(
                                    new TransactionCommitFailedException("", clickNoMorePhotosError()));
                        }
                        LOG.info("Setting Camera status to On");
                        // We're not currently clicking photo - try to
                        // update the status to On
                        // to indicate we're going to click photo. This acts
                        // as a lock to prevent
                        // concurrent clicking.
                        tx.put(LogicalDatastoreType.OPERATIONAL, CAMERA_IID,
                                buildCameraParams(CameraStatus.On));
                        return tx.submit();
                    }

                    LOG.info("Oops - already clicking photo!");
                    // Return an error since we are already clicking photo.
                    // This will get
                    // propagated to the commitFuture below which will
                    // interpret the null
                    // TransactionStatus in the RpcResult as an error
                    // condition.
                    return Futures.immediateFailedCheckedFuture(
                            new TransactionCommitFailedException("", clickPhotoInUseError()));
                }

                private RpcError clickNoMorePhotosError() {
                    return RpcResultBuilder.newError(ErrorType.APPLICATION, "resource-denied",
                            "No more photos available for clicking", "out-of-stock", null, null);
                }
            });
    Futures.addCallback(commitFuture, new FutureCallback<Void>() {

        @Override
        public void onFailure(Throwable t) {
            if (t instanceof OptimisticLockFailedException) {
                // Another thread is likely trying to click a photo
                // simultaneously and updated the
                // status before us. Try reading the status again - if
                // another click-photo is
                // now in progress, we should get CameraStatus.Off and fail.
                if ((tries - 1) > 0) {
                    LOG.info("Got OptimisticLockFailedException - trying again");
                    checkStatusandClickPhoto(input, futureResult, tries - 1);
                } else {
                    futureResult.set(RpcResultBuilder.<Void>failed()
                            .withError(ErrorType.APPLICATION, t.getMessage()).build());
                }
            } else {
                LOG.info("Failed to commit Camera status", t);
                // Probably already clicking a photo.
                futureResult.set(RpcResultBuilder.<Void>failed()
                        .withRpcErrors(((TransactionCommitFailedException) t).getErrorList()).build());
            }
        }

        @Override
        public void onSuccess(Void result) {
            // OK to click a photo
            currentClickPhotoTask.set(executor.submit(new ClickPhotoTask(input, futureResult)));

        }

    });
}

From source file:com.orangerhymelabs.helenus.cassandra.document.DocumentRepository.java

public ListenableFuture<Boolean> exists(Identifier id) {
    ListenableFuture<ResultSet> future = submitExists(id);
    return Futures.transform(future, new Function<ResultSet, Boolean>() {
        @Override
        public Boolean apply(ResultSet result) {
            return result.one().getLong(0) > 0;
        }
    });
}

From source file:zipkin.storage.cassandra.CassandraSpanStore.java

/**
 * This fans out into a potentially large amount of requests related to the amount of annotations
 * queried. The returned future will fail if any of the inputs fail.
 *
 * <p>When {@link QueryRequest#serviceName service name} is unset, service names will be
 * fetched eagerly, implying an additional query.
 */
@Override
public ListenableFuture<List<List<Span>>> getTraces(final QueryRequest request) {
    // Over fetch on indexes as they don't return distinct (trace id, timestamp) rows.
    final int traceIndexFetchSize = request.limit * indexFetchMultiplier;
    ListenableFuture<Map<Long, Long>> traceIdToTimestamp;
    if (request.spanName != null) {
        traceIdToTimestamp = getTraceIdsBySpanName(request.serviceName, request.spanName, request.endTs * 1000,
                request.lookback * 1000, traceIndexFetchSize);
    } else if (request.serviceName != null) {
        traceIdToTimestamp = getTraceIdsByServiceNames(Collections.singletonList(request.serviceName),
                request.endTs * 1000, request.lookback * 1000, traceIndexFetchSize);
    } else {
        checkArgument(selectTraceIdsByServiceNames != null,
                "getTraces without serviceName requires Cassandra 2.2 or later");
        traceIdToTimestamp = transform(getServiceNames(), new AsyncFunction<List<String>, Map<Long, Long>>() {
            @Override
            public ListenableFuture<Map<Long, Long>> apply(List<String> serviceNames) {
                return getTraceIdsByServiceNames(serviceNames, request.endTs * 1000, request.lookback * 1000,
                        traceIndexFetchSize);
            }
        });
    }

    List<String> annotationKeys = CassandraUtil.annotationKeys(request);

    ListenableFuture<Set<Long>> traceIds;
    if (annotationKeys.isEmpty()) {
        // Simplest case is when there is no annotation query. Limit is valid since there's no AND
        // query that could reduce the results returned to less than the limit.
        traceIds = Futures.transform(traceIdToTimestamp, CassandraUtil.keyset());
    } else {
        // While a valid port of the scala cassandra span store (from zipkin 1.35), there is a fault.
        // each annotation key is an intersection, meaning we likely return < traceIndexFetchSize.
        List<ListenableFuture<Map<Long, Long>>> futureKeySetsToIntersect = new ArrayList<>();
        futureKeySetsToIntersect.add(traceIdToTimestamp);
        for (String annotationKey : annotationKeys) {
            futureKeySetsToIntersect.add(getTraceIdsByAnnotation(annotationKey, request.endTs * 1000,
                    request.lookback * 1000, traceIndexFetchSize));
        }
        // We achieve the AND goal, by intersecting each of the key sets.
        traceIds = Futures.transform(allAsList(futureKeySetsToIntersect), CassandraUtil.intersectKeySets());
    }
    return transform(traceIds, new AsyncFunction<Set<Long>, List<List<Span>>>() {
        @Override
        public ListenableFuture<List<List<Span>>> apply(Set<Long> traceIds) {
            traceIds = ImmutableSet.copyOf(Iterators.limit(traceIds.iterator(), request.limit));
            return transform(getSpansByTraceIds(traceIds, maxTraceCols),
                    new Function<List<Span>, List<List<Span>>>() {
                        @Override
                        public List<List<Span>> apply(List<Span> input) {
                            // Indexes only contain Span.traceId, so our matches are imprecise on Span.traceIdHigh
                            return FluentIterable.from(GroupByTraceId.apply(input, strictTraceId, true))
                                    .filter(new Predicate<List<Span>>() {
                                        @Override
                                        public boolean apply(List<Span> input) {
                                            return input.get(0).traceIdHigh == 0 || request.test(input);
                                        }
                                    }).toList();
                        }
                    });
        }

        @Override
        public String toString() {
            return "getSpansByTraceIds";
        }
    });
}