Example usage for com.google.common.util.concurrent Futures immediateFailedCheckedFuture


Introduction

This page shows example usage of com.google.common.util.concurrent Futures immediateFailedCheckedFuture.

Prototype

@GwtIncompatible("TODO")
@CheckReturnValue
public static <V, X extends Exception> CheckedFuture<V, X> immediateFailedCheckedFuture(X exception) 

Document

Returns a CheckedFuture which has an exception set immediately upon construction.
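
Before the real-world examples below, here is a minimal, self-contained sketch of the method's behaviour. It assumes an older Guava release that still ships CheckedFuture (the type and this factory method were later deprecated and removed), and MyCheckedException is a hypothetical checked exception introduced only for this illustration. Because the exception is set at construction time, checkedGet() throws it immediately without blocking.

import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.Futures;

public class ImmediateFailedCheckedFutureSketch {

    // Hypothetical checked exception type used only for this example.
    static class MyCheckedException extends Exception {
        MyCheckedException(String message) {
            super(message);
        }
    }

    public static void main(String[] args) {
        // The failure is recorded at construction; no asynchronous work is scheduled.
        CheckedFuture<String, MyCheckedException> failed =
                Futures.immediateFailedCheckedFuture(new MyCheckedException("lookup failed"));

        try {
            failed.checkedGet(); // throws MyCheckedException immediately
        } catch (MyCheckedException e) {
            System.out.println("Failed as expected: " + e.getMessage());
        }
    }
}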

Usage

From source file:org.opendaylight.faas.fabric.general.FabricManagementAPIProvider.java

@Override
public Future<RpcResult<ComposeFabricOutput>> composeFabric(final ComposeFabricInput input) {
    ComposeFabricInputBuilder inputBuilder = new ComposeFabricInputBuilder(input);
    String msg = null;
    if ((msg = checkFabricOptions(inputBuilder)) != null) {
        return Futures.immediateFailedCheckedFuture(new IllegalArgumentException(msg));
    }

    final FabricId fabricId = new FabricId(String.format("fabric:%d", this.genNextFabricNum()));

    final InstanceIdentifier<Node> fnodepath = MdSalUtils.createFNodeIId(fabricId);
    final InstanceIdentifier<FabricNode> fabricpath = fnodepath.augmentation(FabricNode.class);

    NodeBuilder fnodeBuilder = new NodeBuilder();
    buildNodeAttribute(fnodeBuilder, input, fabricId);

    FabricNodeBuilder fabricBuilder = new FabricNodeBuilder();
    FabricAttributeBuilder attrBuilder = new FabricAttributeBuilder();
    buildFabricAttribute(attrBuilder, inputBuilder);

    FabricRendererFactory rendererFactory = rendererMgr.getFabricRendererFactory(input.getType());
    FabricRenderer renderer = rendererFactory.composeFabric(fabricpath, attrBuilder, input);
    if (renderer == null) {
        return Futures.immediateFailedCheckedFuture(
                new RuntimeException("Can not compose fabric due the renderer return false."));
    }

    fabricBuilder.setFabricAttribute(attrBuilder.build());
    FabricInstance fabric = FabricInstanceCache.INSTANCE.addFabric(fabricId, input.getType(), renderer);
    fabric.addListener(rendererFactory.createListener(fabricpath, fabricBuilder.getFabricAttribute()));

    final FabricNode fabricNode = fabricBuilder.build();
    fnodeBuilder.addAugmentation(FabricNode.class, fabricNode);

    ReadWriteTransaction trans = dataBroker.newReadWriteTransaction();

    trans.put(LogicalDatastoreType.OPERATIONAL, fnodepath, fnodeBuilder.build(), true);
    trans.put(LogicalDatastoreType.CONFIGURATION, fnodepath, fnodeBuilder.build(), true);
    trans.put(LogicalDatastoreType.OPERATIONAL, MdSalUtils.createTopoIId(fabricId.getValue()),
            MdSalUtils.newTopo(fabricId.getValue()));

    CheckedFuture<Void, TransactionCommitFailedException> future = trans.submit();

    return Futures.transform(future, (AsyncFunction<Void, RpcResult<ComposeFabricOutput>>) submitResult -> {
        RpcResultBuilder<ComposeFabricOutput> resultBuilder = RpcResultBuilder.<ComposeFabricOutput>success();
        ComposeFabricOutputBuilder outputBuilder = new ComposeFabricOutputBuilder();
        outputBuilder.setFabricId(fabricId);

        FabricInstanceCache.INSTANCE.retrieveFabric(fabricId).notifyFabricCreated(fabricNode);
        return Futures.immediateFuture(resultBuilder.withResult(outputBuilder.build()).build());
    });
}

From source file:org.opendaylight.oven.impl.OvenProvider.java

/**
 * Read the OvenStatus and, if currently Waiting, try to write the status to Preheating. 
 * If that succeeds, then we can proceed to cook the food. 
 *
 * @param input the cook-food request being processed
 * @param futureResult the settable future to complete with the RPC result
 * @param tries the number of remaining retry attempts
 */
private void checkStatusAndCookFood(final CookFoodInput input,
        final SettableFuture<RpcResult<Void>> futureResult, final int tries) {
    /*
     * We create a ReadWriteTransaction by using the databroker. Then, we
     * read the status of the oven with getOvenStatus() using the
     * databroker again. Once we have the status, we analyze it and then
     * databroker submit function is called to effectively change the oven
     * status. This all affects the MD-SAL tree, more specifically the part
     * of the tree that contain the oven (the nodes).
     */
    LOG.info("In checkStatusAndCookFood()");
    final ReadWriteTransaction tx = db.newReadWriteTransaction();
    final ListenableFuture<Optional<OvenParams>> readFuture = tx.read(LogicalDatastoreType.OPERATIONAL,
            OVEN_IID);
    final ListenableFuture<Void> commitFuture = Futures.transform(readFuture,
            new AsyncFunction<Optional<OvenParams>, Void>() {

                @Override
                public ListenableFuture<Void> apply(Optional<OvenParams> ovenParamsData) throws Exception {
                    if (ovenParamsData.isPresent()) {
                        status = ovenParamsData.get().getOvenStatus();
                    } else {
                        throw new Exception("Error reading OvenParams.status data from the store.");
                    }
                    LOG.info("Read oven status: {}", status);

                    if (status == OvenStatus.Waiting) {
                        //Check if numberOfMealAvailable is not 0, if yes Notify outOfStock
                        if (numberOfMealAvailable.get() == 0) {
                            LOG.info("No more meal availble to cook");
                            notificationProvider.publish(new KitchenOutOfFoodBuilder().build());
                            return Futures.immediateFailedCheckedFuture(
                                    new TransactionCommitFailedException("", cookNoMoreMealError()));
                        }

                        LOG.info("Setting Camera status to Preheating");
                        // We're not currently cooking food - we try to update the status to On
                        // to indicate we're going to cook food. This acts as a lock to prevent
                        // concurrent cooking.
                        tx.put(LogicalDatastoreType.OPERATIONAL, OVEN_IID,
                                buildOvenParams(OvenStatus.Preheating));
                        return tx.submit();
                    }
                    LOG.info("The oven is actually on use, cancel actual program before.");
                    // Return an error since we are already cooking food. This will get
                    // propagated to the commitFuture below which will interpret the null
                    // TransactionStatus in the RpcResult as an error condition.
                    return Futures.immediateFailedCheckedFuture(
                            new TransactionCommitFailedException("", cookOvenInUseError()));
                }

                private RpcError cookNoMoreMealError() {
                    return RpcResultBuilder.newError(ErrorType.APPLICATION, "resource-denied",
                            "No more food available to cook", "out-of-stock", null, null);
                }
            });
    Futures.addCallback(commitFuture, new FutureCallback<Void>() {
        @Override
        public void onFailure(Throwable t) {
            if (t instanceof OptimisticLockFailedException) {
                // Another thread is likely trying to cook food simultaneously and updated the
                // status before us. Try reading the status again - if another cookFood is
                // now in progress, we should get OvenStatus.Waiting and fail.
                if ((tries - 1) > 0) {
                    LOG.info("Got OptimisticLockFailedException - trying again");
                    checkStatusAndCookFood(input, futureResult, tries - 1);
                } else {
                    futureResult.set(RpcResultBuilder.<Void>failed()
                            .withError(ErrorType.APPLICATION, t.getMessage()).build());
                }
            } else {
                LOG.info("Failed to commit Oven status", t);
                // Probably already cooking.
                futureResult.set(RpcResultBuilder.<Void>failed()
                        .withRpcErrors(((TransactionCommitFailedException) t).getErrorList()).build());
            }
        }

        @Override
        public void onSuccess(Void result) {
            // OK to cook
            currentCookingMealTask.set(executor.submit(new CookMealTask(input, futureResult)));

        }

    });
}

From source file:org.opendaylight.streamhandler.impl.StreamhandlerImpl.java

@Override
public Future<RpcResult<QuerySqlApiOutput>> querySqlApi(QuerySqlApiInput input) {

    final SettableFuture<RpcResult<QuerySqlApiOutput>> futureResult = SettableFuture.create();
    String query = input.getQueryString();
    String fromTime = input.getFromTime();
    String toTime = input.getToTime();
    Short limit = input.getLimit();
    List<String> eventFields = input.getEventFields();
    List<Map<String, Object>> output = null;

    if (query == null) {
        return Futures.immediateFailedCheckedFuture(
                new TransactionCommitFailedException("invalid-input", RpcResultBuilder
                        .newError(ErrorType.APPLICATION, "Field missing", "Mandatory field Query missing")));

    }
    if (limit == null) {
        limit = Short.parseShort(commonServices.defaultLimit);
    }
    if (fromTime == null && toTime == null && limit != null) {
        if (commonServices.dbType.equalsIgnoreCase(StreamConstants.HBASE)) {
            if (checkIfQueryContainsStreamWithSpaces(query)) {
                query = updateWhenQueryContainsStream(query);
                query = query + ORDER_BY + "centinel.stream.event_timestamp DESC limit " + limit;
            } else if (checkIfQueryContainsAlertWithSpaces(query)) {
                query = updateWhenQueryContainsAlert(query);
                query = query + ORDER_BY + "centinel.alert.check_result:triggeredAt DESC limit " + limit;
            } else if (checkIfQueryContainsDashboardWithSpaces(query)) {
                query = updateWhenQueryContainsDashboard(query);
                query = query + ORDER_BY + "centinel.dashboard.resetTime DESC limit " + limit;
            } else if (checkIfQueryContainsData(query)) {
                query = updateWhenQueryContainsData(query);
                query = amendLimitToQuery(query, limit);
            } else {

                return Futures.immediateFailedCheckedFuture(new TransactionCommitFailedException(
                        "invalid-input", RpcResultBuilder.newError(ErrorType.APPLICATION, "invalid query",
                                "supported columns are stream, alert, dashboard and data ")));
            }

        } else {
            return Futures.immediateFailedCheckedFuture(new TransactionCommitFailedException("invalid-input",
                    RpcResultBuilder.newError(ErrorType.APPLICATION, "DB type not supported",
                            "DB type " + commonServices.dbType + "not supported")));

        }
    } else if (fromTime != null && toTime != null && limit != null) {
        if (commonServices.dbType.equalsIgnoreCase(StreamConstants.HBASE)) {
            if (query.contains(StreamConstants.STREAM)) {
                query = replaceFirstCentinelForDBType(query);
                if (checkIfQueryContainsStreamDot(query)) {
                    query = query.replace(StreamConstants.STREAM_DOT,
                            StreamConstants.CENTINEL_DOT + StreamConstants.STREAM_DOT);
                    query = query + AND + "centinel.stream.event_timestamp>=" + getSingleQuotedValue(fromTime)
                            + AND + "centinel.stream.event_timestamp<=" + getSingleQuotedValue(toTime);
                } else if (!checkIfQueryContainsStreamDot(query)) {
                    query = query + WHERE + "centinel.stream.event_timestamp>=" + getSingleQuotedValue(fromTime)
                            + AND + "centinel.stream.event_timestamp<=" + getSingleQuotedValue(toTime);
                }
                query = replaceFirstStream(query);
                query = amendLimitToQuery(query, limit);
            } else if (checkIfQueryContainsAlertWithSpaces(query)) {
                query = replaceFirstCentinelForDBType(query);
                if (checkIfQueryContainsAlertDot(query)) {
                    query = query.replace(StreamConstants.ALERT_DOT,
                            StreamConstants.CENTINEL_DOT + StreamConstants.ALERT_DOT);
                    query = query + AND + "centinel.alert.check_result:triggeredAt>="
                            + getSingleQuotedValue(fromTime) + AND + "centinel.alert.check_result:triggeredAt<="
                            + getSingleQuotedValue(toTime);

                } else if (!checkIfQueryContainsAlertDot(query)) {
                    query = query + WHERE + "centinel.alert.check_result:triggeredAt>="
                            + getSingleQuotedValue(fromTime) + AND + "centinel.alert.check_result:triggeredAt<="
                            + getSingleQuotedValue(toTime);
                }
                query = replaceFirstAlert(query);
                query = amendLimitToQuery(query, limit);
            } else if (checkIfQueryContainsDashboardWithSpaces(query)) {
                query = replaceFirstCentinelForDBType(query);
                if (checkIfQueryContainsDashboardDot(query)) {
                    query = query.replace(StreamConstants.DASHBOARD_DOT,
                            StreamConstants.CENTINEL_DOT + StreamConstants.DASHBOARD_DOT);
                    query = query + AND + "centinel.dashboard.resetTime>=" + getSingleQuotedValue(fromTime)
                            + AND + "centinel.dashboard.resetTime<=" + getSingleQuotedValue(toTime);

                } else if (!checkIfQueryContainsDashboardDot(query)) {
                    query = query + WHERE + "centinel.dashboard.resetTime>=" + getSingleQuotedValue(fromTime)
                            + AND + "centinel.dashboard.resetTime<=" + getSingleQuotedValue(toTime);
                }
                query = replaceFirstDashboard(query);
                query = amendLimitToQuery(query, limit);
            } else if (checkIfQueryContainsData(query)) {
                query = updateWhenQueryContainsData(query);
                query = amendLimitToQuery(query, limit);
            } else {

                return Futures.immediateFailedCheckedFuture(new TransactionCommitFailedException(
                        "invalid-input", RpcResultBuilder.newError(ErrorType.APPLICATION, "invalid query",
                                "supported columns are stream, alert, dashboard and data ")));
            }

        } else {
            return Futures.immediateFailedCheckedFuture(new TransactionCommitFailedException("invalid-input",
                    RpcResultBuilder.newError(ErrorType.APPLICATION, "DB type not supported",
                            "DB type " + commonServices.dbType + "not supported")));
        }
    }

    query = query.replace(StreamConstants.COLON, StreamConstants.UNDERSCORE);
    query = commonServices.matchRegEx(query);

    Map<String, String> drillQuery = new HashMap<String, String>();
    drillQuery.put(StreamConstants.QUERY_TYPE, StreamConstants.SQL);
    drillQuery.put(StreamConstants.QUERY, query);
    LOG.info("Drill Query: " + query);

    ClientResponse response = commonServices.drillRESTPost(drillQuery, commonServices.drillHostname,
            commonServices.drillPort);

    if (response != null && response.getStatus() != 200) {
        LOG.info("Error in Drill: " + response.getStatus());
        return Futures.immediateFailedCheckedFuture(new TransactionCommitFailedException("invalid-input",
                RpcResultBuilder.newError(ErrorType.APPLICATION, "Error connecting drill",
                        response.getClientResponseStatus().toString())));
    } else {

        output = commonServices.parseResponse(response.getEntity(String.class), eventFields);

    }

    List<org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.streamhandler.rev150105.query.sql.api.output.Records> recordList = new ArrayList<org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.streamhandler.rev150105.query.sql.api.output.Records>();

    for (Map<String, Object> out : output) {

        Iterator<Entry<String, Object>> itr = out.entrySet().iterator();
        List<Fields> fieldsList = new ArrayList<Fields>();
        while (itr.hasNext()) {
            Entry<String, Object> obj = itr.next();
            fieldsList.add(new FieldsBuilder().setFieldValue(obj.getValue().toString())
                    .setFieldName(obj.getKey()).build());
        }

        org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.streamhandler.rev150105.query.sql.api.output.Records recordObj = new org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.streamhandler.rev150105.query.sql.api.output.RecordsBuilder()
                .setFields(fieldsList).build();
        recordList.add(recordObj);
    }

    QuerySqlApiOutput queryOutput = new QuerySqlApiOutputBuilder().setRecords(recordList).build();
    futureResult.set(RpcResultBuilder.<QuerySqlApiOutput>success(queryOutput).build());
    return futureResult;

}

From source file:org.opendaylight.centinel.impl.CentinelStreamImpl.java

@Override
public Future<RpcResult<DeleteStreamOutput>> deleteStream(final DeleteStreamInput input) {

    final ReadWriteTransaction tx = dataProvider.newReadWriteTransaction();
    final SettableFuture<RpcResult<DeleteStreamOutput>> futureResult = SettableFuture.create();
    boolean idMatches = false;
    if (input.getStreamID() == null || input.getStreamID().isEmpty() || input.getStreamID().trim().isEmpty()) {
        LOG.debug("STREAM ID CANNOT BE NULL");
        return Futures.immediateFailedCheckedFuture(
                new TransactionCommitFailedException("inalid-input", streamIdcannotbenullError()));
    }
    final DeleteStreamOutputBuilder deleteStreamRuleOutputBuilder = new DeleteStreamOutputBuilder();
    deleteStreamRuleOutputBuilder.setMessage(input.getStreamID());

    ListenableFuture<Optional<StreamRecord>> readFutureOperational = tx.read(LogicalDatastoreType.OPERATIONAL,
            streamRecordId);
    ListenableFuture<Optional<StreamRecord>> readFutureConfigure = tx.read(LogicalDatastoreType.CONFIGURATION,
            streamRecordId);

    String configId = null;

    try {

        Optional<StreamRecord> record = readFutureOperational.get();
        if (record.isPresent()) {
            StreamRecord operationalRecord = readFutureOperational.get().get();
            List<StreamList> streamList = new ArrayList<StreamList>();
            if (!operationalRecord.getStreamList().isEmpty()) {
                streamList = operationalRecord.getStreamList();
                Iterator<StreamList> iterator = streamList.iterator();

                while (iterator.hasNext()) {
                    StreamList operationalObject = iterator.next();
                    if (operationalObject.getStreamID().equals(input.getStreamID())) {
                        configId = operationalObject.getConfigID();
                        idMatches = true;
                    }
                }
                if (!idMatches) {
                    return Futures.immediateFailedCheckedFuture(new TransactionCommitFailedException(
                            "invalid-input", RpcResultBuilder.newError(ErrorType.APPLICATION, "invalid-input",
                                    "Invalid Stream id or The stream is not present in operational data store")));
                }
            }
        } else {
            return Futures.immediateFailedCheckedFuture(new TransactionCommitFailedException("invalid-input",
                    RpcResultBuilder.newError(ErrorType.APPLICATION, "invalid-input",
                            "Record is not present in operational data store")));
        }
    }

    catch (Exception ex) {

        deleteStreamRuleOutputBuilder
                .setMessage("Stream with stream id" + input.getStreamID() + "does not exists");
        return Futures.immediateFailedCheckedFuture(new TransactionCommitFailedException("invalid-input",
                RpcResultBuilder.newError(ErrorType.APPLICATION, "invalid-input",
                        "Invalid Stream id or The stream is not present in operational data store")));

    }
    final String confID = configId;
    final ListenableFuture<Void> commitFuture = Futures.transform(readFutureConfigure,
            new AsyncFunction<Optional<StreamRecord>, Void>() {

                @Override
                public ListenableFuture<Void> apply(final Optional<StreamRecord> streamRulesRecord)
                        throws Exception {

                    List<StreamList> streamRulesLists = new ArrayList<StreamList>();
                    if (streamRulesRecord.isPresent()) {
                        streamRulesLists = streamRulesRecord.get().getStreamList();
                    }
                    Iterator<StreamList> iterator = streamRulesLists.iterator();

                    while (iterator.hasNext()) {
                        StreamList configObject = iterator.next();
                        if (configObject.getConfigID().equalsIgnoreCase(confID)) {
                            tx.delete(LogicalDatastoreType.CONFIGURATION,
                                    streamRecordId.child(StreamList.class, configObject.getKey()));
                        }

                    }
                    return tx.submit();
                }
            });
    Futures.addCallback(commitFuture, new FutureCallback<Void>() {
        @Override
        public void onSuccess(final Void result) {

            futureResult.set(RpcResultBuilder.<DeleteStreamOutput>success(deleteStreamRuleOutputBuilder.build())
                    .build());
        }

        @Override
        public void onFailure(final Throwable ex) {

            LOG.debug("Failed to commit Rule", ex);

            futureResult.set(RpcResultBuilder.<DeleteStreamOutput>failed()
                    .withRpcErrors(((TransactionCommitFailedException) ex).getErrorList()).build());
        }
    });
    return futureResult;
}

From source file:org.opendaylight.toaster.ToasterImpl.java

/**
 * Read the ToasterStatus and, if currently Up, try to write the status to
 * Down. If that succeeds, then we essentially have an exclusive lock and
 * can proceed to make toast.
 */
private void checkStatusAndMakeToast(final MakeToastInput input,
        final SettableFuture<RpcResult<Void>> futureResult, final int tries) {
    LOG.info("checkStatusAndMakeToast");

    final ReadWriteTransaction tx = dataService.newReadWriteTransaction();
    ListenableFuture<Optional<Toaster>> readFuture = tx.read(LogicalDatastoreType.OPERATIONAL, TOASTER_IID);

    final ListenableFuture<Void> commitFuture = Futures.transform(readFuture,
            new AsyncFunction<Optional<Toaster>, Void>() {

                @Override
                public ListenableFuture<Void> apply(final Optional<Toaster> toasterData) throws Exception {

                    ToasterStatus toasterStatus = ToasterStatus.Up;
                    if (toasterData.isPresent()) {
                        toasterStatus = toasterData.get().getToasterStatus();
                    }

                    LOG.debug("Read toaster status: {}", toasterStatus);

                    if (toasterStatus == ToasterStatus.Up) {
                        if (outOfBread()) {
                            LOG.debug("Toaster is out of bread");
                            return Futures.immediateFailedCheckedFuture(
                                    new TransactionCommitFailedException("", makeToasterOutOfBreadError()));
                        }

                        LOG.debug("Setting Toaster status to Down");

                        // We're not currently making toast - try to update the status to Down
                        // to indicate we're going to make toast. This acts as a lock to prevent
                        // concurrent toasting.
                        tx.put(LogicalDatastoreType.OPERATIONAL, TOASTER_IID, buildToaster(ToasterStatus.Down));
                        return tx.submit();
                    }

                    LOG.debug("Oops - already making toast!");

                    // Return an error since we are already making toast. This will get
                    // propagated to the commitFuture below which will interpret the null
                    // TransactionStatus in the RpcResult as an error condition.
                    return Futures.immediateFailedCheckedFuture(
                            new TransactionCommitFailedException("", makeToasterInUseError()));
                }
            });

    Futures.addCallback(commitFuture, new FutureCallback<Void>() {
        @Override
        public void onSuccess(final Void result) {
            // OK to make toast
            currentMakeToastTask.set(executor.submit(new MakeToastTask(input, futureResult)));
        }

        @Override
        public void onFailure(final Throwable ex) {
            if (ex instanceof OptimisticLockFailedException) {

                // Another thread is likely trying to make toast simultaneously and updated the
                // status before us. Try reading the status again - if another make toast is
                // now in progress, we should get ToasterStatus.Down and fail.

                if ((tries - 1) > 0) {
                    LOG.debug("Got OptimisticLockFailedException - trying again");

                    checkStatusAndMakeToast(input, futureResult, tries - 1);
                } else {
                    futureResult.set(RpcResultBuilder.<Void>failed()
                            .withError(ErrorType.APPLICATION, ex.getMessage()).build());
                }

            } else {

                LOG.debug("Failed to commit Toaster status", ex);

                // Probably already making toast.
                futureResult.set(RpcResultBuilder.<Void>failed()
                        .withRpcErrors(((TransactionCommitFailedException) ex).getErrorList()).build());
            }
        }
    });
}

From source file:ncmount.impl.NcmountDomProvider.java

/**
 * Write list of routes to specified netconf device.
 * The resulting routes conform to Cisco-IOS-XR-ip-static-cfg.yang yang model.
 *
 * @param normalizedNode the normalized node holding the list of simple routes to write
 * @return Success if routes were written to mounted netconf device
 */
private CheckedFuture<DOMRpcResult, DOMRpcException> writeNode(final NormalizedNode<?, ?> normalizedNode) {
    // TODO: Method need to be implemented.
    LOG.info("invoked RPC Write-Node: {}", normalizedNode);
    return Futures
            .immediateFailedCheckedFuture((DOMRpcException) new MethodNotImplemented("method not implemented"));

}

From source file:org.opendaylight.centinel.impl.CentinelStreamImpl.java

@Override
public Future<RpcResult<GetStreamOutput>> getStream(GetStreamInput input) {
    final ReadWriteTransaction tx = dataProvider.newReadWriteTransaction();
    final SettableFuture<RpcResult<GetStreamOutput>> futureResult = SettableFuture.create();
    boolean idMatches = false;
    if (input.getStreamID() == null || input.getStreamID().isEmpty() || input.getStreamID().trim().isEmpty()) {
        LOG.debug("STREAM ID CANNOT BE NULL");
        return Futures.immediateFailedCheckedFuture(
                new TransactionCommitFailedException("inalid-input", streamIdcannotbenullError()));
    }
    final GetStreamOutputBuilder getStreamOutputBuilder = new GetStreamOutputBuilder();
    ListenableFuture<Optional<StreamRecord>> streamRuleReadFuture = tx.read(LogicalDatastoreType.OPERATIONAL,
            streamRecordId);
    try {
        Optional<StreamRecord> streamRecord = streamRuleReadFuture.get();
        List<StreamList> streamList = new ArrayList<StreamList>();
        if (streamRecord.isPresent()) {
            streamList = streamRecord.get().getStreamList();
        } else {
            return Futures.immediateFailedCheckedFuture(new TransactionCommitFailedException("invalid-input",
                    RpcResultBuilder.newError(ErrorType.APPLICATION, "invalid-input",
                            "Record is not present in operational data store")));
        }
        if (streamList.isEmpty()) {
            return Futures.immediateFailedCheckedFuture(
                    new TransactionCommitFailedException("inalid-input", RpcResultBuilder
                            .newError(ErrorType.APPLICATION, "invalid-input", "NO stream in datastore")));
        } else {
            java.util.Iterator<StreamList> iterator = streamList.iterator();

            while (iterator.hasNext()) {
                StreamList streamListObj = iterator.next();
                if (streamListObj.getStreamID().equals(input.getStreamID())) {
                    idMatches = true;
                    getStreamOutputBuilder.setConfigID(streamListObj.getConfigID())
                            .setContentPack(streamListObj.getContentPack())
                            .setDescription(streamListObj.getDescription())
                            .setNodeType(streamListObj.getNodeType()).setRuleID(streamListObj.getRuleID())
                            .setRuleTypeClassifier(streamListObj.getRuleTypeClassifier())
                            .setStreamID(streamListObj.getStreamID()).setTimeStamp(streamListObj.getTimeStamp())
                            .setTitle(streamListObj.getTitle());

                    if (!streamListObj.getStreamRules().isEmpty()) {

                        Iterator<StreamRules> it = streamListObj.getStreamRules().iterator();
                        List<StreamRules> streamRule = new ArrayList<StreamRules>();
                        StreamRules streamRuleListObj = null;
                        while (it.hasNext()) {
                            streamRuleListObj = it.next();
                            streamRule.add(streamRuleListObj);
                        }
                        getStreamOutputBuilder.setStreamRules(streamRule);
                    }
                }
            }
            if (!idMatches) {
                return Futures.immediateFailedCheckedFuture(new TransactionCommitFailedException(
                        "invalid-input", RpcResultBuilder.newError(ErrorType.APPLICATION, "invalid-input",
                                "Invalid Stream id or The stream is not present in operational data store")));
            }
            futureResult.set(RpcResultBuilder.<GetStreamOutput>success(getStreamOutputBuilder.build()).build());
        }
    }

    catch (Exception ex) {
        LOG.error("Exception occured while getting record from operational data store", ex);
    }
    return futureResult;
}

From source file:diskCacheV111.srm.dcache.Storage.java

@Override
public CheckedFuture<Pin, ? extends SRMException> pinFile(SRMUser user, URI surl, String clientHost,
        long pinLifetime, String requestToken) {
    try {
        return Futures.makeChecked(PinCompanion.pinFile(asDcacheUser(user).getSubject(), config.getPath(surl),
                clientHost, pinLifetime, requestToken, _isOnlinePinningEnabled, _poolMonitor, _pnfsStub,
                _poolManagerStub, _pinManagerStub, _executor), new ToSRMException());
    } catch (SRMAuthorizationException | SRMInvalidPathException e) {
        return Futures.immediateFailedCheckedFuture(e);
    }
}

From source file:com.bt.sitb.opendaylight.controller.sample.btil.provider.OpendaylightBtil.java

private void checkStatusAndMakeToast(final MakeToastInput input,
        final SettableFuture<RpcResult<Void>> futureResult, final int tries) {

    LOG.info("checkStatusAndMakeToast input={}", input);
    // Read the BtilStatus and, if currently Up, try to write the status to Down.
    // If that succeeds, then we essentially have an exclusive lock and can proceed
    // to make toast.

    LOG.info("Reading from id={}", TOASTER_IID);

    final ReadWriteTransaction tx = dataProvider.newReadWriteTransaction();
    ListenableFuture<Optional<SitbBtil>> readFuture = tx.read(LogicalDatastoreType.OPERATIONAL, TOASTER_IID);

    final ListenableFuture<Void> commitFuture = Futures.transform(readFuture,
            new AsyncFunction<Optional<SitbBtil>, Void>() {

                @Override
                public ListenableFuture<Void> apply(final Optional<SitbBtil> btilData) throws Exception {

                    SitbBtil.BtilStatus btilStatus = SitbBtil.BtilStatus.Up;
                    if (btilData.isPresent()) {
                        btilStatus = btilData.get().getBtilStatus();
                    }

                    LOG.debug("Read btil status: {}", btilStatus);

                    if (btilStatus == SitbBtil.BtilStatus.Up) {

                        if (outOfBread()) {
                            LOG.debug("Btil is out of bread");

                            return Futures.immediateFailedCheckedFuture(
                                    new TransactionCommitFailedException("", makeBtilOutOfBreadError()));
                        }

                        LOG.debug("Setting Btil status to Down");

                        // We're not currently making toast - try to update the status to Down
                        // to indicate we're going to make toast. This acts as a lock to prevent
                        // concurrent toasting.
                        tx.put(LogicalDatastoreType.OPERATIONAL, TOASTER_IID,
                                buildBtil(SitbBtil.BtilStatus.Down));
                        return tx.submit();
                    }

                    LOG.debug("Oops - already making toast!");

                    // Return an error since we are already making toast. This will get
                    // propagated to the commitFuture below which will interpret the null
                    // TransactionStatus in the RpcResult as an error condition.
                    return Futures.immediateFailedCheckedFuture(
                            new TransactionCommitFailedException("", makeBtilInUseError()));
                }
            });

    Futures.addCallback(commitFuture, new FutureCallback<Void>() {
        @Override
        public void onSuccess(final Void result) {
            // OK to make toast
            currentMakeToastTask.set(executor.submit(new MakeToastTask(input, futureResult)));
        }

        @Override
        public void onFailure(final Throwable ex) {
            if (ex instanceof OptimisticLockFailedException) {

                // Another thread is likely trying to make toast simultaneously and updated the
                // status before us. Try reading the status again - if another make toast is
                // now in progress, we should get BtilStatus.Down and fail.

                if ((tries - 1) > 0) {
                    LOG.debug("Got OptimisticLockFailedException - trying again");

                    checkStatusAndMakeToast(input, futureResult, tries - 1);
                } else {
                    futureResult.set(RpcResultBuilder.<Void>failed()
                            .withError(ErrorType.APPLICATION, ex.getMessage()).build());
                }

            } else {

                LOG.debug("Failed to commit Btil status", ex);

                // Probably already making toast.
                futureResult.set(RpcResultBuilder.<Void>failed()
                        .withRpcErrors(((TransactionCommitFailedException) ex).getErrorList()).build());
            }
        }
    });
}

From source file:diskCacheV111.srm.dcache.Storage.java

@Override
public CheckedFuture<String, ? extends SRMException> unPinFile(SRMUser user, String fileId, String pinId) {
    if (PinCompanion.isFakePinId(pinId)) {
        return Futures.immediateCheckedFuture(null);
    }

    try {
        Subject subject = (user == null) ? Subjects.ROOT : asDcacheUser(user).getSubject();
        return Futures.makeChecked(UnpinCompanion.unpinFile(subject, new PnfsId(fileId), Long.parseLong(pinId),
                _pinManagerStub, _executor), new ToSRMException());
    } catch (SRMAuthorizationException e) {
        return Futures.immediateFailedCheckedFuture(e);
    }
}