Example usage for com.google.common.util.concurrent AtomicDouble addAndGet

Introduction

On this page you can find example usages of com.google.common.util.concurrent.AtomicDouble#addAndGet, collected from open source projects.

Prototype

public final double addAndGet(double delta) 

Document

Atomically adds the given value to the current value and returns the updated value.
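
Before the project excerpts below, here is a minimal, self-contained sketch of accumulating a sum across threads; the class name, pool size, and iteration count are illustrative, not taken from any of the sources:

import com.google.common.util.concurrent.AtomicDouble;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class AtomicDoubleAddAndGetDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicDouble total = new AtomicDouble(0.0);

        // each call atomically adds 0.5 and returns the updated value
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 1000; i++) {
            pool.execute(() -> total.addAndGet(0.5));
        }
        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.MINUTES);

        System.out.println(total.get()); // prints 500.0
    }
}

AtomicDouble stores its value as raw long bits and retries updates with a compare-and-set loop, so addAndGet is atomic without locking; a plain double field updated with += from several threads could lose updates.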

Usage

From source file:org.deeplearning4j.models.rntn.RNTN.java
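
In this example, parallel backpropagation over a batch of trees accumulates each tree's error sum into a shared AtomicDouble via addAndGet.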

public INDArray getValueGradient(final List<Tree> trainingBatch) {

    // We use TreeMap for each of these so that they stay in a
    // canonical sorted order
    // TODO: factor out the initialization routines
    // binaryTD stands for Transform Derivatives
    final MultiDimensionalMap<String, String, INDArray> binaryTD = MultiDimensionalMap.newTreeBackedMap();
    // the derivatives of the INDArrays for the binary nodes
    final MultiDimensionalMap<String, String, INDArray> binaryINDArrayTD = MultiDimensionalMap
            .newTreeBackedMap();
    // binaryCD stands for Classification Derivatives
    final MultiDimensionalMap<String, String, INDArray> binaryCD = MultiDimensionalMap.newTreeBackedMap();

    // unaryCD stands for Classification Derivatives
    final Map<String, INDArray> unaryCD = new TreeMap<>();

    // word vector derivatives
    final Map<String, INDArray> wordVectorD = new TreeMap<>();

    for (MultiDimensionalMap.Entry<String, String, INDArray> entry : binaryTransform.entrySet()) {
        int numRows = entry.getValue().rows();
        int numCols = entry.getValue().columns();

        binaryTD.put(entry.getFirstKey(), entry.getSecondKey(), Nd4j.create(numRows, numCols));
    }

    if (!combineClassification) {
        for (MultiDimensionalMap.Entry<String, String, INDArray> entry : binaryClassification.entrySet()) {
            int numRows = entry.getValue().rows();
            int numCols = entry.getValue().columns();

            binaryCD.put(entry.getFirstKey(), entry.getSecondKey(), Nd4j.create(numRows, numCols));
        }
    }

    if (useDoubleTensors) {
        for (MultiDimensionalMap.Entry<String, String, INDArray> entry : binaryTensors.entrySet()) {
            int numRows = entry.getValue().size(1);
            int numCols = entry.getValue().size(2);
            int numSlices = entry.getValue().slices();

            binaryINDArrayTD.put(entry.getFirstKey(), entry.getSecondKey(),
                    Nd4j.create(numRows, numCols, numSlices));
        }
    }

    for (Map.Entry<String, INDArray> entry : unaryClassification.entrySet()) {
        int numRows = entry.getValue().rows();
        int numCols = entry.getValue().columns();
        unaryCD.put(entry.getKey(), Nd4j.create(numRows, numCols));
    }

    for (String s : vocabCache.words()) {
        INDArray vector = featureVectors.vector(s);
        int numRows = vector.rows();
        int numCols = vector.columns();
        wordVectorD.put(s, Nd4j.create(numRows, numCols));
    }

    final List<Tree> forwardPropTrees = new CopyOnWriteArrayList<>();
    Parallelization.iterateInParallel(trainingBatch, new Parallelization.RunnableWithParams<Tree>() {

        public void run(Tree currentItem, Object[] args) {
            Tree trainingTree = new Tree(currentItem);
            trainingTree.connect(new ArrayList<>(currentItem.children()));
            // this will attach the error vectors and the node vectors
            // to each node in the tree
            forwardPropagateTree(trainingTree);
            forwardPropTrees.add(trainingTree);

        }
    }, rnTnActorSystem);

    // TODO: we may find a big speedup by separating the derivatives and then summing
    final AtomicDouble error = new AtomicDouble(0);
    if (!forwardPropTrees.isEmpty())
        Parallelization.iterateInParallel(forwardPropTrees, new Parallelization.RunnableWithParams<Tree>() {

            public void run(Tree currentItem, Object[] args) {
                backpropDerivativesAndError(currentItem, binaryTD, binaryCD, binaryINDArrayTD, unaryCD,
                        wordVectorD);
                error.addAndGet(currentItem.errorSum());

            }
        }, new Parallelization.RunnableWithParams<Tree>() {

            public void run(Tree currentItem, Object[] args) {
            }
        }, rnTnActorSystem, new Object[] { binaryTD, binaryCD, binaryINDArrayTD, unaryCD, wordVectorD });

    // scale the error by the number of sentences so that the
    // regularization isn't drowned out for large training batches
    double scale = trainingBatch == null || trainingBatch.isEmpty() ? 1.0 : (1.0 / trainingBatch.size());
    value = error.doubleValue() * scale;

    value += scaleAndRegularize(binaryTD, binaryTransform, scale, regTransformMatrix);
    value += scaleAndRegularize(binaryCD, binaryClassification, scale, regClassification);
    value += scaleAndRegularizeINDArray(binaryINDArrayTD, binaryTensors, scale, regTransformINDArray);
    value += scaleAndRegularize(unaryCD, unaryClassification, scale, regClassification);
    value += scaleAndRegularize(wordVectorD, featureVectors, scale, regWordVector);

    INDArray derivative = Nd4j.toFlattened(getNumParameters(), binaryTD.values().iterator(),
            binaryCD.values().iterator(), binaryINDArrayTD.values().iterator(), unaryCD.values().iterator(),
            wordVectorD.values().iterator());

    if (derivative.length() != numParameters)
        throw new IllegalStateException("Gradient has wrong number of parameters " + derivative.length()
                + " should have been " + numParameters);

    if (paramAdaGrad == null)
        paramAdaGrad = new AdaGrad(1, derivative.columns());

    derivative = paramAdaGrad.getGradient(derivative, 0);

    return derivative;
}

From source file:com.linkedin.pinot.core.query.aggregation.function.quantile.digest.QuantileDigest.java
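
In this example, a post-order traversal of a quantile digest uses addAndGet to accumulate node weights and weighted midpoints, from which the histogram buckets are built.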

public List<Bucket> getHistogram(List<Long> bucketUpperBounds) {
    checkArgument(Ordering.natural().isOrdered(bucketUpperBounds),
            "buckets must be sorted in increasing order");

    final ImmutableList.Builder<Bucket> builder = ImmutableList.builder();
    final PeekingIterator<Long> iterator = Iterators.peekingIterator(bucketUpperBounds.iterator());

    final AtomicDouble sum = new AtomicDouble();
    final AtomicDouble lastSum = new AtomicDouble();

    // for computing the weighted average of values in each bucket
    final AtomicDouble bucketWeightedSum = new AtomicDouble();

    final double normalizationFactor = weight(TimeUnit.NANOSECONDS.toSeconds(ticker.read()));

    postOrderTraversal(root, new Callback() {
        @Override
        public boolean process(Node node) {

            while (iterator.hasNext() && iterator.peek() <= node.getUpperBound()) {
                double bucketCount = sum.get() - lastSum.get();

                Bucket bucket = new Bucket(bucketCount / normalizationFactor,
                        bucketWeightedSum.get() / bucketCount);

                builder.add(bucket);
                lastSum.set(sum.get());
                bucketWeightedSum.set(0);
                iterator.next();
            }

            bucketWeightedSum.addAndGet(node.getMiddle() * node.weightedCount);
            sum.addAndGet(node.weightedCount);
            return iterator.hasNext();
        }
    });

    while (iterator.hasNext()) {
        double bucketCount = sum.get() - lastSum.get();
        Bucket bucket = new Bucket(bucketCount / normalizationFactor, bucketWeightedSum.get() / bucketCount);

        builder.add(bucket);

        iterator.next();
    }

    return builder.build();
}

From source file:com.vmware.admiral.adapter.kubernetes.service.KubernetesRemoteApiClient.java
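
In this example, per-node CPU and memory figures arriving on asynchronous callbacks are aggregated into shared AtomicDouble totals with addAndGet before the host properties are reported.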

public void doInfo(KubernetesContext context, CompletionHandler completionHandler) {
    getNodes(context, (o, ex) -> {
        if (ex != null) {
            completionHandler.handle(null, ex);
        } else {
            NodeList nodeList = o.getBody(NodeList.class);
            // guard against a missing node list before sizing the counter and result list
            if (nodeList != null && nodeList.items != null) {
                AtomicDouble usedCPU = new AtomicDouble(0D);
                AtomicDouble totalCPU = new AtomicDouble(0D);
                AtomicDouble totalMem = new AtomicDouble(0D);
                AtomicDouble usedMem = new AtomicDouble(0D);
                AtomicInteger counter = new AtomicInteger(nodeList.items.size());
                AtomicBoolean hasError = new AtomicBoolean();
                List<KubernetesNodeData> nodes = new ArrayList<>(nodeList.items.size());
                for (Node node : nodeList.items) {
                    if (node == null || node.metadata == null || node.metadata.name == null) {
                        continue;
                    }
                    getStats(context, node, (o2, ex2) -> {
                        if (ex2 != null) {
                            logger.log(Level.WARNING, String.format(
                                    "Error while getting stats for node %s", node.metadata.name), ex2);
                            if (hasError.compareAndSet(false, true)) {
                                completionHandler.handle(null, ex2);
                            }
                        } else {
                            @SuppressWarnings("unchecked")
                            Map<String, Object> data = o2.getBody(Map.class);
                            KubernetesNodeData nodeData = new KubernetesNodeData();
                            nodeData.name = node.metadata.name;
                            if (data != null && data.containsKey("allocatedResources")) {
                                @SuppressWarnings("unchecked")
                                Map<String, Double> resources = (Map<String, Double>) data
                                        .get("allocatedResources");
                                Double val = null;
                                if ((val = resources.get("cpuRequestsFraction")) != null) {
                                    Double totalForNode = resources.get("cpuCapacity");
                                    totalCPU.addAndGet(totalForNode);
                                    nodeData.usedCPU = val;
                                    usedCPU.addAndGet(val * totalForNode);
                                }
                                if ((val = resources.get("memoryCapacity")) != null) {
                                    nodeData.totalMem = val;
                                    totalMem.addAndGet(val);
                                }
                                if ((val = resources.get("memoryRequests")) != null) {
                                    nodeData.availableMem = nodeData.totalMem - val;
                                    usedMem.addAndGet(val);
                                }
                            }
                            synchronized (nodes) {
                                nodes.add(nodeData);
                            }
                            if (counter.decrementAndGet() == 0 && !hasError.get()) {
                                Map<String, String> properties = new HashMap<>();
                                properties.put(ContainerHostService.DOCKER_HOST_CPU_USAGE_PCT_PROP_NAME,
                                        Double.toString(usedCPU.get() / totalCPU.get()));
                                properties.put(ContainerHostService.DOCKER_HOST_AVAILABLE_MEMORY_PROP_NAME,
                                        Double.toString(totalMem.get() - usedMem.get()));
                                properties.put(ContainerHostService.DOCKER_HOST_TOTAL_MEMORY_PROP_NAME,
                                        Double.toString(totalMem.get()));
                                properties.put(ContainerHostService.KUBERNETES_HOST_NODE_LIST_PROP_NAME,
                                        Utils.toJson(nodes));
                                Operation result = new Operation();
                                result.setBody(properties);
                                completionHandler.handle(result, null);
                            }
                        }
                    });
                }
            }
        }
    });
}

From source file:com.court.controller.CollectionSheetFxmlController.java
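
In this example, a background task totals loan installments and subscriptions per member, using addAndGet to accumulate the sums produced by stream pipelines.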

private void performSearch(ComboBox<String> search_typ_combo, TextField search_txt) {

    if (search_txt.getText() != null && !search_txt.getText().isEmpty()) {
        ImageView progressIndicator = new ImageView();
        progressIndicator.setImage(new Image(FileHandler.LOADING_DEFAULT_GIF));
        VBox v = new VBox(progressIndicator);
        v.setAlignment(Pos.CENTER);
        table_bpane.setCenter(v);

        Task<List<Member>> mTask = new Task<List<Member>>() {

            {
                setOnSucceeded(d -> {
                    AtomicDouble ins_total = new AtomicDouble(0.0);
                    AtomicDouble sub_total = new AtomicDouble(0.0);
                    List<Member> mList = getValue();

                    mList.stream().forEach(m -> {

                        List<MemberLoan> instOnly = m.getMemberLoans().stream()
                                .sorted(Comparator.comparing(MemberLoan::getChildId).reversed())
                                .filter(p -> !p.isIsComplete())
                                .filter(FxUtilsHandler.checkIfNotYetPaid(p -> p.getPaidUntil()))
                                .filter(p -> p.isStatus())
                                .filter(p -> (p.getLastInstall() < p.getLoanDuration()))
                                .filter(FxUtilsHandler.distinctByKey(p -> p.getMemberLoanCode()))
                                .collect(Collectors.toList());

                        // installments finished, but kota-only loans remain
                        List<MemberLoan> kotaOnly = m.getMemberLoans().stream()
                                .sorted(Comparator.comparing(MemberLoan::getChildId).reversed())
                                .filter(p -> !p.isIsComplete())
                                .filter(FxUtilsHandler.checkIfNotYetPaid(p -> p.getPaidUntil()))
                                .filter(p -> p.isStatus())
                                .filter(p -> (p.getLastInstall() >= p.getLoanDuration()))
                                .filter(FxUtilsHandler.distinctByKey(p -> p.getMemberLoanCode()))
                                .collect(Collectors.toList());

                        double sum = instOnly.stream().mapToDouble(p -> p.getLoanInstallment()).sum()
                                + kotaOnly.stream().mapToDouble(p -> p.getKotaLeft()).sum();

                        ins_total.addAndGet(sum);

                        List<MemberSubscriptions> mbrSubs = new ArrayList<>(m.getMemberSubscriptions());

                        boolean flag = FxUtilsHandler.hasPreviousSubscriptions(m.getId());
                        if (flag) {
                            sub_total.addAndGet(mbrSubs.stream().mapToDouble(a -> a.getAmount()).sum());
                        } else {
                            sub_total.addAndGet(
                                    mbrSubs.stream().filter(s -> !s.getRepaymentType().equalsIgnoreCase("Once"))
                                            .mapToDouble(a -> a.getAmount()).sum());
                        }

                    });
                    total = ins_total.doubleValue() + sub_total.doubleValue();
                    chk_amt_txt.setText(TextFormatHandler.CURRENCY_DECIMAL_FORMAT.format(total));

                    Pagination paginationTable = initCollectionTable(mList);
                    table_bpane.getChildren().remove(0);
                    table_bpane.setCenter(paginationTable);
                });

                setOnFailed(workerStateEvent -> getException().printStackTrace());
            }

            @Override
            protected List<Member> call() throws Exception {
                return memberList();
            }

        };

        Thread mThread = new Thread(mTask, "m-task");
        mThread.setDaemon(true);
        mThread.start();
    }
}