Example usage for java.util.stream.Collectors.counting()

List of usage examples for java.util.stream.Collectors.counting()

Introduction

This page collects example usages of java.util.stream.Collectors.counting().

Prototype

public static <T> Collector<T, ?, Long> counting() 

Document

Returns a Collector accepting elements of type T that counts the number of input elements.
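Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the source files on this page) showing the two typical ways counting() is used: on its own, and as a downstream collector of groupingBy. The word list is purely illustrative.

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

public class CountingExample {
    public static void main(String[] args) {
        List<String> words = Arrays.asList("a", "b", "a", "c", "a", "b");

        // Count all elements of the stream (equivalent to words.stream().count()).
        Long total = words.stream().collect(Collectors.counting());

        // counting() is most often used as a downstream collector of groupingBy
        // to build a frequency map, as most of the examples below do.
        Map<String, Long> frequencies = words.stream()
                .collect(Collectors.groupingBy(Function.identity(), Collectors.counting()));

        System.out.println(total);       // 6
        System.out.println(frequencies); // {a=3, b=2, c=1} (map iteration order may vary)
    }
}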

Usage

From source file:com.ikanow.aleph2.shared.crud.elasticsearch.services.ElasticsearchCrudService.java

@Override
public CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>> storeObjects(final List<O> new_objects,
        final boolean replace_if_present) {
    try {
        final ReadWriteContext rw_context = getRwContextOrThrow(_state.es_context, "storeObjects");

        final BulkRequestBuilder brb = new_objects.stream()
                .reduce(_state.client.prepareBulk().setConsistencyLevel(WriteConsistencyLevel.ONE)
                        .setRefresh(CreationPolicy.AVAILABLE_IMMEDIATELY == _state.creation_policy),
                        (acc, val) -> acc.add(singleObjectIndexRequest(Either.left(rw_context),
                                Either.left(val), replace_if_present, true)),
                        (acc1, acc2) -> {
                            throw new RuntimeException("Internal logic error - Parallel not supported");
                        });

        final BiConsumer<BulkResponse, CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>>> action_handler = new BiConsumer<BulkResponse, CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>>>() {
            // WARNING: mutable/imperative code ahead...
            long _curr_written = 0;
            List<Object> _id_list = null;
            HashMap<String, String> _mapping_failures = null;

            @Override
            public void accept(final BulkResponse result,
                    final CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>> future) {

                if (result.hasFailures() && (rw_context
                        .typeContext() instanceof ElasticsearchContext.TypeContext.ReadWriteTypeContext.AutoRwTypeContext)) {
                    final ElasticsearchContext.TypeContext.ReadWriteTypeContext.AutoRwTypeContext auto_context = (ElasticsearchContext.TypeContext.ReadWriteTypeContext.AutoRwTypeContext) rw_context
                            .typeContext();
                    // Recursive builder in case I need to build a second batch of docs                        
                    BulkRequestBuilder brb2 = null;

                    if (null == _id_list) {
                        _id_list = new LinkedList<Object>();
                    }
                    HashMap<String, String> temp_mapping_failures = null;
                    final Iterator<BulkItemResponse> it = result.iterator();
                    while (it.hasNext()) {
                        final BulkItemResponse bir = it.next();
                        if (bir.isFailed()) {
                            if (bir.getFailure().getMessage().startsWith("MapperParsingException")) {
                                final Set<String> fixed_type_fields = rw_context.typeContext()
                                        .fixed_type_fields();
                                if (!fixed_type_fields.isEmpty()) {
                                    // Obtain the field name from the exception (if we fail then drop the record) 
                                    final String field = getFieldFromParsingException(
                                            bir.getFailure().getMessage());
                                    if ((null == field) || fixed_type_fields.contains(field)) {
                                        continue;
                                    }
                                } //(else roll on to...)                                                

                                // OK this is the case where I might be able to apply auto types:
                                if (null == brb2) {
                                    brb2 = _state.client.prepareBulk()
                                            .setConsistencyLevel(WriteConsistencyLevel.ONE).setRefresh(
                                                    CreationPolicy.AVAILABLE_IMMEDIATELY == _state.creation_policy);
                                }
                                String failed_json = null;
                                if (null == _mapping_failures) { // first time through, use item id to grab the objects from the original request
                                    if (null == temp_mapping_failures) {
                                        temp_mapping_failures = new HashMap<String, String>();
                                    }
                                    final ActionRequest<?> ar = brb.request().requests().get(bir.getItemId());
                                    if (ar instanceof IndexRequest) {
                                        IndexRequest ir = (IndexRequest) ar;
                                        failed_json = ir.source().toUtf8();
                                        temp_mapping_failures.put(bir.getId(), failed_json);
                                    }
                                } else { // have already grabbed all the failure _ids and stuck in a map
                                    failed_json = _mapping_failures.get(bir.getId());
                                }
                                if (null != failed_json) {
                                    brb2.add(singleObjectIndexRequest(
                                            Either.right(Tuples._2T(bir.getIndex(),
                                                    ElasticsearchContextUtils.getNextAutoType(
                                                            auto_context.getPrefix(), bir.getType()))),
                                            Either.right(Tuples._2T(bir.getId(), failed_json)), false, true));
                                }
                            }
                            // Ugh otherwise just silently fail I guess? 
                            //(should I also look for transient errors and resubmit them after a pause?!)
                        } else { // (this item worked)
                            _id_list.add(bir.getId());
                            _curr_written++;
                        }
                    }
                    if (null != brb2) { // found mapping errors to retry with
                        if (null == _mapping_failures) // (first level of recursion)
                            _mapping_failures = temp_mapping_failures;

                        // (note that if brb2.request().requests().isEmpty() this is an internal logic error, so it's OK to throw)
                        ElasticsearchFutureUtils.wrap(brb2.execute(), future, this, (error, future2) -> {
                            future2.completeExceptionally(error);
                        });
                    } else { // relative success, plus we've built the list anyway
                        future.complete(Tuples._2T(() -> _id_list, () -> (Long) _curr_written));
                    }
                } else { // No errors with this iteration of the bulk request         
                    _curr_written += result.getItems().length;

                    if (null == _id_list) { // This is the first bulk request, no recursion on failures, so can lazily create the list in case it isn't needed
                        final Supplier<List<Object>> get_objects = () -> {
                            return StreamSupport.stream(result.spliterator(), false)
                                    .filter(bir -> !bir.isFailed()).map(bir -> bir.getId())
                                    .collect(Collectors.toList());
                        };
                        final Supplier<Long> get_count_workaround = () -> {
                            return StreamSupport.stream(result.spliterator(), false)
                                    .filter(bir -> !bir.isFailed()).collect(Collectors.counting());
                        };
                        get_count_workaround.get();
                        future.complete(Tuples._2T(get_objects, get_count_workaround));
                    } else { // have already calculated everything so just return it                     
                        future.complete(Tuples._2T(() -> _id_list, () -> (Long) _curr_written));
                    }
                }
            }
        };

        return ElasticsearchFutureUtils.wrap(brb.execute(),
                new CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>>(), action_handler,
                (error, future) -> {
                    future.completeExceptionally(error);
                });
    } catch (Exception e) {
        return FutureUtils.returnError(e);
    }
}

From source file:com.uber.hoodie.common.table.view.HoodieTableFileSystemViewTest.java

@Test
public void testStreamLatestVersionInPartition() throws IOException {
    // Put some files in the partition
    String fullPartitionPath = basePath + "/2016/05/01/";
    new File(fullPartitionPath).mkdirs();
    String commitTime1 = "1";
    String commitTime2 = "2";
    String commitTime3 = "3";
    String commitTime4 = "4";
    String fileId1 = UUID.randomUUID().toString();
    String fileId2 = UUID.randomUUID().toString();
    String fileId3 = UUID.randomUUID().toString();
    String fileId4 = UUID.randomUUID().toString();

    new File(fullPartitionPath + FSUtils.makeDataFileName(commitTime1, 1, fileId1)).createNewFile();
    new File(fullPartitionPath + FSUtils.makeDataFileName(commitTime4, 1, fileId1)).createNewFile();
    new File(
            fullPartitionPath + FSUtils.makeLogFileName(fileId1, HoodieLogFile.DELTA_EXTENSION, commitTime4, 0))
                    .createNewFile();
    new File(
            fullPartitionPath + FSUtils.makeLogFileName(fileId1, HoodieLogFile.DELTA_EXTENSION, commitTime4, 1))
                    .createNewFile();
    new File(fullPartitionPath + FSUtils.makeDataFileName(commitTime1, 1, fileId2)).createNewFile();
    new File(fullPartitionPath + FSUtils.makeDataFileName(commitTime2, 1, fileId2)).createNewFile();
    new File(fullPartitionPath + FSUtils.makeDataFileName(commitTime3, 1, fileId2)).createNewFile();
    new File(
            fullPartitionPath + FSUtils.makeLogFileName(fileId2, HoodieLogFile.DELTA_EXTENSION, commitTime3, 0))
                    .createNewFile();
    new File(fullPartitionPath + FSUtils.makeDataFileName(commitTime3, 1, fileId3)).createNewFile();
    new File(fullPartitionPath + FSUtils.makeDataFileName(commitTime4, 1, fileId3)).createNewFile();
    new File(
            fullPartitionPath + FSUtils.makeLogFileName(fileId4, HoodieLogFile.DELTA_EXTENSION, commitTime4, 0))
                    .createNewFile();

    new File(basePath + "/.hoodie/" + commitTime1 + ".commit").createNewFile();
    new File(basePath + "/.hoodie/" + commitTime2 + ".commit").createNewFile();
    new File(basePath + "/.hoodie/" + commitTime3 + ".commit").createNewFile();
    new File(basePath + "/.hoodie/" + commitTime4 + ".commit").createNewFile();

    // Now we list the entire partition
    FileStatus[] statuses = metaClient.getFs().listStatus(new Path(fullPartitionPath));
    assertEquals(11, statuses.length);
    refreshFsView(null);

    // Check files as of latest commit.
    List<FileSlice> allSlices = rtView.getAllFileSlices("2016/05/01").collect(Collectors.toList());
    assertEquals(8, allSlices.size());
    Map<String, Long> fileSliceMap = allSlices.stream()
            .collect(Collectors.groupingBy(slice -> slice.getFileId(), Collectors.counting()));
    assertEquals(2, fileSliceMap.get(fileId1).longValue());
    assertEquals(3, fileSliceMap.get(fileId2).longValue());
    assertEquals(2, fileSliceMap.get(fileId3).longValue());
    assertEquals(1, fileSliceMap.get(fileId4).longValue());

    List<HoodieDataFile> dataFileList = roView.getLatestDataFilesBeforeOrOn("2016/05/01", commitTime4)
            .collect(Collectors.toList());
    assertEquals(3, dataFileList.size());
    Set<String> filenames = Sets.newHashSet();
    for (HoodieDataFile status : dataFileList) {
        filenames.add(status.getFileName());
    }
    assertTrue(filenames.contains(FSUtils.makeDataFileName(commitTime4, 1, fileId1)));
    assertTrue(filenames.contains(FSUtils.makeDataFileName(commitTime3, 1, fileId2)));
    assertTrue(filenames.contains(FSUtils.makeDataFileName(commitTime4, 1, fileId3)));

    filenames = Sets.newHashSet();
    List<HoodieLogFile> logFilesList = rtView.getLatestFileSlicesBeforeOrOn("2016/05/01", commitTime4)
            .map(slice -> slice.getLogFiles()).flatMap(logFileList -> logFileList).collect(Collectors.toList());
    assertEquals(logFilesList.size(), 4);
    for (HoodieLogFile logFile : logFilesList) {
        filenames.add(logFile.getFileName());
    }
    assertTrue(filenames
            .contains(FSUtils.makeLogFileName(fileId1, HoodieLogFile.DELTA_EXTENSION, commitTime4, 0)));
    assertTrue(filenames
            .contains(FSUtils.makeLogFileName(fileId1, HoodieLogFile.DELTA_EXTENSION, commitTime4, 1)));
    assertTrue(filenames
            .contains(FSUtils.makeLogFileName(fileId2, HoodieLogFile.DELTA_EXTENSION, commitTime3, 0)));
    assertTrue(filenames
            .contains(FSUtils.makeLogFileName(fileId4, HoodieLogFile.DELTA_EXTENSION, commitTime4, 0)));

    // Reset the max commit time
    List<HoodieDataFile> dataFiles = roView.getLatestDataFilesBeforeOrOn("2016/05/01", commitTime3)
            .collect(Collectors.toList());
    assertEquals(dataFiles.size(), 3);
    filenames = Sets.newHashSet();
    for (HoodieDataFile status : dataFiles) {
        filenames.add(status.getFileName());
    }
    assertTrue(filenames.contains(FSUtils.makeDataFileName(commitTime1, 1, fileId1)));
    assertTrue(filenames.contains(FSUtils.makeDataFileName(commitTime3, 1, fileId2)));
    assertTrue(filenames.contains(FSUtils.makeDataFileName(commitTime3, 1, fileId3)));

    logFilesList = rtView.getLatestFileSlicesBeforeOrOn("2016/05/01", commitTime3)
            .map(slice -> slice.getLogFiles()).flatMap(logFileList -> logFileList).collect(Collectors.toList());
    assertEquals(logFilesList.size(), 1);
    assertTrue(logFilesList.get(0).getFileName()
            .equals(FSUtils.makeLogFileName(fileId2, HoodieLogFile.DELTA_EXTENSION, commitTime3, 0)));
}

From source file:com.ggvaidya.scinames.dataset.BinomialChangesSceneController.java

private AdditionalData<String, Map.Entry<String, String>> createSummaryAdditionalData() {
    List<Map.Entry<String, String>> summary = new ArrayList<>();

    // Calculate some summary values.
    long numChanges = potentialChanges.size();
    summary.add(new AbstractMap.SimpleEntry<String, String>("Number of binomial changes",
            String.valueOf(potentialChanges.size())));

    // How many have a note?
    summary.add(new AbstractMap.SimpleEntry<String, String>("Number of changes with annotations",
            String.valueOf(potentialChanges.stream().filter(ch -> ch.getNote().isPresent()).count())));

    // Calculate overall addition and deletion.

    // Summarize by types of change.
    Map<ChangeType, List<Change>> potentialChangesByType = potentialChanges.stream()
            .collect(Collectors.groupingBy(ch -> ch.getType()));
    summary.addAll(potentialChangesByType.keySet().stream().sorted()
            .map(type -> new AbstractMap.SimpleEntry<String, String>(
                    "Number of binomial changes of type '" + type + "'",
                    String.valueOf(potentialChangesByType.get(type).size())))
            .collect(Collectors.toList()));

    // Summarize by reason.
    Map<String, Long> potentialChangesByReason = potentialChanges.stream()
            .map(pc -> pc.getType() + " because of " + calculateReason(pc))
            .collect(Collectors.groupingBy(Function.identity(), Collectors.counting()));
    summary.addAll(potentialChangesByReason.keySet().stream().sorted()
            .map(reason -> new AbstractMap.SimpleEntry<String, String>(
                    "Number of binomial changes for reason '" + reason + "'",
                    potentialChangesByReason.get(reason).toString()))
            .collect(Collectors.toList()));

    // Make an additional data about it.
    Map<String, List<Map.Entry<String, String>>> map = new HashMap<>();
    map.put("Summary", summary);

    List<TableColumn<Map.Entry<String, String>, String>> cols = new ArrayList<>();

    TableColumn<Map.Entry<String, String>, String> colKey = new TableColumn<>("Property");
    colKey.setCellValueFactory(cdf -> new ReadOnlyStringWrapper(cdf.getValue().getKey()));
    cols.add(colKey);

    TableColumn<Map.Entry<String, String>, String> colValue = new TableColumn<>("Value");
    colValue.setCellValueFactory(cdf -> new ReadOnlyStringWrapper(cdf.getValue().getValue()));
    cols.add(colValue);

    TableColumn<Map.Entry<String, String>, String> colPercent = new TableColumn<>("Percentage");
    colPercent.setCellValueFactory(cdf -> {
        String result = "NA";

        if (cdf.getValue() != null && cdf.getValue().getValue() != null
                && !cdf.getValue().getValue().equals("null")) {
            long longVal = Long.parseLong(cdf.getValue().getValue());

            result = (longVal == 0) ? "NA" : (((double) longVal / numChanges * 100) + "%");
        }

        return new ReadOnlyStringWrapper(result);
    });
    cols.add(colPercent);

    return new AdditionalData<String, Entry<String, String>>("Summary", Arrays.asList("Summary"), map, cols);
}

From source file:no.asgari.civilization.server.action.GameAction.java

public List<CivHighscoreDTO> getCivHighscore() {
    if (!CivSingleton.instance().itemsCache().containsKey(GameType.WAW)) {
        readItemFromExcel(GameType.WAW, new ItemReader());
    }

    ItemReader itemReader = CivSingleton.instance().itemsCache().get(GameType.WAW);
    if (itemReader == null) {
        return Collections.emptyList();
    }

    List<PBF> pbfs = pbfCollection.find().toArray();

    try {
        Map<String, Long> numberOfCivsWinning = pbfs.stream()
                .filter(pbf -> !Strings.isNullOrEmpty(pbf.getWinner())).filter(pbf -> !pbf.isActive())
                .filter(pbf -> {
                    String playerWhoWon = pbf.getWinner();
                    return pbf.getPlayers().stream().filter(p -> p.getUsername().equals(playerWhoWon))
                            .filter(p -> p.getCivilization() != null).findFirst().isPresent();
                }).map(pbf -> {
                    String playerWhoWon = pbf.getWinner();
                    Playerhand playerhand = pbf.getPlayers().stream()
                            .filter(p -> p.getUsername().equals(playerWhoWon))
                            .filter(p -> p.getCivilization() != null).findFirst().get();
                    return playerhand.getCivilization().getName();
                }).collect(Collectors.groupingBy(e -> e, Collectors.counting()));

        Map<String, Long> numberOfCivAttempts = pbfs.stream()
                .filter(pbf -> !Strings.isNullOrEmpty(pbf.getWinner())).filter(pbf -> !pbf.isActive())
                .flatMap(pbf -> pbf.getPlayers().stream()).filter(p -> p.getCivilization() != null)
                .map(p -> p.getCivilization().getName())
                .collect(Collectors.groupingBy(e -> e, Collectors.counting()));

        return itemReader.shuffledCivs.stream().map(civ -> new CivHighscoreDTO(civ.getName(),
                numberOfCivsWinning.get(civ.getName()), numberOfCivAttempts.get(civ.getName()))).sorted()
                .collect(toList());
    } catch (Exception ex) {
        ex.printStackTrace();
        return Collections.emptyList();
    }
}

From source file:no.asgari.civilization.server.action.GameAction.java

public List<WinnerDTO> getWinners() {
    List<PBF> pbfs = pbfCollection.find().toArray();
    final ListMultimap<String, String> multimap = ArrayListMultimap.create();

    pbfs.stream().filter(pbf -> !Strings.isNullOrEmpty(pbf.getWinner()))
            .forEach(pbf -> multimap.put(pbf.getWinner(), pbf.getId()));

    Map<String, Long> attemptsPerUsername = pbfs.stream().filter(pbf -> !Strings.isNullOrEmpty(pbf.getWinner()))
            .filter(pbf -> !pbf.isActive()).flatMap(pbf -> pbf.getPlayers().stream())
            .map(Playerhand::getUsername).collect(Collectors.groupingBy(e -> e, Collectors.counting()));

    List<Player> allPlayers = playerCollection.find().toArray();
    List<WinnerDTO> filteredPlayers = allPlayers.stream()
            .filter(p -> !multimap.containsKey(p.getUsername()) && p.getUsername() != null).map(p -> {
                long attempts = attemptsPerUsername.get(p.getUsername()) == null ? 0L
                        : attemptsPerUsername.get(p.getUsername());
                WinnerDTO winner = new WinnerDTO(p.getUsername(), 0, attempts);
                return winner;
            }).collect(toList());

    List<WinnerDTO> winners = multimap.keySet().stream()
            .map(user -> new WinnerDTO(user, multimap.get(user).size(), attemptsPerUsername.get(user)))
            .collect(toList());

    winners.addAll(filteredPlayers);
    Collections.sort(winners);
    return winners;
}

From source file:org.apache.nifi.cluster.coordination.http.replication.ThreadPoolRequestReplicator.java

/**
 * Replicates the request to all nodes in the given set of node identifiers
 *
 * @param nodeIds             the NodeIdentifiers that identify which nodes to send the request to
 * @param method              the HTTP method to use
 * @param uri                 the URI to send the request to
 * @param entity              the entity to use
 * @param headers             the HTTP Headers
 * @param performVerification whether or not to verify that all nodes in the cluster are connected and that all nodes can perform the request. Ignored if the request is not mutable.
 * @param response            the response to update with the results
 * @param executionPhase      <code>true</code> if this is the execution phase, <code>false</code> otherwise
 * @param monitor             a monitor that will be notified when the request completes (successfully or otherwise)
 * @return an AsyncClusterResponse that can be used to obtain the result
 */
AsyncClusterResponse replicate(final Set<NodeIdentifier> nodeIds, final String method, final URI uri,
        final Object entity, final Map<String, String> headers, final boolean performVerification,
        StandardAsyncClusterResponse response, final boolean executionPhase, final boolean merge,
        final Object monitor) {
    try {
        // state validation
        Objects.requireNonNull(nodeIds);
        Objects.requireNonNull(method);
        Objects.requireNonNull(uri);
        Objects.requireNonNull(entity);
        Objects.requireNonNull(headers);

        if (nodeIds.isEmpty()) {
            throw new IllegalArgumentException("Cannot replicate request to 0 nodes");
        }

        // verify all of the nodes exist and are in the proper state
        for (final NodeIdentifier nodeId : nodeIds) {
            final NodeConnectionStatus status = clusterCoordinator.getConnectionStatus(nodeId);
            if (status == null) {
                throw new UnknownNodeException("Node " + nodeId + " does not exist in this cluster");
            }

            if (status.getState() != NodeConnectionState.CONNECTED) {
                throw new IllegalClusterStateException(
                        "Cannot replicate request to Node " + nodeId + " because the node is not connected");
            }
        }

        logger.debug("Replicating request {} {} with entity {} to {}; response is {}", method, uri, entity,
                nodeIds, response);

        // Update headers to indicate the current revision so that we can
        // prevent multiple users changing the flow at the same time
        final Map<String, String> updatedHeaders = new HashMap<>(headers);
        final String requestId = updatedHeaders.computeIfAbsent(REQUEST_TRANSACTION_ID_HEADER,
                key -> UUID.randomUUID().toString());

        long verifyClusterStateNanos = -1;
        if (performVerification) {
            final long start = System.nanoTime();
            verifyClusterState(method, uri.getPath());
            verifyClusterStateNanos = System.nanoTime() - start;
        }

        int numRequests = responseMap.size();
        if (numRequests >= maxConcurrentRequests) {
            numRequests = purgeExpiredRequests();
        }

        if (numRequests >= maxConcurrentRequests) {
            final Map<String, Long> countsByUri = responseMap.values().stream().collect(
                    Collectors.groupingBy(StandardAsyncClusterResponse::getURIPath, Collectors.counting()));

            logger.error(
                    "Cannot replicate request {} {} because there are {} outstanding HTTP Requests already. Request Counts Per URI = {}",
                    method, uri.getPath(), numRequests, countsByUri);
            throw new IllegalStateException("There are too many outstanding HTTP requests with a total "
                    + numRequests + " outstanding requests");
        }

        // create a response object if one was not already passed to us
        if (response == null) {
            // create the request objects and replicate to all nodes.
            // When the request has completed, we need to ensure that we notify the monitor, if there is one.
            final CompletionCallback completionCallback = clusterResponse -> {
                try {
                    onCompletedResponse(requestId);
                } finally {
                    if (monitor != null) {
                        synchronized (monitor) {
                            monitor.notify();
                        }

                        logger.debug("Notified monitor {} because request {} {} has completed", monitor, method,
                                uri);
                    }
                }
            };

            final Runnable responseConsumedCallback = () -> onResponseConsumed(requestId);

            response = new StandardAsyncClusterResponse(requestId, uri, method, nodeIds, responseMapper,
                    completionCallback, responseConsumedCallback, merge);
            responseMap.put(requestId, response);
        }

        if (verifyClusterStateNanos > -1) {
            response.addTiming("Verify Cluster State", "All Nodes", verifyClusterStateNanos);
        }

        logger.debug("For Request ID {}, response object is {}", requestId, response);

        // if mutable request, we have to do a two-phase commit where we ask each node to verify
        // that the request can take place and then, if all nodes agree that it can, we can actually
        // issue the request. This is all handled by calling performVerification, which will replicate
        // the 'vote' request to all nodes and then if successful will call back into this method to
        // replicate the actual request.
        final boolean mutableRequest = isMutableRequest(method, uri.getPath());
        if (mutableRequest && performVerification) {
            logger.debug("Performing verification (first phase of two-phase commit) for Request ID {}",
                    requestId);
            performVerification(nodeIds, method, uri, entity, updatedHeaders, response, merge, monitor);
            return response;
        } else if (mutableRequest) {
            response.setPhase(StandardAsyncClusterResponse.COMMIT_PHASE);
        }

        // Callback function for generating a NodeHttpRequestCallable that can be used to perform the work
        final StandardAsyncClusterResponse finalResponse = response;
        NodeRequestCompletionCallback nodeCompletionCallback = nodeResponse -> {
            logger.debug("Received response from {} for {} {}", nodeResponse.getNodeId(), method,
                    uri.getPath());
            finalResponse.add(nodeResponse);
        };

        // instruct the node to actually perform the underlying action
        if (mutableRequest && executionPhase) {
            updatedHeaders.put(REQUEST_EXECUTION_HTTP_HEADER, "true");
        }

        // replicate the request to all nodes
        final Function<NodeIdentifier, NodeHttpRequest> requestFactory = nodeId -> new NodeHttpRequest(nodeId,
                method, createURI(uri, nodeId), entity, updatedHeaders, nodeCompletionCallback, finalResponse);
        submitAsyncRequest(nodeIds, uri.getScheme(), uri.getPath(), requestFactory, updatedHeaders);

        return response;
    } catch (final Throwable t) {
        if (monitor != null) {
            synchronized (monitor) {
                monitor.notify();
            }
            logger.debug("Notified monitor {} because request {} {} has failed with Throwable {}", monitor,
                    method, uri, t);
        }

        if (response != null) {
            final RuntimeException failure = (t instanceof RuntimeException) ? (RuntimeException) t
                    : new RuntimeException("Failed to submit Replication Request to background thread", t);
            response.setFailure(failure, new NodeIdentifier());
        }

        throw t;
    }
}

From source file:org.efaps.esjp.accounting.Period_Base.java

/**
 * Gets the label definition.
 *
 * @param _parameter Parameter as passed by the eFaps API
 * @return the label definition
 * @throws EFapsException on error
 */
@SuppressWarnings("unchecked")
public Return getTargetDocInfo4PaymentFieldValue(final Parameter _parameter) throws EFapsException {
    final Return ret = new Return();
    final String key = Period.class.getName() + ".RequestKey4TargetDocInfo4PaymentFieldValue";
    final Map<Instance, String> values;
    if (Context.getThreadContext().containsRequestAttribute(key)) {
        values = (Map<Instance, String>) Context.getThreadContext().getRequestAttribute(key);
    } else {
        values = new HashMap<>();
        Context.getThreadContext().setRequestAttribute(key, values);
        final List<Instance> instances = (List<Instance>) _parameter.get(ParameterValues.REQUEST_INSTANCES);

        final MultiPrintQuery print = new MultiPrintQuery(instances);
        final SelectBuilder selTargetInsts = SelectBuilder.get().linkfrom(CISales.Payment.TargetDocument)
                .linkto(CISales.Payment.CreateDocument).instance();
        print.addSelect(selTargetInsts);
        print.execute();
        while (print.next()) {
            final List<String> labels = new ArrayList<>();
            final Object obj = print.getSelect(selTargetInsts);
            if (obj != null) {
                final List<Instance> targetInsts;
                if (obj instanceof Instance) {
                    targetInsts = new ArrayList<>();
                    targetInsts.add((Instance) obj);
                } else {
                    targetInsts = (List<Instance>) obj;
                }
                for (final Instance targetInst : targetInsts) {
                    final SelectBuilder selActName;
                    if (InstanceUtils.isType(targetInst, CISales.PaymentOrder)) {
                        selActName = SelectBuilder.get()
                                .linkfrom(CISales.ActionDefinitionPaymentOrder2Document.ToLinkAbstract)
                                .linkto(CISales.ActionDefinitionPaymentOrder2Document.FromLinkAbstract)
                                .attribute(CISales.ActionDefinitionPaymentOrder.Name);

                    } else if (InstanceUtils.isType(targetInst, CISales.CollectionOrder)) {
                        selActName = SelectBuilder.get()
                                .linkfrom(CISales.ActionDefinitionCollectionOrder2Document.ToLinkAbstract)
                                .linkto(CISales.ActionDefinitionCollectionOrder2Document.FromLinkAbstract)
                                .attribute(CISales.ActionDefinitionCollectionOrder.Name);

                    } else if (InstanceUtils.isType(targetInst, CISales.IncomingExchange)) {
                        selActName = SelectBuilder.get()
                                .linkfrom(CISales.ActionDefinitionIncomingExchange2Document.ToLinkAbstract)
                                .linkto(CISales.ActionDefinitionIncomingExchange2Document.FromLinkAbstract)
                                .attribute(CISales.ActionDefinitionIncomingExchange.Name);

                    } else {
                        selActName = null;
                    }

                    if (selActName != null) {
                        final PrintQuery print2 = new PrintQuery(targetInst);
                        print2.addSelect(selActName);
                        print2.execute();

                        final String actname = print2.getSelect(selActName);
                        if (actname == null) {
                            labels.add(targetInst.getType().getLabel());
                        } else {
                            labels.add(targetInst.getType().getLabel() + " - " + actname);
                        }
                    } else {
                        labels.add(targetInst.getType().getLabel());
                    }
                }
                final Map<String, Long> map = labels.stream()
                        .collect(Collectors.groupingBy(Function.identity(), Collectors.counting()));
                final StringBuilder bldr = new StringBuilder();
                for (final Entry<String, Long> entry : map.entrySet()) {
                    if (bldr.length() > 0) {
                        bldr.append(", ");
                    }
                    bldr.append(entry.getValue()).append(" x ").append(entry.getKey());
                }
                values.put(print.getCurrentInstance(), bldr.toString());
            }
        }
    }
    ret.put(ReturnValues.VALUES, values.get(_parameter.getInstance()));
    return ret;
}

From source file:org.onosproject.faultmanagement.impl.AlarmManager.java

@Override
public Map<Alarm.SeverityLevel, Long> getAlarmCounts(DeviceId deviceId) {
    return getAlarms(deviceId).stream().collect(Collectors.groupingBy(Alarm::severity, Collectors.counting()));
}

From source file:org.onosproject.faultmanagement.impl.AlarmManager.java

@Override
public Map<Alarm.SeverityLevel, Long> getAlarmCounts() {
    return getAlarms().stream().collect(Collectors.groupingBy(Alarm::severity, Collectors.counting()));
}

From source file:org.onosproject.faultmanagement.impl.AlarmsManager.java

@Override
public Map<Alarm.SeverityLevel, Long> getAlarmCounts(DeviceId deviceId) {

    return getAlarms(deviceId).stream().collect(Collectors.groupingBy(Alarm::severity, Collectors.counting()));

}