Example usage for java.util Map compute

List of usage examples for java.util Map compute

Introduction

On this page you can find example usage for java.util.Map.compute.

Prototype

default V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) 

Document

Attempts to compute a mapping for the specified key and its current mapped value (or null if there is no current mapping).
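
A minimal, self-contained sketch (class name and map contents are hypothetical) illustrating the semantics described above: the remapping function receives the current value or null, its result becomes the new mapping, and returning null removes the entry.

import java.util.HashMap;
import java.util.Map;

public class ComputeDemo {
    public static void main(String[] args) {
        Map<String, Integer> wordCounts = new HashMap<>();
        wordCounts.put("apple", 2);

        // Existing key: the old value (2) is passed in, so the mapping becomes 3.
        wordCounts.compute("apple", (key, oldValue) -> oldValue == null ? 1 : oldValue + 1);

        // Absent key: oldValue is null, so a new mapping "pear" -> 1 is created.
        wordCounts.compute("pear", (key, oldValue) -> oldValue == null ? 1 : oldValue + 1);

        // Returning null removes the entry for "apple".
        wordCounts.compute("apple", (key, oldValue) -> null);

        System.out.println(wordCounts); // {pear=1}
    }
}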

Usage

From source file:pt.souplesse.spark.Server.java

public static void main(String[] args) {
    EntityManagerFactory factory = Persistence.createEntityManagerFactory("guestbook");
    EntityManager manager = factory.createEntityManager();
    JinqJPAStreamProvider streams = new JinqJPAStreamProvider(factory);
    get("/messages", (req, rsp) -> {
        rsp.type("application/json");
        return gson.toJson(streams.streamAll(manager, Message.class).collect(Collectors.toList()));
    });
    post("/messages", (req, rsp) -> {
        try {
            Message msg = gson.fromJson(req.body(), Message.class);
            if (StringUtils.isBlank(msg.getMessage()) || StringUtils.isBlank(msg.getName())) {
                halt(400);
            }
            manager.getTransaction().begin();
            manager.persist(msg);
            manager.getTransaction().commit();
        } catch (JsonSyntaxException e) {
            halt(400);
        }
        rsp.type("application/json");
        return gson.toJson(streams.streamAll(manager, Message.class).collect(Collectors.toList()));
    });
    get("/comments", (req, rsp) -> {
        rsp.type("application/json");
        Map<String, List<Body>> body = new HashMap<>();
        try (CloseableHttpClient client = create().build()) {
            String url = String.format("https://api.github.com/repos/%s/events", req.queryMap("repo").value());
            log.info(url);
            body = client.execute(new HttpGet(url), r -> {
                List<Map<String, Object>> list = gson.fromJson(EntityUtils.toString(r.getEntity()), List.class);
                Map<String, List<Body>> result = new HashMap<>();
                list.stream().filter(m -> m.getOrDefault("type", "").equals("IssueCommentEvent"))
                        .map(m -> new Body(((Map<String, String>) m.get("actor")).get("login"),
                                ((Map<String, Map<String, String>>) m.get("payload")).get("comment")
                                        .get("body")))
                        .forEach(b -> result.compute(b.getLogin(), (k, v) -> v == null ? Lists.newArrayList(b)
                                : Lists.asList(b, v.toArray(new Body[v.size()]))));
                return result;
            });
        } catch (IOException e) {
            log.error(null, e);
            halt(400, e.getMessage());
        }
        return gson.toJson(body);
    });
}
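
The compute call in the /comments route above accumulates comment bodies per GitHub login into a list. A minimal plain-JDK sketch of that grouping pattern (class name and sample data are hypothetical):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class GroupWithCompute {
    public static void main(String[] args) {
        List<String[]> comments = List.of(
                new String[] { "alice", "first" },
                new String[] { "bob", "hi" },
                new String[] { "alice", "second" });

        // Group comment bodies by login: create the list on first sight, then append.
        Map<String, List<String>> byLogin = new HashMap<>();
        for (String[] c : comments) {
            byLogin.compute(c[0], (login, bodies) -> {
                List<String> list = (bodies == null) ? new ArrayList<>() : bodies;
                list.add(c[1]);
                return list;
            });
        }

        System.out.println(byLogin); // e.g. {bob=[hi], alice=[first, second]}
    }
}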

From source file:org.jhk.pulsing.web.service.prod.helper.PulseServiceUtil.java

public static Map<Long, String> processTrendingPulseSubscribe(Set<String> tps, ObjectMapper objMapper) {

    @SuppressWarnings("unchecked")
    Map<Long, String> tpSubscriptions = Collections.EMPTY_MAP;
    final Map<String, Integer> count = new HashMap<>();

    tps.parallelStream().forEach(tpsIdValueCounts -> {

        try {
            _LOGGER.debug(
                    "PulseServiceUtil.processTrendingPulseSubscribe: trying to convert " + tpsIdValueCounts);

            Map<String, Integer> converted = objMapper.readValue(tpsIdValueCounts,
                    _TRENDING_PULSE_SUBSCRIPTION_TYPE_REF);

            _LOGGER.debug("PulseServiceUtil.processTrendingPulseSubscribe: sucessfully converted "
                    + converted.size());

            // Structure is <id>0x07<value>0x13<timestamp> -> count; i.e. {"10020x07Mocked 10020x13<timestamp>" -> 1}
            // Need to split the String content, gather the count for the searched interval,
            // and return the sorted result using a Java 8 stream
            // TODO impl better

            Map<String, Integer> computed = converted.entrySet().stream().reduce(new HashMap<String, Integer>(),
                    (Map<String, Integer> mapped, Entry<String, Integer> entry) -> {
                        String[] split = entry.getKey()
                                .split(CommonConstants.TIME_INTERVAL_PERSIST_TIMESTAMP_DELIM);
                        Integer value = entry.getValue();

                        mapped.compute(split[0], (key, val) -> {
                            return val == null ? value : val + value;
                        });

                        return mapped;
                    }, (Map<String, Integer> result, Map<String, Integer> aggregated) -> {
                        result.putAll(aggregated);
                        return result;
                    });

            computed.entrySet().parallelStream().forEach(entry -> {
                Integer value = entry.getValue();

                count.compute(entry.getKey(), (key, val) -> {
                    return val == null ? value : val + value;
                });
            });

        } catch (Exception cException) {
            cException.printStackTrace();
        }
    });

    if (count.size() > 0) {
        tpSubscriptions = count.entrySet().stream()
                .sorted(Map.Entry.comparingByValue(Comparator.reverseOrder()))
                .collect(Collectors.toMap(
                        entry -> Long.parseLong(
                                entry.getKey().split(CommonConstants.TIME_INTERVAL_ID_VALUE_DELIM)[0]),
                        entry -> entry.getKey().split(CommonConstants.TIME_INTERVAL_ID_VALUE_DELIM)[1],
                        (x, y) -> {
                            throw new AssertionError();
                        }, LinkedHashMap::new));
    }

    return tpSubscriptions;
}
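
The pattern above, accumulating counts with compute and then sorting the result descending into a LinkedHashMap, can be isolated into a small plain-JDK sketch (the keys and the "|" delimiter are hypothetical stand-ins for the delimited id/value/timestamp strings):

import java.util.Comparator;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.stream.Collectors;

public class CountAndSort {
    public static void main(String[] args) {
        // Raw "id|timestamp" -> count entries.
        Map<String, Integer> raw = Map.of("1002|t1", 1, "1002|t2", 2, "1003|t1", 1);

        // Accumulate per id with compute: a null current value means first occurrence.
        Map<String, Integer> perId = new HashMap<>();
        raw.forEach((k, v) -> perId.compute(k.split("\\|")[0], (id, c) -> c == null ? v : c + v));

        // Sort descending by count into a LinkedHashMap to preserve that order.
        Map<String, Integer> sorted = perId.entrySet().stream()
                .sorted(Map.Entry.comparingByValue(Comparator.reverseOrder()))
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue,
                        (a, b) -> a, LinkedHashMap::new));

        System.out.println(sorted); // {1002=3, 1003=1}
    }
}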

From source file:com.ikanow.aleph2.logging.utils.LoggingUtils.java

/**
 * Merges the BMB message with the existing old entry if one exists, otherwise merges with null. If
 * no entry existed, a new default Map is created in the tuple for storing merge info.
 * @param merge_logs
 * @param message
 * @param merge_key
 * @param merge_operations
 * @return the merged entry stored under merge_key
 */
public static Tuple2<BasicMessageBean, Map<String, Object>> getOrCreateMergeInfo(
        final Map<String, Tuple2<BasicMessageBean, Map<String, Object>>> merge_logs,
        final BasicMessageBean message, final String merge_key,
        final BiFunction<BasicMessageBean, BasicMessageBean, BasicMessageBean>[] merge_operations) {
    return merge_logs.compute(merge_key, (k, v) -> {
        if (v == null) {
            //final BasicMessageBean bmb = merge_operations.apply(message, null);
            final BasicMessageBean bmb = Arrays.stream(merge_operations).reduce(null,
                    (bmb_a, fn) -> fn.apply(message, bmb_a), (bmb_a, bmb_b) -> bmb_a);
            Map<String, Object> info = new HashMap<String, Object>();
            info.put(LOG_COUNT_FIELD, 0L);
            info.put(LAST_LOG_TIMESTAMP_FIELD, 0L);
            return new Tuple2<BasicMessageBean, Map<String, Object>>(bmb, info);
        } else {
            //merge with old entry
            //final BasicMessageBean bmb = merge_operations.apply(message, merge_logs.get(merge_key)._1);
            final BasicMessageBean bmb = Arrays.stream(merge_operations).reduce(merge_logs.get(merge_key)._1,
                    (bmb_a, fn) -> fn.apply(message, bmb_a), (bmb_a, bmb_b) -> bmb_a);
            return new Tuple2<BasicMessageBean, Map<String, Object>>(bmb, v._2);
        }
    });
}

From source file:com.netflix.genie.agent.execution.statemachine.actions.SetUpJobAction.java

private Map<String, String> createJobEnvironmentMap(final File jobEnvironmentFile) throws SetUpJobException {

    final Map<String, String> env;
    try {
        env = EnvUtils.parseEnvFile(jobEnvironmentFile);
    } catch (final IOException | EnvUtils.ParseException e) {
        throw new SetUpJobException(
                "Failed to parse environment from file: " + jobEnvironmentFile.getAbsolutePath(), e);
    }

    // Variables in environment file are base64 encoded to avoid escaping, quoting.
    // Decode all values.
    env.keySet().forEach(
            key -> env.compute(key, (k, v) -> new String(Base64.decodeBase64(v), StandardCharsets.UTF_8)));

    return Collections.unmodifiableMap(env);
}
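
Here compute rewrites every value of the map in place; replacing an existing key's value is not a structural modification, so iterating over the key set while doing it is safe. A minimal sketch of the same in-place transformation using the JDK's java.util.Base64 in place of commons-codec (the map contents are hypothetical):

import java.nio.charset.StandardCharsets;
import java.util.Base64;
import java.util.HashMap;
import java.util.Map;

public class DecodeValuesInPlace {
    public static void main(String[] args) {
        Map<String, String> env = new HashMap<>();
        env.put("GREETING",
                Base64.getEncoder().encodeToString("hello world".getBytes(StandardCharsets.UTF_8)));

        // Replace each Base64-encoded value with its decoded form, key by key.
        env.keySet().forEach(key -> env.compute(key,
                (k, v) -> new String(Base64.getDecoder().decode(v), StandardCharsets.UTF_8)));

        System.out.println(env); // {GREETING=hello world}
    }
}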

From source file:com.act.lcms.db.io.report.IonAnalysisInterchangeModel.java

/**
 * Computes the log frequency distribution of the ion model vs. a metric.
 * @param metric the metric on which the frequency distribution is plotted
 * @return a map from each range to the count of molecules bucketed in that range
 */
public Map<Pair<Double, Double>, Integer> computeLogFrequencyDistributionOfMoleculeCountToMetric(
        METRIC metric) {
    Map<Pair<Double, Double>, Integer> rangeToHitCount = new HashMap<>();

    // This variable represents the total number of statistics that have zero values.
    Integer countOfZeroStats = 0;

    // This statistic represents the log value of the min statistic.
    Double minLogValue = Double.MAX_VALUE;

    for (ResultForMZ resultForMZ : this.getResults()) {
        for (HitOrMiss molecule : resultForMZ.getMolecules()) {

            Double power = 0.0;

            switch (metric) {
            case TIME:
                power = Math.log10(molecule.getTime());
                break;
            case INTENSITY:
                power = Math.log10(molecule.getIntensity());
                break;
            case SNR:
                power = Math.log10(molecule.getSnr());
                break;
            }

            if (power.equals(Double.NEGATIVE_INFINITY)) {
                // We know the statistic was 0 here.
                countOfZeroStats++;
                break;
            }

            Double floor = Math.floor(power);
            Double lowerBound = Math.pow(10.0, floor);
            Double upperBound = Math.pow(10.0, floor + 1);

            minLogValue = Math.min(minLogValue, lowerBound);
            Pair<Double, Double> key = Pair.of(lowerBound, upperBound);
            rangeToHitCount.compute(key, (k, v) -> (v == null) ? 1 : v + 1);
        }

        // We count the total number of zero statistics and put them in the 0 to minLog metric bucket.
        if (countOfZeroStats > 0) {
            Pair<Double, Double> key = Pair.of(0.0, minLogValue);
            rangeToHitCount.put(key, countOfZeroStats);
        }
    }

    return rangeToHitCount;
}

From source file:org.apache.pulsar.broker.loadbalance.impl.LoadManagerShared.java

/**
 * Computes the least number of namespaces owned by any domain and then filters out all the domains that own
 * more namespaces than this count.
 *
 * @param brokerToAntiAffinityNamespaceCount
 * @param candidates
 * @param brokerToDomainMap
 */
private static void filterDomainsNotHavingLeastNumberAntiAffinityNamespaces(
        Map<String, Integer> brokerToAntiAffinityNamespaceCount, Set<String> candidates,
        Map<String, String> brokerToDomainMap) {

    if (brokerToDomainMap == null || brokerToDomainMap.isEmpty()) {
        return;
    }

    final Map<String, Integer> domainNamespaceCount = Maps.newHashMap();
    int leastNamespaceCount = Integer.MAX_VALUE;
    candidates.forEach(broker -> {
        final String domain = brokerToDomainMap.getOrDefault(broker, DEFAULT_DOMAIN);
        final int count = brokerToAntiAffinityNamespaceCount.getOrDefault(broker, 0);
        domainNamespaceCount.compute(domain,
                (domainName, nsCount) -> nsCount == null ? count : nsCount + count);
    });
    // find leastNameSpaceCount
    for (Entry<String, Integer> domainNsCountEntry : domainNamespaceCount.entrySet()) {
        if (domainNsCountEntry.getValue() < leastNamespaceCount) {
            leastNamespaceCount = domainNsCountEntry.getValue();
        }
    }
    final int finalLeastNamespaceCount = leastNamespaceCount;
    // only keep domain brokers which has leastNamespaceCount
    candidates.removeIf(broker -> {
        Integer nsCount = domainNamespaceCount.get(brokerToDomainMap.getOrDefault(broker, DEFAULT_DOMAIN));
        return nsCount != null && nsCount != finalLeastNamespaceCount;
    });
}

From source file:org.apache.pulsar.broker.loadbalance.impl.LoadManagerShared.java

/**
 * Returns a map of broker to the count of namespaces that belong to the same anti-affinity group as the given
 * {@code namespaceName}.
 *
 * @param pulsar
 * @param namespaceName
 * @param brokerToNamespaceToBundleRange
 * @return
 */
public static CompletableFuture<Map<String, Integer>> getAntiAffinityNamespaceOwnedBrokers(
        final PulsarService pulsar, String namespaceName,
        Map<String, Map<String, Set<String>>> brokerToNamespaceToBundleRange) {

    CompletableFuture<Map<String, Integer>> antiAffinityNsBrokersResult = new CompletableFuture<>();
    ZooKeeperDataCache<Policies> policiesCache = pulsar.getConfigurationCache().policiesCache();

    policiesCache.getAsync(path(POLICIES, namespaceName)).thenAccept(policies -> {
        if (!policies.isPresent() || StringUtils.isBlank(policies.get().antiAffinityGroup)) {
            antiAffinityNsBrokersResult.complete(null);
            return;
        }
        final String antiAffinityGroup = policies.get().antiAffinityGroup;
        final Map<String, Integer> brokerToAntiAffinityNamespaceCount = new ConcurrentHashMap<>();
        final List<CompletableFuture<Void>> futures = Lists.newArrayList();
        brokerToNamespaceToBundleRange.forEach((broker, nsToBundleRange) -> {
            nsToBundleRange.forEach((ns, bundleRange) -> {
                CompletableFuture<Void> future = new CompletableFuture<>();
                futures.add(future);
                policiesCache.getAsync(path(POLICIES, ns)).thenAccept(nsPolicies -> {
                    if (nsPolicies.isPresent()
                            && antiAffinityGroup.equalsIgnoreCase(nsPolicies.get().antiAffinityGroup)) {
                        brokerToAntiAffinityNamespaceCount.compute(broker,
                                (brokerName, count) -> count == null ? 1 : count + 1);
                    }
                    future.complete(null);
                }).exceptionally(ex -> {
                    future.complete(null);
                    return null;
                });
            });
        });
        FutureUtil.waitForAll(futures)
                .thenAccept(r -> antiAffinityNsBrokersResult.complete(brokerToAntiAffinityNamespaceCount));
    }).exceptionally(ex -> {
        // namespace-policies has not been created yet
        antiAffinityNsBrokersResult.complete(null);
        return null;
    });
    return antiAffinityNsBrokersResult;
}

From source file:org.briljantframework.data.dataframe.DataFrames.java

public static DataFrame table(Vector a, Vector b) {
    Check.dimension(a.size(), b.size());
    Map<Object, Map<Object, Integer>> counts = new HashMap<>();
    Set<Object> aUnique = new HashSet<>();
    Set<Object> bUnique = new HashSet<>();
    for (int i = 0; i < a.size(); i++) {
        Object va = a.loc().get(i);
        Object vb = b.loc().get(i);
        Map<Object, Integer> countVb = counts.get(va);
        if (countVb == null) {
            countVb = new HashMap<>();
            counts.put(va, countVb);
        }
        countVb.compute(vb, (key, value) -> value == null ? 1 : value + 1);
        aUnique.add(va);
        bUnique.add(vb);
    }

    DataFrame.Builder df = DataFrame.builder();
    for (Object i : aUnique) {
        Map<Object, Integer> row = counts.get(i);
        if (row == null) {
            for (Object j : bUnique) {
                df.set(i, j, 0);
            }
        } else {
            for (Object j : bUnique) {
                df.set(i, j, row.getOrDefault(j, 0));
            }
        }
    }
    return df.build();
}

From source file:org.briljantframework.data.vector.Vectors.java

/**
 * <p>
 * Counts the number of occurrences of each value (of type {@code T}) in {@code vector}.
 *
 * <p>
 * Since {@link Vector#get(Class, Object)} returns {@code NA} if a value is not an instance of
 * {@code T}, the resulting {@code Map} might contain a {@code null} key.
 *
 * @param cls the class
 * @param vector the vector
 * @param <T> the type
 * @return a map of values to counts
 */
public static <T> Map<T, Integer> count(Class<T> cls, Vector vector) {
    Map<T, Integer> count = new HashMap<>();
    for (T value : vector.toList(cls)) {
        count.compute(value, (x, v) -> v == null ? 1 : v + 1);
    }
    return Collections.unmodifiableMap(count);
}

From source file:org.briljantframework.data.vector.Vectors.java

/**
 * <p>
 * Counts the number of occurrences of each value (wrapping them in an {@link Object}) in
 * {@code vector}.
 *
 * @param vector the vector
 * @return a map of values to counts
 */
public static Map<Object, Integer> count(Vector vector) {
    Map<Object, Integer> freq = new HashMap<>();
    for (Object value : vector.toList(Object.class)) {
        freq.compute(value, (x, i) -> i == null ? 1 : i + 1);
    }
    return Collections.unmodifiableMap(freq);
}
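
The last several examples use compute as a counting idiom: treat a null current value as zero, otherwise increment. A minimal sketch (class name and sample data are hypothetical) of that idiom, alongside the equivalent Map.merge form, which supplies the initial value directly:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class CountingIdiom {
    public static void main(String[] args) {
        List<String> values = List.of("a", "b", "a", "c", "a");

        // compute: the remapping function sees null on the first occurrence of a key.
        Map<String, Integer> viaCompute = new HashMap<>();
        values.forEach(v -> viaCompute.compute(v, (k, c) -> c == null ? 1 : c + 1));

        // merge: folds the existing count with 1 using Integer::sum.
        Map<String, Integer> viaMerge = new HashMap<>();
        values.forEach(v -> viaMerge.merge(v, 1, Integer::sum));

        System.out.println(viaCompute); // {a=3, b=1, c=1} (iteration order not guaranteed)
        System.out.println(viaMerge);   // {a=3, b=1, c=1}
    }
}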