Example usage for java.util.stream Collectors toMap

List of usage examples for java.util.stream Collectors toMap

Introduction

In this page you can find the example usage for java.util.stream Collectors toMap.

Prototype

public static <T, K, U, M extends Map<K, U>> Collector<T, ?, M> toMap(
        Function<? super T, ? extends K> keyMapper, Function<? super T, ? extends U> valueMapper,
        BinaryOperator<U> mergeFunction, Supplier<M> mapFactory) 

Source Link

Document

Returns a Collector that accumulates elements into a Map whose keys and values are the result of applying the provided mapping functions to the input elements. If the mapped keys contain duplicates, the corresponding values are combined using the provided merge function, and the resulting Map is created by the provided mapFactory supplier.

Usage

From source file:com.ikanow.aleph2.shared.crud.mongodb.services.MongoDbCrudService.java

/** Asynchronously creates a background index over the given fields (in order).
 *  Fails fast if the derived index name would exceed Mongo's ~100 char limit. */
@Override
public CompletableFuture<Boolean> optimizeQuery(final List<String> ordered_field_list) {

    // Mongo appears to cap index names at ~100 chars but Fongo does not, so check by hand
    // to avoid the situation where the tests pass yet the operation fails in production.
    final String approx_index_name = String.join(".", ordered_field_list);
    if (approx_index_name.length() > 100) {
        throw new MongoException(ErrorUtils.get(ErrorUtils.MONGODB_INDEX_TOO_LONG, approx_index_name));
    }

    return CompletableFuture.supplyAsync(() -> {
        // Build the index spec preserving field order; every field maps to 1 (ascending).
        final LinkedHashMap<String, Object> index_spec = new LinkedHashMap<>();
        ordered_field_list.forEach(field -> index_spec.put(field, 1));
        final BasicDBObject index_keys = new BasicDBObject(index_spec);

        _state.orig_coll.createIndex(index_keys, new BasicDBObject("background", true));

        return true;
    });
}

From source file:com.ikanow.aleph2.shared.crud.mongodb.services.MongoDbCrudService.java

/** Drops the index previously created by optimizeQuery for the given ordered field
 *  list. Returns false (rather than throwing) when the index does not exist. */
@Override
public final boolean deregisterOptimizedQuery(final List<String> ordered_field_list) {
    try {
        // Rebuild the same ordered index spec that optimizeQuery would have created.
        final LinkedHashMap<String, Object> index_spec = new LinkedHashMap<>();
        for (String field : ordered_field_list) {
            index_spec.put(field, 1);
        }
        final BasicDBObject index_keys = new BasicDBObject(index_spec);

        // Fongo doesn't throw on dropping a missing index, so detect that case by hand.
        if (_state.orig_coll instanceof FongoDBCollection) {
            final String index_keys_str = index_keys.toString();

            final boolean index_exists = _state.orig_coll.getIndexInfo().stream()
                    .anyMatch(dbo -> index_keys_str.equals(dbo.get("key").toString()));
            if (!index_exists) {
                throw new MongoException(ErrorUtils.get(ErrorUtils.MISSING_MONGODB_INDEX_KEY, index_keys_str));
            }
        }
        _state.orig_coll.dropIndex(index_keys);

        return true;
    } catch (MongoException ex) {
        return false;
    }
}

From source file:com.vmware.admiral.request.compute.ComputeReservationTaskService.java

/** Narrows the candidate placement map to placements whose resource pool is served by
 *  one of the selected compute hosts, then proceeds to reservation selection. */
private void hostsSelected(ComputeReservationTaskState state, List<String> tenantLinks) {
    if (state.selectedComputePlacementHosts == null || state.selectedComputePlacementHosts.isEmpty()) {
        // No hosts were selected: retry at global scope if tenant-scoped, else fail.
        if (tenantLinks == null || tenantLinks.isEmpty()) {
            failTask("Available compute host can't be selected.", null);
        } else {
            proceedTo(SubStage.QUERYING_GLOBAL);
        }
        return;
    }

    // Gather every resource pool link reachable from the selected hosts.
    final Set<String> selectedPools = state.selectedComputePlacementHosts.stream()
            .flatMap(host -> host.resourcePoolLinks.stream())
            .collect(Collectors.toSet());

    if (state.resourcePoolsPerGroupPlacementLinks == null) {
        state.resourcePoolsPerGroupPlacementLinks = new LinkedHashMap<>();
    } else {
        // Keep only placements backed by a selected pool, preserving insertion order;
        // duplicate keys cannot occur (entries come from a map), first-one-wins merge.
        state.resourcePoolsPerGroupPlacementLinks = state.resourcePoolsPerGroupPlacementLinks.entrySet()
                .stream()
                .filter(entry -> selectedPools.contains(entry.getValue()))
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue,
                        (first, second) -> first, LinkedHashMap::new));
    }

    selectReservation(state, state.resourcePoolsPerGroupPlacementLinks);
}

From source file:aiai.ai.launchpad.experiment.ExperimentService.java

/** Indexes hyper-parameters by key; when keys repeat, the last occurrence wins
 *  (matching the original (a, b) -> b merge function). */
private static Map<String, String> toMap(List<ExperimentHyperParams> experimentHyperParams) {
    final Map<String, String> byKey = new HashMap<>();
    for (ExperimentHyperParams param : experimentHyperParams) {
        byKey.put(param.getKey(), param.getValues());
    }
    return byKey;
}

From source file:com.blacklocus.metrics.CloudWatchReporter.java

/**
 * Re-keys the given sorted map by passing each key through {@code nameTransformer},
 * returning a new sorted ({@link TreeMap}) result.
 *
 * <p>If two distinct original names transform to the same name this fails fast.
 * Note the {@code toMap} merge function receives the two clashing <em>values</em>
 * (not the key), so the message reports those values — the previous message claimed
 * the first value was the duplicate key, which was misleading.
 *
 * @param original metric map keyed by raw names
 * @return a TreeMap keyed by transformed names
 * @throws IllegalStateException if two names transform to the same key
 */
private <T> SortedMap<String, T> mapNames(SortedMap<String, T> original) {
    return original.entrySet().stream()
            .collect(Collectors.toMap(e -> nameTransformer.apply(e.getKey()), Map.Entry::getValue, (v1, v2) -> {
                // Merge function args are the two values mapped to the same transformed key.
                throw new IllegalStateException(
                        String.format("Name transform produced a duplicate key for values %s and %s", v1, v2));
            }, TreeMap::new));
}

From source file:com.ikanow.aleph2.analytics.services.AnalyticsContext.java

@Override
public void initializeNewContext(final String signature) {
    _mutable_serializable_signature = signature;

    // Register myself a shutdown hook:
    Runtime.getRuntime().addShutdownHook(new Thread(Lambdas.wrap_runnable_u(() -> {
        if (_mutable_state.has_unflushed_data) {
            this.flushBatchOutput(Optional.empty(), _mutable_state.job.get()).get(60, TimeUnit.SECONDS);
        }/* w w w  .ja va 2 s. co m*/
    })));

    try {
        // Inject dependencies
        final Config parsed_config = ConfigFactory.parseString(signature);
        final AnalyticsContext to_clone = static_instances.get(signature);

        if (null != to_clone) { //copy the fields            
            _service_context = to_clone._service_context;
            _core_management_db = to_clone._core_management_db;
            _security_service = to_clone._security_service;
            _logging_service = to_clone._logging_service;
            _distributed_services = to_clone._distributed_services;
            _storage_service = to_clone._storage_service;
            _globals = to_clone._globals;
            // (apart from bucket, which is handled below, rest of mutable state is not needed)
        } else {
            ModuleUtils.initializeApplication(Collections.emptyList(), Optional.of(parsed_config),
                    Either.right(this));

            _core_management_db = _service_context.getCoreManagementDbService(); // (actually returns the _core_ management db service)
            _distributed_services = _service_context
                    .getService(ICoreDistributedServices.class, Optional.empty()).get();
            _storage_service = _service_context.getStorageService();
            _security_service = _service_context.getSecurityService();
            _logging_service = _service_context.getService(ILoggingService.class, Optional.empty()).get();
            _globals = _service_context.getGlobalProperties();
        }
        // Get bucket 

        final BeanTemplate<DataBucketBean> retrieve_bucket = BeanTemplateUtils
                .from(parsed_config.getString(__MY_BUCKET_ID), DataBucketBean.class);
        this.setBucket(retrieve_bucket.get()); //(also checks on dedup setting)
        final BeanTemplate<SharedLibraryBean> retrieve_library = BeanTemplateUtils
                .from(parsed_config.getString(__MY_TECH_LIBRARY_ID), SharedLibraryBean.class);
        _mutable_state.technology_config.set(retrieve_library.get());
        if (parsed_config.hasPath(__MY_MODULE_LIBRARY_ID)) {
            final BeanTemplate<LibraryContainerBean> retrieve_module = BeanTemplateUtils
                    .from(parsed_config.getString(__MY_MODULE_LIBRARY_ID), LibraryContainerBean.class);
            _mutable_state.library_configs.set(Optional.ofNullable(retrieve_module.get().libs)
                    .orElse(Collections.emptyList()).stream()
                    // (split each lib bean into 2 tuples, ie indexed by _id and path_name)
                    .flatMap(mod -> Arrays.asList(Tuples._2T(mod._id(), mod), Tuples._2T(mod.path_name(), mod))
                            .stream())
                    .collect(Collectors.toMap(t2 -> t2._1(), t2 -> t2._2(), (t1, t2) -> t1 // (can't happen, ignore if it does)
                            , () -> new LinkedHashMap<String, SharedLibraryBean>())));
        }
        if (parsed_config.hasPath(__MY_JOB_ID)) {
            final String job_name = parsed_config.getString(__MY_JOB_ID);

            Optionals.of(() -> retrieve_bucket.get().analytic_thread().jobs()).orElse(Collections.emptyList())
                    .stream().filter(job -> job_name.equals(job.name())).findFirst()
                    .ifPresent(job -> _mutable_state.job.trySet(job));

            getJob().ifPresent(job -> setupOutputs(_mutable_state.bucket.get(), job));
        }
        static_instances.put(signature, this);
    } catch (Exception e) {
        //DEBUG
        //System.out.println(ErrorUtils.getLongForm("{0}", e));         

        throw new RuntimeException(e);
    }
}

From source file:com.vmware.admiral.request.compute.ComputeReservationTaskService.java

/** Filters candidate group placements against the provisioning requirements declared in
 *  the compute description's custom properties. When no requirements are present, all
 *  placements are accepted sorted by priority; otherwise the placements' resource pools
 *  are fetched, those failing a HARD affinity constraint are dropped, and the rest are
 *  ordered by number of satisfied soft constraints (priority as tie-breaker) before the
 *  task proceeds to the SELECTED/SELECTED_GLOBAL sub-stage. */
private void filterPlacementsByRequirements(ComputeReservationTaskState state,
        List<GroupResourcePlacementState> placements, List<String> tenantLinks,
        ComputeDescription computeDesc) {
    if (placements == null) {
        failTask(null, new IllegalStateException("No placements found"));
        return;
    }

    // check if requirements are stated in the compute description
    String requirementsString = getProp(computeDesc.customProperties,
            ComputeConstants.CUSTOM_PROP_PROVISIONING_REQUIREMENTS);
    if (requirementsString == null) {
        // No requirements: accept every placement, ordered by priority only.
        // (duplicate keys can't occur — documentSelfLink is unique — so (k1, k2) -> k1
        // is a no-op merge; LinkedHashMap preserves the sorted order)
        proceedTo(isGlobal(state) ? SubStage.SELECTED_GLOBAL : SubStage.SELECTED, s -> {
            s.resourcePoolsPerGroupPlacementLinks = placements.stream()
                    .sorted((g1, g2) -> g1.priority - g2.priority)
                    .collect(Collectors.toMap(gp -> gp.documentSelfLink, gp -> gp.resourcePoolLink,
                            (k1, k2) -> k1, LinkedHashMap::new));
        });
        return;
    }

    // parse requirements and retrieve the tag links from the affinity constraints
    @SuppressWarnings("unchecked")
    List<String> affinitiesAsString = Utils.fromJson(requirementsString, List.class);
    Map<AffinityConstraint, String> tagLinkByConstraint = new HashMap<>();
    for (String affinityAsString : affinitiesAsString) {
        AffinityConstraint constraint = AffinityConstraint.fromString(affinityAsString);
        String tagLink = getTagLinkForConstraint(constraint, computeDesc.tenantLinks);
        tagLinkByConstraint.put(constraint, tagLink);
    }

    // retrieve resource pool instances in order to check which ones satisfy the reqs
    // (individual GET failures are tolerated here — e == null guard — and surface later
    // as a missing entry in resourcePoolsByLink)
    Map<String, ResourcePoolState> resourcePoolsByLink = new HashMap<>();
    List<Operation> getOperations = placements.stream().map(gp -> Operation
            .createGet(getHost(), gp.resourcePoolLink).setReferer(getUri()).setCompletion((o, e) -> {
                if (e == null) {
                    resourcePoolsByLink.put(gp.resourcePoolLink, o.getBody(ResourcePoolState.class));
                }
            })).collect(Collectors.toList());
    OperationJoin.create(getOperations).setCompletion((ops, exs) -> {
        if (exs != null) {
            failTask("Error retrieving resource pools: " + Utils.toString(exs), exs.values().iterator().next());
            return;
        }

        // filter out placements that do not satisfy the HARD constraints, and then sort
        // remaining placements by listing first those with more soft constraints satisfied
        // (placement priority being used as a second criteria)
        proceedTo(isGlobal(state) ? SubStage.SELECTED_GLOBAL : SubStage.SELECTED, s -> {
            s.resourcePoolsPerGroupPlacementLinks = placements.stream()
                    .filter(gp -> checkRpSatisfyHardConstraints(resourcePoolsByLink.get(gp.resourcePoolLink),
                            tagLinkByConstraint))
                    .sorted((gp1, gp2) -> {
                        int softCount1 = getNumberOfSatisfiedSoftConstraints(
                                resourcePoolsByLink.get(gp1.resourcePoolLink), tagLinkByConstraint);
                        int softCount2 = getNumberOfSatisfiedSoftConstraints(
                                resourcePoolsByLink.get(gp2.resourcePoolLink), tagLinkByConstraint);
                        return softCount1 != softCount2 ? softCount1 - softCount2 : gp1.priority - gp2.priority;
                    }).collect(Collectors.toMap(gp -> gp.documentSelfLink, gp -> gp.resourcePoolLink,
                            (k1, k2) -> k1, LinkedHashMap::new));
        });
    }).sendWith(getHost());
}