Example usage for java.util.stream Collectors toMap

List of usage examples for java.util.stream Collectors toMap

Introduction

On this page you can find example usage of java.util.stream Collectors.toMap.

Prototype

public static <T, K, U> Collector<T, ?, Map<K, U>> toMap(Function<? super T, ? extends K> keyMapper,
        Function<? super T, ? extends U> valueMapper) 

Document

Returns a Collector that accumulates elements into a Map whose keys and values are the result of applying the provided mapping functions to the input elements.
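
For orientation, here is a minimal, self-contained sketch of this two-argument overload (the class and variable names are illustrative only). Note that this overload throws an IllegalStateException if two elements map to the same key.

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class ToMapBasics {
    public static void main(String[] args) {
        List<String> words = List.of("apple", "banana", "cherry");

        // Key mapper: the word itself; value mapper: its length.
        Map<String, Integer> lengthByWord = words.stream()
                .collect(Collectors.toMap(w -> w, String::length));

        System.out.println(lengthByWord); // {banana=6, cherry=6, apple=5} (iteration order unspecified)
    }
}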

Usage

From source file:act.installer.brenda.BrendaChebiOntology.java

/**
 * This method processes "is subtype of" relationships to produce a mapping between each application and its main
 * application, used subsequently (outside of this) to compute each ontology's main application.
 * @param isSubtypeOfRelationships map {chebi id -> subtype's chebi ids}
 * @param applicationChebiId main application's chebi id
 * @return a map {application's chebi id -> related main application's chebi ids}
 */
public static Map<String, Set<String>> getApplicationToMainApplicationsMap(
        Map<String, Set<String>> isSubtypeOfRelationships, String applicationChebiId) {

    // Compute the set of main applications. These are the ontologies that are subtypes of the ontology 'application'.
    Set<String> mainApplicationsChebiId = isSubtypeOfRelationships.get(applicationChebiId);

    // Compute the initial list of applications to visit from the set of main applications.
    ArrayList<String> applicationsToVisit = new ArrayList<>(mainApplicationsChebiId);

    // For each main application, map it to a set containing only itself.
    Map<String, Set<String>> applicationToMainApplicationsMap = applicationsToVisit.stream()
            .collect(Collectors.toMap(e -> e, Collections::singleton));

    // Then visit all applications in a BFS fashion, appending new applications to visit to the applicationsToVisit
    // and propagating/merging the set of main applications as we progress down the relationship graph.
    int currentIndex = 0;
    while (currentIndex < applicationsToVisit.size()) {

        String currentApplication = applicationsToVisit.get(currentIndex);
        Set<String> subApplications = isSubtypeOfRelationships.get(currentApplication);

        if (subApplications != null) {
            // add all sub-applications to the set of applications to visit
            applicationsToVisit.addAll(subApplications);
            for (String subApplication : subApplications) {
                Set<String> mainApplicationsSet = applicationToMainApplicationsMap.get(subApplication);
                if (mainApplicationsSet == null) {
                    mainApplicationsSet = new HashSet<>();
                    applicationToMainApplicationsMap.put(subApplication, mainApplicationsSet);
                }
                mainApplicationsSet.addAll(applicationToMainApplicationsMap.get(currentApplication));
            }
        }
        currentIndex++;
    }

    return applicationToMainApplicationsMap;
}
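
The two-argument toMap above is safe because the keys come from a Set and are therefore distinct; with duplicate keys this overload throws an IllegalStateException. A minimal sketch of the three-argument overload, whose merge function handles that case (all names here are illustrative):

import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

public class ToMapWithMerge {
    public static void main(String[] args) {
        List<String> words = List.of("apple", "avocado", "banana");

        // Key: first letter; value: singleton set; merge function unions the sets for duplicate keys.
        Map<Character, Set<String>> byInitial = words.stream()
                .collect(Collectors.toMap(w -> w.charAt(0), Collections::singleton, (s1, s2) -> {
                    Set<String> merged = new HashSet<>(s1);
                    merged.addAll(s2);
                    return merged;
                }));

        System.out.println(byInitial); // e.g. {a=[apple, avocado], b=[banana]} (order unspecified)
    }
}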

From source file:hydrograph.ui.engine.converter.impl.OutputExcelConverter.java

private Map<String, String> getTypeMap(String delimitedString) {

    HashMap<String, String> typemap = (HashMap<String, String>) Arrays
            .asList(StringUtils.split(delimitedString, "\\|")).stream().map(s -> s.split("\\$"))
            .collect(Collectors.toMap(a -> a[0], a -> a.length > 1 ? a[1] : ""));

    return typemap;
}
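
A note on the cast above: the two-argument toMap gives no guarantee about the returned Map implementation (it merely happens to be a HashMap today), so casting the result is fragile. A plain-JDK sketch of the same parsing idiom using the four-argument overload, which takes an explicit map supplier; the input format "key$value|key$value" is assumed from the code above:

import java.util.Arrays;
import java.util.HashMap;
import java.util.stream.Collectors;

public class DelimitedTypeMap {
    // Parses entries of the form key$value separated by '|'.
    static HashMap<String, String> getTypeMap(String delimitedString) {
        return Arrays.stream(delimitedString.split("\\|"))
                .map(s -> s.split("\\$"))
                .collect(Collectors.toMap(
                        a -> a[0],
                        a -> a.length > 1 ? a[1] : "",
                        (first, second) -> second,   // keep the last value on duplicate keys
                        HashMap::new));              // explicit map supplier, no cast needed
    }

    public static void main(String[] args) {
        System.out.println(getTypeMap("name$string|age$int")); // {name=string, age=int} (order unspecified)
    }
}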

From source file:com.ikanow.aleph2.example.flume_harvester.services.FlumeHarvesterSink.java

/** Generates a JSON object assuming the event body is a CSV 
 * @param evt
 * @param config
 * @return
 */
protected Optional<JsonNode> getCsvEventJson(final Event evt, CsvConfig config) {
    if (!_csv.isSet()) {
        final CsvState csv = new CsvState();
        // Lazy initialization:
        csv.parser = new CSVParser(Optional.ofNullable(config.separator().charAt(0)).orElse(','),
                Optional.ofNullable(config.quote_char().charAt(0)).orElse('"'),
                Optional.ofNullable(config.escape_char().charAt(0)).orElse('\\'));
        csv.headers = new ArrayList<String>(Optionals.ofNullable(config.header_fields()));

        csv.type_map = !config.non_string_types().isEmpty() ? config.non_string_types()
                : config.non_string_type_map().entrySet().stream() // (reverse the order of the map to get fieldname -> type)
                        .<Tuple2<String, String>>flatMap(
                                kv -> kv.getValue().stream().map(v -> Tuples._2T(kv.getKey(), v)))
                        .collect(Collectors.toMap(t2 -> t2._2().toString(), t2 -> t2._1().toString()));

        Optional.ofNullable(config.ignore_regex())
                .ifPresent(regex -> csv.ignore_regex = Pattern.compile(regex));
        _csv.set(csv);
    }
    try {
        final CsvState csv = _csv.get();
        final String line = new String(evt.getBody(), "UTF-8");
        if ((null != csv.ignore_regex) && csv.ignore_regex.matcher(line).matches()) {
            return Optional.empty();
        }
        final String[] fields = csv.parser.parseLine(line);
        final ObjectNode ret_val_pre = StreamUtils.zipWithIndex(Arrays.stream(fields))
                .reduce(_mapper.createObjectNode(), (acc, v) -> {
                    if (v.getIndex() >= csv.headers.size())
                        return acc;
                    else {
                        final String field_name = csv.headers.get((int) v.getIndex());
                        if ((null == field_name) || field_name.isEmpty()) {
                            return acc;
                        } else {
                            try {
                                return addField(acc, field_name, v.getValue(), csv.type_map);
                            } catch (Exception e) {
                                return acc;
                            }
                        }
                    }
                }, (acc1, acc2) -> acc1); // (can't occur in practice)
        final ObjectNode ret_val = config.append_event_fields().stream().reduce(ret_val_pre, (acc, v) -> {
            final String value = evt.getHeaders().get(v);
            return (null == value) ? acc : addField(acc, v, value, csv.type_map);
        }, (acc1, acc2) -> acc1 // (can't occur in practice)
        );
        return Optional.of(ret_val);
    } catch (Exception e) {
        return Optional.empty();
    }
}
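
The toMap call above inverts a {type -> field names} map into a {field name -> type} map. A plain-JDK sketch of that inversion without the Tuples helper (the data is illustrative, and it assumes each field name appears under only one type):

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class InvertTypeMap {
    public static void main(String[] args) {
        Map<String, List<String>> typeToFields = Map.of(
                "long", List.of("id", "count"),
                "double", List.of("score"));

        // Flatten to (fieldName, type) pairs, then collect field name -> type.
        Map<String, String> fieldToType = typeToFields.entrySet().stream()
                .flatMap(kv -> kv.getValue().stream()
                        .map(field -> Map.entry(field, kv.getKey())))
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));

        System.out.println(fieldToType); // {id=long, count=long, score=double} (order unspecified)
    }
}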

From source file:com.chiralbehaviors.CoRE.phantasm.graphql.schemas.JooqSchema.java

private GraphQLObjectType objectType(Class<?> record, List<PropertyDescriptor> fields,
        PhantasmProcessor processor) {
    Map<String, Class<?>> references = TABLES.get(record).getReferences().stream().collect(Collectors
            .toMap(fk -> camel(fk.getFields().get(0).getName()), fk -> fk.getTable().getRecordType()));
    GraphQLObjectType.Builder builder = new GraphQLObjectType.Builder();
    builder.name(translated(record));
    fields.forEach(field -> {
        builder.field(f -> references.containsKey(field.getName())
                ? buildReference(f, field, processor, references.get(field.getName()))
                : buildPrimitive(f, field, processor));
    });
    return builder.build();
}

From source file:com.ikanow.aleph2.harvest.logstash.utils.LogstashUtils.java

/**
 * Reads the given output file and outputs it to the logger with the spec'd log level.
 * @param logger
 * @param level
 * @param output_file
 * @throws IOException 
 */
public static void sendOutputToLogger(final IBucketLogger logger, final Level level, final File output_file,
        final Optional<Long> max_lines) throws IOException {
    //      _logger.error("Reading output file: " + output_file + " to send to logger at level: " + level);
    Files.lines(output_file.toPath()).limit(max_lines.orElse(10000L)).forEach(line -> {
        try {
            //convert line to valid json, then parse json, build BMB object from it
            final String fixed_line = line.replaceAll(logstash_colon_search, logstash_colon_replace)
                    .replaceAll(logstash_arrow_search, logstash_arrow_replace)
                    .replaceAll(logstash_newline_search, logstash_newline_replace);
            final String plugin_fixed = fixPlugin(fixed_line);
            final ObjectNode line_object = (ObjectNode) _mapper.readTree(plugin_fixed);
            //move specific fields we want into BMB
            final Date date = parseLogstashDate(line_object.remove("timestamp").asText());
            final Level logstash_level = Level.valueOf(line_object.remove("level").asText());
            final String message = line_object.remove("message").asText();
            //move everything else into details map
            logger.inefficientLog(logstash_level,
                    new BasicMessageBean(date, true, LogstashHarvestService.class.getSimpleName(),
                            "test_output", null, message,
                            StreamSupport
                                    .stream(Spliterators.spliteratorUnknownSize(line_object.fields(),
                                            Spliterator.ORDERED), true)
                                    .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue().asText()))));
        } catch (Exception ex) {
            //fallback on conversion failure
            logger.inefficientLog(level, ErrorUtils
                    .buildSuccessMessage(LogstashHarvestService.class.getSimpleName(), "test_output", line));
        }
    });
    //TODO should we delete log file after we've read it?
}
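
The toMap call above collects the remaining JSON fields (exposed as an Iterator of Map entries) into the details map. A dependency-free sketch of the same iterator-to-map idiom (the iterator here is illustrative, not Jackson's):

import java.util.Iterator;
import java.util.Map;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;

public class IteratorToMap {
    public static void main(String[] args) {
        Iterator<Map.Entry<String, Integer>> fields =
                Map.of("status", 200, "retries", 3).entrySet().iterator();

        // Wrap the iterator in a spliterator, stream it, and collect key -> value-as-text.
        Map<String, String> details = StreamSupport
                .stream(Spliterators.spliteratorUnknownSize(fields, Spliterator.ORDERED), false)
                .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().toString()));

        System.out.println(details); // {status=200, retries=3} (order unspecified)
    }
}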

From source file:com.netflix.metacat.usermetadata.mysql.MysqlUserMetadataService.java

@Nonnull
@Override
public Map<String, ObjectNode> getDefinitionMetadataMap(@Nonnull final List<QualifiedName> names) {
    if (!names.isEmpty()) {
        final List<List<QualifiedName>> parts = Lists.partition(names,
                config.getUserMetadataMaxInClauseItems());
        return parts.stream().map(keys -> _getMetadataMap(keys, SQL.GET_DEFINITION_METADATAS))
                .flatMap(it -> it.entrySet().stream()).collect(Collectors
                        .toMap(it -> QualifiedName.fromString(it.getKey()).toString(), Map.Entry::getValue));
    } else {
        return Collections.emptyMap();
    }
}

From source file:fr.paris.lutece.portal.web.xsl.XslExportJspBeanTest.java

public void testDoModifyXslExportNoToken() throws AccessDeniedException {
    MockHttpServletRequest request = new MockHttpServletRequest();
    AdminUser user = new AdminUser();
    user.setRoles(
            AdminRoleHome.findAll().stream().collect(Collectors.toMap(AdminRole::getKey, Function.identity())));
    Utils.registerAdminUserWithRigth(request, user, XslExportJspBean.RIGHT_MANAGE_XSL_EXPORT);
    String randomName = getRandomName();
    Map<String, String[]> parameters = new HashMap<>();
    parameters.put("title", new String[] { randomName });
    parameters.put("description", new String[] { randomName });
    parameters.put("extension", new String[] { randomName });
    parameters.put("id_xsl_export", new String[] { Integer.toString(_xslExport.getIdXslExport()) });
    Map<String, List<FileItem>> multipartFiles = new HashMap<>();

    _instance.init(request, XslExportJspBean.RIGHT_MANAGE_XSL_EXPORT);

    try {
        _instance.doModifyXslExport(new MultipartHttpServletRequest(request, multipartFiles, parameters));
        fail("Should have thrown");
    } catch (AccessDeniedException e) {
        XslExport stored = XslExportHome.findByPrimaryKey(_xslExport.getIdXslExport());
        assertNotNull(stored);
        assertEquals(_xslExport.getTitle(), stored.getTitle());
        assertEquals(_xslExport.getDescription(), stored.getDescription());
        assertEquals(_xslExport.getExtension(), stored.getExtension());
    }
}
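
The toMap(AdminRole::getKey, Function.identity()) call above is the common idiom for indexing a collection of objects by one of their properties. A minimal standalone sketch with an illustrative record type (Java 16+):

import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

public class IndexByKey {
    record Role(String key, String label) {}

    public static void main(String[] args) {
        List<Role> roles = List.of(new Role("admin", "Administrator"), new Role("editor", "Editor"));

        // Key: the role's key; value: the role object itself.
        Map<String, Role> rolesByKey = roles.stream()
                .collect(Collectors.toMap(Role::key, Function.identity()));

        System.out.println(rolesByKey.get("admin").label()); // Administrator
    }
}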

From source file:alfio.manager.system.ConfigurationManager.java

public Map<ConfigurationKeys.SettingCategory, List<Configuration>> loadOrganizationConfig(int organizationId,
        String username) {
    User user = userManager.findUserByUsername(username);
    if (!userManager.isOwnerOfOrganization(user, organizationId)) {
        return Collections.emptyMap();
    }
    boolean isAdmin = userManager.isAdmin(user);
    Map<ConfigurationKeys.SettingCategory, List<Configuration>> existing = configurationRepository
            .findOrganizationConfiguration(organizationId).stream()
            .filter(checkActualConfigurationLevel(isAdmin, ORGANIZATION)).sorted().collect(groupByCategory());
    String paymentMethodsBlacklist = getStringConfigValue(
            Configuration.from(organizationId, ConfigurationKeys.PAYMENT_METHODS_BLACKLIST), "");
    Map<SettingCategory, List<Configuration>> result = groupByCategory(
            isAdmin ? union(SYSTEM, ORGANIZATION) : ORGANIZATION_CONFIGURATION, existing);
    List<SettingCategory> toBeRemoved = PaymentProxy.availableProxies().stream()
            .filter(pp -> paymentMethodsBlacklist.contains(pp.getKey()))
            .flatMap(pp -> pp.getSettingCategories().stream()).collect(Collectors.toList());

    if (toBeRemoved.isEmpty()) {
        return result;
    } else {
        return result.entrySet().stream().filter(entry -> !toBeRemoved.contains(entry.getKey()))
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    }
}
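
The final statement above filters an existing map by key and rebuilds it with toMap(Map.Entry::getKey, Map.Entry::getValue). A standalone sketch of that filter-and-rebuild idiom (the data is illustrative):

import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

public class FilterMap {
    public static void main(String[] args) {
        Map<String, Integer> settings = Map.of("STRIPE", 1, "PAYPAL", 2, "ON_SITE", 3);
        Set<String> toBeRemoved = Set.of("PAYPAL");

        // Keep only the entries whose key is not blacklisted.
        Map<String, Integer> filtered = settings.entrySet().stream()
                .filter(entry -> !toBeRemoved.contains(entry.getKey()))
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));

        System.out.println(filtered); // {STRIPE=1, ON_SITE=3} (order unspecified)
    }
}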

From source file:com.epam.catgenome.manager.bed.BedManager.java

private void makeHistogramFromIterator(CloseableIterator<NggbBedFeature> iterator, BedFile bedFile)
        throws IOException {
    List<Wig> histogram = new ArrayList<>();
    NggbBedFeature firstFeature = iterator.next();
    String currentContig = firstFeature.getContig();

    Map<String, Chromosome> chromosomeMap = referenceGenomeManager.loadChromosomes(bedFile.getReferenceId())
            .stream().collect(Collectors.toMap(BaseEntity::getName, c -> c));
    currentContig = checkFileNonEmpty(currentContig, iterator, chromosomeMap, bedFile);
    Chromosome currentChromosome = Utils.getFromChromosomeMap(chromosomeMap, currentContig);
    int histogramSize = Math.min(
            (int) Math.ceil(currentChromosome.getSize() * HistogramUtils.HISTOGAM_BLOCK_SIZE_PART),
            HistogramUtils.HISTOGRAM_SIZE_LIMIT);
    int intervalLength = currentChromosome.getSize() / histogramSize;
    int intervalEnd = intervalLength;

    Wig currentWig = new Wig();
    currentWig.setStartIndex(1);
    currentWig.setEndIndex(intervalLength);

    int featureCount = 1;
    while (iterator.hasNext()) {
        NggbBedFeature feature = iterator.next();
        if (!feature.getContig().equals(currentContig) && currentChromosome != null) {
            currentWig.setValue((float) featureCount);
            histogram.add(currentWig);
            fileManager.writeHistogram(bedFile, currentChromosome.getName(), histogram);
            histogram.clear();
            featureCount = 0;
            currentContig = getNextContig(feature.getContig(), iterator, chromosomeMap);
            if (currentContig == null) {
                currentChromosome = null;
            } else {
                currentChromosome = Utils.getFromChromosomeMap(chromosomeMap, currentContig);
            }
        }

        if (currentChromosome != null && feature.getEnd() > intervalEnd) {
            currentWig.setValue((float) featureCount);
            histogram.add(currentWig);

            currentWig = new Wig(intervalEnd + 1, intervalEnd + 1 + intervalLength);
            intervalEnd = intervalEnd + 1 + intervalLength;
            featureCount = 0;
        }

        featureCount++;
    }

    if (featureCount > 0 && currentChromosome != null) {
        currentWig.setValue((float) featureCount);
        histogram.add(currentWig);
        fileManager.writeHistogram(bedFile, currentChromosome.getName(), histogram);
    }
}

From source file:com.oneops.inductor.Config.java

/**
 * Helper method to read the inductor {@link #env} and return an env var map.
 *
 * @param env env can be a file location or a string containing multiple ENV_NAME=VALUE entries.
 *            Entries are separated by newline (file) or ',' (string).
 * @return env var map.
 */
private Map<String, String> readEnvVars(String env) {
    Path path = Paths.get(env);
    List<String> kvList;
    if (path.toFile().exists()) {
        try {
            kvList = Files.readAllLines(path);
        } catch (IOException ioe) {
            logger.warn("Error reading env var file: " + path, ioe);
            kvList = Collections.emptyList();
        }
    } else {
        kvList = Arrays.asList(env.trim().split(","));
    }
    return kvList.stream().map(s -> s.split("=")).filter(p -> p.length == 2)
            .collect(Collectors.toMap(p -> p[0].trim(), p -> p[1].trim()));
}
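
A quick illustration of the parsing idiom in readEnvVars for the comma-separated case (the input string is made up): each "NAME=VALUE" entry becomes one map entry, and malformed entries are silently dropped by the length filter.

import java.util.Arrays;
import java.util.Map;
import java.util.stream.Collectors;

public class EnvVarParsing {
    public static void main(String[] args) {
        String env = "JAVA_HOME=/usr/lib/jvm/java-8, LOG_LEVEL=INFO, MALFORMED";

        Map<String, String> vars = Arrays.stream(env.trim().split(","))
                .map(s -> s.split("="))
                .filter(p -> p.length == 2)   // keep only well-formed NAME=VALUE pairs
                .collect(Collectors.toMap(p -> p[0].trim(), p -> p[1].trim()));

        System.out.println(vars); // {JAVA_HOME=/usr/lib/jvm/java-8, LOG_LEVEL=INFO} (order unspecified)
    }
}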