Example usage for java.util.stream Collectors.joining

Introduction

This page shows example usages of java.util.stream Collectors.joining, collected from open source projects.

Prototype

public static Collector<CharSequence, ?, String> joining(CharSequence delimiter) 

Document

Returns a Collector that concatenates the input elements, separated by the specified delimiter, in encounter order.
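
As a quick illustration of that behavior, here is a minimal, self-contained sketch (the class name and list contents are illustrative; List.of requires Java 9+):

import java.util.List;
import java.util.stream.Collectors;

public class JoiningExample {
    public static void main(String[] args) {
        List<String> words = List.of("alpha", "beta", "gamma");

        // The delimiter appears between elements only; nothing is
        // appended after the last element.
        String joined = words.stream().collect(Collectors.joining(", "));

        System.out.println(joined); // prints: alpha, beta, gamma
    }
}

Collectors also provides the related overloads joining(), which concatenates with no delimiter, and joining(delimiter, prefix, suffix), which additionally wraps the result in the given prefix and suffix.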

Usage

From source file:fi.helsinki.opintoni.service.converter.EventConverter.java

private String getLocations(OodiEvent event) {
    return Lists.newArrayList(event.roomName, event.buildingStreet).stream().filter(Objects::nonNull)
            .collect(Collectors.joining(", "));
}
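
Note that Collectors.joining itself does not skip null elements (a null would be rendered as the literal string "null"), so the filter(Objects::nonNull) step above is what keeps a missing room name or street out of the joined result.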

From source file:com.github.horrorho.inflatabledonkey.args.PropertyLoader.java

String parse(Option option) {
    testIntegers(option);

    if (option.hasArgs()) {
        // Array
        return option.getValuesList().stream().collect(Collectors.joining(" "));
    }
    if (option.hasArg()) {
        // Value
        return option.getValue();
    }
    // Boolean
    return Boolean.TRUE.toString();
}
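
When no intermediate stream operations are involved, as in the Array branch above, String.join is an equivalent shorthand for joining a list of strings; this is a stylistic alternative, not a behavioral change:

// Equivalent to option.getValuesList().stream().collect(Collectors.joining(" "))
return String.join(" ", option.getValuesList());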

From source file:dk.dma.msinm.service.MessageSearchParams.java

/**
 * Returns a string representation of the filtering criteria (not sorting or paging)
 * @return a string representation of the filtering criteria
 */
@Override
public String toString() {
    List<String> desc = new ArrayList<>();
    if (isNotBlank(language)) {
        desc.add(String.format("Language: %s", language));
    }
    if (isNotBlank(query)) {
        desc.add(String.format("Query: '%s'", query));
    }
    if (from != null) {
        desc.add(String.format("From: %s", new SimpleDateFormat(DATE_FORMAT).format(from)));
    }
    if (to != null) {
        desc.add(String.format("To: %s", new SimpleDateFormat(DATE_FORMAT).format(to)));
    }
    if (locations.size() > 0) {
        desc.add(String.format("%d locations", locations.size()));
    }
    if (status != null) {
        desc.add(String.format("Status: %s", status));
    }
    if (types.size() > 0) {
        desc.add(String.format("Types: %s", types));
    }
    if (mainTypes.size() > 0) {
        desc.add(String.format("Main types: %s", mainTypes));
    }
    if (areaIds.size() > 0) {
        desc.add(String.format("Area ID's: %s", areaIds));
    }
    if (categoryIds.size() > 0) {
        desc.add(String.format("Category ID's: %s", categoryIds));
    }
    if (chartIds.size() > 0) {
        desc.add(String.format("Chart ID's: %s", chartIds));
    }
    if (bookmarks) {
        desc.add("Bookmarks: true");
    }
    if (updatedFrom != null) {
        desc.add(String.format("Updated from: %s", updatedFrom));
    }
    if (updatedTo != null) {
        desc.add(String.format("Updated to: %s", updatedTo));
    }

    return desc.stream().collect(Collectors.joining(", "));
}

From source file:com.thinkbiganalytics.rest.JerseyConfig.java

/**
 * Add ability to scan additional Spring Beans annotated with @Path
 */
@PostConstruct
private void init() {
    //register any additional beans that are path annotated
    Map<String, Object> map = applicationContext.getBeansWithAnnotation(Path.class);
    String packageNames = "com.thinkbiganalytics";
    if (map != null && !map.isEmpty()) {
        String beanPackageNames = map.values().stream().map(o -> o.getClass().getPackage().getName()).distinct()
                .collect(Collectors.joining(","));
        if (StringUtils.isNotBlank(beanPackageNames)) {
            packageNames += "," + beanPackageNames;
        }
    }
    if (map != null) {
        Set<Class<?>> pathClasses = map.values().stream().map(o -> o.getClass()).collect(Collectors.toSet());
        registerClasses(pathClasses);
    }
    configureSwagger(packageNames);
}

From source file:de.steilerdev.myVerein.server.model.Division.java

/**
 * This function updates the parent and the ancestors.
 * @param parent The new parent.
 */
public void setParent(Division parent) {
    logger.trace("Changing parent for " + this.name);
    if (parent != null) {
        logger.debug("Updating ancestors for " + this.name);
        List<Division> ancestor;
        if (parent.getAncestors() == null) {
            ancestor = new ArrayList<>();
        } else {
            //Need to create a new ArrayList, assigning would lead to fill and use BOTH lists
            ancestor = new ArrayList<>(parent.getAncestors());
        }
        ancestor.add(parent);
        logger.debug("Ancestors " + ancestor.stream().map(Division::getName).collect(Collectors.joining(", "))
                + " for division " + this.name);
        this.ancestors = ancestor;
    }
    this.parent = parent;
    logger.info("Successfully updated parent and ancestors of " + this.name);
}

From source file:com.firewallid.termcloud.TermCloud.java

public void saveTermCloud(JavaPairRDD<String, List<Tuple2<String, Double>>> doc, String fileNamePrefix) {
    doc.filter(titleFeatures -> !titleFeatures._2.isEmpty())
            /* Map list to formatted line of termcloud file */
            .mapToPair(titleFeatures -> new Tuple2<String, String>(titleFeatures._1,
                    titleFeatures._2.parallelStream()
                            .map(feature -> feature._1
                                    + StringEscapeUtils.unescapeJava(conf.get(LINE_DELIMITER)) + feature._2)
                            .collect(Collectors.joining(System.lineSeparator()))))
            /* Save to hdfs file */
            .foreach(titleText -> FIFile.writeStringToHDFSFile(
                    FIFile.generateFullPath(conf.get(TERMCLOUD_FOLDER),
                            createFileNameTermCloud(fileNamePrefix, titleText._1)),
                    titleText._2));
}

From source file:com.intellij.lang.jsgraphql.endpoint.doc.psi.JSGraphQLEndpointDocPsiUtil.java

/**
 * Gets the text of the continuous comments placed directly above the specified element
 * @param element element whose previous siblings are enumerated and included if they're documentation comments
 * @return the combined text of the documentation comments, preserving line breaks, or <code>null</code> if no documentation is available
 */
public static String getDocumentation(PsiElement element) {
    final PsiComment comment = PsiTreeUtil.getPrevSiblingOfType(element, PsiComment.class);
    if (isDocumentationComment(comment)) {
        final List<PsiComment> siblings = Lists.newArrayList(comment);
        getDocumentationCommentSiblings(comment, siblings, PsiElement::getPrevSibling);
        Collections.reverse(siblings);
        return siblings.stream().map(c -> StringUtils.stripStart(c.getText(), "# "))
                .collect(Collectors.joining("\n"));
    }
    return null;
}
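
A detail worth noting in the example above: StringUtils.stripStart(c.getText(), "# ") treats "# " as a set of characters, stripping any leading run of '#' and ' ' characters rather than the exact two-character prefix, which is the desired behavior for removing comment markers of varying depth.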

From source file:com.yahoo.gondola.tsunami.Tsunami.java

public Tsunami() throws Exception {
    setup();

    // Print status thread
    executorService.execute(() -> {
        while (true) {
            logger.info("writes: {}, reads: {}, errors: {}, waiting for index={} on {}", writes.get(),
                    reads.get(), errors.get(), verifyWaitingForIndex, agents[verifyWaitingFor].hostId);
            logger.info("  " + Arrays.stream(agents).map(agent -> agent.hostId + ": up=" + agent.up)
                    .collect(Collectors.joining(", ")));
            logger.info("  lastWrite: {}, lastRead: {}", lastWrittenIndex, lastReadIndex);
            sleep(10000);
        }
    });

    executorService.execute(new Killer());
    executorService.execute(new Verifier());

    // Initialize writer threads. numWriter writers per gondola
    for (int a = 0; a < agents.length; a++) {
        AgentClient agent = agents[a];
        for (int w = 0; w < numWriters; w++) {
            String writerId = String.format("%c%d", (char) ('A' + a), w);

            executorService.execute(new Writer(writerId, agent.hostname, agent.gondolaCc.getPort()));
        }
    }
}

From source file:io.woolford.processors.nifibenford.BenfordsLaw.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    // Read the flow file content into a single newline-delimited string;
    // try-with-resources ensures the session's input stream is closed.
    final String input;
    try (InputStream inputStream = session.read(flowFile)) {
        input = new BufferedReader(new InputStreamReader(inputStream)).lines()
                .collect(Collectors.joining("\n"));
    } catch (IOException e) {
        throw new ProcessException("Unable to read flow file content", e);
    }

    // TODO: since the values returned by Benford's array don't ever change, these could be hard-coded rather than calling a function each time.
    double[] benfordsArray = getBenfordsArray();
    long[] firstDigitArray = getFirstDigitArray(input);

    long sampleSize = LongStream.of(firstDigitArray).sum();

    // Don't perform the chi-squared test if the sample is too small
    if (sampleSize < context.getProperty(MIN_SAMPLE).asLong()) {
        session.transfer(flowFile, INSUFFICIENT_SAMPLE);
        return;
    }

    ChiSquareTest chiSquareTest = new ChiSquareTest();
    boolean suspect = chiSquareTest.chiSquareTest(benfordsArray, firstDigitArray,
            context.getProperty(ALPHA).asDouble());

    if (suspect) {
        session.transfer(flowFile, NON_CONFORMING);
    } else {
        session.transfer(flowFile, CONFORMING);
    }
}

From source file:com.uber.hoodie.utilities.sources.HiveIncrPullSource.java

@Override
public Pair<Optional<JavaRDD<GenericRecord>>, String> fetchNewData(Optional<String> lastCheckpointStr,
        long maxInputBytes) {
    try {
        // find the source commit to pull
        Optional<String> commitToPull = findCommitToPull(lastCheckpointStr);

        if (!commitToPull.isPresent()) {
            return new ImmutablePair<>(Optional.empty(),
                    lastCheckpointStr.isPresent() ? lastCheckpointStr.get() : "");
        }

        // read the files out.
        List<FileStatus> commitDeltaFiles = Arrays
                .asList(fs.listStatus(new Path(incrPullRootPath, commitToPull.get())));
        String pathStr = commitDeltaFiles.stream().map(f -> f.getPath().toString())
                .collect(Collectors.joining(","));
        String schemaStr = schemaProvider.getSourceSchema().toString();
        final AvroConvertor avroConvertor = new AvroConvertor(schemaStr);
        return new ImmutablePair<>(
                Optional.of(DFSSource.fromFiles(dataFormat, avroConvertor, pathStr, sparkContext)),
                String.valueOf(commitToPull.get()));
    } catch (IOException ioe) {
        throw new HoodieIOException("Unable to read from source from checkpoint: " + lastCheckpointStr, ioe);
    }
}