Example usage for java.util.stream StreamSupport stream

List of usage examples for java.util.stream StreamSupport stream

Introduction

On this page you can find example usage for java.util.stream StreamSupport stream.

Prototype

public static <T> Stream<T> stream(Spliterator<T> spliterator, boolean parallel) 

Document

Creates a new sequential or parallel Stream from a Spliterator.
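
For orientation, here is a minimal sketch (not taken from any of the sources below) showing the two forms of the call: the boolean argument selects whether the returned stream is sequential or parallel.

import java.util.List;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

List<String> names = List.of("alpha", "beta", "gamma");

// Sequential stream backed by the list's spliterator
Stream<String> sequential = StreamSupport.stream(names.spliterator(), false);

// Same call with parallel = true requests a parallel stream
Stream<String> parallel = StreamSupport.stream(names.spliterator(), true);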

Usage

From source file:com.joyent.manta.client.multipart.ServerSideMultipartManager.java

/**
 * Completes a multipart transfer by assembling the parts on Manta.
 * This is a synchronous operation.
 *
 * @param upload multipart upload object
 * @param parts iterable of multipart part objects
 * @throws IOException thrown if there is a problem connecting to Manta
 */
@Override
public void complete(final ServerSideMultipartUpload upload,
        final Iterable<? extends MantaMultipartUploadTuple> parts) throws IOException {
    Validate.notNull(upload, "Upload state object must not be null");

    final Stream<? extends MantaMultipartUploadTuple> partsStream = StreamSupport.stream(parts.spliterator(),
            false);

    complete(upload, partsStream);
}

From source file:fi.vm.sade.eperusteet.ylops.service.ops.impl.OpetussuunnitelmaServiceImpl.java

private void fetchOrganisaatioNimet(OpetussuunnitelmaBaseDto opetussuunnitelmaDto) {
    for (OrganisaatioDto organisaatioDto : opetussuunnitelmaDto.getOrganisaatiot()) {
        Map<String, String> tekstit = new HashMap<>();
        List<String> tyypit = new ArrayList<>();
        JsonNode organisaatio = organisaatioService.getOrganisaatio(organisaatioDto.getOid());
        if (organisaatio != null) {
            JsonNode nimiNode = organisaatio.get("nimi");
            if (nimiNode != null) {
                Iterator<Map.Entry<String, JsonNode>> it = nimiNode.fields();
                while (it.hasNext()) {
                    Map.Entry<String, JsonNode> field = it.next();
                    tekstit.put(field.getKey(), field.getValue().asText());
                }
            }

            JsonNode tyypitNode = ofNullable(organisaatio.get("tyypit"))
                    .orElse(organisaatio.get("organisaatiotyypit"));
            if (tyypitNode != null) {
                tyypit = StreamSupport.stream(tyypitNode.spliterator(), false).map(JsonNode::asText)
                        .collect(Collectors.toList());
            }
        }
        organisaatioDto.setNimi(new LokalisoituTekstiDto(tekstit));
        organisaatioDto.setTyypit(tyypit);
    }
}

From source file:fr.landel.utils.assertor.utils.AssertorMap.java

private static <M extends Map<K, V>, K, V, T> boolean contains(final M map, final Iterable<T> objects,
        final Predicate<T> predicate, final boolean all, final boolean not,
        final EnumAnalysisMode analysisMode) {

    long found = 0;

    if (EnumAnalysisMode.STANDARD.equals(analysisMode)) {
        for (T object : objects) {
            if (predicate.test(object)) {
                ++found;
            }
        }
    } else {
        found = StreamSupport.stream(objects.spliterator(), EnumAnalysisMode.PARALLEL.equals(analysisMode))
                .filter(predicate).count();
    }

    return HelperAssertor.isValid(all, not, found, IterableUtils.size(objects));
}
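
The example above computes the parallel flag at run time rather than hard-coding it. A minimal sketch of that idiom, using a hypothetical helper that is not part of the library above:

import java.util.function.Predicate;
import java.util.stream.StreamSupport;

// Hypothetical helper: the caller decides at run time whether to stream in parallel.
static <T> long countMatching(Iterable<T> items, Predicate<T> predicate, boolean parallel) {
    return StreamSupport.stream(items.spliterator(), parallel)
            .filter(predicate)
            .count();
}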

From source file:org.springframework.cloud.gcp.data.spanner.core.SpannerTemplate.java

private List<Mutation> getMutationsForMultipleObjects(Iterable it,
        Function<Object, Collection<Mutation>> individualEntityMutationFunc) {
    return (List<Mutation>) StreamSupport.stream(it.spliterator(), false)
            .flatMap((x) -> individualEntityMutationFunc.apply(x).stream()).collect(Collectors.toList());
}

From source file:com.intuit.wasabi.repository.cassandra.impl.CassandraAssignmentsRepository.java

@Override
@Timed
public Assignment getAssignment(User.ID userID, Application.Name appName, Experiment.ID experimentID,
        Context context) {
    ListenableFuture<Result<ExperimentUserByUserIdContextAppNameExperimentId>> resultFuture = experimentUserIndexAccessor
            .asyncSelectBy(userID.toString(), appName.toString(), experimentID.getRawID(), context.toString());
    Result<ExperimentUserByUserIdContextAppNameExperimentId> assignmentResult = UninterruptibleUtil
            .getUninterruptibly(resultFuture);

    Stream<ExperimentUserByUserIdContextAppNameExperimentId> assignmentResultStream = StreamSupport.stream(
            Spliterators.spliteratorUnknownSize(assignmentResult.iterator(), Spliterator.ORDERED), false);

    final Stream<Assignment.Builder> assignmentBuilderStream = assignmentResultStream.map(t -> {
        Assignment.Builder builder = Assignment.newInstance(Experiment.ID.valueOf(t.getExperimentId()))
                .withUserID(User.ID.valueOf(t.getUserId())).withContext(Context.valueOf(t.getContext()));

        if (nonNull(t.getBucket()) && !t.getBucket().trim().isEmpty()) {
            builder.withBucketLabel(Bucket.Label.valueOf(t.getBucket()));
        }
        return builder;
    });

    Optional<Assignment> assignmentOptional = getAssignmentFromStream(experimentID, userID, context,
            assignmentBuilderStream);
    return assignmentOptional.isPresent() ? assignmentOptional.get() : null;
}
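
When only an Iterator is available (as with the Cassandra Result above), the usual idiom is to wrap it with Spliterators.spliteratorUnknownSize before calling StreamSupport.stream. A minimal sketch with a hypothetical iterator:

import java.util.Iterator;
import java.util.List;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

// Hypothetical iterator; any Iterator<T> is handled the same way.
Iterator<String> iterator = List.of("a", "b", "c").iterator();

// Wrap the iterator in an ORDERED spliterator of unknown size, then stream it sequentially.
Stream<String> stream = StreamSupport.stream(
        Spliterators.spliteratorUnknownSize(iterator, Spliterator.ORDERED), false);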

From source file:jp.classmethod.aws.dynamodb.DynamoDbRepository.java

private List<E> findAll(Iterable<AttributeValue> ids, boolean useParallelBatches) {
    Preconditions.checkNotNull(ids, "ids may not be null");
    List<AttributeValue> idList = Lists.newArrayList(ids);
    if (idList.isEmpty()) {
        return new ArrayList<>();
    }
    List<Map<String, AttributeValue>> resultantItems = new ArrayList<>();

    StreamSupport.stream(Iterables.partition(idList, 25).spliterator(), useParallelBatches).forEach(inner -> {
        BatchGetItemRequest req = new BatchGetItemRequest();
        KeysAndAttributes keysAndAttributes = new KeysAndAttributes();
        keysAndAttributes.setConsistentRead(true);
        keysAndAttributes.setKeys(
                inner.stream().map(id -> ImmutableMap.of(hashKeyName, id)).collect(Collectors.toList()));
        String tableName = tableName();
        req.withRequestItems(ImmutableMap.of(tableName, keysAndAttributes));

        BatchGetItemResult result;

        do {
            try {
                result = dynamoDB.batchGetItem(req);
                resultantItems.addAll(result.getResponses().get(tableName));
                req.setRequestItems(result.getUnprocessedKeys());
            } catch (AmazonClientException e) {
                throw this.convertDynamoDBException(e, "batch get", null /*no conditions for reads*/);
            }
        } while (false == result.getUnprocessedKeys().isEmpty());
    });

    return resultantItems.stream().map(legacyItem -> Item.fromMap(InternalUtils.toSimpleMapValue(legacyItem)))
            .map(item -> convertItemToDomain(item)).collect(Collectors.toList());
}
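
The core pattern in this example is splitting an id list into fixed-size batches and letting the caller decide whether the batches are processed in parallel. A minimal sketch, assuming Guava's Iterables.partition and hypothetical data:

import java.util.List;
import java.util.stream.StreamSupport;

import com.google.common.collect.Iterables;

// Hypothetical ids; partition them into batches of 3 and process each batch,
// in parallel only if the caller asks for it.
List<Integer> ids = List.of(1, 2, 3, 4, 5, 6, 7);
boolean useParallelBatches = true;

StreamSupport.stream(Iterables.partition(ids, 3).spliterator(), useParallelBatches)
        .forEach(batch -> System.out.println("processing batch " + batch));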

From source file:controllers.nwbib.Application.java

/**
 * @param q Query to search in all fields
 * @param person Query for a person associated with the resource
 * @param name Query for the resource name (title)
 * @param subject Query for the resource subject
 * @param id Query for the resource id
 * @param publisher Query for the resource publisher
 * @param issued Query for the resource issued year
 * @param medium Query for the resource medium
 * @param nwbibspatial Query for the resource nwbibspatial classification
 * @param nwbibsubject Query for the resource nwbibsubject classification
 * @param from The page start (offset of page of resource to return)
 * @param size The page size (size of page of resource to return)
 * @param owner Owner filter for resource queries
 * @param t Type filter for resource queries
 * @param field The facet field (the field to facet over)
 * @param sort Sorting order for results ("newest", "oldest", "" -> relevance)
 * @param set The set, overrides the default NWBib set if not empty
 * @param location A polygon describing the subject area of the resources
 * @param word A word, a concept from the hbz union catalog
 * @param corporation A corporation associated with the resource
 * @param raw A query string that's directly (unprocessed) passed to ES
 * @return The search results
 */
public static Promise<Result> facets(String q, String person, String name, String subject, String id,
        String publisher, String issued, String medium, String nwbibspatial, String nwbibsubject, int from,
        int size, String owner, String t, String field, String sort, String set, String location, String word,
        String corporation, String raw) {

    String key = String.format("facets.%s.%s.%s.%s.%s.%s.%s.%s.%s.%s.%s.%s.%s.%s.%s.%s.%s.%s", field, q, person,
            name, id, publisher, set, location, word, corporation, raw, subject, issued, medium, nwbibspatial,
            nwbibsubject, owner, t);
    Result cachedResult = (Result) Cache.get(key);
    if (cachedResult != null) {
        return Promise.promise(() -> cachedResult);
    }

    String labelTemplate = "<span class='%s'/>&nbsp;%s (%s)";

    Function<JsonNode, Pair<JsonNode, String>> toLabel = json -> {
        String term = json.get("term").asText();
        int count = json.get("count").asInt();
        String icon = Lobid.facetIcon(Arrays.asList(term), field);
        String label = Lobid.facetLabel(Arrays.asList(term), field, "");
        String fullLabel = String.format(labelTemplate, icon, label, count);
        return Pair.of(json, fullLabel);
    };

    Predicate<Pair<JsonNode, String>> labelled = pair -> {
        JsonNode json = pair.getLeft();
        String label = pair.getRight();
        int count = json.get("count").asInt();
        return (!label.contains("http") || label.contains("nwbib"))
                && label.length() > String.format(labelTemplate, "", "", count).length();
    };

    Collator collator = Collator.getInstance(Locale.GERMAN);
    Comparator<Pair<JsonNode, String>> sorter = (p1, p2) -> {
        String t1 = p1.getLeft().get("term").asText();
        String t2 = p2.getLeft().get("term").asText();
        boolean t1Current = current(subject, medium, nwbibspatial, nwbibsubject, owner, t, field, t1, raw);
        boolean t2Current = current(subject, medium, nwbibspatial, nwbibsubject, owner, t, field, t2, raw);
        if (t1Current == t2Current) {
            if (!field.equals(ISSUED_FIELD)) {
                Integer c1 = p1.getLeft().get("count").asInt();
                Integer c2 = p2.getLeft().get("count").asInt();
                return c2.compareTo(c1);
            }
            String l1 = p1.getRight().substring(p1.getRight().lastIndexOf('>') + 1);
            String l2 = p2.getRight().substring(p2.getRight().lastIndexOf('>') + 1);
            return collator.compare(l1, l2);
        }
        return t1Current ? -1 : t2Current ? 1 : 0;
    };

    Function<Pair<JsonNode, String>, String> toHtml = pair -> {
        JsonNode json = pair.getLeft();
        String fullLabel = pair.getRight();
        String term = json.get("term").asText();
        if (field.equals(SUBJECT_LOCATION_FIELD)) {
            GeoPoint point = new GeoPoint(term);
            term = String.format("%s,%s", point.getLat(), point.getLon());
        }
        String mediumQuery = !field.equals(MEDIUM_FIELD) //
                ? medium
                : queryParam(medium, term);
        String typeQuery = !field.equals(TYPE_FIELD) //
                ? t
                : queryParam(t, term);
        String ownerQuery = !field.equals(ITEM_FIELD) //
                ? owner
                : withoutAndOperator(queryParam(owner, term));
        String nwbibsubjectQuery = !field.equals(NWBIB_SUBJECT_FIELD) //
                ? nwbibsubject
                : queryParam(nwbibsubject, term);
        String nwbibspatialQuery = !field.equals(NWBIB_SPATIAL_FIELD) //
                ? nwbibspatial
                : queryParam(nwbibspatial, term);
        String rawQuery = !field.equals(COVERAGE_FIELD) //
                ? raw
                : rawQueryParam(raw, term);
        String locationQuery = !field.equals(SUBJECT_LOCATION_FIELD) //
                ? location
                : term;
        String subjectQuery = !field.equals(SUBJECT_FIELD) //
                ? subject
                : queryParam(subject, term);
        String issuedQuery = !field.equals(ISSUED_FIELD) //
                ? issued
                : queryParam(issued, term);

        boolean current = current(subject, medium, nwbibspatial, nwbibsubject, owner, t, field, term, raw);

        String routeUrl = routes.Application.search(q, person, name, subjectQuery, id, publisher, issuedQuery,
                mediumQuery, nwbibspatialQuery, nwbibsubjectQuery, from, size, ownerQuery, typeQuery,
                sort(sort, nwbibspatialQuery, nwbibsubjectQuery, subjectQuery), false, set, locationQuery, word,
                corporation, rawQuery).url();

        String result = String.format(
                "<li " + (current ? "class=\"active\"" : "") + "><a class=\"%s-facet-link\" href='%s'>"
                        + "<input onclick=\"location.href='%s'\" class=\"facet-checkbox\" "
                        + "type=\"checkbox\" %s>&nbsp;%s</input>" + "</a></li>",
                Math.abs(field.hashCode()), routeUrl, routeUrl, current ? "checked" : "", fullLabel);

        return result;
    };

    Promise<Result> promise = Lobid.getFacets(q, person, name, subject, id, publisher, issued, medium,
            nwbibspatial, nwbibsubject, owner, field, t, set, location, word, corporation, raw).map(json -> {
                Stream<JsonNode> stream = StreamSupport.stream(
                        Spliterators.spliteratorUnknownSize(json.findValue("entries").elements(), 0), false);
                if (field.equals(ITEM_FIELD)) {
                    stream = preprocess(stream);
                }
                String labelKey = String.format(
                        "facets-labels.%s.%s.%s.%s.%s.%s.%s.%s.%s.%s.%s.%s.%s.%s.%s.%s.%s.%s.%s", field, raw, q,
                        person, name, id, publisher, set, word, corporation, subject, issued, medium,
                        nwbibspatial, nwbibsubject, raw, field.equals(ITEM_FIELD) ? "" : owner, t, location);

                @SuppressWarnings("unchecked")
                List<Pair<JsonNode, String>> labelledFacets = (List<Pair<JsonNode, String>>) Cache
                        .get(labelKey);
                if (labelledFacets == null) {
                    labelledFacets = stream.map(toLabel).filter(labelled).collect(Collectors.toList());
                    Cache.set(labelKey, labelledFacets, ONE_DAY);
                }
                return labelledFacets.stream().sorted(sorter).map(toHtml).collect(Collectors.toList());
            }).map(lis -> ok(String.join("\n", lis)));
    promise.onRedeem(r -> Cache.set(key, r, ONE_DAY));
    return promise;
}

From source file:gedi.atac.Atac.java

public static void normalizationFactors(GenomicRegionStorage<? extends AlignedReadsData> storage,
        GenomicRegionStorage<?> peaks, String out, String peakout, String detailout, String... typePattern)
        throws IOException {

    int cond = storage.getRandomRecord().getNumConditions();
    int[][] allCounts = new int[typePattern.length][cond];
    int[][] peakCounts = new int[typePattern.length][cond];

    Pattern[] types = new Pattern[typePattern.length];
    for (int i = 0; i < types.length; i++)
        types[i] = Pattern.compile(typePattern[i]);

    new LineOrientedFile(detailout).delete();

    Set<ReferenceSequence> refs = new TreeSet<ReferenceSequence>();
    for (ReferenceSequence ref : storage.getReferenceSequences())
        refs.add(ref.toStrandIndependent());

    for (ReferenceSequence ref : refs) {

        int ty = 0;
        for (; ty < types.length && !types[ty].matcher(ref.toPlusMinusString()).find(); ty++)
            ;

        if (ty < types.length)
            System.out.println(ref + " -> " + types[ty]);
        else
            System.out.println("Skipping " + ref);

        HashMap<ImmutableReferenceGenomicRegion<?>, int[]> detail = new HashMap<ImmutableReferenceGenomicRegion<?>, int[]>();

        int tyind = ty;
        Consumer<MutableReferenceGenomicRegion<? extends AlignedReadsData>> adder = new Consumer<MutableReferenceGenomicRegion<? extends AlignedReadsData>>() {

            @Override
            public void accept(MutableReferenceGenomicRegion<? extends AlignedReadsData> mrgr) {

                int f = GenomicRegionPosition.Start.position(ref, mrgr.getRegion(), 4);
                int b = GenomicRegionPosition.Stop.position(ref, mrgr.getRegion(), -4);

                int inpeak = 0;
                if (StreamSupport.stream(peaks.iterateIntersectingMutableReferenceGenomicRegions(
                        ref.toStrandIndependent(), f, f + 1), false).peek(peak -> {
                            int[] c = detail.computeIfAbsent(peak.toImmutable(), x -> new int[cond]);
                            for (int i = 0; i < c.length; i++)
                                c[i] += mrgr.getData().getTotalCountForConditionInt(i, ReadCountMode.All);
                        }).count() > 0)
                    inpeak++;

                if (StreamSupport.stream(peaks.iterateIntersectingMutableReferenceGenomicRegions(
                        ref.toStrandIndependent(), b, b + 1), false).peek(peak -> {
                            int[] c = detail.computeIfAbsent(peak.toImmutable(), x -> new int[cond]);
                            for (int i = 0; i < c.length; i++)
                                c[i] += mrgr.getData().getTotalCountForConditionInt(i, ReadCountMode.All);
                        }).count() > 0)
                    inpeak++;

                for (int i = 0; i < allCounts[tyind].length; i++) {
                    allCounts[tyind][i] += mrgr.getData().getTotalCountForConditionInt(i, ReadCountMode.All);
                    if (inpeak > 0)
                        peakCounts[tyind][i] += mrgr.getData().getTotalCountForConditionInt(i,
                                ReadCountMode.All) * inpeak;
                }
            }

        };
        if (ty < types.length) {
            storage.iterateMutableReferenceGenomicRegions(ref).forEachRemaining(adder);
            storage.iterateMutableReferenceGenomicRegions(ref.toPlusStrand()).forEachRemaining(adder);
            storage.iterateMutableReferenceGenomicRegions(ref.toMinusStrand()).forEachRemaining(adder);
        }

        LineOrientedFile d = new LineOrientedFile(detailout);
        if (d.exists())
            d.startAppending();
        else {
            d.startWriting();
            d.write("Peak\tType");
            for (int i = 0; i < cond; i++)
                d.writef("\t%d", i);
            d.writeLine();
        }

        for (ImmutableReferenceGenomicRegion<?> peak : detail.keySet()) {
            int[] count = detail.get(peak);
            d.writef("%s\t%s", peak.toLocationString(), typePattern[ty]);
            for (int c = 0; c < cond; c++)
                d.writef("\t%d", count[c]);
            d.writeLine();
        }
        d.finishWriting();

    }

    LineOrientedFile o = new LineOrientedFile(out);
    o.startWriting();
    o.write("Type\tCondition Index\tCount\n");
    for (int i = 0; i < types.length; i++) {
        for (int c = 0; c < allCounts[i].length; c++) {
            o.writef("%s\t%d\t%d\n", typePattern[i], c, allCounts[i][c]);
        }
    }
    o.finishWriting();

    o = new LineOrientedFile(peakout);
    o.startWriting();
    o.write("Type\tCondition Index\tCount\n");
    for (int i = 0; i < types.length; i++) {
        for (int c = 0; c < allCounts[i].length; c++) {
            o.writef("%s\t%d\t%d\n", typePattern[i], c, peakCounts[i][c]);
        }
    }
    o.finishWriting();
}

From source file:com.ikanow.aleph2.management_db.mongodb.services.IkanowV1SyncService_Buckets.java

/** Builds a V2 bucket out of a V1 source
 * @param src_json the V1 source JSON
 * @return the V2 DataBucketBean built from the V1 source
 * @throws JsonParseException
 * @throws JsonMappingException
 * @throws IOException
 * @throws ParseException
 */
protected static DataBucketBean getBucketFromV1Source(final JsonNode src_json)
        throws JsonParseException, JsonMappingException, IOException, ParseException {
    // (think we'll use key instead of _id):
    //final String _id = safeJsonGet(JsonUtils._ID, src_json).asText(); 
    final String key = safeJsonGet("key", src_json).asText();
    final String created = safeJsonGet("created", src_json).asText();
    final String modified = safeJsonGet("modified", src_json).asText();
    final String title = safeJsonGet("title", src_json).asText();
    final String description = safeJsonGet("description", src_json).asText();
    final String owner_id = safeJsonGet("ownerId", src_json).asText();

    final JsonNode tags = safeJsonGet("tags", src_json); // collection of strings
    //final JsonNode comm_ids = safeJsonGet("communityIds", src_json); // collection of strings
    final JsonNode px_pipeline = safeJsonGet("processingPipeline", src_json); // collection of JSON objects, first one should have data_bucket
    final JsonNode px_pipeline_first_el = ((ObjectNode) px_pipeline.get(0))
            .without(Arrays.asList("test_params"));
    final JsonNode data_bucket_tmp = safeJsonGet("data_bucket", px_pipeline_first_el);// (WARNING: mutable, see below)
    final JsonNode scripting = safeJsonGet("scripting", data_bucket_tmp);

    // HANDLE SUBSTITUTION
    final String sub_prefix = Optional.ofNullable(scripting.get("sub_prefix")).map(x -> x.asText())
            .orElse("$$SCRIPT_");
    final String sub_suffix = Optional.ofNullable(scripting.get("sub_suffix")).map(x -> x.asText())
            .orElse("$$");
    final List<UnaryOperator<String>> search_replace = StreamSupport
            .stream(Spliterators.spliteratorUnknownSize(scripting.fieldNames(), Spliterator.ORDERED), false)
            .filter(f -> !f.equals("sub_prefix") && !f.equals("sub_suffix")) // (remove non language fields)
            .map(lang -> Tuples._2T(scripting.get(lang), lang))
            // Get (separator regex, entire script, sub prefix)
            .map(scriptobj_lang -> Tuples._3T(safeJsonGet("separator_regex", scriptobj_lang._1()).asText(),
                    safeJsonGet("script", scriptobj_lang._1()).asText(), sub_prefix + scriptobj_lang._2()))
            // Split each "entire script" up into blocks of format (bloc, lang)
            .<Stream<Tuple2<String, String>>>map(regex_script_lang -> Stream.concat(
                    Stream.of(Tuples._2T(regex_script_lang._2(), regex_script_lang._3())),
                    regex_script_lang._1().isEmpty()
                            ? Stream.of(Tuples._2T(regex_script_lang._2(), regex_script_lang._3()))
                            : Arrays.stream(regex_script_lang._2().split(regex_script_lang._1()))
                                    .<Tuple2<String, String>>map(s -> Tuples._2T(s, regex_script_lang._3()))))
            // Associate a per-lang index with each  script block -> (replacement, string_sub)
            .<Tuple2<String, String>>flatMap(stream -> StreamUtils.zip(stream, Stream.iterate(0, i -> i + 1),
                    (script_lang, i) -> Tuples._2T(
                            script_lang._1().replace("\"", "\\\"").replace("\n", "\\n").replace("\r", "\\r"),
                            i == 0 ? script_lang._2() + sub_suffix // (entire thing)
                                    : script_lang._2() + "_" + i + sub_suffix))) //(broken down components)

            .<UnaryOperator<String>>map(t2 -> (String s) -> s.replace(t2._2(), t2._1()))
            //(need to escape "s and newlines)
            .collect(Collectors.toList());

    // Apply the list of transforms to the string
    ((ObjectNode) data_bucket_tmp).remove("scripting"); // (WARNING: mutable)
    final String data_bucket_str = search_replace.stream().reduce(data_bucket_tmp.toString(),
            (acc, s) -> s.apply(acc), (acc1, acc2) -> acc1);

    // Convert back to the bucket JSON
    final JsonNode data_bucket = ((ObjectNode) _mapper.readTree(data_bucket_str))
            .without(Arrays.asList("test_params"));

    final DataBucketBean bucket = BeanTemplateUtils.build(data_bucket, DataBucketBean.class)
            .with(DataBucketBean::_id, getBucketIdFromV1SourceKey(key))
            .with(DataBucketBean::created, parseJavaDate(created))
            .with(DataBucketBean::modified, parseJavaDate(modified)).with(DataBucketBean::display_name, title)
            .with(DataBucketBean::description, description).with(DataBucketBean::owner_id, owner_id)
            .with(DataBucketBean::tags, StreamSupport.stream(tags.spliterator(), false).map(jt -> jt.asText())
                    .collect(Collectors.toSet()))
            .done().get();

    return bucket;

}

From source file:com.ikanow.aleph2.shared.crud.mongodb.services.MongoDbCrudService.java

@Override
public CompletableFuture<Long> deleteObjectsBySpec(final QueryComponent<O> spec) {
    try {
        final Tuple2<DBObject, DBObject> query_and_meta = MongoDbUtils.convertToMongoQuery(spec);
        final Long limit = (Long) query_and_meta._2().get("$limit");
        final DBObject sort = (DBObject) query_and_meta._2().get("$sort");

        if ((null == limit) && (null == sort)) { // Simple case, just delete as many docs as possible
            final WriteResult<O, K> wr = _state.coll.remove(query_and_meta._1());
            return CompletableFuture.completedFuture((Long) (long) wr.getN());
        } else {

            final com.mongodb.DBCursor cursor = Optional
                    .of(_state.orig_coll.find(query_and_meta._1(), new BasicDBObject(_ID, 1)))
                    // (now we're processing on a cursor "c")
                    .map(c -> {
                        return (null != sort) ? c.sort(sort) : c;
                    }).map(c -> {
                        return (null != limit) ? c.limit(limit.intValue()) : c;
                    }).get();

            final List<Object> ids = StreamSupport.stream(cursor.spliterator(), false).map(o -> o.get(_ID))
                    .collect(Collectors.toList());

            return deleteObjectsBySpec(emptyQuery(_state.bean_clazz).withAny(_ID, ids));
        }
    } catch (Exception e) {
        return FutureUtils.<Long>returnError(e);
    }
}