Example usage for com.google.common.collect Lists partition

Introduction

This page collects usage examples for com.google.common.collect.Lists#partition, drawn from open source projects.

Prototype

public static <T> List<List<T>> partition(List<T> list, int size) 

Document

Returns consecutive List#subList(int, int) sublists of a list, each of the same size (the final sublist may be smaller).
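
Before the project examples, a minimal self-contained sketch (not drawn from any project below) shows the contract: the chunks are subList views of the input, and the final chunk holds the remainder.

import com.google.common.collect.Lists;

import java.util.Arrays;
import java.util.List;

public class PartitionDemo {
    public static void main(String[] args) {
        List<Integer> numbers = Arrays.asList(1, 2, 3, 4, 5, 6, 7);
        // Split into chunks of 3; the last chunk keeps whatever is left over.
        List<List<Integer>> chunks = Lists.partition(numbers, 3);
        System.out.println(chunks); // [[1, 2, 3], [4, 5, 6], [7]]
        // The chunks are views, not copies: they reflect later changes to
        // the backing list and must not outlive it.
    }
}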

Usage

From source file:com.netflix.metacat.metadata.mysql.MysqlUserMetadataService.java

@Override
public void deleteMetadata(final String userId, final List<HasMetadata> holders) {
    try {
        final List<List<HasMetadata>> subLists = Lists.partition(holders,
                config.getUserMetadataMaxInClauseItems());
        for (List<HasMetadata> hasMetadatas : subLists) {
            final List<QualifiedName> names = hasMetadatas.stream()
                    .filter(m -> m instanceof HasDefinitionMetadata)
                    .map(m -> ((HasDefinitionMetadata) m).getDefinitionName()).collect(Collectors.toList());
            if (!names.isEmpty()) {
                _deleteDefinitionMetadata(names);
            }
            if (config.canSoftDeleteDataMetadata()) {
                final List<String> uris = hasMetadatas.stream()
                        .filter(m -> m instanceof HasDataMetadata && ((HasDataMetadata) m).isDataExternal())
                        .map(m -> ((HasDataMetadata) m).getDataUri()).collect(Collectors.toList());
                if (!uris.isEmpty()) {
                    _softDeleteDataMetadata(userId, uris);
                }
            }
        }
    } catch (Exception e) {
        log.error("Failed deleting metadatas", e);
        throw new UserMetadataServiceException("Failed deleting metadatas", e);
    }
}

From source file:com.vmware.photon.controller.cloudstore.xenon.task.DatastoreCleanerService.java

/**
 * Schedule datastore delete tasks to run in batches.
 *
 * @param current the current task state
 * @param datastoreLinks the datastore document links to clean
 */
private void scheduleDatastoreDeleteTasks(final State current, List<String> datastoreLinks) {
    if (datastoreLinks == null || datastoreLinks.isEmpty()) {
        TaskUtils.sendSelfPatch(this, buildPatch(TaskState.TaskStage.FINISHED, null));
        return;
    }

    // Compute the batch trigger interval based on # of datastores when the interval is not set
    if (current.intervalBetweenBatchTriggersInSeconds == null) {
        // Guard against a zero divisor when there are fewer datastores than one batch
        int batches = Math.max(1, datastoreLinks.size() / current.batchSize);
        current.intervalBetweenBatchTriggersInSeconds = TimeUnit.MILLISECONDS
                .toSeconds(DatastoreCleanerTriggerBuilder.DEFAULT_TRIGGER_INTERVAL_MILLIS) / batches;
    }

    int batchCount = 0;
    for (List<String> batch : Lists.partition(datastoreLinks, current.batchSize)) {
        getHost().schedule(() -> {
            triggerDatastoreDeleteTasksForBatch(batch);
        }, batchCount * current.intervalBetweenBatchTriggersInSeconds, TimeUnit.SECONDS);
        batchCount++;
    }

    TaskUtils.sendSelfPatch(this, buildPatch(TaskState.TaskStage.FINISHED, null));
}
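
The staggering idiom generalizes: partition the work, then schedule each chunk with an increasing delay so the batches do not all fire at once. A minimal standalone sketch, with an illustrative batch size and interval rather than values from the service above:

import com.google.common.collect.Lists;

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class StaggeredBatches {
    public static void main(String[] args) {
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
        List<String> links = Arrays.asList("ds1", "ds2", "ds3", "ds4", "ds5");
        int batchSize = 2;        // illustrative, not from the service above
        long intervalSeconds = 5; // illustrative

        int batchCount = 0;
        for (List<String> batch : Lists.partition(links, batchSize)) {
            // Each successive batch fires one interval later than the previous one.
            scheduler.schedule(() -> System.out.println("processing " + batch),
                    batchCount * intervalSeconds, TimeUnit.SECONDS);
            batchCount++;
        }
        // Delayed tasks still run after shutdown(); the JVM exits once they finish.
        scheduler.shutdown();
    }
}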

From source file:com.cloudant.sync.replication.BasicPushStrategy.java

private int processOneChangesBatch(Changes changes) {

    int changesProcessed = 0;

    // Process the changes themselves in batches, where we post a batch
    // at a time to the remote database's _bulk_docs endpoint.
    List<List<DocumentRevision>> batches = Lists.partition(changes.getResults(), config.bulkInsertSize);
    for (List<DocumentRevision> batch : batches) {

        if (this.cancel) {
            break;
        }

        Map<String, DocumentRevisionTree> allTrees = this.sourceDb.getDocumentTrees(batch);
        Map<String, Set<String>> docOpenRevs = this.openRevisions(allTrees);
        Map<String, Set<String>> docMissingRevs = this.targetDb.revsDiff(docOpenRevs);

        ItemsToPush itemsToPush = missingRevisionsToJsonDocs(allTrees, docMissingRevs);
        List<String> serialisedMissingRevs = itemsToPush.serializedDocs;
        List<MultipartAttachmentWriter> multiparts = itemsToPush.multiparts;

        if (!this.cancel) {
            this.targetDb.putMultiparts(multiparts);
            this.targetDb.bulkSerializedDocs(serialisedMissingRevs);
            changesProcessed += docMissingRevs.size();
        }
    }

    if (!this.cancel) {
        this.putCheckpoint(String.valueOf(changes.getLastSequence()));
    }

    return changesProcessed;
}

From source file:edu.utah.further.core.query.domain.SearchCriteria.java

/**
 * Construct a collection expression criterion (e.g. an IN expression).
 *
 * @param searchType
 *            type of search (equals/less than/...)
 * @param propertyName
 *            Search field name (a collection-type property)
 * @param values
 *            array of property values
 */
public static <E> SearchCriterion collection(final SearchType searchType, final String propertyName,
        final Object... values) {
    switch (searchType) {
    case IN: {
        if (values.length > MAX_IN) {
            // Create a DISJUNCTION of several INs to overcome restriction on
            // number of IN parameters
            final SearchCriterion additionalCriterion = junction(SearchType.DISJUNCTION);
            final List<List<Object>> lists = Lists.partition(Arrays.asList(values), MAX_IN);
            for (final List<Object> list : lists) {
                final Object[] results = list.toArray();
                additionalCriterion.addCriterion(collection(SearchType.IN, propertyName, results));
            }
            return additionalCriterion;
        }
        final List<Object> params = CollectionUtil.newList();
        params.add(propertyName);
        addAll(params, values);
        return new SearchCriterionBuilder(searchType, values.length + 1).<Object>setParameters(params).build();

    }

    default: {
        throw new BusinessRuleException(searchType + " is not a collection expression search type");
    }
    }
}
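
The same chunking trick applies outside this criterion API: databases such as Oracle historically cap IN lists at 1000 entries, so a long value list gets partitioned and the chunks OR-ed together. A hypothetical sketch of that pattern (buildInClause and the cap value are illustrative, not part of the code above):

import com.google.common.collect.Lists;

import java.util.List;
import java.util.stream.Collectors;

public class ChunkedInClause {
    private static final int MAX_IN = 1000; // e.g. Oracle's IN-list cap

    // Builds "id IN (?,?,...) OR id IN (?,?,...)" for an arbitrarily long value list.
    static String buildInClause(String column, List<Long> values) {
        return Lists.partition(values, MAX_IN).stream()
                .map(chunk -> column + " IN ("
                        + chunk.stream().map(v -> "?").collect(Collectors.joining(","))
                        + ")")
                .collect(Collectors.joining(" OR "));
    }
}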

From source file:com.google.cloud.pubsub.spi.v1.StreamingSubscriberConnection.java

@VisibleForTesting
static List<StreamingPullRequest> partitionAckOperations(List<String> acksToSend,
        List<PendingModifyAckDeadline> ackDeadlineExtensions, int size) {
    int numExtensions = 0;
    for (PendingModifyAckDeadline modify : ackDeadlineExtensions) {
        numExtensions += modify.ackIds.size();
    }
    int numChanges = Math.max(numExtensions, acksToSend.size());
    int numRequests = numChanges / size + (numChanges % size == 0 ? 0 : 1);

    List<StreamingPullRequest.Builder> requests = new ArrayList<>(numRequests);
    for (int i = 0; i < numRequests; i++) {
        requests.add(StreamingPullRequest.newBuilder());
    }

    int reqCount = 0;
    for (List<String> acksChunk : Lists.partition(acksToSend, size)) {
        requests.get(reqCount).addAllAckIds(acksChunk);
        reqCount++;
    }

    reqCount = 0;
    int ackCount = 0;
    for (PendingModifyAckDeadline modify : ackDeadlineExtensions) {
        for (String ackId : modify.ackIds) {
            requests.get(reqCount).addModifyDeadlineSeconds(modify.deadlineExtensionSeconds)
                    .addModifyDeadlineAckIds(ackId);
            ackCount++;
            if (ackCount == size) {
                reqCount++;
                ackCount = 0;
            }
        }
    }

    List<StreamingPullRequest> ret = new ArrayList<>(requests.size());
    for (StreamingPullRequest.Builder builder : requests) {
        ret.add(builder.build());
    }
    return ret;
}

From source file:org.eclipse.sirius.common.ui.tools.api.navigator.GroupingContentProvider.java

private Object[] groupChildrenByContainingFeature(Object parent, Object[] children) {
    LinkedListMultimap<Object, Object> childrenContainingMapping = buildChildrenContainerMapping(children);
    List<Object> result = new ArrayList<Object>();
    for (Object structuralFeature : childrenContainingMapping.keySet()) {
        int currentOffset = 0;
        List<Object> indexedChildren = childrenContainingMapping.get(structuralFeature);
        if (indexedChildren.size() > getTriggerSize()) {
            List<List<Object>> partition = Lists.partition(indexedChildren, config.groupSize);
            if (partition.size() > 0) {
                if (partition.size() > 1) {
                    for (List<Object> partItem : partition) {
                        GroupingItem currentGroup;
                        if (structuralFeature instanceof EStructuralFeature) {
                            currentGroup = new GroupingItem(currentOffset, parent,
                                    new ArrayList<Object>(partItem),
                                    " " + ((EStructuralFeature) structuralFeature).getName()); //$NON-NLS-1$
                        } else {
                            currentGroup = new GroupingItem(currentOffset, parent,
                                    new ArrayList<Object>(partItem));
                        }
                        result.add(currentGroup);
                        currentOffset = currentOffset + partItem.size();
                    }
                } else {
                    for (List<Object> partItem : partition) {
                        result.addAll(partItem);
                    }
                }
            }
        } else {
            result.addAll(indexedChildren);
        }
    }
    return result.toArray();
}

From source file:com.addthis.hydra.job.store.DataStoreUtil.java

private static void importJobDataParallel(List<String> jobIds, SpawnDataStore sourceDataStore,
        SpawnDataStore targetDataStore, boolean checkAllWrites) throws Exception {
    ExecutorService executorService = new ThreadPoolExecutor(numCutoverThreads, numCutoverThreads, 60,
            TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    AtomicBoolean gotFailures = new AtomicBoolean(false);
    int partitionSize = Math.max((int) Math.ceil((double) jobIds.size() / numCutoverThreads), 1);
    for (List<String> partition : Lists.partition(jobIds, partitionSize)) {
        executorService.submit(
                new CutoverWorker(sourceDataStore, targetDataStore, gotFailures, partition, checkAllWrites));
    }
    executorService.shutdown();
    executorService.awaitTermination(cutoverTimeoutMinutes, TimeUnit.MINUTES);
    if (gotFailures.get()) {
        throw new RuntimeException("A cutover worker has failed; see log for details");
    }
}
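
Note the inversion in this example: rather than fixing the chunk size, it derives the chunk size from a desired number of chunks, so at most numCutoverThreads partitions are created. A standalone sketch of that arithmetic (names and values are illustrative):

import com.google.common.collect.Lists;

import java.util.Arrays;
import java.util.List;

public class FixedChunkCount {
    public static void main(String[] args) {
        List<String> jobIds = Arrays.asList("j1", "j2", "j3", "j4", "j5", "j6", "j7");
        int threads = 3; // illustrative worker count

        // ceil(size / threads) items per chunk yields at most `threads` chunks
        int partitionSize = Math.max((int) Math.ceil((double) jobIds.size() / threads), 1);
        System.out.println(Lists.partition(jobIds, partitionSize));
        // prints [[j1, j2, j3], [j4, j5, j6], [j7]]
    }
}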

From source file:com.netflix.spinnaker.cats.dynomite.cache.DynomiteCache.java

@Override
protected void evictItems(String type, List<String> identifiers, Collection<String> allRelationships) {
    AtomicInteger delOperations = new AtomicInteger();
    AtomicInteger sremOperations = new AtomicInteger();

    Failsafe.with(REDIS_RETRY_POLICY).onRetriesExceeded(failure -> {
        throw new ExcessiveDynoFailureRetries(format("Evicting items for %s:%s", prefix, type), failure);
    }).run(() -> redisClientDelegate.withPipeline(pipeline -> {
        DynoJedisPipeline p = (DynoJedisPipeline) pipeline;

        for (List<String> idPartition : Lists.partition(identifiers, options.getMaxDelSize())) {
            String[] ids = idPartition.toArray(new String[idPartition.size()]);
            pipeline.srem(allOfTypeId(type), ids);
            sremOperations.incrementAndGet();
            pipeline.srem(allOfTypeReindex(type), ids);
            sremOperations.incrementAndGet();
        }

        for (String id : identifiers) {
            pipeline.del(itemId(type, id));
            delOperations.incrementAndGet();
            pipeline.del(itemHashesId(type, id));
            delOperations.incrementAndGet();
        }

        if (!identifiers.isEmpty()) {
            p.sync();
        }
    }));

    cacheMetrics.evict(prefix, type, identifiers.size(), delOperations.get(), sremOperations.get());
}

From source file:com.thinkbiganalytics.nifi.provenance.ProvenanceEventCollector.java

/**
 * Send the batched events over to JMS.
 *
 * @param elements The events to send to JMS
 */
private void sendBatchFeedEvents(List<ProvenanceEventRecordDTO> elements) {
    if (elements != null && !elements.isEmpty()) {
        Lists.partition(elements, getJmsEventGroupSize()).forEach(eventsSubList -> {
            ProvenanceEventRecordDTOHolder eventRecordDTOHolder = new ProvenanceEventRecordDTOHolder();
            eventRecordDTOHolder.setEvents(Lists.newArrayList(eventsSubList));
            provenanceEventActiveMqWriter.writeBatchEvents(eventRecordDTOHolder);
        });
    }
}

From source file:org.asoem.greyfish.core.environment.Generic2DEnvironment.java

private void processAgentsMovement() throws InterruptedException {
    final List<List<A>> partition = Lists.partition(ImmutableList.copyOf(getActiveAgents()),
            parallelizationThreshold);
    final Collection<Callable<Void>> callables = Lists.transform(partition,
            new Function<List<A>, Callable<Void>>() {
                @Override
                public Callable<Void> apply(final List<A> input) {
                    return new Callable<Void>() {
                        @Override
                        public Void call() {
                            for (A a : input) {
                                space.moveObject(a, a.getMotion());
                            }
                            return null;
                        }
                    };
                }
            });

    executorService.invokeAll(callables);
}
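
The pre-Java 8 Function plumbing above can be written more compactly with streams. A minimal self-contained sketch of the same fan-out pattern (process and the chunk size are illustrative):

import com.google.common.collect.Lists;

import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class ParallelPartitionDemo {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        List<Integer> work = IntStream.rangeClosed(1, 100).boxed().collect(Collectors.toList());

        // One Callable per chunk; invokeAll blocks until every chunk completes.
        List<Callable<Void>> tasks = Lists.partition(work, 25).stream()
                .map(chunk -> (Callable<Void>) () -> {
                    chunk.forEach(ParallelPartitionDemo::process);
                    return null;
                })
                .collect(Collectors.toList());

        pool.invokeAll(tasks);
        pool.shutdown();
    }

    private static void process(int item) {
        // Placeholder for real per-item work
    }
}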