Example usage for com.google.common.collect Lists partition

Introduction

On this page you can find usage examples for com.google.common.collect.Lists.partition.

Prototype

public static <T> List<List<T>> partition(List<T> list, int size) 

Document

Returns consecutive sublists of a list, each of the same size (the final list may be smaller).
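
For a quick feel of the behavior described above, here is a minimal, self-contained sketch (class, variable names, and list contents are just for illustration; only the chunking behavior comes from the documented contract):

import com.google.common.collect.Lists;

import java.util.List;

public class PartitionDemo {
    public static void main(String[] args) {
        List<Integer> numbers = Lists.newArrayList(1, 2, 3, 4, 5, 6, 7);

        // Split into consecutive chunks of at most 3 elements each.
        List<List<Integer>> chunks = Lists.partition(numbers, 3);

        // Prints [[1, 2, 3], [4, 5, 6], [7]] -- the final chunk is smaller.
        System.out.println(chunks);

        // The outer list is an unmodifiable view: later changes to "numbers"
        // are reflected in "chunks".
    }
}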

Usage

From source file:com.google.cloud.pubsub.SubscriberConnection.java

private void sendOutstandingAckOperations(List<PendingModifyAckDeadline> ackDeadlineExtensions) {
    List<PendingModifyAckDeadline> modifyAckDeadlinesToSend = Lists.newArrayList(ackDeadlineExtensions);
    List<String> acksToSend = new ArrayList<>(pendingAcks.size());
    synchronized (pendingAcks) {
        if (!pendingAcks.isEmpty()) {
            try {
                acksToSend = new ArrayList<>(pendingAcks);
                logger.debug("Sending {} acks", acksToSend.size());
            } finally {
                pendingAcks.clear();
            }
        }
    }
    synchronized (pendingNacks) {
        if (!pendingNacks.isEmpty()) {
            try {
                // A nack is expressed as a modify-ack-deadline request with a deadline of 0 seconds.
                for (String ackId : pendingNacks) {
                    modifyAckDeadlinesToSend.add(new PendingModifyAckDeadline(ackId, 0));
                }
                logger.debug("Sending {} nacks", pendingNacks.size());
            } finally {
                pendingNacks.clear();
            }
        }
    }

    // Send the modify ack deadlines in batches so as not to exceed the max request size.
    List<List<String>> ackChunks = Lists.partition(acksToSend, MAX_PER_REQUEST_CHANGES);
    List<List<PendingModifyAckDeadline>> modifyAckDeadlineChunks = Lists.partition(modifyAckDeadlinesToSend,
            MAX_PER_REQUEST_CHANGES);
    Iterator<List<String>> ackChunksIt = ackChunks.iterator();
    Iterator<List<PendingModifyAckDeadline>> modifyAckDeadlineChunksIt = modifyAckDeadlineChunks.iterator();

    while (ackChunksIt.hasNext() || modifyAckDeadlineChunksIt.hasNext()) {
        com.google.pubsub.v1.StreamingPullRequest.Builder requestBuilder = StreamingPullRequest.newBuilder();
        if (modifyAckDeadlineChunksIt.hasNext()) {
            List<PendingModifyAckDeadline> modAckChunk = modifyAckDeadlineChunksIt.next();
            for (PendingModifyAckDeadline modifyAckDeadline : modAckChunk) {
                requestBuilder.addModifyDeadlineSeconds(modifyAckDeadline.deadlineExtensionSeconds);
                requestBuilder.addModifyDeadlineAckIds(modifyAckDeadline.ackId);
            }
        }
        if (ackChunksIt.hasNext()) {
            List<String> ackChunk = ackChunksIt.next();
            requestBuilder.addAllAckIds(ackChunk);
        }
        requestObserver.onNext(requestBuilder.build());
    }
}

From source file:org.apache.beam.sdk.util.GcsUtil.java

List<BatchRequest> makeRemoveBatches(Collection<String> filenames) throws IOException {
    List<BatchRequest> batches = new LinkedList<>();
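    // Partition the file names so that each batch request contains at most MAX_REQUESTS_PER_BATCH deletes.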
    for (List<String> filesToDelete : Lists.partition(Lists.newArrayList(filenames), MAX_REQUESTS_PER_BATCH)) {
        BatchRequest batch = createBatchRequest();
        for (String file : filesToDelete) {
            enqueueDelete(GcsPath.fromUri(file), batch);
        }
        batches.add(batch);
    }
    return batches;
}

From source file:com.cloudant.sync.datastore.DatastoreImpl.java

private List<DocumentRevision> getDocumentsWithInternalIdsInQueue(SQLDatabase db, final List<Long> docIds)
        throws AttachmentException, DocumentNotFoundException, DocumentException, DatastoreException {

    if (docIds.size() == 0) {
        return Collections.emptyList();
    }

    final String GET_DOCUMENTS_BY_INTERNAL_IDS = "SELECT " + FULL_DOCUMENT_COLS + " FROM revs, docs "
            + "WHERE revs.doc_id IN ( %s ) AND current = 1 AND docs.doc_id = revs.doc_id";

    // Split into batches because SQLite has a limit on the number of
    // placeholders we can use in a single query. 999 is the default value,
    // but it can be lower. It's hard to find this out from Java, so we use
    // a much lower value.
    List<DocumentRevision> result = new ArrayList<DocumentRevision>(docIds.size());

    List<List<Long>> batches = Lists.partition(docIds, SQLITE_QUERY_PLACEHOLDERS_LIMIT);
    for (List<Long> batch : batches) {
        String sql = String.format(GET_DOCUMENTS_BY_INTERNAL_IDS, DatabaseUtils.makePlaceholders(batch.size()));
        String[] args = new String[batch.size()];
        for (int i = 0; i < batch.size(); i++) {
            args[i] = Long.toString(batch.get(i));
        }
        result.addAll(getRevisionsFromRawQuery(db, sql, args));
    }

    // Contract is to sort by sequence number, which we need to do
    // outside the sqlDb as we're batching requests.
    Collections.sort(result, new Comparator<DocumentRevision>() {
        @Override
        public int compare(DocumentRevision documentRevision, DocumentRevision documentRevision2) {
            // Use Long.compare to avoid the overflow that casting (a - b) to int could cause.
            return Long.compare(documentRevision.getSequence(), documentRevision2.getSequence());
        }
    });

    return result;
}

From source file:com.salesforce.ide.ui.views.runtest.RunTestsView.java

/**
 * Abort all ApexTestQueueItem with the same test run ID.
 */
@VisibleForTesting
public boolean abortTestRun(String testRunId) {
    if (Utils.isEmpty(forceProject) || Utils.isEmpty(testRunId)) {
        return false;
    }

    try {
        initializeConnection(forceProject);

        // Get all ApexTestQueueItem in the test run
        QueryResult qr = toolingStubExt
                .query(String.format(RunTestsConstants.QUERY_APEX_TEST_QUEUE_ITEM, testRunId));
        if (Utils.isEmpty(qr) || qr.getSize() == 0)
            return false;

        // Update status to Aborted
        List<ApexTestQueueItem> abortedList = Lists.newArrayList();
        for (SObject sObj : qr.getRecords()) {
            ApexTestQueueItem atqi = (ApexTestQueueItem) sObj;
            // If the queue item is not done yet, abort it
            if (!atqi.getStatus().equals(AsyncApexJobStatus.Completed)
                    && !atqi.getStatus().equals(AsyncApexJobStatus.Failed)) {
                atqi.setStatus(AsyncApexJobStatus.Aborted);
                abortedList.add(atqi);
            }
        }

        // Update in chunks because there is a limit to how many we can update in one call
        if (!abortedList.isEmpty()) {
            for (List<ApexTestQueueItem> subList : Lists.partition(abortedList, 200)) {
                ApexTestQueueItem[] abortedArray = subList.toArray(new ApexTestQueueItem[subList.size()]);
                toolingStubExt.update(abortedArray);
            }
            return true;
        }
    } catch (ForceConnectionException | ForceRemoteException e) {
        logger.error("Failed to abort test run", e);
    }

    return false;
}

From source file:org.metaservice.manager.Manager.java

public void removeDataFromGenerator(String generator) throws ManagerException {
    try {
        TupleQuery repoSelect = this.repositoryConnection.prepareTupleQuery(QueryLanguage.SPARQL,
                "SELECT DISTINCT ?metadata { ?metadata a <" + METASERVICE.OBSERVATION + ">;  <"
                        + METASERVICE.GENERATOR + "> ?generator }");
        repoSelect.setBinding("generator", valueFactory.createLiteral(generator));
        TupleQueryResult queryResult = repoSelect.evaluate();
        ArrayList<URI> uris = new ArrayList<>();
        while (queryResult.hasNext()) {
            BindingSet set = queryResult.next();
            URI oldMetadata = (URI) set.getBinding("metadata").getValue();
            if (oldMetadata != null) //safety check -> null deletes whole repository
                uris.add(oldMetadata);
        }
        if (uris.size() > 0) //safety check -> empty deletes whole repository
        {
            LOGGER.info("Clearing {} contexts from generator {}", uris.size(), generator);
            List<List<URI>> partitions = Lists.partition(uris, 200);
            for (List<URI> partition : partitions) {
                repositoryConnection.clear(partition.toArray(new URI[partition.size()]));
            }
        }
    } catch (RepositoryException | MalformedQueryException | QueryEvaluationException e) {
        throw new ManagerException(e);
    }
}

From source file:com.netflix.metacat.metadata.mysql.MysqlUserMetadataService.java

@Override
public void saveMetadata(final String user, final List<? extends HasMetadata> metadatas, final boolean merge) {
    try {
        @SuppressWarnings("unchecked")
        final List<List<HasMetadata>> subLists = Lists.partition((List<HasMetadata>) metadatas,
                config.getUserMetadataMaxInClauseItems());
        for (List<HasMetadata> hasMetadatas : subLists) {
            final List<String> uris = Lists.newArrayList();
            final List<QualifiedName> names = Lists.newArrayList();
            // Get the names and uris
            final List<HasDefinitionMetadata> definitionMetadatas = Lists.newArrayList();
            final List<HasDataMetadata> dataMetadatas = Lists.newArrayList();
            hasMetadatas.forEach(hasMetadata -> {
                if (hasMetadata instanceof HasDefinitionMetadata) {
                    final HasDefinitionMetadata oDef = (HasDefinitionMetadata) hasMetadata;
                    names.add(oDef.getDefinitionName());
                    if (oDef.getDefinitionMetadata() != null) {
                        definitionMetadatas.add(oDef);
                    }
                }
                if (hasMetadata instanceof HasDataMetadata) {
                    final HasDataMetadata oData = (HasDataMetadata) hasMetadata;
                    if (oData.isDataExternal() && oData.getDataMetadata() != null
                            && oData.getDataMetadata().size() > 0) {
                        uris.add(oData.getDataUri());
                        dataMetadatas.add(oData);
                    }
                }
            });
            if (!definitionMetadatas.isEmpty() || !dataMetadatas.isEmpty()) {
                // Get the existing metadata based on the names and uris
                final Map<String, ObjectNode> definitionMap = getDefinitionMetadataMap(names);
                final Map<String, ObjectNode> dataMap = getDataMetadataMap(uris);
                // Curate the list of existing and new metadatas
                final List<Object[]> insertDefinitionMetadatas = Lists.newArrayList();
                final List<Object[]> updateDefinitionMetadatas = Lists.newArrayList();
                final List<Object[]> insertPartitionDefinitionMetadatas = Lists.newArrayList();
                final List<Object[]> updatePartitionDefinitionMetadatas = Lists.newArrayList();
                final List<Object[]> insertDataMetadatas = Lists.newArrayList();
                final List<Object[]> updateDataMetadatas = Lists.newArrayList();
                definitionMetadatas.forEach(oDef -> {
                    final QualifiedName qualifiedName = oDef.getDefinitionName();
                    if (qualifiedName != null && oDef.getDefinitionMetadata() != null
                            && oDef.getDefinitionMetadata().size() != 0) {
                        final String name = qualifiedName.toString();
                        final ObjectNode oNode = definitionMap.get(name);
                        if (oNode == null) {
                            final Object[] o = new Object[] {
                                    metacatJson.toJsonString(oDef.getDefinitionMetadata()), user, user, name, };
                            if (qualifiedName.isPartitionDefinition()) {
                                insertPartitionDefinitionMetadatas.add(o);
                            } else {
                                insertDefinitionMetadatas.add(o);
                            }
                        } else {
                            metacatJson.mergeIntoPrimary(oNode, oDef.getDefinitionMetadata());
                            final Object[] o = new Object[] { metacatJson.toJsonString(oNode), user, name };
                            if (qualifiedName.isPartitionDefinition()) {
                                updatePartitionDefinitionMetadatas.add(o);
                            } else {
                                updateDefinitionMetadatas.add(o);
                            }
                        }
                    }
                });
                dataMetadatas.forEach(oData -> {
                    final String uri = oData.getDataUri();
                    final ObjectNode oNode = dataMap.get(uri);
                    if (oData.getDataMetadata() != null && oData.getDataMetadata().size() != 0) {
                        if (oNode == null) {
                            insertDataMetadatas.add(new Object[] {
                                    metacatJson.toJsonString(oData.getDataMetadata()), user, user, uri, });
                        } else {
                            metacatJson.mergeIntoPrimary(oNode, oData.getDataMetadata());
                            updateDataMetadatas
                                    .add(new Object[] { metacatJson.toJsonString(oNode), user, uri });
                        }
                    }
                });
                if (!insertDefinitionMetadatas.isEmpty()) {
                    jdbcTemplate.batchUpdate(SQL.INSERT_DEFINITION_METADATA, insertDefinitionMetadatas,
                            new int[] { Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR });
                }
                if (!updateDefinitionMetadatas.isEmpty()) {
                    jdbcTemplate.batchUpdate(SQL.UPDATE_DEFINITION_METADATA, updateDefinitionMetadatas,
                            new int[] { Types.VARCHAR, Types.VARCHAR, Types.VARCHAR });
                }
                if (!insertPartitionDefinitionMetadatas.isEmpty()) {
                    jdbcTemplate.batchUpdate(SQL.INSERT_PARTITION_DEFINITION_METADATA,
                            insertPartitionDefinitionMetadatas,
                            new int[] { Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR });
                }
                if (!updatePartitionDefinitionMetadatas.isEmpty()) {
                    jdbcTemplate.batchUpdate(SQL.UPDATE_PARTITION_DEFINITION_METADATA,
                            updatePartitionDefinitionMetadatas,
                            new int[] { Types.VARCHAR, Types.VARCHAR, Types.VARCHAR });
                }
                if (!insertDataMetadatas.isEmpty()) {
                    jdbcTemplate.batchUpdate(SQL.INSERT_DATA_METADATA, insertDataMetadatas,
                            new int[] { Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR });
                }
                if (!updateDataMetadatas.isEmpty()) {
                    jdbcTemplate.batchUpdate(SQL.UPDATE_DATA_METADATA, updateDataMetadatas,
                            new int[] { Types.VARCHAR, Types.VARCHAR, Types.VARCHAR });
                }
            }
        }
    } catch (Exception e) {
        log.error("Failed to save metadata", e);
        throw new UserMetadataServiceException("Failed to save metadata", e);
    }
}

From source file:kr.debop4j.core.parallelism.Parallels.java

/**
 * Partitions the given elements and runs the supplied function on each partition in parallel.
 *
 * @param elements the elements to process
 * @param function the function to apply to each partition
 * @return the combined results from all partitions
 */
public static <T, V> List<V> runPartitions(final Iterable<T> elements,
        final Function1<List<T>, List<V>> function) {
    shouldNotBeNull(elements, "elements");
    shouldNotBeNull(function, "function");
    if (isDebugEnabled)
        log.debug("Running the function over partitions in parallel... workerCount=[{}]", getWorkerCount());

    ExecutorService executor = Executors.newFixedThreadPool(getWorkerCount());

    try {
        List<T> elemList = Lists.newArrayList(elements);
        int partitionSize = getPartitionSize(elemList.size(), getWorkerCount());
        List<List<T>> partitions = Lists.partition(elemList, partitionSize);
        final Map<Integer, List<V>> localResults = Maps.newLinkedHashMap();

        List<Callable<List<V>>> tasks = Lists.newLinkedList(); // use a LinkedList to avoid false sharing

        for (final List<T> partition : partitions) {
            Callable<List<V>> task = new Callable<List<V>>() {
                @Override
                public List<V> call() throws Exception {
                    return function.execute(partition);
                }
            };
            tasks.add(task);
        }
        // Run all tasks and wait for them to complete.
        List<Future<List<V>>> futures = executor.invokeAll(tasks);

        List<V> results = Lists.newArrayListWithCapacity(elemList.size());
        for (Future<List<V>> future : futures)
            results.addAll(future.get());

        if (isDebugEnabled)
            log.debug("Finished running partitions in parallel. workerCount=[{}]", getWorkerCount());

        return results;

    } catch (Exception e) {
        log.error("???      ?.", e);
        throw new RuntimeException(e);
    } finally {
        executor.shutdown();
    }
}

From source file:no.kantega.publishing.common.ao.ContentAOJdbcImpl.java

@Override
public List<Content> getContentList(ContentQuery contentQuery, boolean getAttributes, boolean getTopics) {
    final Map<Integer, Content> contentMap = new HashMap<>();
    final List<Content> contentList = new ArrayList<>();

    doForEachInContentList(contentQuery, content -> {
        contentList.add(content);
        contentMap.put(content.getVersionId(), content);
    });

    int listSize = contentList.size();
    if (listSize > 0 && getAttributes) {
        // Fetch attributes

        String attrquery = "select * from contentattributes where ContentVersionId in (:contentVersions) order by ContentVersionId";
        RowCallbackHandler callback = rs -> {
            int cvid = rs.getInt("ContentVersionId");
            Content current = contentMap.get(cvid);
            if (current != null) {
                ContentAOHelper.addAttributeFromRS(current, rs);
            }
        };
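        // Query in chunks of 1000 ids so the IN clause stays within typical database limits.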
        List<List<Integer>> partition = Lists.partition(new ArrayList<>(contentMap.keySet()), 1000);
        for (List<Integer> contentVersionIds : partition) {
            getNamedParameterJdbcTemplate().query(attrquery,
                    Collections.<String, Object>singletonMap("contentVersions", contentVersionIds), callback);
        }

        for (Content content : contentMap.values()) {
            content.indexAttributes();
        }
    }

    if (listSize > 0 && getTopics) {
        // Fetch topics
        for (Content content : contentList) {
            List<Topic> topics = topicDao.getTopicsByContentId(content.getId());
            content.setTopics(topics);
        }
    }

    SortOrder sort = contentQuery.getSortOrder();
    if (sort != null) {
        // Sort the list
        String sort1 = sort.getSort1();
        String sort2 = sort.getSort2();

        List<ContentIdentifier> cids = contentQuery.getContentList();
        if (cids != null && ContentProperty.PRIORITY.equalsIgnoreCase(sort1)) {
            Comparator<Content> comparator = new AssociationIdListComparator(cids);
            Collections.sort(contentList, comparator);
        } else {
            // Can sort by up to two criteria
            if (isNotBlank(sort2)) {
                Comparator<Content> comparator = new ContentComparator(this, sort2, sort.sortDescending());
                Collections.sort(contentList, comparator);
            }

            if (!contentQuery.useSqlSort() && isNotBlank(sort1)) {
                Comparator<Content> comparator = new ContentComparator(this, sort1, sort.sortDescending());
                Collections.sort(contentList, comparator);
            }
        }
    }

    return contentList;
}

From source file:org.zanata.service.impl.TranslationServiceImpl.java

@Override
public List<String> translateAllInDoc(final String projectSlug, final String iterationSlug, final String docId,
        final LocaleId locale, final TranslationsResource translations, final Set<String> extensions,
        final MergeType mergeType, final boolean assignCreditToUploader, AsyncTaskHandle handle,
        final TranslationSourceType translationSourceType) {
    final HProjectIteration hProjectIteration = projectIterationDAO.getBySlug(projectSlug, iterationSlug);
    if (hProjectIteration == null) {
        throw new ZanataServiceException(
                "Version \'" + iterationSlug + "\' for project \'" + projectSlug + "\' ");
    }
    if (mergeType == MergeType.IMPORT) {
        identity.checkPermission("import-translation", hProjectIteration);
    }
    ResourceUtils.validateExtensions(extensions);
    log.debug("pass evaluate");
    final HDocument document = documentDAO.getByDocIdAndIteration(hProjectIteration, docId);
    if (document == null || document.isObsolete()) {
        throw new ZanataServiceException("A document was not found.", 404);
    }
    log.debug("start put translations entity:{}", translations);
    boolean changed = false;
    final HLocale hLocale = localeServiceImpl.validateLocaleByProjectIteration(locale, projectSlug,
            iterationSlug);
    final Optional<AsyncTaskHandle> handleOp = Optional.fromNullable(handle);
    if (handleOp.isPresent()) {
        handleOp.get().setMaxProgress(translations.getTextFlowTargets().size());
    }
    try {
        changed |= runInTransaction(() ->
        // handle extensions
        resourceUtils.transferFromTranslationsResourceExtensions(translations.getExtensions(true), document,
                extensions, hLocale, mergeType));
    } catch (Exception e) {
        log.error("exception in transferFromTranslationsResourceExtensions: {}", e.getMessage());
        throw new ZanataServiceException("Error during translation.", 500, e);
    }
    // NB: removedTargets only applies for MergeType.IMPORT
    final Collection<Long> removedTextFlowTargetIds = new HashSet<>();
    final List<String> warnings = new ArrayList<String>();
    if (mergeType == MergeType.IMPORT) {
        for (HTextFlow textFlow : document.getTextFlows()) {
            HTextFlowTarget hTarget = textFlow.getTargets().get(hLocale.getId());
            if (hTarget != null) {
                removedTextFlowTargetIds.add(hTarget.getId());
            }
        }
    }
    // Break the target into batches
    List<List<TextFlowTarget>> batches = Lists.partition(translations.getTextFlowTargets(), BATCH_SIZE);
    for (final List<TextFlowTarget> batch : batches) {
        try {
            changed |= runInTransaction(() -> saveBatch(extensions, warnings, hLocale, document, mergeType,
                    removedTextFlowTargetIds, handleOp, hProjectIteration.getId(), batch,
                    assignCreditToUploader, translationSourceType));
        } catch (Exception e) {
            log.error("exception saving translation batch: {}", e.getMessage());
            throw new ZanataServiceException("Error during translation.", 500, e);
        }
    }
    if (changed || !removedTextFlowTargetIds.isEmpty()) {
        try {
            runInTransaction(() -> {
                for (Long targetId : removedTextFlowTargetIds) {
                    // need to refresh from persistence
                    HTextFlowTarget target = textFlowTargetDAO.findById(targetId, true);
                    target.clear();
                }
                textFlowTargetDAO.flush();
                documentDAO.flush();
                return null;
            });
            Long actorId = authenticatedAccount.getPerson().getId();
            documentUploadedEvent
                    .fire(new DocumentUploadedEvent(actorId, document.getId(), false, hLocale.getLocaleId()));
        } catch (Exception e) {
            log.error("exception in removeTargets: {}", e.getMessage());
            throw new ZanataServiceException("Error during translation.", 500, e);
        }
    }
    return warnings;
}

From source file:org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentStore.java

@Override
public <T extends Document> void remove(Collection<T> collection, List<String> keys) {
    log("remove", keys);
    DBCollection dbCollection = getDBCollection(collection);
    long start = PERFLOG.start();
    try {
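        // Delete in batches so each query's key list stays at or below IN_CLAUSE_BATCH_SIZE.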
        for (List<String> keyBatch : Lists.partition(keys, IN_CLAUSE_BATCH_SIZE)) {
            DBObject query = QueryBuilder.start(Document.ID).in(keyBatch).get();
            try {
                dbCollection.remove(query);
            } catch (Exception e) {
                throw DocumentStoreException.convert(e, "Remove failed for " + keyBatch);
            } finally {
                if (collection == Collection.NODES) {
                    for (String key : keyBatch) {
                        invalidateCache(collection, key);
                    }
                }
            }
        }
    } finally {
        PERFLOG.end(start, 1, "remove keys={}", keys);
    }
}