Example usage for com.google.common.collect.Lists.partition

Introduction

On this page you can find example usages of com.google.common.collect.Lists.partition.

Prototype

public static <T> List<List<T>> partition(List<T> list, int size) 

Document

Returns consecutive sublists of a list (each a view created with List#subList(int, int)), each of the same size (the final list may be smaller).
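
For example, a minimal sketch of a direct call (the list contents here are illustrative):

import com.google.common.collect.Lists;
import java.util.List;

public class PartitionExample {
    public static void main(String[] args) {
        List<Integer> numbers = Lists.newArrayList(1, 2, 3, 4, 5, 6, 7);
        // Split into consecutive chunks of at most 3 elements each.
        List<List<Integer>> chunks = Lists.partition(numbers, 3);
        System.out.println(chunks); // [[1, 2, 3], [4, 5, 6], [7]]
        // Note: the returned sublists are views backed by the original list, not copies.
    }
}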

Usage

From source file:com.google.cloud.pubsub.v1.StreamingSubscriberConnection.java

@InternalApi
static List<StreamingPullRequest> partitionAckOperations(List<String> acksToSend,
        List<PendingModifyAckDeadline> ackDeadlineExtensions, int size) {
    int numExtensions = 0;
    for (PendingModifyAckDeadline modify : ackDeadlineExtensions) {
        numExtensions += modify.ackIds.size();
    }
    int numChanges = Math.max(numExtensions, acksToSend.size());
    int numRequests = numChanges / size + (numChanges % size == 0 ? 0 : 1);

    List<StreamingPullRequest.Builder> requests = new ArrayList<>(numRequests);
    for (int i = 0; i < numRequests; i++) {
        requests.add(StreamingPullRequest.newBuilder());
    }

    int reqCount = 0;
    for (List<String> acksChunk : Lists.partition(acksToSend, size)) {
        requests.get(reqCount).addAllAckIds(acksChunk);
        reqCount++;
    }

    reqCount = 0;
    int ackCount = 0;
    for (PendingModifyAckDeadline modify : ackDeadlineExtensions) {
        for (String ackId : modify.ackIds) {
            requests.get(reqCount).addModifyDeadlineSeconds(modify.deadlineExtensionSeconds)
                    .addModifyDeadlineAckIds(ackId);
            ackCount++;
            if (ackCount == size) {
                reqCount++;
                ackCount = 0;
            }
        }
    }

    List<StreamingPullRequest> ret = new ArrayList<>(requests.size());
    for (StreamingPullRequest.Builder builder : requests) {
        ret.add(builder.build());
    }
    return ret;
}

From source file:no.kantega.publishing.common.ao.AttachmentAOImpl.java

@Override
public List<Attachment> getAttachments(List<Integer> allAttachmentIds) {
    List<Attachment> attachments = new LinkedList<>();

    if (!allAttachmentIds.isEmpty()) {
        List<List<Integer>> partition = Lists.partition(allAttachmentIds, 500);
        for (List<Integer> ids : partition) {
            String query = "select " + DB_COLS + " from attachments where Id in " + getParamsString(ids);

            try (Connection c = dbConnectionFactory.getConnection();
                    PreparedStatement ps = c.prepareStatement(query)) {

                for (int i = 1; i < ids.size() + 1; i++) {
                    ps.setInt(i, ids.get(i - 1));
                }
                try (ResultSet rs = ps.executeQuery()) {
                    while (rs.next()) {
                        attachments.add(getAttachmentFromRS(rs));
                    }

                }
            } catch (SQLException e) {
                throw new SystemException("SQL Feil ved databasekall", e);
            }
        }
    }
    return attachments;
}

From source file:ru.runa.wfe.security.dao.PermissionDao.java

/**
 * Returns subset of `idsOrNull` for which `actor` has `permission`. If `idsOrNull` is null (e.g. when called from isAllowedForAny()),
 * non-empty set (containing arbitrary value) means positive check result.
 *
 * @param checkPrivileged If false, only permission_mapping table is checked, but not privileged_mapping.
 */
public Set<Long> filterAllowedIds(Executor executor, Permission permission, SecuredObjectType type,
        List<Long> idsOrNull, boolean checkPrivileged) {
    ApplicablePermissions.check(type, permission);
    boolean haveIds = idsOrNull != null;

    if (permission == Permission.NONE) {
        // Optimization; see comments at NONE definition.
        return Collections.emptySet();
    }

    final Set<Executor> executorWithGroups = getExecutorWithAllHisGroups(executor);
    if (checkPrivileged && isPrivilegedExecutor(type, executorWithGroups)) {
        return haveIds ? new HashSet<>(idsOrNull) : nonEmptySet;
    }

    PermissionSubstitutions.ForCheck subst = PermissionSubstitutions.getForCheck(type, permission);
    QPermissionMapping pm = QPermissionMapping.permissionMapping;

    // Same type for all objects, thus same listType. I believe it would be faster to perform separate query here.
    // ATTENTION!!! Also, HQL query with two conditions (on both type and listType) always returns empty rowset. :(
    //              (Both here with QueryDSL and in HibernateCompilerHQLBuilder.addSecureCheck() with raw HQL.)
    if (!subst.listPermissions.isEmpty() && queryFactory.select(pm.id).from(pm)
            .where(pm.executor.in(executorWithGroups).and(pm.objectType.eq(type.getListType()))
                    .and(pm.objectId.eq(0L)).and(pm.permission.in(subst.listPermissions)))
            .fetchFirst() != null) {
        return haveIds ? new HashSet<>(idsOrNull) : nonEmptySet;
    }

    Set<Long> result = new HashSet<>();
    for (List<Long> idsPart : haveIds
            ? Lists.partition(idsOrNull, SystemProperties.getDatabaseParametersCount())
            : nonEmptyListList) {
        JPQLQuery<Long> q = queryFactory.select(pm.id).from(pm).where(pm.executor.in(executorWithGroups)
                .and(pm.objectType.eq(type)).and(pm.permission.in(subst.selfPermissions)));
        if (haveIds) {
            result.addAll(q.where(pm.objectId.in(idsPart)).fetch());
        } else if (q.fetchFirst() != null) {
            return nonEmptySet;
        }
    }
    return result;
}

From source file:org.sonar.core.qualityprofile.db.ActiveRuleDao.java

public List<ActiveRuleParamDto> selectParamsByActiveRuleIds(Collection<Integer> activeRuleIds,
        SqlSession session) {
    if (activeRuleIds.isEmpty()) {
        return Collections.emptyList();
    }
    List<ActiveRuleParamDto> dtosList = newArrayList();
    List<List<Integer>> idsPartitionList = Lists.partition(newArrayList(activeRuleIds), 1000);
    for (List<Integer> idsPartition : idsPartitionList) {
        List<ActiveRuleParamDto> dtos = session.selectList(
                "org.sonar.core.qualityprofile.db.ActiveRuleMapper.selectParamsByActiveRuleIds",
                newArrayList(idsPartition));
        dtosList.addAll(dtos);
    }
    return dtosList;
}

From source file:com.netflix.metacat.main.services.search.ElasticSearchRefresh.java

@SuppressWarnings("checkstyle:methodname")
private ListenableFuture<Void> _processPartitions(final List<QualifiedName> qNames) {
    final List<QualifiedName> excludeQualifiedNames = config.getElasticSearchRefreshExcludeQualifiedNames();
    final List<String> tables = elasticSearchUtil.getTableIdsByCatalogs(ElasticSearchDoc.Type.table.name(),
            qNames, excludeQualifiedNames);
    final List<ListenableFuture<ListenableFuture<Void>>> futures = tables.stream()
            .map(s -> service.submit(() -> {
                final QualifiedName tableName = QualifiedName.fromString(s, false);
                final List<ListenableFuture<Void>> indexFutures = Lists.newArrayList();
                int offset = 0;
                int count;
                final Sort sort;
                if ("s3".equals(tableName.getCatalogName()) || "aegisthus".equals(tableName.getCatalogName())) {
                    sort = new Sort("id", SortOrder.ASC);
                } else {
                    sort = new Sort("part_id", SortOrder.ASC);
                }
                final Pageable pageable = new Pageable(10000, offset);
                do {
                    final List<PartitionDto> partitionDtos = partitionService.list(tableName, sort, pageable,
                            true, true, new GetPartitionsRequestDto(null, null, true, true));
                    count = partitionDtos.size();
                    if (!partitionDtos.isEmpty()) {
                        final List<List<PartitionDto>> partitionedPartitionDtos = Lists.partition(partitionDtos,
                                1000);
                        partitionedPartitionDtos.forEach(subPartitionsDtos -> indexFutures
                                .add(indexPartitionDtos(tableName, subPartitionsDtos)));
                        offset = offset + count;
                        pageable.setOffset(offset);
                    }
                } while (count == 10000);
                return Futures.transform(Futures.successfulAsList(indexFutures),
                        Functions.constant((Void) null));
            })).collect(Collectors.toList());
    final ListenableFuture<Void> processPartitionsFuture = Futures
            .transformAsync(Futures.successfulAsList(futures), input -> {
                final List<ListenableFuture<Void>> inputFuturesWithoutNulls = input.stream().filter(NOT_NULL)
                        .collect(Collectors.toList());
                return Futures.transform(Futures.successfulAsList(inputFuturesWithoutNulls),
                        Functions.constant(null));
            });
    return Futures.transformAsync(processPartitionsFuture, input -> {
        elasticSearchUtil.refresh();
        final List<ListenableFuture<Void>> cleanUpFutures = tables.stream()
                .map(s -> service.submit(
                        () -> partitionsCleanUp(QualifiedName.fromString(s, false), excludeQualifiedNames)))
                .collect(Collectors.toList());
        return Futures.transform(Futures.successfulAsList(cleanUpFutures), Functions.constant(null));
    });
}

From source file:com.addthis.hydra.job.JobConfigManager.java

/**
 * Find all job ids in the SpawnDataStore, split them into chunks, and then load the jobs from each chunk in parallel
 *
 * @return A map of all jobs found in the SpawnDataStore
 */
private Map<String, IJob> loadJobs() {
    final Map<String, IJob> jobs = new HashMap<>();
    List<String> jobNodes = spawnDataStore.getChildrenNames(SPAWN_JOB_CONFIG_PATH);
    if (jobNodes != null) {
        logger.info("Using {} threads to pull data on {} jobs", loadThreads, jobNodes.size());
        // Use multiple threads to query the database, and gather the results together
        ExecutorService executorService = new ThreadPoolExecutor(loadThreads, loadThreads, 1000L,
                TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>(),
                new ThreadFactoryBuilder().setDaemon(true).build());
        for (List<String> jobIdChunk : Lists.partition(jobNodes, jobChunkSize)) {
            executorService.submit(new MapChunkLoader(this, jobs, jobIdChunk));
        }
        logger.info("Waiting for job loading threads to finish...");
        MoreExecutors.shutdownAndAwaitTermination(executorService, 600, TimeUnit.SECONDS);
        logger.info("Job loading complete");
    }
    return jobs;
}

From source file:com.salesforce.ide.core.remote.MetadataStubExt.java

public FileProperties[] listMetadata(ListMetadataQuery[] allQueriesArray, IProgressMonitor monitor)
        throws ForceRemoteException {
    if (monitor == null) {
        monitor = new NullProgressMonitor();
    }
    if (metadataConnection == null) {
        throw new IllegalArgumentException("Metadata stub cannot be null");
    }

    if (logger.isDebugEnabled()) {
        logger.debug("Querying metadata for FileProperties");
    }

    if (Utils.isEmpty(allQueriesArray)) {
        return new FileProperties[] {};
    }

    // Remove unsupported components from queries.
    Set<String> supportedComponents = getSupportedMetadataComponents();
    ArrayList<ListMetadataQuery> allQueries = Lists.newArrayList(allQueriesArray);
    Iterator<ListMetadataQuery> it = allQueries.iterator();
    while (it.hasNext()) {
        ListMetadataQuery query = it.next();
        if (!supportedComponents.contains(query.getType())) {
            it.remove();
        }
    }

    //break request into 3 queries per api call (api constraint)
    final int QUERIES_PER_CALL = 3;
    List<FileProperties> filePropertiesList = new ArrayList<FileProperties>();
    try {
        for (List<ListMetadataQuery> queries : Lists.partition(allQueries, QUERIES_PER_CALL)) {
            try {
                filePropertiesList.addAll(getFileProperties(queries, monitor));
            } catch (ConnectionException e) {
                //Invalid type or timeout
                if (ForceExceptionUtils.isReadTimeoutException(e) || e instanceof SoapFaultException) {
                    filePropertiesList.addAll(tryOneByOne(queries, monitor));
                } else {
                    ForceExceptionUtils.throwTranslatedException(e, connection);
                }
            }
        }
    } catch (MonitorCanceledException e) {
        // nothing to do, just return
    }

    return filePropertiesList.toArray(new FileProperties[filePropertiesList.size()]);
}

From source file:com.threewks.thundr.gae.objectify.repository.AbstractRepository.java

/**
 * Reindexes all the entities matching the given search operation. The given {@link ReindexOperation}, if present, will be applied to each batch of entities.
 *
 * @param search
 * @param batchSize
 * @param reindexOperation
 * @return the overall count of re-indexed entities.
 */
@Override
public int reindex(Search<E, K> search, int batchSize, ReindexOperation<E> reindexOperation) {
    int count = 0;
    List<E> results = search.run().getResults();
    List<List<E>> batches = Lists.partition(results, batchSize);
    for (List<E> batch : batches) {
        batch = reindexOperation == null ? batch : reindexOperation.apply(batch);
        if (reindexOperation != null) {
            // we only re-save the batch when a re-index op is supplied, otherwise the data can't have changed.
            ofy().save().entities(batch).now();
        }
        if (shouldSearch()) {
            index(batch).complete();
        }
        count += batch.size();
        Logger.info("Reindexed %d entities of type %s, %d of %d", batch.size(), entityType.getSimpleName(),
                count, results.size());
    }
    return count;
}

From source file:com.netflix.metacat.main.services.search.ElasticSearchUtilImpl.java

/**
 * Batch updates the documents with partial updates with the given fields.
 * @param type index type
 * @param ids list of entity ids
 * @param metacatRequestContext context containing the user name
 * @param node json that represents the document source
 */
public void updates(final String type, final List<String> ids,
        final MetacatRequestContext metacatRequestContext, final ObjectNode node) {

    if (ids != null && !ids.isEmpty()) {
        final List<List<String>> partitionedIds = Lists.partition(ids, 100);
        partitionedIds.forEach(subIds -> updateDocs(type, subIds, metacatRequestContext, node));
        partitionedIds.forEach(subIds -> ensureMigrationByCopy(type, subIds));
    }
}

From source file:com.cloudant.sync.replication.PushStrategy.java

private int processOneChangesBatch(Changes changes) throws AttachmentException, DatastoreException {

    int changesProcessed = 0;

    // Process the changes themselves in batches, where we post a batch
    // at a time to the remote database's _bulk_docs endpoint.
    List<? extends List<DocumentRevision>> batches = Lists.partition(changes.getResults(), this.bulkInsertSize);
    for (List<DocumentRevision> batch : batches) {

        if (this.state.cancel) {
            break;
        }

        Map<String, DocumentRevisionTree> allTrees = this.sourceDb.getDocumentTrees(batch);
        Map<String, Set<String>> docOpenRevs = this.openRevisions(allTrees);
        Map<String, CouchClient.MissingRevisions> docMissingRevs = this.targetDb.revsDiff(docOpenRevs);

        ItemsToPush itemsToPush = missingRevisionsToJsonDocs(allTrees, docMissingRevs);
        List<String> serialisedMissingRevs = itemsToPush.serializedDocs;
        List<MultipartAttachmentWriter> multiparts = itemsToPush.multiparts;

        if (!this.state.cancel) {
            this.targetDb.putMultiparts(multiparts);
            this.targetDb.bulkCreateSerializedDocs(serialisedMissingRevs);
            changesProcessed += docMissingRevs.size();
        }
    }

    return changesProcessed;
}