Example usage for com.google.common.collect Lists partition

Introduction

This page collects example usages of com.google.common.collect.Lists#partition.

Prototype

public static <T> List<List<T>> partition(List<T> list, int size) 

Document

Returns consecutive sublists of a list (as produced by List#subList(int, int)), each of the same size; the final sublist may be smaller.
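
For example, partitioning a five-element list into chunks of two yields [[1, 2], [3, 4], [5]]. A minimal standalone demonstration (plain Java plus Guava):

import com.google.common.collect.Lists;

import java.util.Arrays;
import java.util.List;

public class PartitionDemo {
    public static void main(String[] args) {
        List<Integer> numbers = Arrays.asList(1, 2, 3, 4, 5);
        // Consecutive chunks of size 2; the final chunk holds the remainder.
        List<List<Integer>> chunks = Lists.partition(numbers, 2);
        System.out.println(chunks); // [[1, 2], [3, 4], [5]]
    }
}

The inner lists are sublist views of the original list, and the outer list is unmodifiable.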

Usage

From source file:com.netflix.metacat.main.services.search.ElasticSearchUtilImpl.java

/**
 * Bulk save of the entities.
 * @param type index type
 * @param docs metacat documents
 */
public void save(final String type, final List<ElasticSearchDoc> docs) {
    if (docs != null && !docs.isEmpty()) {
        final List<List<ElasticSearchDoc>> partitionedDocs = Lists.partition(docs, 100);
        partitionedDocs.forEach(subDocs -> bulkSaveToIndex(type, subDocs, esIndex));
        partitionedDocs.forEach(subDocs -> ensureMigrationBySave(type, subDocs));
    }
}
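
Partitioning into batches of 100 bounds the size of each Elasticsearch bulk request, so a single save call never sends an arbitrarily large payload.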

From source file:org.eclipse.milo.opcua.sdk.client.DataTypeDictionaryReader.java

private CompletableFuture<List<String>> readDataTypeDescriptionValues(List<NodeId> nodeIds) {
    CompletableFuture<UInteger> maxNodesPerRead = readNode(
            new ReadValueId(Identifiers.Server_ServerCapabilities_OperationLimits_MaxNodesPerRead,
                    AttributeId.Value.uid(), null, QualifiedName.NULL_VALUE))
            .thenApply(dv -> (UInteger) dv.getValue().getValue());

    CompletableFuture<Integer> getPartitionSize = maxNodesPerRead
            .thenApply(m -> Math.max(1, Ints.saturatedCast(m.longValue()))).exceptionally(ex -> PARTITION_SIZE);

    return getPartitionSize.thenCompose(partitionSize -> {
        List<List<NodeId>> partitions = Lists.partition(nodeIds, partitionSize);

        CompletableFuture<List<List<DataValue>>> sequence = FutureUtils
                .sequence(partitions.stream().map(list -> {
                    List<ReadValueId> readValueIds = list.stream().map(nodeId -> new ReadValueId(nodeId,
                            AttributeId.Value.uid(), null, QualifiedName.NULL_VALUE))
                            .collect(Collectors.toList());

                    return readNodes(readValueIds);
                }));

        return sequence.thenApply(values -> values.stream().flatMap(List::stream)
                .map(v -> (String) v.getValue().getValue()).collect(Collectors.toList()));
    });
}
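
Here the partition size is negotiated at runtime: the server's MaxNodesPerRead limit is read first, Ints.saturatedCast clamps the (possibly larger-than-int) unsigned value into int range, Math.max(1, ...) guards against a reported limit of 0 (Lists.partition rejects a non-positive size), and exceptionally falls back to a default when the limit cannot be read. A small illustration of the clamping step:

import com.google.common.primitives.Ints;

public class SaturatedCastDemo {
    public static void main(String[] args) {
        long reportedLimit = 4_294_967_295L; // UInt32 max, larger than any int
        int partitionSize = Math.max(1, Ints.saturatedCast(reportedLimit));
        System.out.println(partitionSize); // 2147483647 -- saturates instead of overflowing
    }
}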

From source file:com.google.gcloud.spi.DefaultStorageRpc.java

@Override
public BatchResponse batch(BatchRequest request) {
    List<List<Tuple<StorageObject, Map<Option, ?>>>> partitionedToDelete = Lists.partition(request.toDelete,
            MAX_BATCH_DELETES);
    Iterator<List<Tuple<StorageObject, Map<Option, ?>>>> iterator = partitionedToDelete.iterator();
    BatchRequest chunkRequest = new BatchRequest(
            iterator.hasNext() ? iterator.next() : ImmutableList.<Tuple<StorageObject, Map<Option, ?>>>of(),
            request.toUpdate, request.toGet);
    BatchResponse response = batchChunk(chunkRequest);
    Map<StorageObject, Tuple<Boolean, StorageException>> deletes = Maps
            .newHashMapWithExpectedSize(request.toDelete.size());
    deletes.putAll(response.deletes);
    while (iterator.hasNext()) {
        chunkRequest = new BatchRequest(iterator.next(), null, null);
        BatchResponse deleteBatchResponse = batchChunk(chunkRequest);
        deletes.putAll(deleteBatchResponse.deletes);
    }
    return new BatchResponse(deletes, response.updates, response.gets);
}

From source file:com.netflix.metacat.connector.hive.sql.DirectSqlSavePartition.java

/**
 * Delete the partitions with the given <code>partitionNames</code>.
 *
 * @param tableQName     table name
 * @param partitionNames list of partition names
 */
public void delete(final QualifiedName tableQName, final List<String> partitionNames) {
    final long start = registry.clock().wallTime();
    try {
        final List<List<String>> subPartitionNameList = Lists.partition(partitionNames, batchSize);
        subPartitionNameList.forEach(subPartitionNames -> _delete(tableQName, subPartitionNames));
    } finally {
        this.fastServiceMetric.recordTimer(HiveMetrics.TagDropHivePartitions.getMetricName(),
                registry.clock().wallTime() - start);
    }
}

From source file:ru.runa.wfe.security.logic.AuthorizationLogic.java

/**
 *
 * @param objectNames Non-empty. Contains null values for singleton keys.
 * @param permissions Null if called from removeAllPermissions().
 */
private void removePermissionsImpl(User user, String executorName,
        Map<SecuredObjectType, Set<String>> objectNames, Set<Permission> permissions) {
    Executor executor = executorDao.getExecutor(executorName); // [QSL] Only id is needed, or maybe even join would be enough.
    permissionDao.checkAllowed(user, Permission.LIST, executor);

    QPermissionMapping pm = QPermissionMapping.permissionMapping;

    for (Map.Entry<SecuredObjectType, Set<String>> kv : objectNames.entrySet()) {
        SecuredObjectType type = kv.getKey();
        Set<String> names = kv.getValue();

        if (type.isSingleton()) {
            names = new HashSet<>(1);
            names.add(null);
        }

        for (List<String> namesPart : Lists.partition(new ArrayList<>(names),
                SystemProperties.getDatabaseNameParametersCount())) {
            List<Long> objectIds;

            if (type.isSingleton()) {
                objectIds = Collections.singletonList(0L);
            } else {
                objectIds = securedObjectFactory.getIdsByNames(type, new HashSet<>(namesPart));
            }
            permissionDao.checkAllowedForAll(user, Permission.UPDATE_PERMISSIONS, type, objectIds);

            HibernateDeleteClause q = queryFactory.delete(pm)
                    .where(pm.executor.eq(executor).and(pm.objectType.eq(type)).and(pm.objectId.in(objectIds)));
            if (permissions != null) {
                q.where(pm.permission.in(permissions));
            }
            q.execute();
        }
    }
}
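
The Lists.partition call above keeps each IN clause under the database's bind-parameter limit. A minimal sketch of the same guard against plain JDBC (the table, column, and the literal limit of 1000 are hypothetical stand-ins for SystemProperties.getDatabaseNameParametersCount()):

import com.google.common.collect.Lists;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Collections;
import java.util.List;

public class InClauseChunks {
    // Hypothetical table/column names; illustrates chunked IN-clause deletes only.
    static void deleteByNames(Connection conn, List<String> names) throws SQLException {
        for (List<String> chunk : Lists.partition(names, 1000)) {
            String placeholders = String.join(",", Collections.nCopies(chunk.size(), "?"));
            String sql = "DELETE FROM permission_mapping WHERE object_name IN (" + placeholders + ")";
            try (PreparedStatement ps = conn.prepareStatement(sql)) {
                for (int i = 0; i < chunk.size(); i++) {
                    ps.setString(i + 1, chunk.get(i));
                }
                ps.executeUpdate();
            }
        }
    }
}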

From source file:kr.debop4j.core.parallelism.Parallels.java

/**
 * Runs the given function over every element in parallel and collects the results.
 *
 * @param elements elements to process
 * @param function function to apply to each element
 * @return list of results, in the original element order
 */
public static <T, V> List<V> runEach(final Iterable<T> elements, final Function1<T, V> function) {
    shouldNotBeNull(elements, "elements");
    shouldNotBeNull(function, "function");
    if (isDebugEnabled)
        log.debug("Running function over elements in parallel... workerCount=[{}]", getWorkerCount());

    ExecutorService executor = Executors.newFixedThreadPool(getWorkerCount());

    try {
        List<T> elemList = Lists.newArrayList(elements);
        int partitionSize = getPartitionSize(elemList.size(), getWorkerCount());
        List<List<T>> partitions = Lists.partition(elemList, partitionSize);
        final Map<Integer, List<V>> localResults = Maps.newLinkedHashMap();

        List<Callable<List<V>>> tasks = Lists.newLinkedList(); // separate per-partition result lists to avoid false sharing

        for (int p = 0; p < partitions.size(); p++) {
            final List<T> partition = partitions.get(p);
            final List<V> localResult = Lists.newArrayListWithCapacity(partition.size());
            localResults.put(p, localResult);

            Callable<List<V>> task = new Callable<List<V>>() {
                @Override
                public List<V> call() throws Exception {
                    for (final T element : partition)
                        localResult.add(function.execute(element));
                    return localResult;
                }
            };
            tasks.add(task);
        }

        executor.invokeAll(tasks);

        List<V> results = Lists.newArrayListWithCapacity(elemList.size());

        for (int i = 0; i < partitions.size(); i++) {
            results.addAll(localResults.get(i));
        }

        if (isDebugEnabled)
            log.debug("Finished running function over elements in parallel. workerCount=[{}]", getWorkerCount());

        return results;

    } catch (Exception e) {
        log.error("Failed to run the function over the elements in parallel.", e);
        throw new RuntimeException(e);
    } finally {
        executor.shutdown();
    }
}

From source file:ddf.catalog.source.solr.SolrMetacardClientImpl.java

@Override
public List<Metacard> getIds(Set<String> ids) throws UnsupportedQueryException {
    List<Metacard> metacards = new ArrayList<>(ids.size());
    List<List<String>> partitions = Lists.partition(new ArrayList<>(ids), GET_BY_ID_LIMIT);
    for (List<String> partition : partitions) {
        try {
            SolrDocumentList page = client.getById(partition);
            metacards.addAll(createMetacards(page));
        } catch (SolrServerException | SolrException | IOException e) {
            throw new UnsupportedQueryException("Could not complete solr query.", e);
        }
    }
    return metacards;
}

From source file:edu.utah.further.i2b2.query.criteria.service.impl.I2b2SearchCriterionBuilder.java

/**
 * Creates a CONJUNCTION phrase of the namespaceId, type, and code
 *
 * @param namespace
 * @param alias
 * @param namespaceProperty
 * @param typeProperty
 * @param type
 * @param codeProperty
 * @return a single IN-based criterion, or a disjunction of them when the domain exceeds Constants.MAX_IN
 */
private SearchCriterion createPhrase(final Namespace namespace, final String alias,
        final String namespaceProperty, final String typeProperty, final String type,
        final String codeProperty) {
    final List<List<String>> lists = Lists.partition(domain, Constants.MAX_IN);
    SearchCriterion subCriterion = null;
    // If the lists size is > 1 then we have to do an OR of a bunch of INs
    final SearchCriterion parent = lists.size() > 1 ? junction(SearchType.DISJUNCTION) : null;
    for (final List<String> list : lists) {
        subCriterion = getPhraseCriterion(namespace);
        subCriterion.addCriterion(
                simpleExpression(Relation.EQ, alias + DOT + namespaceProperty, getNamespaceId(namespace)));
        subCriterion.addCriterion(simpleExpression(Relation.EQ, alias + DOT + typeProperty, type));
        subCriterion.addCriterion(constructInQuery(list, alias + DOT + codeProperty));
        if (dateConstraint != null
                && (dateConstraint.getDateFrom() != null || dateConstraint.getDateTo() != null)) {
            subCriterion.addCriterion(getDateConstraint());
        }

        if (parent != null) {
            parent.addCriterion(subCriterion);
        }
    }

    // No parent, so return the single sub-criterion.
    if (parent == null) {
        return subCriterion;
    }

    // We have a parent of all subcriterion.
    return parent;
}

From source file:org.jclouds.blobstore.integration.internal.StubAsyncBlobStore.java

public static <T extends Comparable<?>> SortedSet<T> firstSliceOfSize(Iterable<T> elements, int size) {
    List<List<T>> slices = Lists.partition(Lists.newArrayList(elements), size);
    return Sets.newTreeSet(slices.get(0));
}
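
Note that Lists.partition returns an empty outer list for an empty input, so slices.get(0) throws IndexOutOfBoundsException when elements is empty; callers must supply at least one element.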

From source file:com.netflix.metacat.metadata.mysql.MysqlUserMetadataService.java

@SuppressWarnings("checkstyle:methodname")
private Map<String, ObjectNode> _getNonPartitionDefinitionMetadataMap(final List<QualifiedName> names) {
    final List<List<QualifiedName>> parts = Lists.partition(names, config.getUserMetadataMaxInClauseItems());
    return parts.stream().map(keys -> _getMetadataMap(keys, SQL.GET_DEFINITION_METADATAS))
            .flatMap(it -> it.entrySet().stream()).collect(Collectors
                    .toMap(it -> QualifiedName.fromString(it.getKey()).toString(), Map.Entry::getValue));
}