Example usage for com.google.common.collect Lists partition

List of usage examples for com.google.common.collect Lists partition

Introduction

On this page you can find example usage for com.google.common.collect Lists.partition.

Prototype

public static <T> List<List<T>> partition(List<T> list, int size) 

Document

Returns consecutive List#subList(int, int) sublists of a list, each of the same size (the final list may be smaller).
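
A minimal, self-contained sketch of the basic behavior (assuming only Guava on the classpath; the class and variable names here are illustrative):

import com.google.common.collect.Lists;

import java.util.Arrays;
import java.util.List;

public class PartitionDemo {
    public static void main(String[] args) {
        List<Integer> numbers = Arrays.asList(1, 2, 3, 4, 5, 6, 7);
        // Split into consecutive sublists of size 3; only the final sublist may be smaller.
        List<List<Integer>> batches = Lists.partition(numbers, 3);
        System.out.println(batches); // prints [[1, 2, 3], [4, 5, 6], [7]]
        // The sublists are views of the source list, so partitioning is cheap
        // even for large lists, which is what the real-world examples below rely on.
    }
}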

Usage

From source file:com.twosigma.cook.jobclient.JobClient.java

/**
 * Query jobs for a given list of job {@link UUID}s. If the list size is larger than the
 * {@code _batchRequestSize}, it will partition the list into smaller lists, query each batch
 * separately, and return all query results together.
 *
 * @param uuids specifies a list of job {@link UUID}s expected to query.
 * @return an {@link ImmutableMap} from job {@link UUID} to {@link Job}.
 * @throws JobClientException
 */
public Map<UUID, Job> queryJobs(Collection<UUID> uuids) throws JobClientException {
    final List<NameValuePair> allParams = new ArrayList<NameValuePair>(uuids.size());
    for (UUID uuid : uuids) {
        allParams.add(new BasicNameValuePair("job", uuid.toString()));
    }
    final ImmutableMap.Builder<UUID, Job> UUIDToJob = ImmutableMap.builder();
    // Partition a large query into small queries.
    for (final List<NameValuePair> params : Lists.partition(allParams, _batchRequestSize)) {
        HttpResponse httpResponse;
        HttpRequestBase httpRequest;
        try {
            URIBuilder uriBuilder = new URIBuilder(_jobURI);
            uriBuilder.addParameters(params);
            httpRequest = new HttpGet(uriBuilder.build());
            httpResponse = _httpClient.execute(httpRequest);
        } catch (IOException | URISyntaxException e) {
            throw releaseAndCreateException(null, null,
                    "Can not submit GET request " + params + " via uri " + _jobURI, e);
        }
        // Check status code.
        final StatusLine statusLine = httpResponse.getStatusLine();
        // Based on the decision graph
        // http://clojure-liberator.github.io/liberator/tutorial/decision-graph.html
        // The status code for the proper GET response is 200.
        if (statusLine.getStatusCode() != HttpStatus.SC_OK) {
            throw releaseAndCreateException(httpRequest, httpResponse,
                    "The response of GET request " + params + " via uri " + _jobURI + ": "
                            + statusLine.getReasonPhrase() + ", " + statusLine.getStatusCode(),
                    null);
        }
        // Parse the response.
        String response = null;
        try {
            // parse the response to string.
            final HttpEntity entity = httpResponse.getEntity();
            response = EntityUtils.toString(entity);
            // Ensure that the entity content has been fully consumed and the underlying stream has been closed.
            EntityUtils.consume(entity);
            for (Job job : Job.parseFromJSON(response, _instanceDecorator)) {
                UUIDToJob.put(job.getUUID(), job);
            }
        } catch (JSONException | ParseException | IOException e) {
            throw new JobClientException("Can not parse the response = " + response + " for GET request "
                    + params + " via uri " + _jobURI, e);
        } finally {
            httpRequest.releaseConnection();
        }
    }
    return UUIDToJob.build();
}

From source file:de.thm.arsnova.dao.CouchDBDao.java

@CacheEvict("answers")
@Override
public int deleteAnswers(final Question question) {
    try {
        final NovaView view = new NovaView("answer/cleanup");
        view.setKey(question.get_id());
        view.setIncludeDocs(true);
        final ViewResults results = getDatabase().view(view);
        final List<List<Document>> partitions = Lists.partition(results.getResults(), BULK_PARTITION_SIZE);

        int count = 0;
        for (List<Document> partition : partitions) {
            List<Document> answersToDelete = new ArrayList<>();
            for (final Document a : partition) {
                final Document d = new Document(a.getJSONObject("doc"));
                d.put("_deleted", true);
                answersToDelete.add(d);
            }
            if (database.bulkSaveDocuments(answersToDelete.toArray(new Document[answersToDelete.size()]))) {
                count += partition.size();
            } else {
                LOGGER.error("Could not bulk delete answers");
            }
        }
        log("delete", "type", "answer", "answerCount", count);

        return count;
    } catch (final IOException e) {
        LOGGER.error("IOException: Could not delete answers for question {}", question.get_id());
    }

    return 0;
}

From source file:com.twosigma.cook.jobclient.JobClient.java

/**
 * Query a collection of groups for their status.
 * @param guuids specifies the uuids of the {@link Group}s to be queried.
 * @return a map of {@link UUID}s to {@link Group}s.
 * @throws JobClientException
 */
public Map<UUID, Group> queryGroups(Collection<UUID> guuids) throws JobClientException {
    if (_groupURI == null) {
        throw groupEndpointMissingException("Cannot query groups if the jobclient's group endpoint is null");
    }
    final List<NameValuePair> allParams = new ArrayList<NameValuePair>(guuids.size());
    for (UUID guuid : guuids) {
        allParams.add(new BasicNameValuePair("uuid", guuid.toString()));
    }
    allParams.add(new BasicNameValuePair("detailed", "true"));
    final ImmutableMap.Builder<UUID, Group> UUIDToGroup = ImmutableMap.builder();
    // Partition a large query into small queries.
    for (final List<NameValuePair> params : Lists.partition(allParams, _batchRequestSize)) {
        HttpResponse httpResponse;
        HttpRequestBase httpRequest;
        try {
            URIBuilder uriBuilder = new URIBuilder(_groupURI);
            uriBuilder.addParameters(params);
            httpRequest = new HttpGet(uriBuilder.build());
            httpResponse = _httpClient.execute(httpRequest);
        } catch (IOException | URISyntaxException e) {
            throw releaseAndCreateException(null, null,
                    "Can not submit GET request " + params + " via uri " + _jobURI, e);
        }
        // Check status code.
        final StatusLine statusLine = httpResponse.getStatusLine();
        // Based on the decision graph
        // http://clojure-liberator.github.io/liberator/tutorial/decision-graph.html
        // The status code for the proper GET response is 200.
        if (statusLine.getStatusCode() != HttpStatus.SC_OK) {
            throw releaseAndCreateException(httpRequest, httpResponse,
                    "The response of GET request " + params + " via uri " + _jobURI + ": "
                            + statusLine.getReasonPhrase() + ", " + statusLine.getStatusCode(),
                    null);
        }
        // Parse the response.
        String response = null;
        try {
            // parse the response to string.
            final HttpEntity entity = httpResponse.getEntity();
            response = EntityUtils.toString(entity);
            // Ensure that the entity content has been fully consumed and the underlying stream has been closed.
            EntityUtils.consume(entity);
            for (Group group : Group.parseFromJSON(response, _instanceDecorator)) {
                UUIDToGroup.put(group.getUUID(), group);
            }
        } catch (JSONException | ParseException | IOException e) {
            throw new JobClientException("Can not parse the response = " + response + " for GET request "
                    + params + " via uri " + _jobURI, e);
        } finally {
            httpRequest.releaseConnection();
        }
    }
    return UUIDToGroup.build();

}

From source file:com.cloudera.director.aws.ec2.EC2Provider.java

@Override
@SuppressWarnings("PMD.UnusedFormalParameter")
public Map<String, InstanceState> getInstanceState(EC2InstanceTemplate template,
        Collection<String> virtualInstanceIds) {
    Map<String, InstanceState> instanceStateByVirtualInstanceId = Maps
            .newHashMapWithExpectedSize(virtualInstanceIds.size());

    // Partition full requests into multiple batch requests, AWS limits
    // the total number of instance status requests you can make.
    List<List<String>> partitions = Lists.partition(Lists.newArrayList(virtualInstanceIds),
            MAX_INSTANCE_STATUS_REQUESTS);

    for (List<String> partition : partitions) {
        instanceStateByVirtualInstanceId.putAll(getBatchInstanceState(partition));
    }

    return instanceStateByVirtualInstanceId;
}

From source file:dagger2.internal.codegen.ComponentGenerator.java

private void initializeFrameworkTypes(BindingGraph input, ClassWriter componentWriter,
        ConstructorWriter constructorWriter, Optional<ClassName> builderName,
        Map<TypeElement, MemberSelect> componentContributionFields,
        ImmutableMap<BindingKey, MemberSelect> memberSelectSnippets,
        ImmutableMap<ContributionBinding, Snippet> parentMultibindingContributionSnippets,
        ImmutableMap<ContributionBinding, Snippet> multibindingContributionSnippets) throws AssertionError {
    List<List<BindingKey>> partitions = Lists.partition(input.resolvedBindings().keySet().asList(), 100);
    for (int i = 0; i < partitions.size(); i++) {
        MethodWriter initializeMethod = componentWriter.addMethod(VoidName.VOID,
                "initialize" + ((i == 0) ? "" : i));
        initializeMethod.body();
        initializeMethod.addModifiers(PRIVATE);
        if (builderName.isPresent()) {
            initializeMethod.addParameter(builderName.get(), "builder").addModifiers(FINAL);
            constructorWriter.body().addSnippet("%s(builder);", initializeMethod.name());
        } else {
            constructorWriter.body().addSnippet("%s();", initializeMethod.name());
        }

        for (BindingKey bindingKey : partitions.get(i)) {
            Snippet memberSelectSnippet = memberSelectSnippets.get(bindingKey)
                    .getSnippetFor(componentWriter.name());
            ResolvedBindings resolvedBindings = input.resolvedBindings().get(bindingKey);
            switch (bindingKey.kind()) {
            case CONTRIBUTION:
                ImmutableSet<? extends ContributionBinding> bindings = resolvedBindings.contributionBindings();

                switch (ContributionBinding.bindingTypeFor(bindings)) {
                case SET:
                    boolean hasOnlyProvisions = Iterables.all(bindings,
                            Predicates.instanceOf(ProvisionBinding.class));
                    ImmutableList.Builder<Snippet> parameterSnippets = ImmutableList.builder();
                    for (ContributionBinding binding : bindings) {
                        if (multibindingContributionSnippets.containsKey(binding)) {
                            Snippet initializeSnippet = initializeFactoryForContributionBinding(binding, input,
                                    componentWriter.name(), componentContributionFields, memberSelectSnippets);
                            Snippet snippet = multibindingContributionSnippets.get(binding);
                            initializeMethod.body().addSnippet("this.%s = %s;", snippet, initializeSnippet);
                            parameterSnippets.add(snippet);
                        } else if (parentMultibindingContributionSnippets.containsKey(binding)) {
                            parameterSnippets.add(parentMultibindingContributionSnippets.get(binding));
                        } else {
                            throw new IllegalStateException(binding + " was not found in");
                        }
                    }
                    Snippet initializeSetSnippet = Snippet.format("%s.create(%s)",
                            hasOnlyProvisions ? ClassName.fromClass(SetFactory.class)
                                    : ClassName.fromClass(SetProducer.class),
                            Snippet.makeParametersSnippet(parameterSnippets.build()));
                    initializeMethod.body().addSnippet("this.%s = %s;", memberSelectSnippet,
                            initializeSetSnippet);
                    break;
                case MAP:
                    if (Sets.filter(bindings, Predicates.instanceOf(ProductionBinding.class)).isEmpty()) {
                        @SuppressWarnings("unchecked") // checked by the instanceof filter above
                        ImmutableSet<ProvisionBinding> provisionBindings = (ImmutableSet<ProvisionBinding>) bindings;
                        for (ProvisionBinding provisionBinding : provisionBindings) {
                            if (!isNonProviderMap(provisionBinding)
                                    && multibindingContributionSnippets.containsKey(provisionBinding)) {
                                Snippet snippet = multibindingContributionSnippets.get(provisionBinding);
                                initializeMethod.body().addSnippet("this.%s = %s;", snippet,
                                        initializeFactoryForProvisionBinding(provisionBinding,
                                                componentWriter.name(),
                                                input.componentDescriptor().dependencyMethodIndex(),
                                                componentContributionFields, memberSelectSnippets));
                            }
                        }
                        if (!provisionBindings.isEmpty()) {
                            Snippet initializeMapSnippet = initializeMapBinding(componentWriter.name(),
                                    memberSelectSnippets,
                                    new ImmutableMap.Builder<ContributionBinding, Snippet>()
                                            .putAll(parentMultibindingContributionSnippets)
                                            .putAll(multibindingContributionSnippets).build(),
                                    provisionBindings);
                            initializeMethod.body().addSnippet("this.%s = %s;", memberSelectSnippet,
                                    initializeMapSnippet);
                        }
                    } else {
                        // TODO(user): Implement producer map bindings.
                        throw new IllegalStateException("producer map bindings not implemented yet");
                    }
                    break;
                case UNIQUE:
                    if (!resolvedBindings.ownedContributionBindings().isEmpty()) {
                        ContributionBinding binding = Iterables.getOnlyElement(bindings);
                        if (binding instanceof ProvisionBinding) {
                            ProvisionBinding provisionBinding = (ProvisionBinding) binding;
                            if (!provisionBinding.factoryCreationStrategy().equals(ENUM_INSTANCE)
                                    || provisionBinding.scope().isPresent()) {
                                initializeMethod.body().addSnippet("this.%s = %s;", memberSelectSnippet,
                                        initializeFactoryForProvisionBinding(provisionBinding,
                                                componentWriter.name(),
                                                input.componentDescriptor().dependencyMethodIndex(),
                                                componentContributionFields, memberSelectSnippets));
                            }
                        } else if (binding instanceof ProductionBinding) {
                            ProductionBinding productionBinding = (ProductionBinding) binding;
                            initializeMethod.body().addSnippet("this.%s = %s;", memberSelectSnippet,
                                    initializeFactoryForProductionBinding(productionBinding, input,
                                            componentWriter.name(),
                                            input.componentDescriptor().dependencyMethodIndex(),
                                            componentContributionFields, memberSelectSnippets));
                        } else {
                            throw new AssertionError();
                        }
                    }
                    break;
                default:
                    throw new IllegalStateException();
                }
                break;
            case MEMBERS_INJECTION:
                MembersInjectionBinding binding = Iterables
                        .getOnlyElement(resolvedBindings.membersInjectionBindings());
                if (!binding.injectionStrategy().equals(MembersInjectionBinding.Strategy.NO_OP)) {
                    initializeMethod.body().addSnippet("this.%s = %s;", memberSelectSnippet,
                            initializeMembersInjectorForBinding(componentWriter.name(), binding,
                                    memberSelectSnippets));
                }
                break;
            default:
                throw new AssertionError();
            }
        }
    }
}

From source file:com.twosigma.cook.jobclient.JobClient.java

/**
 * Abort jobs for a given list of job {@link UUID}s. If the size of the list is larger than the
 * {@code _batchRequestSize}, it will partition the list into smaller lists to abort separately.
 *
 * @param uuids specifies a list of job {@link UUID}s expected to abort.
 * @throws JobClientException
 */
public void abort(Collection<UUID> uuids) throws JobClientException {
    final List<NameValuePair> allParams = new ArrayList<NameValuePair>(uuids.size());
    for (UUID uuid : uuids) {
        allParams.add(new BasicNameValuePair("job", uuid.toString()));
    }
    // Partition a large query into small queries.
    for (final List<NameValuePair> params : Lists.partition(allParams, _batchRequestSize)) {
        HttpRequestBase httpRequest;
        try {
            URIBuilder uriBuilder = new URIBuilder(_jobURI);
            uriBuilder.addParameters(params);
            httpRequest = new HttpDelete(uriBuilder.build());
        } catch (URISyntaxException e) {
            throw releaseAndCreateException(null, null,
                    "Can not submit DELETE request " + params + " via uri " + _jobURI, e);
        }
        HttpResponse httpResponse;
        try {
            httpResponse = _httpClient.execute(httpRequest);
        } catch (IOException e) {
            throw releaseAndCreateException(httpRequest, null,
                    "Can not submit DELETE request " + params + " via uri " + _jobURI, e);
        }
        // Check status code.
        final StatusLine statusLine = httpResponse.getStatusLine();
        // Based on the decision graph
        // http://clojure-liberator.github.io/liberator/tutorial/decision-graph.html
        // If jobs are aborted successfully, the returned status code is 204.
        if (statusLine.getStatusCode() != HttpStatus.SC_NO_CONTENT) {
            throw releaseAndCreateException(
                    httpRequest, httpResponse, "The response of DELETE request " + params + " via uri "
                            + _jobURI + ": " + statusLine.getReasonPhrase() + ", " + statusLine.getStatusCode(),
                    null);
        }
        // Parse the response.
        try {
            // Parse the response to string.
            final HttpEntity entity = httpResponse.getEntity();
            if (null != entity) {
                final String response = EntityUtils.toString(entity);
                if (_log.isDebugEnabled()) {
                    _log.debug("Response String for aborting jobs " + uuids + " is " + response);
                }
            }
        } catch (ParseException | IOException e) {
            throw new JobClientException(
                    "Can not parse the response for DELETE request " + params + " via uri " + _jobURI, e);
        } finally {
            httpRequest.releaseConnection();
        }
    }
}

From source file:org.apache.jackrabbit.oak.plugins.document.rdb.RDBDocumentStore.java

@CheckForNull
private <T extends Document> boolean internalCreate(Collection<T> collection, List<UpdateOp> updates) {
    final Stopwatch watch = startWatch();
    List<String> ids = new ArrayList<String>(updates.size());
    boolean success = true;
    try {

        // try up to CHUNKSIZE ops in one transaction
        for (List<UpdateOp> chunks : Lists.partition(updates, CHUNKSIZE)) {
            List<T> docs = new ArrayList<T>();
            for (UpdateOp update : chunks) {
                ids.add(update.getId());
                maintainUpdateStats(collection, update.getId());
                UpdateUtils.assertUnconditional(update);
                T doc = collection.newDocument(this);
                addUpdateCounters(update);
                UpdateUtils.applyChanges(doc, update);
                if (!update.getId().equals(doc.getId())) {
                    throw new DocumentStoreException(
                            "ID mismatch - UpdateOp: " + update.getId() + ", ID property: " + doc.getId());
                }
                docs.add(doc);
            }
            boolean done = insertDocuments(collection, docs);
            if (done) {
                if (collection == Collection.NODES) {
                    for (T doc : docs) {
                        nodesCache.putIfAbsent((NodeDocument) doc);
                    }
                }
            } else {
                success = false;
            }
        }
        return success;
    } catch (DocumentStoreException ex) {
        return false;
    } finally {
        stats.doneCreate(watch.elapsed(TimeUnit.NANOSECONDS), collection, ids, success);
    }
}

From source file:com.disney.opa.service.queue.QueueService.java

public List<OPAQueue> processProductsWhoseContractsAreUnknownOrChanged(List<OPAQueue> firstRecords)
        throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    try {
        for (OPAQueue q : firstRecords) {
            List<Integer> products = contractService.getProductsWhoseLicenseeChanged();
            log.debug("# of products to validate :" + products.size());
            List<List<Integer>> productsOf25 = Lists.partition(products, 25);
            for (List<Integer> prod25 : productsOf25) {
                OPAQueue queueRecord = new OPAQueue();
                queueRecord.setCreationDate(new java.sql.Timestamp(Calendar.getInstance().getTime().getTime()));
                queueRecord.setJob("validateContract");
                queueRecord.setCreatedBy(-1);
                ContractValidationQueueJsonObject cvo = new ContractValidationQueueJsonObject();
                cvo.setProductIds(prod25);
                queueRecord.setJsonData(mapper.writeValueAsString(cvo));
                opaQueueDao.createRecord(queueRecord);
            }

            q.setCompletionDate(new java.sql.Timestamp(Calendar.getInstance().getTime().getTime()));
            q.setSuccess("1");
            opaQueueDao.updateRecord(q);
        }

    } catch (Exception ex) {
        //            utils.LogExceptionAndSendEmail("ReaderScheduler", ex);
        throw ex;
    } finally {

    }
    return firstRecords;
}

From source file:dagger.internal.codegen.AbstractComponentWriter.java

private void initializeFrameworkTypes() {
    ImmutableList.Builder<CodeBlock> codeBlocks = ImmutableList.builder();

    for (BindingKey bindingKey : graph.resolvedBindings().keySet()) {
        initializeFrameworkType(bindingKey).ifPresent(codeBlocks::add);
    }
    List<List<CodeBlock>> partitions = Lists.partition(codeBlocks.build(),
            INITIALIZATIONS_PER_INITIALIZE_METHOD);

    initializationMethods = new ArrayList<>();

    UniqueNameSet methodNames = new UniqueNameSet();
    for (List<CodeBlock> partition : partitions) {
        String methodName = methodNames.getUniqueName("initialize");
        MethodSpec.Builder initializeMethod = methodBuilder(methodName).addModifiers(PRIVATE)
                /* TODO(gak): Strictly speaking, we only need the suppression here if we are also
                 * initializing a raw field in this method, but the structure of this code makes it
                 * awkward to pass that bit through.  This will be cleaned up when we no longer
                 * separate fields and initialization as we do now. */
                .addAnnotation(AnnotationSpecs.suppressWarnings(UNCHECKED))
                .addCode(CodeBlocks.concat(partition));
        if (builderName.isPresent()) {
            initializeMethod.addParameter(builderName.get(), "builder", FINAL);
            constructor.addStatement("$L(builder)", methodName);
        } else {
            constructor.addStatement("$L()", methodName);
        }
        final MethodSpec method = initializeMethod.build();
        initializationMethods.add(method);
        component.addMethod(method);
    }
}

From source file:com.disney.opa.service.queue.QueueService.java

public List<OPAQueue> processAllMigrationProducts(List<OPAQueue> firstRecords) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    try {
        for (OPAQueue q : firstRecords) {
            List<Integer> products = contractService.getAllMigrationProducts();
            List<List<Integer>> productsOf25 = Lists.partition(products, 25);
            for (List<Integer> prod25 : productsOf25) {
                OPAQueue queueRecord = new OPAQueue();
                queueRecord.setCreationDate(new java.sql.Timestamp(Calendar.getInstance().getTime().getTime()));
                queueRecord.setJob("validateContract");
                queueRecord.setCreatedBy(-1);
                ContractValidationQueueJsonObject cvo = new ContractValidationQueueJsonObject();
                cvo.setProductIds(prod25);
                queueRecord.setJsonData(mapper.writeValueAsString(cvo));
                opaQueueDao.createRecord(queueRecord);
            }
            q.setCompletionDate(new java.sql.Timestamp(Calendar.getInstance().getTime().getTime()));
            q.setSuccess("1");
            opaQueueDao.updateRecord(q);
        }

    } catch (Exception ex) {
        //            utils.LogExceptionAndSendEmail("ReaderScheduler", ex);
        throw ex;
    } finally {

    }
    return firstRecords;
}