Example usage for com.google.common.collect Lists partition

Introduction

This page collects example usages of com.google.common.collect.Lists.partition, drawn from the open source projects listed below.

Prototype

public static <T> List<List<T>> partition(List<T> list, int size) 

Document

Returns consecutive sublists of a list (each a List#subList(int, int) view), each of the same size (the final list may be smaller).
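
For example, a minimal sketch of this behavior (assuming the usual java.util and com.google.common.collect.Lists imports):

List<Integer> numbers = Arrays.asList(1, 2, 3, 4, 5);
List<List<Integer>> chunks = Lists.partition(numbers, 2);
System.out.println(chunks); // [[1, 2], [3, 4], [5]] -- the final sublist holds the remainder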

Usage

From source file:com.dangdang.ddframe.job.internal.job.dataflow.AbstractDataFlowElasticJob.java

@SuppressWarnings("unchecked")
private void processDataForThroughput(final JobExecutionMultipleShardingContext shardingContext,
        final List<T> data) {
    int threadCount = getJobFacade().getConcurrentDataProcessThreadCount();
    if (threadCount <= 1 || data.size() <= threadCount) {
        processDataWithStatistics((C) shardingContext, data);
        return;
    }
    List<List<T>> splitData = Lists.partition(data, data.size() / threadCount);
    final CountDownLatch latch = new CountDownLatch(splitData.size());
    for (final List<T> each : splitData) {
        executorService.submit(new Runnable() {

            @Override
            public void run() {
                try {
                    processDataWithStatistics((C) shardingContext, each);
                } finally {
                    latch.countDown();
                }
            }
        });
    }
    latchAwait(latch);
}
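
Worth noting: Lists.partition(data, data.size() / threadCount) can produce more sublists than threadCount whenever the division leaves a remainder, which is why the CountDownLatch above is sized from splitData.size() rather than threadCount. The early return also guards against a zero partition size, which partition rejects. A quick illustrative sketch:

List<Integer> data = Arrays.asList(0, 1, 2, 3, 4, 5, 6, 7, 8, 9);
// chunk size = 10 / 3 = 3, so the partition is [3, 3, 3, 1]: four sublists for three threads
System.out.println(Lists.partition(data, data.size() / 3).size()); // 4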

From source file:net.sourceforge.vaticanfetcher.util.gui.viewer.PagedTableViewer.java

public final void setRoot(@Nullable Object rootElement) {
    // Reset fields, excluding the pageIndex
    this.rootElement = rootElement;
    table.removeAll();
    elementToItemMap.clear();
    pages = emptyPages;
    if (columns.isEmpty())
        return;

    /*
     * Get elements and split them into pages
     * 
     * Lists.partition(...) returns an empty list (i.e. zero pages) if the
     * input list is empty or if the page size is Integer.MAX_VALUE, which
     * is not what we want.
     */
    Collection<E> elements = getElements(rootElement);
    if (elements == null || elements.isEmpty())
        pages = emptyPages;
    else if (elementsPerPage == Integer.MAX_VALUE)
        pages = Collections.singletonList(filterAndSort(elements));
    else
        pages = Lists.partition(filterAndSort(elements), elementsPerPage);

    // Populate the table with the elements on the current page
    assert pageIndex >= 0;
    assert pages.size() >= 1;
    pageIndex = Util.clamp(pageIndex, 0, pages.size() - 1);
    List<E> pageElements = pages.get(pageIndex);
    onPageRefresh(pageElements);
    for (E element : pageElements) {
        TableItem item = new TableItem(table, SWT.NONE);
        update(element, item);
        elementToItemMap.put(element, item);
    }
}
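
Lists.partition returns a view of the backing list rather than a copy, so pages here stays in sync with the list produced by filterAndSort(elements) at no extra cost. A minimal sketch of the view behavior (illustrative names):

List<String> backing = new ArrayList<>(Arrays.asList("a", "b", "c"));
List<List<String>> pages = Lists.partition(backing, 2);
backing.set(0, "z");
System.out.println(pages.get(0)); // [z, b] -- the partition reflects changes to the backing list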

From source file:org.sonar.db.purge.PurgeCommands.java

@VisibleForTesting
protected void deleteAnalyses(List<IdUuidPair> analysisIdUuids) {
    List<List<String>> analysisUuidsPartitions = Lists.partition(IdUuidPairs.uuids(analysisIdUuids),
            MAX_SNAPSHOTS_PER_QUERY);

    deleteAnalysisDuplications(analysisUuidsPartitions);

    profiler.start("deleteAnalyses (events)");
    analysisUuidsPartitions.forEach(purgeMapper::deleteAnalysisEvents);
    session.commit();
    profiler.stop();

    profiler.start("deleteAnalyses (project_measures)");
    analysisUuidsPartitions.forEach(purgeMapper::deleteAnalysisMeasures);
    session.commit();
    profiler.stop();

    profiler.start("deleteAnalyses (snapshots)");
    analysisUuidsPartitions.forEach(purgeMapper::deleteAnalyses);
    session.commit();
    profiler.stop();
}
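
Here the partition caps the number of UUIDs handed to each mapper call, presumably so the generated SQL IN clauses stay within database limits; each delete is then issued once per chunk, with a commit after every table.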

From source file:org.haiku.haikudepotserver.pkg.job.PkgDumpExportJobRunner.java

private void writePkgs(JsonGenerator jsonGenerator, PkgDumpExportJobSpecification specification)
        throws IOException {
    jsonGenerator.writeFieldName("items");
    jsonGenerator.writeStartArray();

    final ObjectContext context = serverRuntime.newContext();

    NaturalLanguage naturalLanguage = NaturalLanguage.getByCode(context, specification.getNaturalLanguageCode())
            .orElseThrow(() -> new IllegalStateException(
                    "unable to find the natural language [" + specification.getNaturalLanguageCode() + "]"));

    RepositorySource repositorySource = RepositorySource
            .tryGetByCode(context, specification.getRepositorySourceCode())
            .orElseThrow(() -> new IllegalStateException(
                    "unable to find the repository source [" + specification.getRepositorySourceCode() + "]"));

    List<String> pkgNames = getPkgNames(context, repositorySource);

    // iterate through the pkg names.  This is done in this manner so that if there are (erroneously)
    // two 'latest' pkg versions under the same pkg for two different architectures then these will
    // be grouped in the output instead of the same pkg appearing twice.

    LOGGER.info("will dump pkg versions for {} pkgs", pkgNames.size());

    Lists.partition(pkgNames, BATCH_SIZE).forEach((subPkgNames) -> {
        List<PkgVersion> pkgVersions = createPkgVersionSelect(repositorySource, subPkgNames).select(context);
        writePkgVersions(jsonGenerator, context, pkgVersions, repositorySource, naturalLanguage);
    });

    jsonGenerator.writeEndArray();
}

From source file:org.metaservice.manager.Manager.java

public void postProcessAllByClass(URI clazz) throws ManagerException {

    try {
        TupleQuery tupleQuery = repositoryConnection.prepareTupleQuery(QueryLanguage.SPARQL,
                "SELECT *  WITH {SELECT DISTINCT ?x ?time { {{ GRAPH ?c { ?a ?b ?x.?x a <" + clazz
                        + ">.} } UNION { GRAPH ?c { ?x ?b ?a.?x a <" + clazz + ">. }}} ?c <"
                        + METASERVICE.DATA_TIME + "> ?time} } AS %data WHERE {INCLUDE %data} ORDER BY  ?time");
        TupleQueryResult result = tupleQuery.evaluate();
        ArrayList<PostProcessingTask> postProcessingTasks = new ArrayList<>();
        while (result.hasNext()) {
            BindingSet bindings = result.next();
            URI uri = valueFactory.createURI(bindings.getBinding("x").getValue().stringValue());
            Date date = ((Literal) bindings.getBinding("time").getValue()).calendarValue().toGregorianCalendar()
                    .getTime();
            PostProcessingTask task = new PostProcessingTask(uri, date);
            postProcessingTasks.add(task);
        }
        for (List<PostProcessingTask> tasks : Lists.partition(postProcessingTasks, 1000)) {
            messageHandler.bulkSend(tasks);
        }
    } catch (RepositoryException | QueryEvaluationException | MessagingException | MalformedQueryException e) {
        throw new ManagerException(e);
    }
}

From source file:org.zanata.client.commands.glossary.push.GlossaryPushCommand.java

@Override
public void run() throws Exception {

    log.info("Server: {}", getOpts().getUrl());
    log.info("Username: {}", getOpts().getUsername());
    log.info("Source language: {}", DEFAULT_SOURCE_LANG);
    log.info("Translation language: {}", getOpts().getTransLang());
    if (StringUtils.isNotBlank(getOpts().getProject())) {
        log.info("Project: {}", getOpts().getProject());
    }
    log.info("Glossary file: {}", getOpts().getFile());
    log.info("Batch size: {}", getOpts().getBatchSize());

    File glossaryFile = getOpts().getFile();

    if (glossaryFile == null) {
        throw new RuntimeException("Option '--file' is required.");
    }
    if (!glossaryFile.exists()) {
        throw new RuntimeException("File '" + glossaryFile + "' does not exist. Check '--file' option");
    }

    if (getOpts().getBatchSize() <= 0) {
        throw new RuntimeException("Option '--batch-size' needs to be 1 or more.");
    }

    String fileExtension = validateFileExtensionWithTransLang();

    String project = getOpts().getProject();
    String qualifiedName;
    try {
        qualifiedName = StringUtils.isBlank(project) ? client.getGlobalQualifiedName()
                : client.getProjectQualifiedName(project);
    } catch (ResponseProcessingException rpe) {
        if (rpe.getResponse().getStatus() == 404) {
            log.error("Project {} not found", project);
            return;
        } else {
            throw rpe;
        }
    }
    AbstractGlossaryPushReader reader = getReader(fileExtension);

    log.info("Pushing glossary document [{}] to server", glossaryFile.getName());

    Reader inputStreamReader = new InputStreamReader(new FileInputStream(glossaryFile), "UTF-8");
    BufferedReader br = new BufferedReader(inputStreamReader);

    Map<LocaleId, List<GlossaryEntry>> glossaries = reader.extractGlossary(br, qualifiedName);

    int totalEntries = 0;
    for (Map.Entry<LocaleId, List<GlossaryEntry>> entries : glossaries.entrySet()) {
        totalEntries = totalEntries + entries.getValue().size();
        log.info("Total entries:" + totalEntries);
    }

    int totalDone = 0;
    for (Map.Entry<LocaleId, List<GlossaryEntry>> entry : glossaries.entrySet()) {
        List<List<GlossaryEntry>> batches = Lists.partition(entry.getValue(), getOpts().getBatchSize());
        for (List<GlossaryEntry> batch : batches) {
            client.post(batch, entry.getKey(), qualifiedName);
            totalDone = totalDone + batch.size();
            log.info("Pushed " + totalDone + " of " + totalEntries + " entries");
        }
    }
}

From source file:com.google.cloud.pubsub.PollingSubscriberConnection.java

@Override
void sendAckOperations(List<String> acksToSend, List<PendingModifyAckDeadline> ackDeadlineExtensions) {
    // Send the modify ack deadlines in batches so as not to exceed the max
    // request size.
    List<List<PendingModifyAckDeadline>> modifyAckDeadlineChunks = Lists.partition(ackDeadlineExtensions,
            MAX_PER_REQUEST_CHANGES);
    for (List<PendingModifyAckDeadline> modAckChunk : modifyAckDeadlineChunks) {
        for (PendingModifyAckDeadline modifyAckDeadline : modAckChunk) {
            stub.withDeadlineAfter(DEFAULT_TIMEOUT.getMillis(), TimeUnit.MILLISECONDS)
                    .modifyAckDeadline(ModifyAckDeadlineRequest.newBuilder().setSubscription(subscription)
                            .addAllAckIds(modifyAckDeadline.ackIds)
                            .setAckDeadlineSeconds(modifyAckDeadline.deadlineExtensionSeconds).build());
        }
    }

    List<List<String>> ackChunks = Lists.partition(acksToSend, MAX_PER_REQUEST_CHANGES);
    Iterator<List<String>> ackChunksIt = ackChunks.iterator();
    while (ackChunksIt.hasNext()) {
        List<String> ackChunk = ackChunksIt.next();
        stub.withDeadlineAfter(DEFAULT_TIMEOUT.getMillis(), TimeUnit.MILLISECONDS).acknowledge(
                AcknowledgeRequest.newBuilder().setSubscription(subscription).addAllAckIds(ackChunk).build());
    }
}

From source file:edu.cmu.lti.oaqa.baseqa.providers.ml.classifiers.ClassifierProvider.java

default List<Double> crossTrainInferMultiLabel(List<Map<String, Double>> X, List<Collection<String>> Y,
        ResampleType resampleType, String label) throws AnalysisEngineProcessException {
    Set<Integer> indexes = IntStream.range(0, X.size()).boxed().collect(toSet());
    List<Integer> indexList = new ArrayList<>(indexes);
    Collections.shuffle(indexList);
    int nfolds = (int) Math.ceil(indexList.size() / 10.0);
    List<Double> ret = IntStream.range(0, X.size()).mapToObj(i -> Double.NaN).collect(toList());
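    // NB: nfolds is used below as the chunk size for Lists.partition (about a tenth of the
    // data per fold), so the loop runs roughly ten cross-validation folds.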
    for (List<Integer> cvTestIndexes : Lists.partition(indexList, nfolds)) {
        List<Map<String, Double>> cvTrainX = new ArrayList<>();
        List<Collection<String>> cvTrainY = new ArrayList<>();
        Sets.difference(indexes, new HashSet<>(cvTestIndexes)).forEach(cvTrainIndex -> {
            cvTrainX.add(X.get(cvTrainIndex));
            cvTrainY.add(Y.get(cvTrainIndex));
        });
        trainMultiLabel(cvTrainX, cvTrainY, resampleType, false);
        for (int cvTestIndex : cvTestIndexes) {
            double result = infer(X.get(cvTestIndex), label);
            ret.set(cvTestIndex, result);
        }
    }
    return ret;
}

From source file:com.opengamma.livedata.server.AbstractPersistentSubscriptionManager.java

/**
 * Creates a persistent subscription on the server for any persistent
 * subscriptions which are not yet there.
 */
private synchronized void updateServer(boolean catchExceptions) {
    Collection<LiveDataSpecification> specs = getSpecs(_persistentSubscriptions);
    Set<LiveDataSpecification> persistentSubscriptionsToMake = new HashSet<LiveDataSpecification>(specs);

    OperationTimer operationTimer = new OperationTimer(s_logger,
            "Updating server's persistent subscriptions {}", persistentSubscriptionsToMake.size());

    int partitionSize = 50; // Aim: convert subscriptions quickly enough that nothing expires, keep the server responsive, and keep retries from taking too long

    List<List<LiveDataSpecification>> partitions = Lists
            .partition(Lists.newArrayList(persistentSubscriptionsToMake), partitionSize);
    for (List<LiveDataSpecification> partition : partitions) {

        Map<LiveDataSpecification, MarketDataDistributor> marketDataDistributors = _server
                .getMarketDataDistributors(persistentSubscriptionsToMake);
        for (Entry<LiveDataSpecification, MarketDataDistributor> distrEntry : marketDataDistributors
                .entrySet()) {
            if (distrEntry.getValue() != null) {
                // Upgrade or no-op should be fast; do it now to avoid expiry
                createPersistentSubscription(catchExceptions, distrEntry.getKey());
                persistentSubscriptionsToMake.remove(distrEntry.getKey());
            }
        }

        SetView<LiveDataSpecification> toMake = Sets.intersection(new HashSet<LiveDataSpecification>(partition),
                persistentSubscriptionsToMake);
        if (!toMake.isEmpty()) {
            createPersistentSubscription(catchExceptions, toMake); //PLAT-1632 
            persistentSubscriptionsToMake.removeAll(toMake);
        }
    }
    operationTimer.finished();
    s_logger.info("Server updated");
}
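
Since Lists.partition accepts only a List, the Set persistentSubscriptionsToMake is first copied with Lists.newArrayList(...); the partitions are views of that copy, so the removals from the original set inside the loop do not disturb the chunks being iterated.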

From source file:org.apache.drill.exec.planner.sql.HivePartitionDescriptor.java

@Override
protected void createPartitionSublists() {
    List<PartitionLocation> locations = new LinkedList<>();
    HiveReadEntry origEntry = ((HiveScan) scanRel.getGroupScan()).hiveReadEntry;
    for (Partition partition : origEntry.getPartitions()) {
        locations.add(new HivePartitionLocation(partition.getValues(), partition.getSd().getLocation()));
    }
    locationSuperList = Lists.partition(locations, PartitionDescriptor.PARTITION_BATCH_SIZE);
    sublistsCreated = true;
}