Example usage for com.google.common.collect Lists.partition

Introduction

This page collects example usages of com.google.common.collect.Lists.partition drawn from open source projects.

Prototype

public static <T> List<List<T>> partition(List<T> list, int size) 

Document

Returns consecutive List.subList(int, int) sublists of a list, each of the same size (the final list may be smaller).
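
For orientation, here is a minimal, self-contained sketch of that behavior (the class name PartitionDemo is illustrative, not taken from any of the projects below):

import com.google.common.collect.Lists;

import java.util.Arrays;
import java.util.List;

public class PartitionDemo {
    public static void main(String[] args) {
        List<Integer> numbers = Arrays.asList(1, 2, 3, 4, 5, 6, 7);

        // Consecutive sublists of at most 3 elements; the last sublist keeps the remainder.
        List<List<Integer>> batches = Lists.partition(numbers, 3);

        System.out.println(batches); // prints [[1, 2, 3], [4, 5, 6], [7]]
    }
}

The returned sublists are views of the original list, and partition throws IllegalArgumentException if the size is not positive (note the explicit batch-size check in the HBaseRecordIterator example below).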

Usage

From source file:org.jboss.hal.meta.processing.RrdTask.java

@Override
public Completable call(LookupContext context) {
    boolean recursive = context.recursive;
    List<Completable> completables = new ArrayList<>();

    // create and partition non-optional operations
    List<Operation> operations = rrdOps.create(context, recursive, false);
    List<List<Operation>> piles = Lists.partition(operations, batchSize);
    List<Composite> composites = piles.stream().map(Composite::new).collect(toList());
    for (Composite composite : composites) {
        completables.add(
                dispatcher.execute(composite).doOnSuccess(parseRrdAction(context, composite)).toCompletable());
    }

    // create optional operations w/o partitioning!
    List<Operation> optionalOperations = rrdOps.create(context, recursive, true);
    // Do not refactor to
    // List<Composite> optionalComposites = optionalOperations.stream().map(Composite::new).collect(toList());
    // the GWT compiler will crash with an ArrayIndexOutOfBoundsException!
    List<Composite> optionalComposites = new ArrayList<>();
    optionalOperations.forEach(operation -> optionalComposites.add(new Composite(operation)));
    for (Composite composite : optionalComposites) {
        completables.add(dispatcher.execute(composite).onErrorResumeNext(throwable -> {
            if (throwable instanceof DispatchFailure) {
                logger.debug("Ignore errors on optional resource operation {}", composite.asCli());
                return Single.just(new CompositeResult(new ModelNode()));
            } else {
                return Single.error(throwable);
            }
        }).doOnSuccess(parseRrdAction(context, composite)).toCompletable());
    }

    if (!completables.isEmpty()) {
        logger.debug("About to execute {} ({}+{}) composite operations (regular+optional)",
                composites.size() + optionalComposites.size(), composites.size(), optionalComposites.size());
        return Completable.concat(completables);
    } else {
        logger.debug("No DMR operations necessary");
        return Completable.complete();
    }
}

From source file:com.romeikat.datamessie.core.sync.service.template.withIdAndVersion.DeleteExecutor.java

private void delete() {
    final List<Long> rhsIds = decisionResults.getToBeDeleted();
    final List<List<Long>> rhsIdsBatches = Lists.partition(rhsIds, batchSizeEntities);
    new ParallelProcessing<List<Long>>(sessionFactory, rhsIdsBatches, parallelismFactor) {
        @Override
        public void doProcessing(final HibernateSessionProvider rhsSessionProvider,
                final List<Long> rhsIdsBatch) {
            new ExecuteWithTransaction(rhsSessionProvider.getStatelessSession()) {

                @Override
                protected void execute(final StatelessSession statelessSession) {
                    delete(rhsSessionProvider.getStatelessSession(), rhsIdsBatch);
                }
            }.execute();
        }
    };
}

From source file:org.apache.geode.geospatial.function.GeoQueryFunction.java

@Override
public void execute(FunctionContext context) {

    ResultSender<Collection<PdxInstance>> resultSender = context.getResultSender();
    try {
        String wellKownText = (String) context.getArguments();
        //Create a JTS object that we can test against.
        Geometry geometry = new WKTReader().read(wellKownText);

        ArrayList<Object> keys = new ArrayList<Object>(geospatialIndex.query(geometry));

        List<List<Object>> partitionedKeys = Lists.partition(keys, chunkSize);
        for (List currKeySet : partitionedKeys) {
            resultSender.sendResult(new ArrayList<>(region.getAll(currKeySet).values()));
        }

    } catch (Exception e) {
        e.printStackTrace();
    }
    resultSender.lastResult(null);

}

From source file:org.sonar.db.purge.PurgeCommands.java

void deleteComponents(List<IdUuidPair> componentIdUuids) {
    List<List<Long>> componentIdPartitions = Lists.partition(IdUuidPairs.ids(componentIdUuids),
            MAX_RESOURCES_PER_QUERY);
    List<List<String>> componentUuidsPartitions = Lists.partition(IdUuidPairs.uuids(componentIdUuids),
            MAX_RESOURCES_PER_QUERY);
    // Note : do not merge the delete statements into a single loop of resource ids. It's
    // voluntarily grouped by tables in order to benefit from JDBC batch mode.
    // Batch requests can only relate to the same PreparedStatement.

    // possible missing optimization: filter requests according to resource scope

    profiler.start("deleteResourceLinks (project_links)");
    componentUuidsPartitions.forEach(purgeMapper::deleteComponentLinks);
    session.commit();
    profiler.stop();

    profiler.start("deleteResourceProperties (properties)");
    componentIdPartitions.forEach(purgeMapper::deleteComponentProperties);
    session.commit();
    profiler.stop();

    profiler.start("deleteResourceGroupRoles (group_roles)");
    componentIdPartitions.forEach(purgeMapper::deleteComponentGroupRoles);
    session.commit();
    profiler.stop();

    profiler.start("deleteResourceUserRoles (user_roles)");
    componentIdPartitions.forEach(purgeMapper::deleteComponentUserRoles);
    session.commit();
    profiler.stop();

    profiler.start("deleteResourceManualMeasures (manual_measures)");
    componentUuidsPartitions.forEach(purgeMapper::deleteComponentManualMeasures);
    session.commit();
    profiler.stop();

    profiler.start("deleteComponentIssueChanges (issue_changes)");
    componentUuidsPartitions.forEach(purgeMapper::deleteComponentIssueChanges);
    session.commit();
    profiler.stop();

    profiler.start("deleteComponentIssues (issues)");
    componentUuidsPartitions.forEach(purgeMapper::deleteComponentIssues);
    session.commit();
    profiler.stop();

    profiler.start("deleteComponentEvents (events)");
    componentUuidsPartitions.forEach(purgeMapper::deleteComponentEvents);
    session.commit();
    profiler.stop();

    profiler.start("deleteResource (projects)");
    componentUuidsPartitions.forEach(purgeMapper::deleteComponents);
    session.commit();
    profiler.stop();

    profiler.start("deleteAuthors (authors)");
    componentIdPartitions.forEach(purgeMapper::deleteAuthors);
    session.commit();
    profiler.stop();
}

From source file:br.com.metricminer2.RepositoryMining.java

public void mine() {

    for (SCMRepository repo : repos) {
        log.info("Git repository in " + repo.getPath());

        List<ChangeSet> allCs = range.get(repo.getScm());
        log.info("Total of commits: " + allCs.size());

        log.info("Starting threads: " + threads);
        ExecutorService exec = Executors.newFixedThreadPool(threads);
        List<List<ChangeSet>> partitions = Lists.partition(allCs, threads);
        for (List<ChangeSet> partition : partitions) {

            exec.submit(() -> {
                for (ChangeSet cs : partition) {
                    try {
                        processEverythingOnChangeSet(repo, cs);
                    } catch (OutOfMemoryError e) {
                        System.err
                                .println("Commit " + cs.getId() + " in " + repo.getLastDir() + " caused OOME");
                        e.printStackTrace();
                        System.err.println("goodbye :/");

                        log.fatal("Commit " + cs.getId() + " in " + repo.getLastDir() + " caused OOME", e);
                        log.fatal("Goodbye! ;/");
                        System.exit(-1);
                    } catch (Throwable t) {
                        log.error(t);
                    }
                }
            });
        }

        try {
            exec.shutdown();
            exec.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);
        } catch (InterruptedException e) {
            log.error("error waiting for threads to terminate in " + repo.getLastDir(), e);
        }
    }

    closeAllPersistence();
    printScript();

}
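
Note that Lists.partition(allCs, threads) produces consecutive sublists of size threads, not threads equally sized partitions: with, say, 100 change sets and threads = 4, the loop submits 25 tasks of 4 commits each to the 4-thread pool.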

From source file:org.wso2.carbon.analytics.datasource.hbase.HBaseRecordIterator.java

public HBaseRecordIterator(int tenantId, String tableName, List<String> columns, List<String> recordIds,
        Connection conn, int batchSize) throws AnalyticsException, AnalyticsTableNotAvailableException {
    this.init(conn, tenantId, tableName, columns);
    if (batchSize <= 0) {
        throw new AnalyticsException("Error batching records: the batch size should be a positive integer");
    } else {
        this.batchedIds = Lists.partition(recordIds, batchSize);
        this.totalBatches = this.batchedIds.size();
        /* pre-fetching from HBase and populating records for the first time */
        this.fetch();
    }
}

From source file:com.simiacryptus.mindseye.eval.BatchedTrainable.java

@Override
public PointSample measure(final TrainingMonitor monitor) {
    @Nonnull
    final List<Tensor[]> tensors = Arrays.asList(getData());
    TimedResult<PointSample> timedResult = TimedResult.time(() -> {
        if (batchSize < tensors.size()) {
            final int batches = (int) Math.ceil(tensors.size() * 1.0 / batchSize);
            final int evenBatchSize = (int) Math.ceil(tensors.size() * 1.0 / batches);
            @Nonnull
            final List<List<Tensor[]>> collection = Lists.partition(tensors, evenBatchSize);
            return collection.stream().map(trainingData -> {
                if (batchSize < trainingData.size()) {
                    throw new RuntimeException();
                }
                getInner().setData(trainingData);
                return super.measure(monitor);
            }).reduce((a, b) -> a.add(b)).get();
        } else {
            getInner().setData(tensors);
            return super.measure(monitor);
        }
    });
    if (null != monitor && isVerbose()) {
        monitor.log(String.format("Evaluated %s items in %.4fs (%s/%s)", tensors.size(),
                timedResult.timeNanos / 1e9, timedResult.result.getMean(),
                timedResult.result.delta.getMagnitude()));
    }
    return timedResult.result;
}
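
The two ceiling computations rebalance the batch size before partitioning so the work splits more evenly: for example, with 10 tensors and batchSize = 6, batches = ceil(10 / 6) = 2 and evenBatchSize = ceil(10 / 2) = 5, so Lists.partition yields two batches of 5 instead of 6 and 4.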

From source file:net.oneandone.maven.plugins.cycles.classes.PackageDependencies.java

private static String packagePrefix(String pkg, int depth) {
    return Joiner.on('.')
            .join(Lists.partition(ImmutableList.copyOf(Splitter.on('.').split(pkg)), depth).get(0));
}
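
For example, with a hypothetical call packagePrefix("com.example.plugins.util", 2), the package name is split on '.', the segments are partitioned into groups of 2, and the first group is joined back, yielding "com.example".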

From source file:google.registry.tools.server.ListDomainsAction.java

@Override
public ImmutableSet<DomainResource> loadObjects() {
    checkArgument(!tlds.isEmpty(), "Must specify TLDs to query");
    for (String tld : tlds) {
        assertTldExists(tld);
    }
    ImmutableSortedSet.Builder<DomainResource> builder = new ImmutableSortedSet.Builder<DomainResource>(
            COMPARATOR);
    for (List<String> batch : Lists.partition(tlds.asList(), MAX_NUM_SUBQUERIES)) {
        builder.addAll(queryNotDeleted(DomainResource.class, clock.nowUtc(), "tld in", batch));
    }
    return builder.build();
}

From source file:io.opencensus.exporter.stats.stackdriver.CreateTimeSeriesExporter.java

@Override
public void export(Collection<Metric> metrics) {
    List<TimeSeries> timeSeriesList = new ArrayList<>(metrics.size());
    for (Metric metric : metrics) {
        timeSeriesList.addAll(StackdriverExportUtils.createTimeSeriesList(metric, monitoredResource, domain,
                projectName.getProject()));
    }

    Span span = tracer.getCurrentSpan();
    for (List<TimeSeries> batchedTimeSeries : Lists.partition(timeSeriesList, MAX_BATCH_EXPORT_SIZE)) {
        span.addAnnotation("Export Stackdriver TimeSeries.");
        try {
            CreateTimeSeriesRequest request = CreateTimeSeriesRequest.newBuilder()
                    .setName(projectName.toString()).addAllTimeSeries(batchedTimeSeries).build();
            metricServiceClient.createTimeSeries(request);
            span.addAnnotation("Finish exporting TimeSeries.");
        } catch (ApiException e) {
            logger.log(Level.WARNING, "ApiException thrown when exporting TimeSeries.", e);
            span.setStatus(Status.CanonicalCode.valueOf(e.getStatusCode().getCode().name()).toStatus()
                    .withDescription("ApiException thrown when exporting TimeSeries: "
                            + StackdriverExportUtils.exceptionMessage(e)));
        } catch (Throwable e) {
            logger.log(Level.WARNING, "Exception thrown when exporting TimeSeries.", e);
            span.setStatus(Status.UNKNOWN.withDescription("Exception thrown when exporting TimeSeries: "
                    + StackdriverExportUtils.exceptionMessage(e)));
        }
    }
}