Example usage for com.google.common.collect Lists partition

Introduction

On this page you can find example usages of com.google.common.collect.Lists.partition, collected from open-source projects.

Prototype

public static <T> List<List<T>> partition(List<T> list, int size) 

Document

Returns consecutive sublists of a list (views created with List.subList(int, int)), each of the same size; the final sublist may be smaller.
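
Example

A minimal, self-contained sketch of the method in isolation (the class name PartitionExample is illustrative, not taken from any project below): partitioning ten integers into batches of three yields three full sublists plus a smaller remainder. Note that partition returns an unmodifiable view backed by the source list, so later changes to the source show through.

import com.google.common.collect.Lists;

import java.util.Arrays;
import java.util.List;

public class PartitionExample {
    public static void main(String[] args) {
        final List<Integer> numbers = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);

        // Split the list into consecutive batches of at most three elements.
        final List<List<Integer>> batches = Lists.partition(numbers, 3);

        // Prints [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10]]; the final
        // sublist holds the remainder and may be smaller than the batch size.
        System.out.println(batches);
    }
}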

Usage

From source file:com.netflix.metacat.usermetadata.mysql.MysqlUserMetadataService.java
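
This example deletes definition and data metadata for a batch of entities, partitioning the list so that each generated SQL statement stays within the configured maximum number of IN-clause items.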

@Override
public void deleteMetadatas(final String userId, final List<HasMetadata> holders) {
    try {
        final Connection conn = poolingDataSource.getConnection();
        try {
            final List<List<HasMetadata>> subLists = Lists.partition(holders,
                    config.getUserMetadataMaxInClauseItems());
            for (List<HasMetadata> hasMetadatas : subLists) {
                final List<QualifiedName> names = hasMetadatas.stream()
                        .filter(m -> m instanceof HasDefinitionMetadata)
                        .map(m -> ((HasDefinitionMetadata) m).getDefinitionName()).collect(Collectors.toList());
                if (!names.isEmpty()) {
                    _deleteDefinitionMetadatas(conn, names);
                }
                if (config.canSoftDeleteDataMetadata()) {
                    final List<String> uris = hasMetadatas.stream()
                            .filter(m -> m instanceof HasDataMetadata && ((HasDataMetadata) m).isDataExternal())
                            .map(m -> ((HasDataMetadata) m).getDataUri()).collect(Collectors.toList());
                    if (!uris.isEmpty()) {
                        _softDeleteDataMetadatas(conn, userId, uris);
                    }
                }
            }
            conn.commit();
        } catch (SQLException e) {
            conn.rollback();
            throw e;
        } finally {
            conn.close();
        }
    } catch (SQLException e) {
        log.error("Sql exception", e);
        throw new UserMetadataServiceException("Failed deleting data metadata", e);
    }
}

From source file:org.sonar.db.purge.PurgeCommands.java
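
SonarQube's purge task splits the analysis UUIDs into partitions of at most MAX_SNAPSHOTS_PER_QUERY, so each delete or update statement operates on a bounded number of values.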

public void purgeAnalyses(List<IdUuidPair> analysisUuids) {
    List<List<String>> analysisUuidsPartitions = Lists.partition(IdUuidPairs.uuids(analysisUuids),
            MAX_SNAPSHOTS_PER_QUERY);

    deleteAnalysisDuplications(analysisUuidsPartitions);

    profiler.start("deleteSnapshotWastedMeasures (project_measures)");
    List<Long> metricIdsWithoutHistoricalData = purgeMapper.selectMetricIdsWithoutHistoricalData();
    analysisUuidsPartitions.stream().forEach(analysisUuidsPartition -> purgeMapper
            .deleteAnalysisWastedMeasures(analysisUuidsPartition, metricIdsWithoutHistoricalData));
    session.commit();
    profiler.stop();

    profiler.start("updatePurgeStatusToOne (snapshots)");
    analysisUuidsPartitions.forEach(purgeMapper::updatePurgeStatusToOne);
    session.commit();
    profiler.stop();
}

From source file:org.asoem.greyfish.core.environment.Generic2DEnvironment.java
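
Here the active agents are partitioned into chunks of parallelizationThreshold agents, and each chunk is run as a separate Callable on an executor service.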

private void executeAllAgents() throws InterruptedException, ExecutionException {
    final List<List<A>> partition = Lists.partition(ImmutableList.copyOf(getActiveAgents()),
            parallelizationThreshold);
    final Collection<Callable<Void>> callables = Lists.transform(partition,
            new Function<List<A>, Callable<Void>>() {
                @Override
                public Callable<Void> apply(final List<A> input) {
                    return new Callable<Void>() {
                        @Override
                        public Void call() {
                            for (A a : input) {
                                a.run();
                            }
                            return null;
                        }
                    };
                }
            });

    final List<Future<Void>> futures = executorService.invokeAll(callables);
    for (Future<Void> future : futures) {
        future.get();
    }
}

From source file:com.netflix.metacat.metadata.mysql.MysqlUserMetadataService.java
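
Definition metadata is deleted in batches sized to the configured IN-clause limit.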

@Override
public void deleteDefinitionMetadata(@Nonnull final List<QualifiedName> names) {
    try {
        final List<List<QualifiedName>> subLists = Lists.partition(names,
                config.getUserMetadataMaxInClauseItems());
        for (List<QualifiedName> subNames : subLists) {
            _deleteDefinitionMetadata(subNames);
        }
    } catch (Exception e) {
        final String message = String.format("Failed deleting the definition metadata for %s", names);
        log.error(message, e);
        throw new UserMetadataServiceException(message, e);
    }
}

From source file:org.opennms.netmgt.discovery.actors.RangeChunker.java
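
This discovery component groups the configured addresses by foreign source and monitoring location, then partitions each group into chunkSize batches and emits one DiscoveryJob per batch.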

public List<DiscoveryJob> chunk(final DiscoveryConfiguration config) {

    final int chunkSize = (config.getChunkSize() > 0) ? config.getChunkSize()
            : DiscoveryConfigFactory.DEFAULT_CHUNK_SIZE;
    final double packetsPerSecond = (config.getPacketsPerSecond() > 0.0) ? config.getPacketsPerSecond()
            : DiscoveryConfigFactory.DEFAULT_PACKETS_PER_SECOND;

    // If the foreign source for the discovery config is not set, then use
    // the default foreign source
    final String foreignSourceFromConfig = (config.getForeignSource() == null
            || "".equals(config.getForeignSource().trim())) ? "default" : config.getForeignSource().trim();

    // If the monitoring location for the discovery config is not set, then use
    // the default localhost location
    final String locationFromConfig = (config.getLocation() == null || "".equals(config.getLocation().trim()))
            ? MonitoringLocationDao.DEFAULT_MONITORING_LOCATION_ID
            : config.getLocation().trim();

    final DiscoveryConfigFactory configFactory = new DiscoveryConfigFactory(config);

    final AtomicReference<IPPollRange> previousRange = new AtomicReference<>();

    return StreamSupport.stream(configFactory.getConfiguredAddresses().spliterator(), false).filter(address -> {
        // If there is no IP address filter set or the filter matches
        return m_ipAddressFilter == null || m_ipAddressFilter.matches(address.getAddress());
    })
            // TODO: We could optimize this further by not unrolling IPPollRanges into individual
            // IPPollAddresses during the mapping.
            .map(address -> {
                // Create a singleton IPPollRange
                return new IPPollRange(
                        // Make sure that foreignSource is not null so that we can partition on the value
                        address.getForeignSource() == null ? foreignSourceFromConfig
                                : address.getForeignSource(),
                        // Make sure that location is not null so that we can partition on the value
                        address.getLocation() == null ? locationFromConfig : address.getLocation(),
                        address.getAddress(), address.getAddress(), address.getTimeout(), address.getRetries());
            }).collect(Collectors.groupingBy(range -> {
                // Create a Map<ForeignSourceLocationKey,List<IPPollRange>>
                return new ForeignSourceLocationKey(
                        // Make sure that foreignSource is not null so that we can partition on the value
                        range.getForeignSource() == null ? foreignSourceFromConfig : range.getForeignSource(),
                        // Make sure that location is not null so that we can partition on the value
                        range.getLocation() == null ? locationFromConfig : range.getLocation());
            })).entrySet().stream()
            // Flat map one list of IPPollRanges to many chunked DiscoveryJobs
            .flatMap(entry -> {
                // Partition the list of address values
                return Lists.partition(entry.getValue(), chunkSize).stream()
                        // Map each partition value to a separate DiscoveryJob
                        .map(ranges -> {
                            DiscoveryJob retval = new DiscoveryJob(ranges.stream().map(address -> {
                                // If this address is consecutive with the previous range,
                                // then just extend the range to cover this address too
                                if (isConsecutive(previousRange.get(), address)) {
                                    previousRange.get().getAddressRange().incrementEnd();
                                    return null;
                                }
                                previousRange.set(address);
                                return address;
                            })
                                    // Filter out all of the consecutive values that we nulled out
                                    .filter(Objects::nonNull)
                                    // Convert back into a list of ranges
                                    .collect(Collectors.toList()), entry.getKey().getForeignSource(),
                                    entry.getKey().getLocation(), packetsPerSecond);
                            // Reset the previousRange value
                            previousRange.set(null);
                            return retval;
                        })
                        // Collect the DiscoveryJobs
                        .collect(Collectors.toList()).stream();
            }).collect(Collectors.toList());
}

From source file:io.opencensus.exporter.stats.stackdriver.StackdriverExporterWorker.java
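
The Stackdriver exporter uploads the collected time series in batches of at most MAX_BATCH_EXPORT_SIZE, wrapping each batched request in a trace span and recording failures on it.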

@VisibleForTesting
void export() {
    List</*@Nullable*/ ViewData> viewDataList = Lists.newArrayList();
    for (View view : viewManager.getAllExportedViews()) {
        if (registerView(view)) {
            // Only upload stats for valid views.
            viewDataList.add(viewManager.getView(view.getName()));
        }
    }

    List<TimeSeries> timeSeriesList = Lists.newArrayList();
    for (/*@Nullable*/ ViewData viewData : viewDataList) {
        timeSeriesList.addAll(StackdriverExportUtils.createTimeSeriesList(viewData, monitoredResource));
    }
    for (List<TimeSeries> batchedTimeSeries : Lists.partition(timeSeriesList, MAX_BATCH_EXPORT_SIZE)) {
        Span span = tracer.getCurrentSpan();
        span.addAnnotation("Export Stackdriver TimeSeries.");
        try (Scope scope = tracer.withSpan(span)) {
            CreateTimeSeriesRequest request = CreateTimeSeriesRequest.newBuilder()
                    .setName(projectName.toString()).addAllTimeSeries(batchedTimeSeries).build();
            metricServiceClient.createTimeSeries(request);
            span.addAnnotation("Finish exporting TimeSeries.");
        } catch (ApiException e) {
            logger.log(Level.WARNING, "ApiException thrown when exporting TimeSeries.", e);
            span.setStatus(Status.CanonicalCode.valueOf(e.getStatusCode().getCode().name()).toStatus()
                    .withDescription("ApiException thrown when exporting TimeSeries: " + exceptionMessage(e)));
        } catch (Throwable e) {
            logger.log(Level.WARNING, "Exception thrown when exporting TimeSeries.", e);
            span.setStatus(Status.UNKNOWN
                    .withDescription("Exception thrown when exporting TimeSeries: " + exceptionMessage(e)));
        }
    }
}

From source file:com.plumbee.flume.source.sqs.BatchConsumer.java
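
This Flume source flushes buffered events to the channel, then issues SQS batch-delete requests in partitions of queueDeleteBatchSize.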

public void flush() {

    // Commit messages to the downstream channel.
    LOGGER.debug("Flushing, transaction size: {}", batchDeleteRequestEntries.size());
    if (batchEventList.size() > 0) {
        channelProcessor.processEventBatch(batchEventList);
        batchEventList.clear();
    }

    // Request the batch deletion of messages.
    for (List<DeleteMessageBatchRequestEntry> partition : Lists.partition(batchDeleteRequestEntries,
            queueDeleteBatchSize)) {
        sourceCounter.incrementBatchDeleteRequestAttemptCount();
        deleteMessageBatchRequest.setEntries(partition);
        DeleteMessageBatchResult batchResult = client.deleteMessageBatch(deleteMessageBatchRequest);
        for (BatchResultErrorEntry errorEntry : batchResult.getFailed()) {
            LOGGER.error("Failed to delete message, {}", errorEntry.toString());
        }
        sourceCounter.incrementBatchDeleteRequestSuccessCount();
        sourceCounter.addToDeleteMessageFailedCount((long) batchResult.getFailed().size());
        sourceCounter.addToDeleteMessageSuccessCount((long) batchResult.getSuccessful().size());
    }
    batchDeleteRequestEntries.clear();
    lastFlush = System.currentTimeMillis();
}

From source file:org.apache.ambari.server.orm.dao.HostRoleCommandDAO.java
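
Ambari works around Oracle's limit on the number of expressions in an IN list by partitioning the task IDs into ORACLE_LIST_LIMIT-sized chunks and running the query once per chunk.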

@RequiresSession
public List<Long> findTaskIdsByRequestAndTaskIds(Collection<Long> requestIds, Collection<Long> taskIds) {
    TypedQuery<Long> query = entityManagerProvider.get()
            .createQuery(
                    "SELECT DISTINCT task.taskId FROM HostRoleCommandEntity task "
                            + "WHERE task.requestId IN ?1 AND task.taskId IN ?2 " + "ORDER BY task.taskId",
                    Long.class);

    if (daoUtils.getDbType().equals(ORACLE) && taskIds.size() > ORACLE_LIST_LIMIT) {
        List<Long> result = new ArrayList<Long>();

        List<List<Long>> lists = Lists.partition(new ArrayList<Long>(taskIds), ORACLE_LIST_LIMIT);
        for (List<Long> taskIdList : lists) {
            result.addAll(daoUtils.selectList(query, requestIds, taskIdList));
        }

        return result;
    }
    return daoUtils.selectList(query, requestIds, taskIds);
}

From source file:hu.bme.mit.trainbenchmark.benchmark.fourstore.driver.FourStoreDriver.java
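
Vertex URIs are inserted in PARTITION_SIZE batches.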

public void insertVertices(final List<String> uris, final String type) throws IOException {
    if (uris.isEmpty()) {
        return;
    }

    final List<List<String>> partitions = Lists.partition(uris, PARTITION_SIZE);
    for (final List<String> partition : partitions) {
        insertVerticesPartition(partition, type);
    }
}

From source file:eu.mondo.driver.fourstore.FourStoreGraphDriverReadWrite.java
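
The property map's keys are partitioned, and a sub-map of properties is rebuilt for each partition before it is written.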

@Override
public void updateProperties(final Map<String, Object> properties, final String type) throws IOException {
    if (properties.isEmpty()) {
        return;
    }

    final List<String> vertexURIs = new ArrayList<>(properties.keySet());

    final List<List<String>> vertexURIpartitions = Lists.partition(vertexURIs, PARTITION_SIZE);
    for (final List<String> vertexURIpartition : vertexURIpartitions) {

        final Map<String, Object> propertyPartition = new HashMap<>();
        for (final String vertexURI : vertexURIpartition) {
            final Object value = properties.get(vertexURI);
            propertyPartition.put(vertexURI, value);
        }

        updatePropertiesPartition(propertyPartition, type);
    }
}