Example usage for com.google.common.collect Lists partition

Introduction

This page collects example usages of com.google.common.collect.Lists.partition from real-world source files.

Prototype

public static <T> List<List<T>> partition(List<T> list, int size) 

Document

Returns consecutive sublists of a list, each of the same size (the final sublist may be smaller); each sublist is a List#subList(int, int) view of the original list.
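
For example, partitioning a seven-element list with a size of 3 produces three sublists, the last holding the remainder. A minimal, self-contained illustration (the list and size here are arbitrary):

import com.google.common.collect.Lists;

import java.util.Arrays;
import java.util.List;

public class PartitionExample {
    public static void main(String[] args) {
        List<Integer> numbers = Arrays.asList(1, 2, 3, 4, 5, 6, 7);
        // Consecutive sublists of at most 3 elements; the last one holds the remainder.
        List<List<Integer>> parts = Lists.partition(numbers, 3);
        System.out.println(parts); // [[1, 2, 3], [4, 5, 6], [7]]
        // The inner lists are views of the original list, not copies.
    }
}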

Usage

From source file:com.netflix.metacat.metadata.mysql.MysqlUserMetadataService.java

@Override
public void softDeleteDataMetadata(final String user, @Nonnull final List<String> uris) {
    try {
        final List<List<String>> subLists = Lists.partition(uris, config.getUserMetadataMaxInClauseItems());
        for (List<String> subUris : subLists) {
            _softDeleteDataMetadata(user, subUris);
        }
    } catch (Exception e) {
        final String message = String.format("Failed deleting the data metadata for %s", uris);
        log.error(message, e);
        throw new UserMetadataServiceException(message, e);
    }
}

From source file:org.webtestingexplorer.explorer.WebTestingExplorer.java

private ActionSequenceQueue buildInitialActionSequences(int numPartitions, int partitionNumber)
        throws Exception {
    List<ActionSequence> initialActionSequences = Lists.newArrayList(config.getInitialActionSequences());

    ActionSequenceQueue actionSequences = new ActionSequenceQueue();
    for (ActionSequence initialActionSequence : initialActionSequences) {
        runner.runActionSequence(new ActionSequenceRunnerConfig(config.getUrl(), initialActionSequence,
                config.getOracleConfig(), config.getWaitConditionConfig(), null, config.getNumRetries(),
                config.isUseElementsCache(), false));
        List<Action> actions = getAllPossibleActionsInCurrentState();
        for (Action action : actions) {
            extendAndPushActionSequence(actionSequences, initialActionSequence, action);
        }
        runner.getDriver().close();
    }

    if (numPartitions == 0) {
        // We are not using partitioning of the initial action sequences.
        return actionSequences;
    }

    int partitionSize = actionSequences.size() / numPartitions;
    if (actionSequences.size() % numPartitions != 0) {
        ++partitionSize;
    }
    List<List<ActionSequence>> partitionedActionSequences = Lists.partition(actionSequences.asList(),
            partitionSize);
    return new ActionSequenceQueue(partitionedActionSequences.get(partitionNumber));
}
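
The rounding step above is a ceiling division: it bumps the partition size up by one whenever the queue does not divide evenly, which guarantees that Lists.partition yields at most numPartitions chunks. A small standalone sketch of that arithmetic (the work list and partition count are invented for illustration):

import com.google.common.collect.Lists;

import java.util.Arrays;
import java.util.List;

public class CeilingPartitionSketch {
    public static void main(String[] args) {
        List<Integer> work = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
        int numPartitions = 3;
        // Equivalent to the size/remainder adjustment above: round the chunk size up.
        int partitionSize = (work.size() + numPartitions - 1) / numPartitions; // 4
        List<List<Integer>> parts = Lists.partition(work, partitionSize);
        System.out.println(parts.size()); // 3 -- never more than numPartitions
        System.out.println(parts);        // [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10]]
    }
}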

From source file:com.pearson.eidetic.driver.threads.MonitorSnapshotVolumeTime.java

@Override
public void run() {
    Calendar calendar_int = Calendar.getInstance();

    // Calendar.DAY_OF_YEAR (1-366)
    today_ = calendar_int.get(Calendar.DAY_OF_YEAR);

    ConcurrentHashMap<Region, ArrayList<Volume>> localVolumeTime;

    localVolumeTime = awsAccount_.getVolumeTime_Copy();

    for (Map.Entry<Region, ArrayList<Volume>> entry : localVolumeTime.entrySet()) {
        Region region = entry.getKey();
        splitFactorDay_.put(region, 10);
        HashSet<Date> newHashSet = new HashSet<>();
        didMySnapshotDay_.put(entry.getKey(), newHashSet);
    }

    addAlreadyDoneTodaySnapshots(localVolumeTime);

    while (true) {
        try {
            //Reset my stuff
            if (isItTomorrow(today_)) {
                calendar_int = Calendar.getInstance();

                today_ = calendar_int.get(Calendar.DAY_OF_YEAR);
                resetDidMySnapshotDay();

            }

            localVolumeTime = awsAccount_.getVolumeTime_Copy();
            for (Map.Entry<Region, ArrayList<Volume>> entry : localVolumeTime.entrySet()) {
                Region region = entry.getKey();

                if (localVolumeTime.get(region).isEmpty()) {
                    continue;
                }

                timeDay_.put(region, extractRunAt(localVolumeTime.get(region)));

            }

            for (Map.Entry<Region, ArrayList<Volume>> entry : localVolumeTime.entrySet()) {
                Region region = entry.getKey();

                if (localVolumeTime.get(region).isEmpty()) {
                    continue;
                }

                timeDay_.get(region).keySet().removeAll(didMySnapshotDay_.get(region));
                Calendar calendar = Calendar.getInstance();
                Date now = calendar.getTime();
                now = dayFormat_.parse(dayFormat_.format(now));

                List<Date> lessThanNow = findLessThanNow(timeDay_.get(region).keySet(), now);

                if (!lessThanNow.isEmpty()) {
                    for (Date date : lessThanNow) {
                        ArrayList<Volume> volumes = timeDay_.get(region).get(date);
                        List<List<Volume>> listOfLists = Lists.partition(volumes, splitFactorDay_.get(region));

                        if (localVolumeTimeListDay_.get(region) == null
                                || localVolumeTimeListDay_.get(region).isEmpty()) {
                            localVolumeTimeListDay_.put(region, listsToArrayLists(listOfLists));
                        } else {
                            try {
                                localVolumeTimeListDay_.get(region).add(listsToArrayLists(listOfLists).get(0));
                            } catch (Exception e) {
                            }
                        }

                        ArrayList<SnapshotVolumeTime> threads = new ArrayList<>();

                        for (ArrayList<Volume> vols : listsToArrayLists(listOfLists)) {
                            threads.add(new SnapshotVolumeTime(awsAccount_.getAwsAccessKeyId(),
                                    awsAccount_.getAwsSecretKey(), awsAccount_.getUniqueAwsAccountIdentifier(),
                                    awsAccount_.getMaxApiRequestsPerSecond(),
                                    ApplicationConfiguration.getAwsCallRetryAttempts(), region, vols));

                        }

                        didMySnapshotDay_.get(region).add(date);

                        EideticSubThreads_.put(region, threads);

                    }

                }
            }
            // localVolumeTimeListDay_ now maps each region to lists (batches) of volumes to snapshot.

            HashMap<Region, Integer> secsSlept = new HashMap<>();
            HashMap<Region, Boolean> allDead = new HashMap<>();

            for (Map.Entry<Region, ArrayList<Volume>> entry : localVolumeTime.entrySet()) {
                Region region = entry.getKey();

                if (localVolumeTimeListDay_.get(region) == null
                        || localVolumeTimeListDay_.get(region).isEmpty()) {
                    continue;
                }

                //Initializing content
                secsSlept.put(region, 0);

                //Initializing content
                allDead.put(region, false);

                Threads.threadExecutorFixedPool(EideticSubThreads_.get(region), splitFactorDay_.get(region),
                        300, TimeUnit.MINUTES);
            }

            // Let's see if the sub-threads are dead
            Boolean ejection = false;
            Boolean theyreDead;
            while (true) {
                for (Map.Entry<Region, ArrayList<SnapshotVolumeTime>> entry : EideticSubThreads_.entrySet()) {
                    Region region = entry.getKey();

                    if (areAllThreadsDead(EideticSubThreads_.get(region))) {
                        allDead.put(region, true);
                    } else {
                        secsSlept.replace(region, secsSlept.get(region), secsSlept.get(region) + 1);
                        if (secsSlept.get(region) > 1800) {
                            splitFactorDay_.replace(region, splitFactorDay_.get(region),
                                    splitFactorDay_.get(region) + 1);
                            logger.info(
                                    "Event=\"increasing_splitFactor\", Monitor=\"SnapshotVolumeTime\", splitFactor=\""
                                            + Integer.toString(splitFactorDay_.get(region))
                                            + "\", VolumeTimeSize=\""
                                            + Integer.toString(localVolumeTime.get(region).size()) + "\"");
                            ejection = true;
                            break;
                        }

                    }

                }

                // Check whether the threads in every region have finished
                theyreDead = true;
                for (Map.Entry<Region, ArrayList<SnapshotVolumeTime>> entry : EideticSubThreads_.entrySet()) {
                    Region region = entry.getKey();

                    //If any of them have false
                    if (!allDead.get(region)) {
                        theyreDead = false;
                    }
                }

                if (ejection || theyreDead) {
                    break;
                }

                Threads.sleepSeconds(1);
            }

            // See whether the split factor can be decreased
            for (Map.Entry<Region, ArrayList<SnapshotVolumeTime>> entry : EideticSubThreads_.entrySet()) {
                Region region = entry.getKey();

                int timeRemaining = 1800 - secsSlept.get(region);

                if ((splitFactorDay_.get(region) > 5) && (timeRemaining > 60)) {
                    splitFactorDay_.replace(region, splitFactorDay_.get(region),
                            splitFactorDay_.get(region) - 1);
                    logger.info("awsAccountNickname=\"" + awsAccount_.getUniqueAwsAccountIdentifier()
                            + "\",Event=\"decreasing_splitFactor\", Monitor=\"SnapshotVolumeNoTime\", splitFactor=\""
                            + Integer.toString(splitFactorDay_.get(region)) + "\", VolumeNoTimeSize=\""
                            + Integer.toString(localVolumeTime.get(region).size()) + "\"");
                }
            }

            localVolumeTimeListDay_.clear();
            EideticSubThreads_.clear();

            Threads.sleepSeconds(30);

        } catch (Exception e) {
            logger.error("awsAccountNickname=\"" + awsAccount_.getUniqueAwsAccountIdentifier()
                    + "\",Error=\"MonitorSnapshotVolumeTimeFailure\", stacktrace=\"" + e.toString()
                    + System.lineSeparator() + StackTrace.getStringFromStackTrace(e) + "\"");
        }
    }

}

From source file:com.daugherty.e2c.persistence.data.jdbc.JdbcSupplierDao.java

@Override
public List<Supplier> loadLatestBySupplierIds(List<Long> ids, Locale locale) {
    List<Supplier> suppliers = Lists.newArrayList();

    HashSet<Long> uniqueIds = Sets.newHashSet(ids);
    LOGGER.debug("Getting Supplier from the database by supplier ids " + uniqueIds);
    for (List<Long> partitionedIds : Lists.partition(Lists.newArrayList(uniqueIds), 1000)) {
        String sql = getSql("/supplier/getLatestBySupplierIds.sql");
        SqlParameterSource parameterSource = new MapSqlParameterSource("supplierIds", partitionedIds)
                .addValue("language", locale.getLanguage());
        suppliers.addAll(jdbcTemplate.query(sql, parameterSource, new SupplierResultSetExtractor()));
    }

    return suppliers;
}
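
The fixed batch size of 1000 matches a common database cap on the number of expressions in an IN list (Oracle, for instance, allows at most 1000). A stripped-down sketch of the same batching pattern, independent of the DAO above (the helper names and query are hypothetical):

import com.google.common.collect.Lists;

import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.LongStream;

public class InClauseBatchingSketch {
    // Hypothetical helper: issues one query per batch of ids and concatenates the results.
    static List<String> loadNamesByIds(List<Long> ids) {
        List<String> results = new ArrayList<>();
        for (List<Long> batch : Lists.partition(ids, 1000)) {
            // e.g. "SELECT name FROM supplier WHERE id IN (:batch)"
            results.addAll(runQueryForBatch(batch)); // stand-in for the real JDBC call
        }
        return results;
    }

    static List<String> runQueryForBatch(List<Long> batch) {
        // Fake query that just echoes the ids.
        return batch.stream().map(id -> "name-" + id).collect(Collectors.toList());
    }

    public static void main(String[] args) {
        List<Long> ids = LongStream.rangeClosed(1, 2500).boxed().collect(Collectors.toList());
        System.out.println(loadNamesByIds(ids).size()); // 2500, fetched in 3 batches
    }
}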

From source file:com.netflix.spinnaker.clouddriver.core.agent.CleanupPendingOnDemandCachesAgent.java

void run(Collection<Provider> providers) {
    providers.forEach(provider -> {
        String onDemandSetName = provider.getProviderName() + ":onDemand:members";
        List<String> onDemandKeys = scanMembers(onDemandSetName).stream().filter(s -> !s.equals("_ALL_"))
                .collect(Collectors.toList());

        Map<String, Response<Boolean>> existingOnDemandKeys = new HashMap<>();
        if (redisClientDelegate.supportsMultiKeyPipelines()) {
            redisClientDelegate.withMultiKeyPipeline(pipeline -> {
                for (List<String> partition : Iterables.partition(onDemandKeys,
                        redisCacheOptions.getMaxDelSize())) {
                    for (String id : partition) {
                        existingOnDemandKeys.put(id,
                                pipeline.exists(provider.getProviderName() + ":onDemand:attributes:" + id));
                    }
                }
                pipeline.sync();
            });
        } else {
            redisClientDelegate.withCommandsClient(client -> {
                onDemandKeys.stream()
                        .filter(k -> client.exists(provider.getProviderName() + ":onDemand:attributes:" + k))
                        .forEach(k -> existingOnDemandKeys.put(k, new StaticResponse(Boolean.TRUE)));
            });
        }

        List<String> onDemandKeysToRemove = new ArrayList<>();
        for (String onDemandKey : onDemandKeys) {
            if (!existingOnDemandKeys.containsKey(onDemandKey)
                    || !existingOnDemandKeys.get(onDemandKey).get()) {
                onDemandKeysToRemove.add(onDemandKey);
            }
        }

        if (!onDemandKeysToRemove.isEmpty()) {
            log.info("Removing {} from {}", onDemandKeysToRemove.size(), onDemandSetName);
            log.debug("Removing {} from {}", onDemandKeysToRemove, onDemandSetName);

            redisClientDelegate.withMultiKeyPipeline(pipeline -> {
                for (List<String> idPartition : Lists.partition(onDemandKeysToRemove,
                        redisCacheOptions.getMaxDelSize())) {
                    String[] ids = idPartition.toArray(new String[idPartition.size()]);
                    pipeline.srem(onDemandSetName, ids);
                }

                pipeline.sync();
            });
        }
    });
}

From source file:org.opendaylight.sxp.core.service.BindingDispatcher.java

/**
 * Partitions data based on a pre-configured size and expands bindings for legacy connections
 *
 * @param connection     SxpConnection for which data will be partitioned
 * @param deleteBindings Bindings for delete
 * @param addBindings    Bindings for add
 * @param <T>            Any type extending SxpBindingFields
 * @return List of Functions that will generate Byte representation of data
 */
<T extends SxpBindingFields> List<BiFunction<SxpConnection, SxpBindingFilter, ByteBuf>> partitionBindings(
        SxpConnection connection, List<T> deleteBindings, List<T> addBindings) {
    List<BiFunction<SxpConnection, SxpBindingFilter, ByteBuf>> partitions = new ArrayList<>();
    List<T> lastPartition = null;
    //Prefix Expansion for legacy versions
    if (connection.isVersion4() && !connection.getCapabilitiesRemote().contains(CapabilityType.SubnetBindings)
            || connection.getVersion().getIntValue() < 3) {
        expandBindings(deleteBindings, owner.getExpansionQuantity());
        expandBindings(addBindings, owner.getExpansionQuantity());
    }
    //Split Delete Bindings
    if (deleteBindings != null && !deleteBindings.isEmpty()) {
        for (List<T> partition : Lists.partition(deleteBindings, getPartitionSize())) {
            partitions.add(generatePart(partition, null));
            lastPartition = partition;
        }
    }
    //Split Add Bindings and rest of Delete Bindings
    if (addBindings != null && !addBindings.isEmpty()) {
        int splitFactor = 0;
        if (lastPartition != null) {
            splitFactor = getPartitionSize() - lastPartition.size();
            partitions.set(partitions.size() - 1, generatePart(lastPartition,
                    splitFactor > addBindings.size() ? addBindings : addBindings.subList(0, splitFactor)));
        }
        for (List<T> partition : Lists.partition(
                addBindings.subList(splitFactor > addBindings.size() ? 0 : splitFactor, addBindings.size()),
                getPartitionSize())) {
            partitions.add(generatePart(null, partition));
        }
    }
    return partitions;
}
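
For illustration (numbers invented here, not taken from the project): with a partition size of 5, seven delete bindings and four add bindings, the delete bindings split into partitions of 5 and 2. splitFactor is then 5 - 2 = 3, so the last partition is regenerated to carry its two deletes plus the first three add bindings, and the single remaining add binding becomes a final partition of its own.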

From source file:org.opennms.netmgt.discovery.RangeChunker.java

public Map<String, List<DiscoveryJob>> chunk(final DiscoveryConfiguration config) {

    final int chunkSize = (config.getChunkSize() > 0) ? config.getChunkSize()
            : DiscoveryConfigFactory.DEFAULT_CHUNK_SIZE;
    final double packetsPerSecond = (config.getPacketsPerSecond() > 0.0) ? config.getPacketsPerSecond()
            : DiscoveryConfigFactory.DEFAULT_PACKETS_PER_SECOND;

    // If the foreign source for the discovery config is not set, then use
    // a value of null so that non-requisitioned nodes are created.
    //
    // TODO: Use the "default" foreign source instead so that we can move
    // away from using non-requisitioned nodes.
    //
    final String foreignSourceFromConfig = (config.getForeignSource() == null
            || "".equals(config.getForeignSource().trim())) ? null : config.getForeignSource().trim();

    // If the monitoring location for the discovery config is not set, then use
    // the default localhost location
    final String locationFromConfig = (config.getLocation() == null || "".equals(config.getLocation().trim()))
            ? MonitoringLocationDao.DEFAULT_MONITORING_LOCATION_ID
            : config.getLocation().trim();

    final DiscoveryConfigFactory configFactory = new DiscoveryConfigFactory(config);

    final AtomicReference<IPPollRange> previousRange = new AtomicReference<>();

    return StreamSupport.stream(configFactory.getConfiguredAddresses().spliterator(), false).filter(address -> {
        // If there is no IP address filter set or the filter matches
        return ipAddressFilter.matches(address.getLocation(), address.getAddress());
    })
            // TODO: We could optimize this further by not unrolling IPPollRanges into individual
            // IPPollAddresses during the mapping.
            .map(address -> {
                // Create a singleton IPPollRange
                return new IPPollRange(
                        // Make sure that foreignSource is not null so that we can partition on the value
                        address.getForeignSource() == null ? foreignSourceFromConfig
                                : address.getForeignSource(),
                        // Make sure that location is not null so that we can partition on the value
                        address.getLocation() == null ? locationFromConfig : address.getLocation(),
                        address.getAddress(), address.getAddress(), address.getTimeout(), address.getRetries());
            }).collect(Collectors.groupingBy(range -> {
                // Create a Map<ForeignSourceLocationKey,List<IPPollRange>>
                return new ForeignSourceLocationKey(
                        // Make sure that foreignSource is not null so that we can partition on the value
                        range.getForeignSource() == null ? foreignSourceFromConfig : range.getForeignSource(),
                        // Make sure that location is not null so that we can partition on the value
                        range.getLocation() == null ? locationFromConfig : range.getLocation());
            }, LinkedHashMap::new, Collectors.toList())).entrySet().stream()
            // Flat map one list of IPPollRanges to many chunked DiscoveryJobs
            .flatMap(entry -> {
                // Partition the list of address values
                return Lists.partition(entry.getValue(), chunkSize).stream()
                        // Map each partition value to a separate DiscoveryJob
                        .map(ranges -> {
                            DiscoveryJob retval = new DiscoveryJob(ranges.stream().map(address -> {
                                // If this address is consecutive with the previous range,
                                // then just extend the range to cover this address too
                                if (isConsecutive(previousRange.get(), address)) {
                                    previousRange.get().getAddressRange().incrementEnd();
                                    return null;
                                }
                                previousRange.set(address);
                                return address;
                            })
                                    // Filter out all of the consecutive values that we nulled out
                                    .filter(Objects::nonNull)
                                    // Convert back into a list of ranges
                                    .collect(Collectors.toList()), entry.getKey().getForeignSource(),
                                    entry.getKey().getLocation(), packetsPerSecond);
                            // Reset the previousRange value
                            previousRange.set(null);
                            return retval;
                        })
                        // Collect the DiscoveryJobs
                        .collect(Collectors.toList()).stream();
            })
            .collect(Collectors.groupingBy(job -> job.getLocation(), LinkedHashMap::new, Collectors.toList()));
}

From source file:com.netflix.metacat.usermetadata.mysql.MysqlUserMetadataService.java

@Override
public void deleteDefinitionMetadatas(@Nonnull final List<QualifiedName> names) {
    try {
        final Connection conn = poolingDataSource.getConnection();
        try {
            final List<List<QualifiedName>> subLists = Lists.partition(names,
                    config.getUserMetadataMaxInClauseItems());
            for (List<QualifiedName> subNames : subLists) {
                _deleteDefinitionMetadatas(conn, subNames);
            }
            conn.commit();
        } catch (SQLException e) {
            conn.rollback();
            throw e;
        } finally {
            conn.close();
        }
    } catch (SQLException e) {
        log.error("Sql exception", e);
        throw new UserMetadataServiceException(
                String.format("Failed deleting the definition metadata for %s", names), e);
    }
}

From source file:com.google.appengine.tools.pipeline.impl.backend.AppEngineBackEnd.java

private void putAll(Collection<? extends PipelineModelObject> objects) {
    if (objects.isEmpty()) {
        return;
    }
    List<Entity> entityList = new ArrayList<>(objects.size());
    for (PipelineModelObject x : objects) {
        logger.finest("Storing: " + x);
        entityList.add(x.toEntity());
    }
    List<List<Entity>> partitions = Lists.partition(entityList, MAX_ENTITY_COUNT_PUT);
    for (final List<Entity> partition : partitions) {
        dataStore.put(partition.toArray(new FullEntity[partition.size()]));
    }
}

From source file:com.netflix.spinnaker.cats.redis.cache.RedisCache.java

private void mergeItems(String type, Collection<CacheData> items) {
    if (items.isEmpty()) {
        return;
    }
    final Set<String> relationshipNames = new HashSet<>();
    final List<String> keysToSet = new LinkedList<>();
    final Set<String> idSet = new HashSet<>();

    final Map<String, Integer> ttlSecondsByKey = new HashMap<>();
    int skippedWrites = 0;

    final Map<String, byte[]> hashes = getHashes(type, items);

    final NavigableMap<byte[], byte[]> updatedHashes = new TreeMap<>(new ByteArrayComparator());

    for (CacheData item : items) {
        MergeOp op = buildMergeOp(type, item, hashes);
        relationshipNames.addAll(op.relNames);
        keysToSet.addAll(op.keysToSet);
        idSet.add(item.getId());
        updatedHashes.putAll(op.hashesToSet);
        skippedWrites += op.skippedWrites;

        if (item.getTtlSeconds() > 0) {
            for (String key : op.keysToSet) {
                ttlSecondsByKey.put(key, item.getTtlSeconds());
            }
        }
    }

    int saddOperations = 0;
    int msetOperations = 0;
    int hmsetOperations = 0;
    int pipelineOperations = 0;
    int expireOperations = 0;
    if (keysToSet.size() > 0) {
        try (Jedis jedis = source.getJedis()) {
            Pipeline pipeline = jedis.pipelined();
            for (List<String> idPart : Iterables.partition(idSet, options.getMaxSaddSize())) {
                final String[] ids = idPart.toArray(new String[idPart.size()]);
                pipeline.sadd(allOfTypeReindex(type), ids);
                saddOperations++;
                pipeline.sadd(allOfTypeId(type), ids);
                saddOperations++;
            }

            for (List<String> keys : Lists.partition(keysToSet, options.getMaxMsetSize())) {
                pipeline.mset(keys.toArray(new String[keys.size()]));
                msetOperations++;
            }

            if (!relationshipNames.isEmpty()) {
                for (List<String> relNamesPart : Iterables.partition(relationshipNames,
                        options.getMaxSaddSize())) {
                    pipeline.sadd(allRelationshipsId(type),
                            relNamesPart.toArray(new String[relNamesPart.size()]));
                    saddOperations++;
                }
            }

            if (!updatedHashes.isEmpty()) {
                for (List<byte[]> hashPart : Iterables.partition(updatedHashes.keySet(),
                        options.getMaxHmsetSize())) {
                    pipeline.hmset(hashesId(type), updatedHashes.subMap(hashPart.get(0), true,
                            hashPart.get(hashPart.size() - 1), true));
                    hmsetOperations++;
                }
            }
            pipeline.sync();
            pipelineOperations++;
        }
        try (Jedis jedis = source.getJedis()) {
            for (List<Map.Entry<String, Integer>> ttlPart : Iterables.partition(ttlSecondsByKey.entrySet(),
                    options.getMaxPipelineSize())) {
                Pipeline pipeline = jedis.pipelined();
                for (Map.Entry<String, Integer> ttlEntry : ttlPart) {
                    pipeline.expire(ttlEntry.getKey(), ttlEntry.getValue());
                }
                expireOperations += ttlPart.size();
                pipeline.sync();
                pipelineOperations++;
            }
        }
    }
    cacheMetrics.merge(prefix, type, items.size(), keysToSet.size() / 2, relationshipNames.size(),
            skippedWrites, updatedHashes.size(), saddOperations, msetOperations, hmsetOperations,
            pipelineOperations, expireOperations);
}
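
The method above mixes the two partition helpers: Lists.partition works on a List and returns indexable sublist views, while Iterables.partition accepts any Iterable (a Set, an entrySet(), and so on) and produces its chunks as it is iterated. A small sketch of the difference (the data here is arbitrary):

import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;

import java.util.Arrays;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;

public class PartitionFlavors {
    public static void main(String[] args) {
        List<String> keys = Arrays.asList("a", "b", "c", "d", "e");
        // Lists.partition: a random-access list of sublist views.
        List<List<String>> chunks = Lists.partition(keys, 2);
        System.out.println(chunks.get(2)); // [e]

        Set<String> ids = new TreeSet<>(keys);
        // Iterables.partition: works on any Iterable; chunks are built during iteration.
        for (List<String> chunk : Iterables.partition(ids, 2)) {
            System.out.println(chunk); // [a, b] then [c, d] then [e]
        }
    }
}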