Example usage for com.google.common.collect Lists partition

Introduction

This page collects example usages of com.google.common.collect.Lists.partition, drawn from real-world open-source projects.

Prototype

public static <T> List<List<T>> partition(List<T> list, int size) 

Document

Returns consecutive List#subList(int, int) sublists of a list, each of the same size (the final sublist may be smaller).
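As a quick illustration of this contract, here is a minimal, self-contained sketch (the class name and values are invented for illustration, not taken from the projects below):

import com.google.common.collect.Lists;

import java.util.Arrays;
import java.util.List;

public class PartitionExample {
    public static void main(String[] args) {
        List<Integer> numbers = Arrays.asList(1, 2, 3, 4, 5, 6, 7);

        // Consecutive chunks of at most 3 elements; the final chunk
        // holds the remainder, so it may be smaller than the rest.
        List<List<Integer>> chunks = Lists.partition(numbers, 3);

        System.out.println(chunks); // [[1, 2, 3], [4, 5, 6], [7]]
    }
}

The inner lists are subList views of the source list rather than copies, so partitioning is cheap even for large inputs; the examples below rely on this to batch database statements, search-index operations, and API calls.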

Usage

From source file: com.netflix.metacat.usermetadata.mysql.MysqlUserMetadataService.java

@Override
public void deleteDataMetadatas(@Nonnull final List<String> uris) {
    try {
        final Connection conn = poolingDataSource.getConnection();
        try {
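            // Chunk the URIs so that each DELETE stays within the configured
            // maximum number of IN-clause items.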
            final List<List<String>> subLists = Lists.partition(uris, config.getUserMetadataMaxInClauseItems());
            for (List<String> subUris : subLists) {
                _deleteDataMetadatas(conn, subUris);
            }
            conn.commit();
        } catch (SQLException e) {
            conn.rollback();
            throw e;
        } finally {
            conn.close();
        }
    } catch (SQLException e) {
        log.error("Sql exception", e);
        throw new UserMetadataServiceException(String.format("Failed deleting the data metadata for %s", uris),
                e);
    }
}

From source file: com.romeikat.datamessie.core.sync.service.template.withIdAndVersion.CreateOrUpdateExecutor.java

private void create(final List<Long> lhsIds) throws TaskCancelledException {
    final Collection<List<Long>> lhsIdsBatches = Lists.partition(lhsIds, batchSizeEntities);
    final int lhsCount = lhsIds.size();
    int firstEntity = 0;

    CountDownLatch rhsInProgress = null;
    CountDownLatch rhsDone = null;
    final Executor e = Executors.newSingleThreadExecutor();

    for (final List<Long> lhsIdsBatch : lhsIdsBatches) {
        // Feedback
        final int lastEntity = firstEntity + lhsIdsBatch.size();
        final double progress = (double) lastEntity / (double) lhsCount;
        final String msg = String.format("Creating %s to %s of %s (%s)",
                IntegerConverter.INSTANCE.convertToString(firstEntity + 1),
                IntegerConverter.INSTANCE.convertToString(lastEntity),
                IntegerConverter.INSTANCE.convertToString(lhsCount),
                PercentageConverter.INSTANCE_2.convertToString(progress));
        final TaskExecutionWork work = taskExecution.reportWorkStart(msg);

        // Load LHS
        final Collection<E> lhsEntities = loadLhsBatch(rhsInProgress, lhsIdsBatch);

        // Create RHS
        rhsInProgress = new CountDownLatch(1);
        rhsDone = new CountDownLatch(1);
        e.execute(new RhsCreator(rhsInProgress, rhsDone, lhsEntities));

        firstEntity += batchSizeEntities;

        taskExecution.reportWorkEnd(work);
        taskExecution.checkpoint();
    }

    // Wait until last batch ends
    if (rhsDone != null) {
        try {
            rhsDone.await();
        } catch (final InterruptedException e1) {
            // Preserve the interrupt status instead of swallowing it silently.
            Thread.currentThread().interrupt();
        }
    }
}

From source file: com.dangdang.ddframe.job.api.type.dataflow.executor.DataflowJobExecutor.java

private void processDataForThroughput(final int concurrentDataProcessThreadCount,
        final ShardingContext shardingContext, final List<Object> data) {
    if (concurrentDataProcessThreadCount <= 1 || data.size() <= concurrentDataProcessThreadCount) {
        processData(shardingContext, data);
        return;
    }
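    // Integer division: the partition size is data.size() / concurrentDataProcessThreadCount,
    // so the number of partitions (and submitted tasks) may exceed the thread
    // count; the latch below is sized to match the actual partition count.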
    List<List<Object>> splitData = Lists.partition(data, data.size() / concurrentDataProcessThreadCount);
    final CountDownLatch latch = new CountDownLatch(splitData.size());
    for (final List<Object> each : splitData) {
        getExecutorService().submit(new Runnable() {

            @Override
            public void run() {
                try {
                    processData(shardingContext, each);
                } finally {
                    latch.countDown();
                }
            }
        });
    }
    latchAwait(latch);
}

From source file: org.sonar.db.purge.PurgeCommands.java

public void deleteComponentMeasures(List<String> analysisUuids, List<String> componentUuids) {
    if (analysisUuids.isEmpty() || componentUuids.isEmpty()) {
        return;
    }

    List<List<String>> analysisUuidsPartitions = Lists.partition(analysisUuids, MAX_SNAPSHOTS_PER_QUERY);
    List<List<String>> componentUuidsPartitions = Lists.partition(componentUuids, MAX_RESOURCES_PER_QUERY);

    profiler.start("deleteComponentMeasures");
    for (List<String> analysisUuidsPartition : analysisUuidsPartitions) {
        for (List<String> componentUuidsPartition : componentUuidsPartitions) {
            purgeMapper.deleteComponentMeasures(analysisUuidsPartition, componentUuidsPartition);
        }
    }
    session.commit();
    profiler.stop();
}

From source file: com.netflix.metacat.main.services.search.ElasticSearchUtilImpl.java

/**
 * Delete index documents.
 * @param type index type
 * @param ids entity ids
 */
public void delete(final String type, final List<String> ids) {
    if (ids != null && !ids.isEmpty()) {
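        // Hard-delete in batches of at most 10,000 ids per call.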
        final List<List<String>> partitionedIds = Lists.partition(ids, 10000);
        partitionedIds.forEach(subIds -> hardDeleteDoc(type, subIds));
    }
}

From source file: com.stratio.cassandra.index.RowServiceWide.java

/**
 * {@inheritDoc}
 * <p/>
 * The {@link Row} is a logical one.
 */
@Override
protected List<Row> rows(List<SearchResult> searchResults, long timestamp, boolean usesRelevance) {
    // Initialize result
    List<Row> rows = new ArrayList<>(searchResults.size());

    // Group key queries by partition keys
    Map<CellName, Float> scoresByClusteringKey = new HashMap<>(searchResults.size());
    Map<DecoratedKey, List<CellName>> keys = new HashMap<>();
    for (SearchResult searchResult : searchResults) {
        DecoratedKey partitionKey = searchResult.getPartitionKey();
        CellName clusteringKey = searchResult.getClusteringKey();
        Float score = searchResult.getScore();
        scoresByClusteringKey.put(clusteringKey, score);
        List<CellName> clusteringKeys = keys.get(partitionKey);
        if (clusteringKeys == null) {
            clusteringKeys = new ArrayList<>();
            keys.put(partitionKey, clusteringKeys);
        }
        clusteringKeys.add(clusteringKey);
    }

    for (Map.Entry<DecoratedKey, List<CellName>> entry : keys.entrySet()) {
        DecoratedKey partitionKey = entry.getKey();
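        // Read each partition's rows in batches of at most 1,000 clustering keys.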
        for (List<CellName> clusteringKeys : Lists.partition(entry.getValue(), 1000)) {
            Map<CellName, Row> partitionRows = rows(partitionKey, clusteringKeys, timestamp);
            for (Map.Entry<CellName, Row> entry1 : partitionRows.entrySet()) {
                Row row = entry1.getValue();
                if (usesRelevance) {
                    CellName clusteringKey = entry1.getKey();
                    Float score = scoresByClusteringKey.get(clusteringKey);
                    Row scoredRow = addScoreColumn(row, timestamp, score);
                    rows.add(scoredRow);
                } else {
                    rows.add(row);
                }
            }
        }
    }
    return rows;
}

From source file: com.netflix.spinnaker.front50.model.S3StorageService.java

public void bulkDeleteObjects(ObjectType objectType, Collection<String> objectKeys) {
    if (readOnlyMode) {
        throw new ReadOnlyModeException();
    }

    // s3 supports bulk delete for a maximum of 1000 object keys
    Lists.partition(new ArrayList<>(objectKeys), 1000).forEach(keys -> {
        amazonS3.deleteObjects(new DeleteObjectsRequest(bucket).withKeys(keys.stream()
                .map(k -> new DeleteObjectsRequest.KeyVersion(
                        buildS3Key(objectType.group, k, objectType.defaultMetadataFilename)))
                .collect(Collectors.toList())));
    });
}

From source file: gg.uhc.uhc.modules.team.ListTeamsCommand.java

@Override
protected boolean runCommand(CommandSender sender, OptionSet options) {
    int page = pageSpec.value(options);
    boolean emptyOnly = options.has(emptyOnlySpec);
    boolean showAll = options.has(showAllSpec);

    if (showAll && emptyOnly) {
        sender.sendMessage(ChatColor.RED + "You must provide -e OR -a, you cannot supply both");
        return true;
    }

    Predicate<Team> predicate;
    String type;

    if (emptyOnly) {
        type = "(empty teams)";
        predicate = Predicates.not(FunctionalUtil.TEAMS_WITH_PLAYERS);
    } else if (showAll) {
        type = "(all teams)";
        predicate = Predicates.alwaysTrue();
    } else {
        type = "(with players)";
        predicate = FunctionalUtil.TEAMS_WITH_PLAYERS;
    }

    List<Team> teams = Lists.newArrayList(Iterables.filter(teamModule.getTeams().values(), predicate));

    if (teams.size() == 0) {
        sender.sendMessage(ChatColor.RED + "No results found for query " + type);
        return true;
    }

    List<List<Team>> partitioned = Lists.partition(teams, COUNT_PER_PAGE);

    if (page > partitioned.size()) {
        sender.sendMessage(ChatColor.RED + "Page " + page + " does not exist");
        return true;
    }

    List<Team> pageItems = partitioned.get(page - 1);

    Map<String, Object> context = ImmutableMap.<String, Object>builder().put("page", page)
            .put("pages", partitioned.size()).put("type", type).put("count", pageItems.size())
            .put("teams", teams.size()).put("multiple", partitioned.size() > 1).build();

    sender.sendMessage(messages.evalTemplate("header", context));

    Joiner joiner = Joiner.on(", ");
    for (Team team : pageItems) {
        String memberString;
        Set<OfflinePlayer> members = team.getPlayers();

        if (members.size() == 0) {
            memberString = NO_MEMBERS;
        } else {
            memberString = joiner
                    .join(Iterables.transform(team.getPlayers(), FunctionalUtil.PLAYER_NAME_FETCHER));
        }

        sender.sendMessage(
                String.format(FORMAT, team.getPrefix() + team.getDisplayName(), team.getName(), memberString));
    }

    return true;
}

From source file: eu.mondo.driver.fourstore.FourStoreGraphDriverReadWrite.java

@Override
public void insertEdgesWithVertex(final Multimap<String, String> edges, final String edgeType,
        final String targetVertexType) throws IOException {
    if (edges.isEmpty()) {
        return;
    }

    final ArrayList<String> sourceVertices = new ArrayList<>(edges.keySet());
    final List<List<String>> sourceVerticesPartitions = Lists.partition(sourceVertices, PARTITION_SIZE);
    for (final List<String> sourceVerticesPartition : sourceVerticesPartitions) {

        final Multimap<String, String> edgePartition = ArrayListMultimap.create();
        for (final String sourceVertexURI : sourceVerticesPartition) {
            final Collection<String> targetVertexURIs = edges.get(sourceVertexURI);
            edgePartition.putAll(sourceVertexURI, targetVertexURIs);
        }

        insertEdgesWithVertexPartition(edgePartition, edgeType, targetVertexType);
    }

}

From source file: com.sk89q.worldguard.protection.managers.storage.sql.RegionInserter.java

private void insertPolygons() throws SQLException {
    Closer closer = Closer.create();
    try {
        PreparedStatement stmt = closer
                .register(conn.prepareStatement("INSERT INTO " + config.getTablePrefix() + "region_poly2d "
                        + "(region_id, world_id, max_y, min_y) " + "VALUES " + "(?, " + worldId + ", ?, ?)"));

        for (List<ProtectedPolygonalRegion> partition : Lists.partition(polygons,
                StatementBatch.MAX_BATCH_SIZE)) {
            for (ProtectedPolygonalRegion region : partition) {
                stmt.setString(1, region.getId());
                stmt.setInt(2, region.getMaximumPoint().getBlockY());
                stmt.setInt(3, region.getMinimumPoint().getBlockY());
                stmt.addBatch();
            }

            stmt.executeBatch();
        }
    } finally {
        closer.closeQuietly();
    }
}