Example usage for com.google.common.collect Lists partition

Introduction

This page collects example usages of com.google.common.collect.Lists.partition drawn from open-source projects.

Prototype

public static <T> List<List<T>> partition(List<T> list, int size) 

Document

Returns consecutive List.subList(int, int) views of a list, each of the same size (the final list may be smaller). The inner lists are sublist views of the original list, and the outer list is unmodifiable but reflects the latest state of the source list.
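
To get a quick feel for the behavior, here is a minimal, self-contained sketch (not taken from the projects below): partitioning seven elements into chunks of three yields two full chunks and a smaller trailing chunk.

import com.google.common.collect.Lists;

import java.util.Arrays;
import java.util.List;

public class PartitionDemo {
    public static void main(String[] args) {
        List<Integer> numbers = Arrays.asList(1, 2, 3, 4, 5, 6, 7);

        // Consecutive chunks of size 3; the final chunk holds the remainder.
        List<List<Integer>> chunks = Lists.partition(numbers, 3);

        System.out.println(chunks); // prints [[1, 2, 3], [4, 5, 6], [7]]
    }
}

Because each chunk is a view backed by the source list rather than a copy, callers that need an independent list must copy it (for example with Lists.newArrayList), as several of the examples below do.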

Usage

From source file:com.netflix.metacat.connector.hive.sql.DirectSqlSavePartition.java

/**
 * Inserts the partitions.
 * Note: the column descriptor of each partition is set to that of the table.
 *
 * @param tableQName table name
 * @param table      hive table
 * @param partitions list of partitions
 */
public void insert(final QualifiedName tableQName, final Table table, final List<PartitionInfo> partitions) {
    final long start = registry.clock().wallTime();
    try {
        // Get the table id and column id
        final TableSequenceIds tableSequenceIds = getTableSequenceIds(table.getDbName(), table.getTableName());
        // Get the sequence ids and lock the records in the database
        final PartitionSequenceIds partitionSequenceIds = this.getPartitionSequenceIds(partitions.size());
        final List<List<PartitionInfo>> subPartitionList = Lists.partition(partitions, batchSize);
        // Use the current time for create and update time.
        final long currentTimeInEpoch = Instant.now().getEpochSecond();
        int index = 0;
        // Insert the partitions in batches
        for (List<PartitionInfo> subPartitions : subPartitionList) {
            _insert(tableQName, table, tableSequenceIds, partitionSequenceIds, subPartitions,
                    currentTimeInEpoch, index);
            index += batchSize;
        }
    } finally {
        this.fastServiceMetric.recordTimer(HiveMetrics.TagAddPartitions.getMetricName(),
                registry.clock().wallTime() - start);
    }
}
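
Note that sequence ids for all partitions are reserved (and the corresponding rows locked) up front; the running index, advanced by batchSize per chunk, appears to tell _insert which slice of those reserved ids each batch should consume.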

From source file:org.sonar.core.issue.db.IssueDao.java

public List<IssueDto> selectByIds(Collection<Long> ids, SqlSession session) {
    if (ids.isEmpty()) {
        return Collections.emptyList();
    }
    List<IssueDto> dtosList = newArrayList();
    List<List<Long>> idsPartitionList = Lists.partition(newArrayList(ids), 1000);
    for (List<Long> idsPartition : idsPartitionList) {
        List<IssueDto> dtos = session.selectList("org.sonar.core.issue.db.IssueMapper.selectByIds",
                newArrayList(idsPartition));
        dtosList.addAll(dtos);
    }
    return dtosList;
}

From source file:com.navercorp.pinpoint.web.dao.hbase.HbaseTraceDao.java

private List<List<TransactionId>> partition(List<TransactionId> transactionIdList, int eachPartitionSize) {
    return Lists.partition(transactionIdList, eachPartitionSize);
}

From source file:com.netflix.metacat.s3.connector.S3SplitDetailManager.java

@SuppressWarnings("checkstyle:methodname")
private List<ConnectorPartition> _getPartitions(final SchemaTableName tableName, final String filterExpression,
        final List<String> partitionIds, final Sort sort, final Pageable pageable,
        final boolean includePartitionDetails) {
    //
    // Limit the IN clause to 5000 part names: with part_name values of up to 767 bytes, the
    // SQL query would exceed the maximum query length (max_allowed_packet for our RDS) at
    // roughly 5400 entries.
    //
    final List<ConnectorPartition> partitions = Lists.newArrayList();
    if (partitionIds != null && partitionIds.size() > 5000) {
        final List<List<String>> subFilterPartitionNamesList = Lists.partition(partitionIds, 5000);
        subFilterPartitionNamesList
                .forEach(subPartitionIds -> partitions.addAll(_getConnectorPartitions(tableName,
                        filterExpression, subPartitionIds, sort, pageable, includePartitionDetails)));
    } else {
        partitions.addAll(_getConnectorPartitions(tableName, filterExpression, partitionIds, sort, pageable,
                includePartitionDetails));
    }
    return partitions;
}

From source file:com.daugherty.e2c.persistence.data.jdbc.JdbcSupplierDao.java

@Override
public List<Supplier> loadApprovedBySupplierIds(List<Long> ids, Locale locale) {
    List<Supplier> suppliers = Lists.newArrayList();
    HashSet<Long> uniqueIds = Sets.newHashSet(ids);
    LOGGER.debug("Getting Supplier from the database by supplier ids " + uniqueIds);
    for (List<Long> partitionedIds : Lists.partition(Lists.newArrayList(uniqueIds), 1000)) {
        String sql = getSql("/supplier/getApprovedBySupplierIds.sql");
        SqlParameterSource parameterSource = new MapSqlParameterSource("supplierIds", partitionedIds)
                .addValue("language", locale.getLanguage());
        suppliers.addAll(jdbcTemplate.query(sql, parameterSource, new SupplierResultSetExtractor()));
    }

    return suppliers;
}

From source file:org.apache.drill.exec.planner.ParquetPartitionDescriptor.java

@Override
protected void createPartitionSublists() {
    Set<String> fileLocations = ((ParquetGroupScan) scanRel.getGroupScan()).getFileSet();
    List<PartitionLocation> locations = new LinkedList<>();
    for (String file : fileLocations) {
        locations.add(new ParquetPartitionLocation(file));
    }
    locationSuperList = Lists.partition(locations, PartitionDescriptor.PARTITION_BATCH_SIZE);
    sublistsCreated = true;
}

From source file:org.apache.ambari.server.orm.dao.HostRoleCommandDAO.java

@RequiresSession
public List<HostRoleCommandEntity> findByPKs(Collection<Long> taskIds) {
    if (taskIds == null || taskIds.isEmpty()) {
        return Collections.emptyList();
    }

    TypedQuery<HostRoleCommandEntity> query = entityManagerProvider.get().createQuery(
            "SELECT task FROM HostRoleCommandEntity task WHERE task.taskId IN ?1 " + "ORDER BY task.taskId",
            HostRoleCommandEntity.class);

    if (daoUtils.getDbType().equals(ORACLE) && taskIds.size() > ORACLE_LIST_LIMIT) {
        List<HostRoleCommandEntity> result = new ArrayList<HostRoleCommandEntity>();

        List<List<Long>> lists = Lists.partition(new ArrayList<Long>(taskIds), ORACLE_LIST_LIMIT);
        for (List<Long> list : lists) {
            result.addAll(daoUtils.selectList(query, list));
        }

        return result;
    }

    return daoUtils.selectList(query, taskIds);
}
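
The chunk size here is dictated by Oracle, which rejects IN lists of more than 1,000 expressions with ORA-01795; presumably ORACLE_LIST_LIMIT encodes that cap, while other databases receive the query without partitioning.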

From source file:com.b2international.snowowl.core.compare.CompareResultsDsvExporter.java

public File export(IProgressMonitor monitor) throws IOException {

    CsvMapper mapper = new CsvMapper();
    CsvSchema schema = mapper.schemaFor(CompareData.class).withColumnSeparator(delimiter).withHeader().sortedBy(
            "componentType", "componentId", "componentType", "label", "changeKind", "attribute", "from", "to");

    try (SequenceWriter writer = mapper.writer(schema).writeValues(outputPath.toFile())) {
        monitor.beginTask("Exporting compare results to DSV", compareResults.getTotalNew()
                + compareResults.getTotalChanged() + compareResults.getTotalDeleted());

        ListMultimap<Short, ComponentIdentifier> newComponentIdentifiers = Multimaps
                .index(compareResults.getNewComponents(), ComponentIdentifier::getTerminologyComponentId);
        ListMultimap<Short, String> newComponentIds = ImmutableListMultimap.copyOf(
                Multimaps.transformValues(newComponentIdentifiers, ComponentIdentifier::getComponentId));

        for (short terminologyComponentId : newComponentIds.keySet()) {
            for (List<String> componentIds : Lists.partition(newComponentIds.get(terminologyComponentId),
                    PARTITION_SIZE)) {
                RevisionIndexRequestBuilder<CollectionResource<IComponent>> componentFetchRequest = fetcherFunction
                        .apply(terminologyComponentId, componentIds);

                if (componentFetchRequest == null) {
                    break;
                }

                CollectionResource<IComponent> components = componentFetchRequest
                        .build(repositoryUuid, compareBranch)
                        .execute(ApplicationContext.getServiceForClass(IEventBus.class)).getSync();

                for (IComponent component : components) {
                    writer.write(new CompareData(component, ChangeKind.ADDED));
                }

                monitor.worked(components.getItems().size());
            }
        }

        ListMultimap<Short, ComponentIdentifier> changedComponentIdentifiers = Multimaps
                .index(compareResults.getChangedComponents(), ComponentIdentifier::getTerminologyComponentId);
        ListMultimap<Short, String> changedComponentIds = ImmutableListMultimap.copyOf(
                Multimaps.transformValues(changedComponentIdentifiers, ComponentIdentifier::getComponentId));
        ListMultimap<String, IComponent> componentPairs = ArrayListMultimap.create(PARTITION_SIZE, 2);

        for (short terminologyComponentId : changedComponentIds.keySet()) {
            for (List<String> componentIds : Lists.partition(changedComponentIds.get(terminologyComponentId),
                    PARTITION_SIZE)) {
                componentPairs.clear();
                RevisionIndexRequestBuilder<CollectionResource<IComponent>> componentFetchRequest = fetcherFunction
                        .apply(terminologyComponentId, componentIds);

                if (componentFetchRequest == null) {
                    break;
                }

                componentFetchRequest.build(repositoryUuid, baseBranch)
                        .execute(ApplicationContext.getServiceForClass(IEventBus.class)).getSync()
                        .forEach(c -> componentPairs.put(c.getId(), c));

                componentFetchRequest.build(repositoryUuid, compareBranch)
                        .execute(ApplicationContext.getServiceForClass(IEventBus.class)).getSync()
                        .forEach(c -> componentPairs.put(c.getId(), c));

                for (Entry<String, List<IComponent>> entry : Multimaps.asMap(componentPairs).entrySet()) {
                    IComponent baseComponent = entry.getValue().get(0);
                    IComponent compareComponent = entry.getValue().get(1);
                    Collection<CompareData> compareResults = getCompareResultsOfComponent.apply(baseComponent,
                            compareComponent);

                    for (CompareData d : compareResults) {
                        writer.write(d);
                    }
                }

                monitor.worked(componentPairs.keySet().size());
            }
        }

        ListMultimap<Short, ComponentIdentifier> deletedComponentIdentifiers = Multimaps
                .index(compareResults.getDeletedComponents(), ComponentIdentifier::getTerminologyComponentId);
        ListMultimap<Short, String> deletedComponentIds = ImmutableListMultimap.copyOf(
                Multimaps.transformValues(deletedComponentIdentifiers, ComponentIdentifier::getComponentId));

        for (short terminologyComponentId : deletedComponentIds.keySet()) {
            for (List<String> componentIds : Lists.partition(deletedComponentIds.get(terminologyComponentId),
                    PARTITION_SIZE)) {
                RevisionIndexRequestBuilder<CollectionResource<IComponent>> componentFetchRequest = fetcherFunction
                        .apply(terminologyComponentId, componentIds);

                if (componentFetchRequest == null) {
                    break;
                }

                CollectionResource<IComponent> components = componentFetchRequest
                        .build(repositoryUuid, baseBranch)
                        .execute(ApplicationContext.getServiceForClass(IEventBus.class)).getSync();

                for (IComponent component : components) {
                    writer.write(new CompareData(component, ChangeKind.DELETED));
                }

                monitor.worked(components.getItems().size());
            }
        }

    } finally {
        monitor.done();
    }

    return outputPath.toFile();
}
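
For changed components, the example reuses a single ArrayListMultimap sized for PARTITION_SIZE keys with two values each: under every component id it collects the base-branch and compare-branch versions of that component, then diffs each pair into CompareData rows.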

From source file:com.sk89q.worldguard.protection.managers.storage.sql.RegionUpdater.java

private void replaceFlags() throws SQLException {
    Closer closer = Closer.create();
    try {
        PreparedStatement stmt = closer.register(conn.prepareStatement("DELETE FROM " + config.getTablePrefix()
                + "region_flag " + "WHERE region_id = ? " + "AND world_id = " + worldId));

        for (List<ProtectedRegion> partition : Lists.partition(flagsToReplace, StatementBatch.MAX_BATCH_SIZE)) {
            for (ProtectedRegion region : partition) {
                stmt.setString(1, region.getId());
                stmt.addBatch();
            }

            stmt.executeBatch();
        }
    } finally {
        closer.closeQuietly();
    }

    closer = Closer.create();
    try {
        PreparedStatement stmt = closer.register(conn.prepareStatement("INSERT INTO " + config.getTablePrefix()
                + "region_flag " + "(id, region_id, world_id, flag, value) " + "VALUES " + "(null, ?, "
                + worldId + ", ?, ?)"));

        StatementBatch batch = new StatementBatch(stmt, StatementBatch.MAX_BATCH_SIZE);

        for (ProtectedRegion region : flagsToReplace) {
            for (Map.Entry<Flag<?>, Object> entry : region.getFlags().entrySet()) {
                if (entry.getValue() == null)
                    continue;

                Object flag = marshalFlagValue(entry.getKey(), entry.getValue());

                stmt.setString(1, region.getId());
                stmt.setString(2, entry.getKey().getName());
                stmt.setObject(3, flag);
                batch.addBatch();
            }
        }

        batch.executeRemaining();
    } finally {
        closer.closeQuietly();
    }
}
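
In this example Lists.partition bounds the size of each JDBC batch rather than of an IN clause: every chunk of at most StatementBatch.MAX_BATCH_SIZE regions is flushed with a single executeBatch() call.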

From source file:org.opennms.newts.cassandra.search.CassandraIndexer.java

@Override
public void update(Collection<Sample> samples) {

    Timer.Context ctx = m_updateTimer.time();

    List<RegularStatement> statements = Lists.newArrayList();
    Map<Context, Map<Resource, ResourceMetadata>> cacheQueue = Maps.newHashMap();

    for (Sample sample : samples) {
        ConsistencyLevel writeConsistency = m_contextConfigurations.getWriteConsistency(sample.getContext());
        maybeIndexResource(cacheQueue, statements, sample.getContext(), sample.getResource(), writeConsistency);
        maybeIndexResourceAttributes(cacheQueue, statements, sample.getContext(), sample.getResource(),
                writeConsistency);
        maybeAddMetricName(cacheQueue, statements, sample.getContext(), sample.getResource(), sample.getName(),
                writeConsistency);
    }

    try {
        if (statements.size() > 0) {
            // Deduplicate the insert statements by keying off the effective query strings
            TreeMap<String, RegularStatement> cqlToStatementMap = new TreeMap<String, RegularStatement>();
            for (RegularStatement statement : statements) {
                cqlToStatementMap.put(statement.toString(), statement);
            }
            statements = Lists.newArrayList(cqlToStatementMap.values());

            // Limit the size of the batches; see NEWTS-67
            List<ResultSetFuture> futures = Lists.newArrayList();
            for (List<RegularStatement> partition : Lists.partition(statements, MAX_BATCH_SIZE)) {
                futures.add(m_session
                        .executeAsync(batch(partition.toArray(new RegularStatement[partition.size()]))));
            }

            for (ResultSetFuture future : futures) {
                future.getUninterruptibly();
            }
        }

        // Order matters here; we want the cache updated only after a successful Cassandra write.
        for (Context context : cacheQueue.keySet()) {
            for (Map.Entry<Resource, ResourceMetadata> entry : cacheQueue.get(context).entrySet()) {
                m_cache.merge(context, entry.getKey(), entry.getValue());
            }
        }
    } finally {
        ctx.stop();
    }

}