Example usage for com.google.common.collect Iterables partition

Introduction

This page collects usage examples for com.google.common.collect Iterables.partition.

Prototype

public static <T> Iterable<List<T>> partition(final Iterable<T> iterable, final int size) 

Document

Divides an iterable into unmodifiable sublists of the given size (the final iterable may be smaller).
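
For example, partitioning seven elements into blocks of three yields sublists of sizes 3, 3, and 1. A minimal sketch (values are illustrative):

import com.google.common.collect.Iterables;

import java.util.Arrays;
import java.util.List;

public class PartitionDemo {
    public static void main(String[] args) {
        List<Integer> numbers = Arrays.asList(1, 2, 3, 4, 5, 6, 7);

        // Partition into unmodifiable sublists of at most 3 elements.
        Iterable<List<Integer>> blocks = Iterables.partition(numbers, 3);

        for (List<Integer> block : blocks) {
            System.out.println(block); // prints [1, 2, 3], then [4, 5, 6], then [7]
        }
    }
}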

Usage

From source file:org.candlepin.model.EntitlementCurator.java

/**
 * A batched version of listModifying that finds entitlements which modify the
 * input entitlements. Useful when dealing with a large number of entitlements
 * whose modifier products must be determined.
 *
 * @param entitlements the entitlements to check
 * @return IDs of the entitlements that modify the input entitlements
 */
public Collection<String> batchListModifying(Iterable<Entitlement> entitlements) {
    List<String> eids = new LinkedList<String>();

    if (entitlements != null && entitlements.iterator().hasNext()) {
        String hql = "SELECT DISTINCT eOut.id" + "    FROM Entitlement eOut" + "        JOIN eOut.pool outPool"
                + "        JOIN outPool.providedProducts outProvided"
                + "        JOIN outProvided.productContent outProvContent"
                + "        JOIN outProvContent.content outContent"
                + "        JOIN outContent.modifiedProductIds outModProdId" + "    WHERE"
                + "        outPool.endDate >= current_date AND" + "        eOut NOT IN (:ein) AND"
                + "        EXISTS (" + "            SELECT eIn" + "                FROM Entitlement eIn"
                + "                    JOIN eIn.consumer inConsumer"
                + "                    JOIN eIn.pool inPool"
                + "                    JOIN inPool.product inMktProd"
                + "                    LEFT JOIN inPool.providedProducts inProvidedProd"
                + "                WHERE eIn in (:ein) AND inConsumer = eOut.consumer AND"
                + "                    inPool.endDate >= outPool.startDate AND"
                + "                    inPool.startDate <= outPool.endDate AND"
                + "                    (inProvidedProd.id = outModProdId OR inMktProd.id = outModProdId)"
                + "        )";

        Query query = this.getEntityManager().createQuery(hql);

        Iterable<List<Entitlement>> blocks = Iterables.partition(entitlements,
                AbstractHibernateCurator.IN_OPERATOR_BLOCK_SIZE);

        for (List<Entitlement> block : blocks) {
            eids.addAll(query.setParameter("ein", block).getResultList());
        }
    }

    return eids;
}
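
The block size here (AbstractHibernateCurator.IN_OPERATOR_BLOCK_SIZE) keeps each IN list within database limits; Oracle, for instance, rejects expression lists longer than 1,000 items. A minimal sketch of the same pattern with plain JPA (the entity and method names are illustrative):

import com.google.common.collect.Iterables;

import javax.persistence.EntityManager;
import javax.persistence.Query;
import java.util.ArrayList;
import java.util.List;

public class InClauseBatcher {
    private static final int BLOCK_SIZE = 1000;

    // Runs an IN-list query in blocks so it never exceeds BLOCK_SIZE parameters.
    @SuppressWarnings("unchecked")
    public static List<String> findIdsByKeys(EntityManager em, Iterable<String> keys) {
        List<String> ids = new ArrayList<String>();
        Query query = em.createQuery("SELECT e.id FROM SomeEntity e WHERE e.key IN (:keys)");
        for (List<String> block : Iterables.partition(keys, BLOCK_SIZE)) {
            ids.addAll(query.setParameter("keys", block).getResultList());
        }
        return ids;
    }
}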

From source file:kr.debop4j.core.parallelism.Parallels.java

/**
 * Runs the given action on each element, executing partitions of the
 * elements in parallel across worker threads.
 *
 * @param elements the elements to iterate over
 * @param action   the action to perform on each element
 */
public static <T> void runEach(final Iterable<T> elements, final Action1<T> action) {
    shouldNotBeNull(elements, "elements");
    shouldNotBeNull(action, "function");
    if (isDebugEnabled)
        log.debug(" ? ... workerCount=[{}]", getWorkerCount());

    ExecutorService executor = Executors.newFixedThreadPool(getWorkerCount());

    try {
        List<T> elemList = Lists.newArrayList(elements);
        int partitionSize = getPartitionSize(elemList.size(), getWorkerCount());
        Iterable<List<T>> partitions = Iterables.partition(elemList, partitionSize);
        List<Callable<Void>> tasks = Lists.newLinkedList();

        for (final List<T> partition : partitions) {
            Callable<Void> task = new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    for (final T element : partition)
                        action.perform(element);
                    return null;
                }
            };
            tasks.add(task);
        }
        List<Future<Void>> results = executor.invokeAll(tasks);
        for (Future<Void> result : results) {
            result.get();
        }

        if (isDebugEnabled)
            log.debug(" ?  . workerCount=[{}]", getWorkerCount());

    } catch (Exception e) {
        log.error("???      ?.", e);
        throw new RuntimeException(e);
    } finally {
        executor.shutdown();
    }
}
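
A hypothetical call site for this helper, using the Action1 callback type seen above:

Parallels.runEach(Arrays.asList("a", "b", "c"), new Action1<String>() {
    @Override
    public void perform(String item) {
        System.out.println(item); // each partition runs on its own worker thread
    }
});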

From source file:com.palantir.atlasdb.transaction.impl.SerializableTransaction.java

private void verifyRows(Transaction ro) {
    for (String table : rowsRead.keySet()) {
        final ConcurrentNavigableMap<Cell, byte[]> readsForTable = getReadsForTable(table);
        Multimap<ColumnSelection, byte[]> map = Multimaps.newSortedSetMultimap(
                Maps.<ColumnSelection, Collection<byte[]>>newHashMap(), new Supplier<SortedSet<byte[]>>() {
                    @Override
                    public TreeSet<byte[]> get() {
                        return Sets.newTreeSet(UnsignedBytes.lexicographicalComparator());
                    }
                });
        for (RowRead r : rowsRead.get(table)) {
            map.putAll(r.cols, r.rows);
        }
        for (final ColumnSelection cols : map.keySet()) {
            for (List<byte[]> batch : Iterables.partition(map.get(cols), 1000)) {
                SortedMap<byte[], RowResult<byte[]>> currentRows = ro.getRows(table, batch, cols);
                for (byte[] row : batch) {
                    RowResult<byte[]> currentRow = currentRows.get(row);
                    Map<Cell, byte[]> originalReads = readsForTable
                            .tailMap(Cells.createSmallestCellForRow(row), true)
                            .headMap(Cells.createLargestCellForRow(row), true);

                    // Filter our reads down to just the set that matches our column selection.
                    originalReads = Maps.filterKeys(originalReads, new Predicate<Cell>() {
                        @Override
                        public boolean apply(Cell input) {
                            return cols.contains(input.getColumnName());
                        }
                    });

                    if (writesByTable.get(table) != null) {
                        // We don't want to verify any reads of cells we wrote to, because we would just read our own values.
                        // NB: We filter our write set out here because our normal SI checking handles this case to ensure the value hasn't changed.
                        originalReads = Maps.filterKeys(originalReads,
                                Predicates.not(Predicates.in(writesByTable.get(table).keySet())));
                    }

                    if (currentRow == null && originalReads.isEmpty()) {
                        continue;
                    }

                    if (currentRow == null) {
                        throw TransactionSerializableConflictException.create(table, getTimestamp(),
                                System.currentTimeMillis() - timeCreated);
                    }

                    Map<Cell, byte[]> currentCells = Maps2.fromEntries(currentRow.getCells());
                    if (writesByTable.get(table) != null) {
                        // We don't want to verify any reads of cells we wrote to, because we would just read our own values.
                        // NB: We filter our write set out here because our normal SI checking handles this case to ensure the value hasn't changed.
                        currentCells = Maps.filterKeys(currentCells,
                                Predicates.not(Predicates.in(writesByTable.get(table).keySet())));
                    }
                    if (!areMapsEqual(originalReads, currentCells)) {
                        throw TransactionSerializableConflictException.create(table, getTimestamp(),
                                System.currentTimeMillis() - timeCreated);
                    }
                }
            }
        }

    }
}

From source file:com.b2international.snowowl.snomed.datastore.id.cis.CisSnomedIdentifierService.java

@Override
public void release(final Set<String> componentIds) {
    LOGGER.debug("Releasing {} component IDs.", componentIds.size());

    final Map<String, SctId> sctIds = getSctIds(componentIds);
    final Map<String, SctId> problemSctIds = ImmutableMap.copyOf(Maps.filterValues(sctIds,
            Predicates.<SctId>not(Predicates.or(SctId::isAssigned, SctId::isReserved, SctId::isAvailable))));

    if (!problemSctIds.isEmpty()) {
        throw new SctIdStatusException(
                "Cannot release %s component IDs because they are not assigned, reserved, or already available.",
                problemSctIds);
    }

    final Map<String, SctId> assignedOrReservedSctIds = ImmutableMap
            .copyOf(Maps.filterValues(sctIds, Predicates.or(SctId::isAssigned, SctId::isReserved)));

    if (assignedOrReservedSctIds.isEmpty()) {
        return;
    }

    HttpPut releaseRequest = null;
    String currentNamespace = null;

    try {

        if (assignedOrReservedSctIds.size() > 1) {
            final Multimap<String, String> componentIdsByNamespace = toNamespaceMultimap(
                    assignedOrReservedSctIds.keySet());
            for (final Entry<String, Collection<String>> entry : componentIdsByNamespace.asMap().entrySet()) {
                currentNamespace = entry.getKey();

                for (final Collection<String> bulkIds : Iterables.partition(entry.getValue(), BULK_LIMIT)) {
                    LOGGER.debug(String.format("Sending bulk release request for namespace %s with size %d.",
                            currentNamespace, bulkIds.size()));
                    releaseRequest = httpPut(String.format("sct/bulk/release?token=%s", getToken()),
                            createBulkReleaseData(currentNamespace, bulkIds));
                    execute(releaseRequest);
                }
            }

        } else {

            final String componentId = Iterables.getOnlyElement(assignedOrReservedSctIds.keySet());
            currentNamespace = SnomedIdentifiers.getNamespace(componentId);
            releaseRequest = httpPut(String.format("sct/release?token=%s", getToken()),
                    createReleaseData(componentId));
            execute(releaseRequest);
        }

    } catch (IOException e) {
        throw new SnowowlRuntimeException(
                String.format("Exception while releasing IDs for namespace %s.", currentNamespace), e);
    } finally {
        release(releaseRequest);
    }
}

From source file:com.jlhood.metrics.CloudWatchReporter.java

/**
 * Reports the given metrics to CloudWatch.
 *
 * @param gauges     gauge metrics.
 * @param counters   counter metrics.
 * @param histograms histogram metrics.
 * @param meters     meter metrics.
 * @param timers     timer metrics.
 */
void report(SortedMap<String, Gauge> gauges, SortedMap<String, Counter> counters,
        SortedMap<String, Histogram> histograms, SortedMap<String, Meter> meters,
        SortedMap<String, Timer> timers) {

    // Just an estimate to reduce resizing.
    List<MetricDatum> data = new ArrayList<MetricDatum>(
            gauges.size() + counters.size() + meters.size() + 2 * histograms.size() + 2 * timers.size());

    // Translate various metric classes to MetricDatum
    for (Map.Entry<String, Gauge> gaugeEntry : gauges.entrySet()) {
        reportGauge(gaugeEntry, typeDimValGauge, data);
    }
    for (Map.Entry<String, Counter> counterEntry : counters.entrySet()) {
        reportCounter(counterEntry, typeDimValCounterCount, data);
    }
    for (Map.Entry<String, Meter> meterEntry : meters.entrySet()) {
        reportCounter(meterEntry, typeDimValMeterCount, data);
    }
    for (Map.Entry<String, Histogram> histogramEntry : histograms.entrySet()) {
        reportCounter(histogramEntry, typeDimValHistoSamples, data);
        reportSampling(histogramEntry, typeDimValHistoStats, 1.0, data);
    }
    for (Map.Entry<String, Timer> timerEntry : timers.entrySet()) {
        reportCounter(timerEntry, typeDimValTimerSamples, data);
        reportSampling(timerEntry, typeDimValTimerStats, 0.000001, data); // nanos -> millis
    }

    // Filter out unreportable entries.
    Collection<MetricDatum> nonEmptyData = Collections2.filter(data, new Predicate<MetricDatum>() {
        @Override
        public boolean apply(MetricDatum input) {
            if (input == null) {
                return false;
            } else if (input.getStatisticValues() != null) {
                // CloudWatch rejects any statistic set with a sample count of 0, so filter those out.
                return input.getStatisticValues().getSampleCount() > 0;
            }
            return true;
        }
    });

    // Use the local "now" (new Date()) for timestamps if configured; otherwise leave them null so CloudWatch assigns its service-side "now".
    if (timestampLocal) {
        Date now = new Date();
        for (MetricDatum datum : nonEmptyData) {
            datum.withTimestamp(now);
        }
    }

    // Finally, apply any user-level filter.
    Collection<MetricDatum> filtered = Collections2.filter(nonEmptyData, reporterFilter);

    // Each CloudWatch API request may contain at most 20 datums, so break the data into partitions of 20.
    Iterable<List<MetricDatum>> dataPartitions = Iterables.partition(filtered, 20);
    List<Future<?>> cloudWatchFutures = Lists.newArrayListWithExpectedSize(filtered.size());

    // Submit asynchronously with threads.
    for (List<MetricDatum> dataSubset : dataPartitions) {
        cloudWatchFutures.add(cloudWatch.putMetricDataAsync(
                new PutMetricDataRequest().withNamespace(metricNamespace).withMetricData(dataSubset)));
    }

    // Wait for CloudWatch putMetricData futures to be fulfilled.
    for (Future<?> cloudWatchFuture : cloudWatchFutures) {
        try {
            cloudWatchFuture.get();
        } catch (Exception e) {
            LOG.error(
                    "Exception reporting metrics to CloudWatch. Some or all of the data in this CloudWatch API request "
                            + "may have been discarded, did not make it to CloudWatch.",
                    e);
        }
    }

    LOG.debug("Sent {} metric data to CloudWatch. namespace: {}", filtered.size(), metricNamespace);
}

From source file:com.netflix.spinnaker.clouddriver.ecs.services.EcsCloudMetricService.java

private void copyAlarmsForAsg(AmazonCloudWatch srcCloudWatchClient, AmazonCloudWatch dstCloudWatchClient,
        String srcRegion, String dstRegion, String srcAccountId, String dstAccountId, String srcServiceName,
        String dstServiceName, String clusterName, Set<String> srcAlarmNames,
        Map<String, String> srcPolicyArnToDstPolicyArn) {

    for (List<String> srcAlarmsPartition : Iterables.partition(srcAlarmNames, 100)) {
        DescribeAlarmsResult describeAlarmsResult = srcCloudWatchClient
                .describeAlarms(new DescribeAlarmsRequest().withAlarmNames(srcAlarmsPartition));

        for (MetricAlarm srcMetricAlarm : describeAlarmsResult.getMetricAlarms()) {
            if (srcMetricAlarm.getAlarmName().startsWith("TargetTracking-")) {
                // Target Tracking policies auto-create their alarms, so we don't need to copy them
                continue;
            }

            String dstAlarmName = srcMetricAlarm.getAlarmName().replaceAll(srcServiceName, dstServiceName);
            if (!dstAlarmName.contains(dstServiceName)) {
                dstAlarmName = dstAlarmName + "-" + dstServiceName;
            }

            dstCloudWatchClient.putMetricAlarm(
                    buildPutMetricAlarmRequest(srcMetricAlarm, dstAlarmName, dstServiceName, clusterName,
                            srcRegion, dstRegion, srcAccountId, dstAccountId, srcPolicyArnToDstPolicyArn));
        }
    }
}
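
The partition size of 100 matches the DescribeAlarms API, which accepts at most 100 alarm names per request, so each partition maps to exactly one describeAlarms call.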

From source file:com.palantir.atlasdb.keyvalue.cassandra.CQLKeyValueService.java

private Map<Cell, Value> getRowsAllColsInternal(final String tableName, final Iterable<byte[]> rows,
        final long startTs) throws Exception {
    int rowCount = 0;
    String getRowsQuery = "SELECT * FROM " + getFullTableName(tableName) + " WHERE "
            + CassandraConstants.ROW_NAME + " = ?";
    Map<Cell, Value> result = Maps.newHashMap();
    final CassandraKeyValueServiceConfig config = configManager.getConfig();
    int fetchBatchCount = config.fetchBatchCount();
    for (final List<byte[]> batch : Iterables.partition(rows, fetchBatchCount)) {
        rowCount += batch.size();
        List<ResultSetFuture> resultSetFutures = Lists.newArrayListWithExpectedSize(rowCount);
        PreparedStatement preparedStatement = getPreparedStatement(tableName, getRowsQuery, session);
        for (byte[] row : batch) {
            BoundStatement boundStatement = preparedStatement.bind(ByteBuffer.wrap(row));
            resultSetFutures.add(session.executeAsync(boundStatement));
        }
        for (ResultSetFuture resultSetFuture : resultSetFutures) {
            ResultSet resultSet;
            try {
                resultSet = resultSetFuture.getUninterruptibly();
            } catch (Throwable t) {
                throw Throwables.throwUncheckedException(t);
            }
            for (Row row : resultSet.all()) {
                Cell c = Cell.create(CQLKeyValueServices.getRowName(row), CQLKeyValueServices.getColName(row));
                if ((CQLKeyValueServices.getTs(row) < startTs) && (!result.containsKey(c)
                        || (result.get(c).getTimestamp() < CQLKeyValueServices.getTs(row)))) {
                    result.put(
                            Cell.create(CQLKeyValueServices.getRowName(row),
                                    CQLKeyValueServices.getColName(row)),
                            Value.create(CQLKeyValueServices.getValue(row), CQLKeyValueServices.getTs(row)));
                }
            }
            CQLKeyValueServices.logTracedQuery(getRowsQuery, resultSet, session,
                    cqlStatementCache.NORMAL_QUERY);
        }
    }
    if (rowCount > fetchBatchCount) {
        log.warn("Rebatched in getRows a call to " + tableName + " that attempted to multiget " + rowCount
                + " rows; this may indicate overly-large batching on a higher level.\n"
                + CassandraKeyValueServices.getFilteredStackTrace("com.palantir"));
    }
    return result;
}

From source file:com.palantir.atlasdb.cleaner.Scrubber.java

void scrubImmediately(final TransactionManager txManager,
        final Multimap<String, Cell> tableNameToCell, final long scrubTimestamp, final long commitTimestamp) {
    if (log.isInfoEnabled()) {
        log.info("Scrubbing a total of " + tableNameToCell.size() + " cells immediately.");
    }

    // Note that if the background scrub thread is also running at the same time, it will try to scrub
    // the same cells as the current thread, since these cells were queued for scrubbing right before
    // the hard delete transaction committed. While this is unfortunate (it means we will be doing
    // more work than necessary), the behavior is still correct.
    long nextImmutableTimestamp;
    while ((nextImmutableTimestamp = immutableTimestampSupplier.get()) < commitTimestamp) {
        try {
            if (log.isInfoEnabled()) {
                log.info(String.format(
                        "Sleeping because immutable timestamp %d has not advanced to at least commit timestamp %d",
                        nextImmutableTimestamp, commitTimestamp));
            }
            Thread.sleep(AtlasDbConstants.SCRUBBER_RETRY_DELAY_MILLIS);
        } catch (InterruptedException e) {
            log.error("Interrupted while waiting for immutableTimestamp to advance past commitTimestamp", e);
        }
    }

    List<Future<Void>> scrubFutures = Lists.newArrayList();
    for (List<Entry<String, Cell>> batch : Iterables.partition(tableNameToCell.entries(),
            batchSizeSupplier.get())) {
        final Multimap<String, Cell> batchMultimap = HashMultimap.create();
        for (Entry<String, Cell> e : batch) {
            batchMultimap.put(e.getKey(), e.getValue());
        }

        final Callable<Void> c = new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                if (log.isInfoEnabled()) {
                    log.info("Scrubbing " + batchMultimap.size() + " cells immediately.");
                }

                // Here we don't need to check scrub timestamps because we guarantee that scrubImmediately is called
                // AFTER the transaction commits
                scrubCells(txManager, batchMultimap, scrubTimestamp, TransactionType.AGGRESSIVE_HARD_DELETE);

                Multimap<Cell, Long> cellToScrubTimestamp = HashMultimap.create();

                cellToScrubTimestamp = Multimaps.invertFrom(
                        Multimaps.index(batchMultimap.values(), Functions.constant(scrubTimestamp)),
                        cellToScrubTimestamp);

                scrubberStore.markCellsAsScrubbed(cellToScrubTimestamp, batchSizeSupplier.get());

                if (log.isInfoEnabled()) {
                    log.info("Completed scrub immediately.");
                }
                return null;
            }
        };
        if (!inScrubThread.get()) {
            scrubFutures.add(exec.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    inScrubThread.set(true);
                    c.call();
                    return null;
                }
            }));
        } else {
            try {
                c.call();
            } catch (Exception e) {
                throw Throwables.throwUncheckedException(e);
            }
        }
    }

    for (Future<Void> future : scrubFutures) {
        try {
            future.get();
        } catch (InterruptedException e) {
            throw Throwables.throwUncheckedException(e);
        } catch (ExecutionException e) {
            throw Throwables.rewrapAndThrowUncheckedException(e);
        }
    }
}

From source file:com.b2international.snowowl.snomed.datastore.id.cis.CisSnomedIdentifierService.java

@Override
public void deprecate(final Set<String> componentIds) {
    LOGGER.debug("Deprecating {} component IDs.", componentIds.size());

    final Map<String, SctId> sctIds = getSctIds(componentIds);
    final Map<String, SctId> problemSctIds = ImmutableMap.copyOf(Maps.filterValues(sctIds,
            Predicates.<SctId>not(Predicates.or(SctId::isAssigned, SctId::isPublished, SctId::isDeprecated))));

    if (!problemSctIds.isEmpty()) {
        throw new SctIdStatusException(
                "Cannot deprecate %s component IDs because they are not assigned, published, or already deprecated.",
                problemSctIds);
    }

    final Map<String, SctId> assignedOrPublishedSctIds = ImmutableMap
            .copyOf(Maps.filterValues(sctIds, Predicates.or(SctId::isAssigned, SctId::isPublished)));

    if (assignedOrPublishedSctIds.isEmpty()) {
        return;
    }

    HttpPut deprecateRequest = null;
    String currentNamespace = null;

    try {

        if (assignedOrPublishedSctIds.size() > 1) {
            final Multimap<String, String> componentIdsByNamespace = toNamespaceMultimap(
                    assignedOrPublishedSctIds.keySet());
            for (final Entry<String, Collection<String>> entry : componentIdsByNamespace.asMap().entrySet()) {
                currentNamespace = entry.getKey();

                for (final Collection<String> bulkIds : Iterables.partition(entry.getValue(), BULK_LIMIT)) {
                    LOGGER.debug(
                            String.format("Sending bulk deprecation request for namespace %s with size %d.",
                                    currentNamespace, bulkIds.size()));
                    deprecateRequest = httpPut(String.format("sct/bulk/deprecate?token=%s", getToken()),
                            createBulkDeprecationData(currentNamespace, bulkIds));
                    execute(deprecateRequest);
                }
            }

        } else {

            final String componentId = Iterables.getOnlyElement(assignedOrPublishedSctIds.keySet());
            currentNamespace = SnomedIdentifiers.getNamespace(componentId);
            deprecateRequest = httpPut(String.format("sct/deprecate?token=%s", getToken()),
                    createDeprecationData(componentId));
            execute(deprecateRequest);
        }

    } catch (IOException e) {
        throw new SnowowlRuntimeException(
                String.format("Exception while deprecating IDs for namespace %s.", currentNamespace), e);
    } finally {
        release(deprecateRequest);
    }
}

From source file:org.locationtech.geogig.storage.postgresql.PGConflictsDatabase.java

@Override
public void removeConflicts(final @Nullable String ns, final Iterable<String> paths) {
    checkNotNull(paths, "paths is null");
    final String namespace = namespace(ns);

    final String sql = format("DELETE FROM %s WHERE repository = ? AND namespace = ? AND path = ANY(?)",
            conflictsTable);

    try (Connection cx = PGStorage.newConnection(dataSource)) {
        cx.setAutoCommit(false);
        try (PreparedStatement ps = cx.prepareStatement(sql)) {
            final int partitionSize = 1000;
            Iterable<List<String>> partitions = Iterables.partition(paths, partitionSize);
            for (List<String> partition : partitions) {
                String[] pathsArg = partition.toArray(new String[partition.size()]);
                Array array = cx.createArrayOf("varchar", pathsArg);

                ps.clearParameters();
                ps.setInt(1, repositoryId);
                ps.setString(2, namespace);
                ps.setArray(3, array);
                ps.executeUpdate();
            }
            cx.commit();
        } catch (SQLException e) {
            cx.rollback();
            throw e;
        } finally {
            cx.setAutoCommit(true);
        }
    } catch (SQLException e) {
        throw propagate(e);
    }
}