Example usage for com.google.common.collect Iterables partition

Introduction

This page collects usage examples for com.google.common.collect.Iterables.partition.

Prototype

public static <T> Iterable<List<T>> partition(final Iterable<T> iterable, final int size) 

Document

Divides an iterable into unmodifiable sublists of the given size (the final iterable may be smaller).
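
A minimal, self-contained sketch of that behavior (the class name and sample data are illustrative only, not taken from the examples that follow):

import com.google.common.collect.Iterables;
import java.util.Arrays;
import java.util.List;

public class PartitionSketch {
    public static void main(String[] args) {
        List<Integer> numbers = Arrays.asList(1, 2, 3, 4, 5, 6, 7);

        // Splits the input into unmodifiable chunks of at most 3 elements;
        // the last chunk holds whatever remains (here, a single element).
        for (List<Integer> chunk : Iterables.partition(numbers, 3)) {
            System.out.println(chunk); // [1, 2, 3] then [4, 5, 6] then [7]
        }
    }
}

The outer iterable is a lazily evaluated view, which is why the examples below can walk large result sets in fixed-size batches without copying them up front.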

Usage

From source file:com.google.walkaround.wave.server.googleimport.FindRemoteWavesProcessor.java

private void storeResults(List<RemoteConvWavelet> results) throws PermanentFailure {
    for (final List<RemoteConvWavelet> partition : Iterables.partition(results, MAX_WAVELETS_PER_TRANSACTION)) {
        new RetryHelper().run(new RetryHelper.VoidBody() {
            @Override
            public void run() throws RetryableFailure, PermanentFailure {
                CheckedTransaction tx = datastore.beginTransaction();
                try {
                    if (perUserTable.addRemoteWavelets(tx, userId, partition)) {
                        tx.commit();
                    }
                } finally {
                    tx.close();
                }
            }
        });
    }
    log.info("Successfully added " + results.size() + " remote waves");
}

From source file:org.obm.push.mail.EmailChanges.java

public Iterable<EmailChanges> partition(int windowSize) {
    Preconditions.checkArgument(windowSize > 0);
    if (sumOfChanges() == 0) {
        return ImmutableList.<EmailChanges>of();
    }
    if (sumOfChanges() < windowSize) {
        return ImmutableList.of(this);
    }
    return FluentIterable.from(Iterables.partition(toEntries(), windowSize))
            .transform(new Function<List<EmailPartitionEntry>, EmailChanges>() {
                @Override
                public EmailChanges apply(List<EmailPartitionEntry> input) {
                    return EmailChanges.fromEntries(input);
                }
            });
}

From source file:com.google.devtools.build.lib.skyframe.RecursivePackageProviderBackedTargetPatternResolver.java

private <E extends Exception> void findTargetsBeneathDirectoryParImpl(final RepositoryName repository,
        final String originalPattern, String directory, boolean rulesOnly,
        ImmutableSet<PathFragment> excludedSubdirectories, final ThreadSafeBatchCallback<Target, E> callback,
        Class<E> exceptionClass, ExecutorService executor)
        throws TargetParsingException, E, InterruptedException {
    final FilteringPolicy actualPolicy = rulesOnly ? FilteringPolicies.and(FilteringPolicies.RULES_ONLY, policy)
            : policy;
    PathFragment pathFragment = TargetPatternResolverUtil.getPathFragment(directory);
    Iterable<PathFragment> packagesUnderDirectory = recursivePackageProvider
            .getPackagesUnderDirectory(repository, pathFragment, excludedSubdirectories);

    Iterable<PackageIdentifier> pkgIds = Iterables.transform(packagesUnderDirectory,
            new Function<PathFragment, PackageIdentifier>() {
                @Override
                public PackageIdentifier apply(PathFragment path) {
                    return PackageIdentifier.create(repository, path);
                }
            });
    final AtomicBoolean foundTarget = new AtomicBoolean(false);

    // For very large sets of packages, we may not want to process all of them at once, so we split
    // into batches.
    List<List<PackageIdentifier>> partitions = ImmutableList
            .copyOf(Iterables.partition(pkgIds, MAX_PACKAGES_BULK_GET));
    ArrayList<Future<Void>> tasks = new ArrayList<>(partitions.size());
    for (final Iterable<PackageIdentifier> pkgIdBatch : partitions) {
        tasks.add(executor.submit(new Callable<Void>() {
            @Override
            public Void call() throws E, TargetParsingException, InterruptedException {
                ImmutableSet<PackageIdentifier> pkgIdBatchSet = ImmutableSet.copyOf(pkgIdBatch);
                packageSemaphore.acquireAll(pkgIdBatchSet);
                try {
                    Iterable<ResolvedTargets<Target>> resolvedTargets = bulkGetTargetsInPackage(originalPattern,
                            pkgIdBatch, NO_FILTER).values();
                    List<Target> filteredTargets = new ArrayList<>(calculateSize(resolvedTargets));
                    for (ResolvedTargets<Target> targets : resolvedTargets) {
                        for (Target target : targets.getTargets()) {
                            // Perform the no-targets-found check before applying the filtering policy
                            // so we only return the error if the input directory's subtree really
                            // contains no targets.
                            foundTarget.set(true);
                            if (actualPolicy.shouldRetain(target, false)) {
                                filteredTargets.add(target);
                            }
                        }
                    }
                    callback.process(filteredTargets);
                } finally {
                    packageSemaphore.releaseAll(pkgIdBatchSet);
                }
                return null;
            }
        }));
    }
    try {
        MoreFutures.waitForAllInterruptiblyFailFast(tasks);
    } catch (ExecutionException e) {
        Throwables.propagateIfPossible(e.getCause(), exceptionClass);
        Throwables.propagateIfPossible(e.getCause(), TargetParsingException.class, InterruptedException.class);
        throw new IllegalStateException(e);
    }
    if (!foundTarget.get()) {
        throw new TargetParsingException("no targets found beneath '" + pathFragment + "'");
    }
}

From source file:com.google.walkaround.wave.server.googleimport.FindRemoteWavesProcessor.java

private void scheduleFindWaveletTasks(final SourceInstance instance, List<RobotSearchDigest> results,
        @Nullable final ImportSettings autoImportSettings) throws PermanentFailure {
    for (final List<RobotSearchDigest> partition : Iterables.partition(results,
            // 5 tasks per transaction.
            5)) {
        new RetryHelper().run(new RetryHelper.VoidBody() {
            @Override
            public void run() throws RetryableFailure, PermanentFailure {
                CheckedTransaction tx = datastore.beginTransaction();
                try {
                    for (RobotSearchDigest result : partition) {
                        FindWaveletsForRemoteWaveTask task = new FindWaveletsForRemoteWaveTaskGsonImpl();
                        task.setInstance(instance.serialize());
                        task.setWaveDigest(result);
                        if (autoImportSettings != null) {
                            task.setAutoImportSettings(autoImportSettings);
                        }
                        ImportTaskPayload payload = new ImportTaskPayloadGsonImpl();
                        payload.setFindWaveletsTask(task);
                        perUserTable.addTask(tx, userId, payload);
                    }
                    tx.commit();
                } finally {
                    tx.close();
                }
            }
        });
    }
    log.info("Successfully scheduled import of " + results.size() + " waves");
}

From source file:com.eucalyptus.portal.SimpleQueueClientManager.java

public List<Message> receiveAllMessages(final String queueName, final boolean shouldDelete) throws Exception {
    try {
        final int visibilityTimeout = 600;
        final int visibilityBuffer = 300;
        final long startTime = System.currentTimeMillis();
        final List<Message> messages = Lists.newArrayList();
        while ((System.currentTimeMillis() - startTime) < ((visibilityTimeout - visibilityBuffer) * 1000L)) {
            final ReceiveMessageRequest req = new ReceiveMessageRequest();
            req.setQueueUrl(getQueueUrl(queueName));
            req.setMaxNumberOfMessages(10);
            req.setWaitTimeSeconds(0);
            req.setVisibilityTimeout(visibilityTimeout);

            final ReceiveMessageResult result = getSimpleQueueClient().receiveMessage(req);
            final List<Message> received = result.getMessages();
            if (received == null || received.size() <= 0)
                break;
            messages.addAll(received);
        }

        // TODO: Use PurgeQueue
        if (shouldDelete) {
            for (final List<Message> partition : Iterables.partition(messages, 10)) {
                final DeleteMessageBatchRequest delReq = new DeleteMessageBatchRequest();
                delReq.setQueueUrl(getQueueUrl(queueName));
                delReq.setEntries(partition.stream().map(m -> new DeleteMessageBatchRequestEntry()
                        .withId(m.getMessageId()).withReceiptHandle(m.getReceiptHandle()))
                        .collect(Collectors.toList()));
                getSimpleQueueClient().deleteMessageBatch(delReq);
            }
        }
        return messages;
    } catch (final AmazonServiceException ex) {
        throw new Exception("Failed to receive messages due to service error", ex);
    } catch (final AmazonClientException ex) {
        throw new Exception("Failed to receive messages due to client error", ex);
    }
}

From source file:com.netflix.spinnaker.cats.redis.cache.RedisCache.java

@Override
public Collection<CacheData> getAll(String type, Collection<String> identifiers, CacheFilter cacheFilter) {
    if (identifiers.isEmpty()) {
        return Collections.emptySet();
    }
    Collection<String> ids = new LinkedHashSet<>(identifiers);
    final List<String> knownRels;
    Set<String> allRelationships = scanMembers(allRelationshipsId(type));
    if (cacheFilter == null) {
        knownRels = new ArrayList<>(allRelationships);
    } else {
        knownRels = new ArrayList<>(cacheFilter.filter(CacheFilter.Type.RELATIONSHIP, allRelationships));
    }

    Collection<CacheData> result = new ArrayList<>(ids.size());

    for (List<String> idPart : Iterables.partition(ids, options.getMaxGetBatchSize())) {
        result.addAll(getItems(type, idPart, knownRels));
    }

    return result;
}

From source file:org.dcache.alarms.dao.impl.DataNucleusLogEntryStore.java

@Override
public long update(Collection<LogEntry> selected) {
    if (selected.isEmpty()) {
        return 0;
    }

    PersistenceManager updateManager = pmf.getPersistenceManager();
    if (updateManager == null) {
        return 0;
    }

    /**
     * Too many object updates in a single transaction will
     * cause errors, even StackOverflow exceptions.
     * Break up into smaller batches.
     */
    long[] total = new long[] { 0L };

    try {
        Iterables.partition(selected, 100).forEach((partition) -> {
            total[0] += update(partition, updateManager);
        });
    } finally {
        updateManager.close();
    }

    return total[0];
}

From source file:com.google.walkaround.wave.server.googleimport.FindRemoteWavesProcessor.java

private void scheduleImportTasks(List<RemoteConvWavelet> results, final ImportSettings autoImportSettings)
        throws PermanentFailure {
    for (final List<RemoteConvWavelet> partition : Iterables.partition(results,
            // 5 tasks per transaction.
            5)) {
        new RetryHelper().run(new RetryHelper.VoidBody() {
            @Override
            public void run() throws RetryableFailure, PermanentFailure {
                CheckedTransaction tx = datastore.beginTransaction();
                try {
                    for (RemoteConvWavelet wavelet : partition) {
                        ImportWaveletTask task = new ImportWaveletTaskGsonImpl();
                        task.setInstance(wavelet.getSourceInstance().serialize());
                        task.setWaveId(wavelet.getDigest().getWaveId());
                        task.setWaveletId(wavelet.getWaveletId().serialise());
                        task.setSettings(autoImportSettings);
                        ImportTaskPayload payload = new ImportTaskPayloadGsonImpl();
                        payload.setImportWaveletTask(task);
                        perUserTable.addTask(tx, userId, payload);
                    }
                    tx.commit();
                } finally {
                    tx.close();
                }
            }
        });
    }
    log.info("Successfully scheduled import of " + results.size() + " waves");
}

From source file:com.eucalyptus.cloudwatch.common.internal.domain.listmetrics.ListMetricManager.java

public static void addMetricBatch(List<ListMetric> dataBatch) {
    // sort the collection by common items to require fewer lookups
    Multimap<PrefetchFields, ListMetric> dataBatchPrefetchMap = LinkedListMultimap.create();
    for (final ListMetric item : dataBatch) {
        PrefetchFields prefetchFields = new PrefetchFields(item.getAccountId(), item.getNamespace(),
                item.getMetricName());
        dataBatchPrefetchMap.put(prefetchFields, item);
    }
    // do db stuff in a certain number of operations per connection
    for (List<PrefetchFields> prefetchFieldsListPartial : Iterables.partition(dataBatchPrefetchMap.keySet(),
            LIST_METRIC_NUM_DB_OPERATIONS_PER_TRANSACTION)) {
        try (final TransactionResource db = Entities.transactionFor(ListMetric.class)) {
            int numOperations = 0;
            for (PrefetchFields prefetchFields : prefetchFieldsListPartial) {
                // Prefetch all list metrics with same metric name/namespace/account id
                Map<NonPrefetchFields, ListMetric> dataCache = Maps.newHashMap();
                Criteria criteria = Entities.createCriteria(ListMetric.class)
                        .add(Restrictions.eq("accountId", prefetchFields.getAccountId()))
                        .add(Restrictions.eq("namespace", prefetchFields.getNamespace()))
                        .add(Restrictions.eq("metricName", prefetchFields.getMetricName()));
                List<ListMetric> results = (List<ListMetric>) criteria.list();
                for (ListMetric result : results) {
                    dataCache.put(new NonPrefetchFields(result.getMetricType(), result.getDimensionMap()),
                            result);
                }
                for (ListMetric listMetric : dataBatchPrefetchMap.get(prefetchFields)) {
                    NonPrefetchFields cacheKey = new NonPrefetchFields(listMetric.getMetricType(),
                            listMetric.getDimensionMap());
                    if (dataCache.containsKey(cacheKey)) {
                        dataCache.get(cacheKey).updateTimeStamps();
                    } else {
                        Entities.persist(listMetric);
                        dataCache.put(cacheKey, listMetric);
                    }
                }
                numOperations++;
                if (numOperations % LIST_METRIC_NUM_DB_OPERATIONS_UNTIL_SESSION_FLUSH == 0) {
                    Entities.flushSession(ListMetric.class);
                    Entities.clearSession(ListMetric.class);
                }
            }
            db.commit();
        }
    }
}

From source file:google.registry.rdap.RdapDomainSearchAction.java

/**
 * Locates all domains which are linked to a set of host keys.
 *
 * <p>This method is called by {@link #searchByNameserverLdhName} and
 * {@link #searchByNameserverIp} after they assemble the relevant host keys.
 */
private RdapSearchResults searchByNameserverRefs(final Iterable<Key<HostResource>> hostKeys,
        final DateTime now) {
    // We must break the query up into chunks, because the in operator is limited to 30 subqueries.
    // Since it is possible for the same domain to show up more than once in our result list (if
    // we do a wildcard nameserver search that returns multiple nameservers used by the same
    // domain), we must create a set of resulting {@link DomainResource} objects. But we use a
    // LinkedHashSet to preserve the order in which we found the domains.
    LinkedHashSet<DomainResource> domains = new LinkedHashSet<>();
    for (List<Key<HostResource>> chunk : Iterables.partition(hostKeys, 30)) {
        for (DomainResource domain : ofy().load().type(DomainResource.class).filter("nsHosts in", chunk)
                .filter("deletionTime >", now).limit(rdapResultSetMaxSize + 1)) {
            if (!domains.contains(domain)) {
                if (domains.size() >= rdapResultSetMaxSize) {
                    return makeSearchResults(ImmutableList.copyOf(domains), true, now);
                }
                domains.add(domain);
            }
        }
    }
    return makeSearchResults(ImmutableList.copyOf(domains), false, now);
}