Example usage for com.google.common.collect Lists partition

Introduction

On this page you can find example usages for com.google.common.collect Lists.partition.

Prototype

public static <T> List<List<T>> partition(List<T> list, int size) 

Document

Returns consecutive sublists (via List#subList(int, int)) of a list, each of the same size (the final list may be smaller).
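
A minimal, self-contained illustration of that contract (chunks of a fixed size, with only the final chunk possibly shorter; the returned sublists are views of the original list, not copies):

import com.google.common.collect.Lists;

import java.util.Arrays;
import java.util.List;

public class PartitionDemo {
    public static void main(String[] args) {
        List<Integer> numbers = Arrays.asList(1, 2, 3, 4, 5, 6, 7);
        List<List<Integer>> chunks = Lists.partition(numbers, 3);
        // Prints [[1, 2, 3], [4, 5, 6], [7]] -- only the last chunk is shorter.
        System.out.println(chunks);
    }
}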

Usage

From source file:com.salesforce.ide.core.remote.MetadataStubExt.java

private List<FileProperties> tryOneByOne(List<ListMetadataQuery> queries, IProgressMonitor monitor)
        throws MonitorCanceledException, ForceRemoteException {
    List<FileProperties> filePropertiesSubList = new ArrayList<FileProperties>();
    for (List<ListMetadataQuery> listofOneQuery : Lists.partition(Lists.newArrayList(queries), 1)) {
        try {
            filePropertiesSubList.addAll(getFileProperties(listofOneQuery, monitor));
        } catch (ConnectionException e) {
            if (e instanceof SoapFaultException) {
                logger.warn(e.getLocalizedMessage());
            } else if (ForceExceptionUtils.isReadTimeoutException(e)) {
                logTimeout(listofOneQuery.get(0));
            } else {
                ForceExceptionUtils.throwTranslatedException(e, connection);
            }
        }
    }
    return filePropertiesSubList;
}
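
The example above partitions the queries into chunks of size one so that a failure in any single query can be logged and skipped without abandoning the rest of the batch. A generic sketch of the same idea (the fetch function here is a placeholder for the remote call, not part of the Salesforce IDE API):

import com.google.common.collect.Lists;

import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;

public final class OneByOne {
    // Process items one per partition so a single failure is skipped
    // instead of aborting the whole batch.
    public static <Q, R> List<R> tryOneByOne(List<Q> queries, Function<List<Q>, List<R>> fetch) {
        List<R> results = new ArrayList<>();
        for (List<Q> single : Lists.partition(queries, 1)) {
            try {
                results.addAll(fetch.apply(single));
            } catch (RuntimeException e) {
                System.err.println("Skipping failing query " + single.get(0) + ": " + e.getMessage());
            }
        }
        return results;
    }
}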

From source file:org.sonar.ide.eclipse.internal.ui.views.MeasuresView.java

public List<ISonarMeasure> getMeasures(EclipseSonar index, ISonarResource sonarResource) {
    List<ISonarMeasure> result = Lists.newArrayList();
    Map<String, Metric> metricsByKey = getMetrics(index);
    List<String> keys = Lists.newArrayList(metricsByKey.keySet());
    /*
     * Workaround for http://jira.codehaus.org/browse/SONAR-2430
     * Split list of metrics into small chunks and load measures by performing several queries.
     */
    for (List<String> keysForRequest : Lists.partition(keys, 5)) {
        String[] metricKeys = keysForRequest.toArray(new String[keysForRequest.size()]);
        ResourceQuery query = ResourceQuery.createForMetrics(sonarResource.getKey(), metricKeys)
                .setIncludeTrends(true);
        Resource resource = index.getSonar().find(query);

        for (Measure measure : resource.getMeasures()) {
            final Metric metric = metricsByKey.get(measure.getMetricKey());
            // Hacks around SONAR-1620
            if (!metric.getHidden() && !"DATA".equals(metric.getType())
                    && StringUtils.isNotBlank(measure.getFormattedValue())) {
                result.add(SonarCorePlugin.createSonarMeasure(sonarResource, metric, measure));
            }
        }
    }
    return result;
}

From source file:edu.harvard.med.screensaver.ui.libraries.LibraryCopyPlateSearchResults.java

private void initialize(EntityDataFetcher<Plate, Integer> plateDataFetcher) {
    initialize(new InMemoryEntityDataModel<Plate, Integer, Plate>(plateDataFetcher) {
        private Predicate<TableColumn<Plate, ?>> isScreeningStatisticsColumnWithCriteria = new Predicate<TableColumn<Plate, ?>>() {
            @Override
            public boolean apply(TableColumn<Plate, ?> column) {
                return screeningStatisticColumns.contains(column) && column.hasCriteria();
            }
        };
        private Function<List<Plate>, Void> calculatePlateScreeningStatistics = new Function<List<Plate>, Void>() {
            @Override
            public Void apply(List<Plate> plates) {
                _librariesDao.calculatePlateScreeningStatistics(plates);
                return null;
            }
        };
        private Predicate<TableColumn<Plate, ?>> isVolumeStatisticsColumnWithCriteria = new Predicate<TableColumn<Plate, ?>>() {
            @Override
            public boolean apply(TableColumn<Plate, ?> column) {
                return volumeStatisticColumns.contains(column) && column.hasCriteria();
            }
        };
        private Function<List<Copy>, Void> calculateCopyScreeningStatistics = new Function<List<Copy>, Void>() {
            @Override
            public Void apply(List<Copy> copies) {
                _librariesDao.calculateCopyScreeningStatistics(copies);
                return null;
            }
        };

        @Override
        public void fetch(List<? extends TableColumn<Plate, ?>> columns) {
            // add fetch properties that are needed for review message generation
            if (columns.size() > 0) {
                ((HasFetchPaths<Plate>) columns.get(0)).addRelationshipPath(Plate.location);
                ((HasFetchPaths<Plate>) columns.get(0)).addRelationshipPath(Plate.copy.to(Copy.library));
                ((HasFetchPaths<Plate>) columns.get(0))
                        .addRelationshipPath(RelationshipPath.from(Plate.class).to("updateActivities"));
            }

            super.fetch(columns);
        }

        @Override
        public void filter(List<? extends TableColumn<Plate, ?>> columns) {
            if (_mode == Mode.ALL && !hasCriteriaDefined(getColumnManager().getAllColumns())) {
                setWrappedData(Collections.EMPTY_LIST); // for memory performance, initialize with an empty list.
            } else {
                boolean calcScreeningStatisticsBeforeFiltering = Iterables.any(columns,
                        isScreeningStatisticsColumnWithCriteria);
                boolean calcVolumeStatisticsBeforeFiltering = Iterables.any(columns,
                        isVolumeStatisticsColumnWithCriteria);
                if (calcScreeningStatisticsBeforeFiltering) {
                    calculateScreeningStatistics(_unfilteredData);
                }
                if (calcVolumeStatisticsBeforeFiltering) {
                    calculateVolumeStatistics(_unfilteredData);
                }

                super.filter(columns);

                if (!calcScreeningStatisticsBeforeFiltering) {
                    calculateScreeningStatistics(_unfilteredData);
                }
                if (!calcVolumeStatisticsBeforeFiltering) {
                    calculateVolumeStatistics(_unfilteredData);
                }
            }

            updateReviewMessage();
        }

        private boolean hasCriteriaDefined(List<? extends TableColumn<?, ?>> columns) {
            for (TableColumn<?, ?> column : columns) {
                if (column.hasCriteria())
                    return true;
            }
            return false;
        }

        private void calculateScreeningStatistics(Iterable<Plate> plates) {
            List<Plate> platesWithoutStatistics = Lists
                    .newArrayList(Iterables.filter(plates, PlateScreeningStatisticsNotInitialized));
            for (List<Plate> partition : Lists.partition(platesWithoutStatistics, 1024)) {
                _librariesDao.calculatePlateScreeningStatistics(partition);
            }
        }

        private void calculateVolumeStatistics(Iterable<Plate> plates) {
            List<Plate> platesWithoutStatistics = Lists
                    .newArrayList(Iterables.filter(plates, PlateVolumeStatisticsNotInitialized));
            for (List<Plate> partition : Lists.partition(platesWithoutStatistics, 1024)) {
                _librariesDao.calculatePlateVolumeStatistics(partition);
            }
        }
    });
    _libraryCopyPlateBatchEditor.initialize();
}

From source file:com.netflix.metacat.metadata.mysql.MysqlUserMetadataService.java

@Nonnull
@Override
@Transactional(readOnly = true)
public Map<String, ObjectNode> getDataMetadataMap(@Nonnull final List<String> uris) {
    final Map<String, ObjectNode> result = Maps.newHashMap();
    if (!uris.isEmpty()) {
        final List<List<String>> parts = Lists.partition(uris, config.getUserMetadataMaxInClauseItems());
        parts.forEach(keys -> result.putAll(_getMetadataMap(keys, SQL.GET_DATA_METADATAS)));
    }
    return result;
}
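
The same chunk-and-merge shape appears whenever a lookup is limited by the number of keys a single SQL IN clause may contain. A generic sketch, independent of Metacat's SQL helpers (queryBatch is a placeholder for whatever executes the actual IN-clause query):

import com.google.common.collect.Lists;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

public final class ChunkedLookup {
    // Never pass more than maxInClauseItems keys to a single query,
    // then merge the partial results into one map.
    public static <K, V> Map<K, V> lookupInChunks(List<K> keys, int maxInClauseItems,
            Function<List<K>, Map<K, V>> queryBatch) {
        Map<K, V> merged = new HashMap<>();
        for (List<K> chunk : Lists.partition(keys, maxInClauseItems)) {
            merged.putAll(queryBatch.apply(chunk));
        }
        return merged;
    }
}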

From source file:hu.bme.mit.trainbenchmark.benchmark.fourstore.driver.FourStoreDriver.java

public void deleteVertices(final List<String> uris) throws IOException {
    if (uris.isEmpty()) {
        return;
    }

    final List<List<String>> partitions = Lists.partition(uris, PARTITION_SIZE);
    for (final List<String> partition : partitions) {
        deleteVertexPartition(partition);
    }
}

From source file:dagger.internal.codegen.ComponentModelBuilder.java

private void addConstructor() {
    List<List<CodeBlock>> partitions = Lists.partition(generatedComponentModel.getInitializations(),
            INITIALIZATIONS_PER_INITIALIZE_METHOD);

    ImmutableList<ParameterSpec> constructorParameters = constructorParameters();
    MethodSpec.Builder constructor = constructorBuilder()
            .addModifiers(generatedComponentModel.isAbstract() ? PROTECTED : PRIVATE)
            .addParameters(constructorParameters);

    if (generatedComponentModel.supermodel().isPresent()) {
        constructor.addStatement(CodeBlock.of("super($L)", constructorParameters.stream()
                .map(param -> CodeBlock.of("$N", param)).collect(toParametersCodeBlock())));
    }

    ImmutableList<ParameterSpec> initializeParameters = initializeParameters();
    CodeBlock initializeParametersCodeBlock = constructorParameters.stream()
            .map(param -> CodeBlock.of("$N", param)).collect(toParametersCodeBlock());

    UniqueNameSet methodNames = new UniqueNameSet();
    for (List<CodeBlock> partition : partitions) {
        String methodName = methodNames.getUniqueName("initialize");
        MethodSpec.Builder initializeMethod = methodBuilder(methodName).addModifiers(PRIVATE)
                /* TODO(gak): Strictly speaking, we only need the suppression here if we are also
                 * initializing a raw field in this method, but the structure of this code makes it
                 * awkward to pass that bit through.  This will be cleaned up when we no longer
                 * separate fields and initialization as we do now. */
                .addAnnotation(AnnotationSpecs.suppressWarnings(UNCHECKED))
                .addCode(CodeBlocks.concat(partition));
        initializeMethod.addParameters(initializeParameters);
        constructor.addStatement("$L($L)", methodName, initializeParametersCodeBlock);
        generatedComponentModel.addMethod(INITIALIZE_METHOD, initializeMethod.build());
    }
    generatedComponentModel.addMethod(CONSTRUCTOR, constructor.build());
}

From source file:org.eclipse.hawkbit.repository.jpa.JpaDeploymentManagement.java

/**
 * Assigns the {@link DistributionSet} to all {@link Target}s by
 * their IDs with a specific {@link ActionType} and {@code forcetime}.
 *
 * In case the update was executed offline (i.e. not managed by hawkBit), the
 * handling differs in that:<br/>
 * A. it completely ignores targets that are in
 * {@link TargetUpdateStatus#PENDING},<br/>
 * B. it creates completed actions,<br/>
 * C. it sets both the installed and the assigned DS on the target and
 * switches the status to {@link TargetUpdateStatus#IN_SYNC}, and<br/>
 * D. it does not send a {@link TargetAssignDistributionSetEvent}.<br/>
 *
 * @param dsID
 *            the ID of the distribution set to assign
 * @param targetsWithActionType
 *            a list of all targets and their action type
 * @param actionMessage
 *            an optional message to be written into the action status
 * @param assignmentStrategy
 *            the assignment strategy (online/offline)
 * @return the assignment result
 *
 * @throws IncompleteDistributionSetException if mandatory
 *         {@link SoftwareModuleType}s are not assigned as defined by the
 *        {@link DistributionSetType}.
 */
private DistributionSetAssignmentResult assignDistributionSetToTargets(final Long dsID,
        final Collection<TargetWithActionType> targetsWithActionType, final String actionMessage,
        final AbstractDsAssignmentStrategy assignmentStrategy) {

    final JpaDistributionSet distributionSetEntity = getAndValidateDsById(dsID);
    final List<String> controllerIDs = getControllerIdsForAssignmentAndCheckQuota(targetsWithActionType,
            distributionSetEntity);
    final List<JpaTarget> targetEntities = assignmentStrategy.findTargetsForAssignment(controllerIDs,
            distributionSetEntity.getId());

    if (targetEntities.isEmpty()) {
        // detaching as it is not necessary to persist the set itself
        entityManager.detach(distributionSetEntity);
        // return with nothing as all targets had the DS already assigned
        return new DistributionSetAssignmentResult(Collections.emptyList(), 0, targetsWithActionType.size(),
                Collections.emptyList(), targetManagement);
    }

    // split the target IDs into chunks of at most MAX_ENTRIES_IN_STATEMENT,
    // because many databases constrain the number of entries in an IN
    // statement (e.g. Oracle allows at most 1000 elements), so we need to
    // split the entries here and execute multiple statements
    final List<List<Long>> targetEntitiesIdsChunks = Lists.partition(
            targetEntities.stream().map(Target::getId).collect(Collectors.toList()),
            Constants.MAX_ENTRIES_IN_STATEMENT);

    // override all active actions and set them to the canceling state; we
    // need to remember which ones we switched to canceling, because for
    // targets that we changed to canceling we don't want to publish the new
    // action update event.
    final Set<Long> cancelingTargetEntitiesIds = closeOrCancelActiveActions(assignmentStrategy,
            targetEntitiesIdsChunks);
    // cancel all scheduled actions which are inactive; these actions were
    // not active before, and the manual assignment that has just been done
    // cancels them
    targetEntitiesIdsChunks.forEach(this::cancelInactiveScheduledActionsForTargets);

    setAssignedDistributionSetAndTargetUpdateStatus(assignmentStrategy, distributionSetEntity,
            targetEntitiesIdsChunks);

    final Map<String, JpaAction> controllerIdsToActions = createActions(targetsWithActionType, targetEntities,
            assignmentStrategy, distributionSetEntity);
    // create an initial action status when the action is created so we
    // remember the initial running status; we will later change the status
    // of the action itself, and with this action status we get a nicer
    // action history.
    createActionsStatus(controllerIdsToActions.values(), assignmentStrategy, actionMessage);

    detachEntitiesAndSendAssignmentEvents(distributionSetEntity, targetEntities, assignmentStrategy,
            cancelingTargetEntitiesIds, controllerIdsToActions);

    return new DistributionSetAssignmentResult(
            targetEntities.stream().map(Target::getControllerId).collect(Collectors.toList()),
            targetEntities.size(), controllerIDs.size() - targetEntities.size(),
            Lists.newArrayList(controllerIdsToActions.values()), targetManagement);
}

From source file:com.flipkart.foxtrot.core.querystore.impl.ElasticsearchQueryStore.java

@Override
public void cleanup(Set<String> tables) throws QueryStoreException {
    List<String> indicesToDelete = new ArrayList<String>();
    try {
        IndicesStatusResponse response = connection.getClient().admin().indices().prepareStatus().execute()
                .actionGet();
        Set<String> currentIndices = response.getIndices().keySet();

        for (String currentIndex : currentIndices) {
            String table = ElasticsearchUtils.getTableNameFromIndex(currentIndex);
            if (table != null && tables.contains(table)) {
                boolean indexEligibleForDeletion;
                try {
                    indexEligibleForDeletion = ElasticsearchUtils.isIndexEligibleForDeletion(currentIndex,
                            tableMetadataManager.get(table));
                    if (indexEligibleForDeletion) {
                        logger.warn(String.format("Index eligible for deletion : %s", currentIndex));
                        indicesToDelete.add(currentIndex);
                    }
                } catch (Exception ex) {
                    logger.error(String.format("Unable to Get Table details for Table : %s", table), ex);
                }
            }
        }
        logger.warn(String.format("Deleting Indexes - Indexes - %s", indicesToDelete));
        if (indicesToDelete.size() > 0) {
            List<List<String>> subLists = Lists.partition(indicesToDelete, 5);
            for (List<String> subList : subLists) {
                try {
                    connection.getClient().admin().indices()
                            .prepareDelete(subList.toArray(new String[subList.size()])).execute()
                            .actionGet(TimeValue.timeValueMinutes(5));
                    logger.warn(String.format("Deleted Indexes - Indexes - %s", subList));
                } catch (Exception e) {
                    logger.error(String.format("Index deletion failed - Indexes - %s", subList), e);
                }
            }
        }
    } catch (Exception ex) {
        logger.error(String.format("Unable to delete Indexes - %s", indicesToDelete), ex);
        throw new QueryStoreException(QueryStoreException.ErrorCode.DATA_CLEANUP_ERROR,
                String.format("Unable to delete Indexes - %s", indicesToDelete), ex);
    }
}

From source file:com.b2international.snowowl.snomed.api.impl.SnomedBrowserService.java

@Override
public ISnomedBrowserBulkChangeRun getBulkChange(String branch, String bulkId, List<ExtendedLocale> locales,
        Options expand) {

    SnomedBrowserBulkChangeRun run = bulkChangeRuns.getIfPresent(bulkId);

    if (run != null && run.getConceptIds() != null && expand.containsKey("concepts")) {

        if (CompareUtils.isEmpty(run.getConceptIds())) {
            run.setConcepts(Collections.emptyList());
        } else {

            LOGGER.info(">>> Collecting bulk concept create / update results on {}", branch);

            Map<String, ISnomedBrowserConcept> allConcepts = newHashMap();

            for (List<String> conceptIdPartitions : Lists.partition(run.getConceptIds(), 1000)) {
                Set<ISnomedBrowserConcept> concepts = getConceptDetailsInBulk(branch,
                        ImmutableSet.copyOf(conceptIdPartitions), locales);
                allConcepts.putAll(concepts.stream().collect(toMap(ISnomedBrowserConcept::getId, c -> c)));
            }

            run.setConcepts(run.getConceptIds().stream().map(allConcepts::get).collect(toList())); // keep the order

            LOGGER.info("<<< Bulk concept create / update results are ready on {}", branch);
        }

    }

    return run;
}

From source file:com.netflix.metacat.usermetadata.mysql.MysqlUserMetadataService.java

@Nonnull
@Override
public Map<String, ObjectNode> getDefinitionMetadataMap(@Nonnull final List<QualifiedName> names) {
    if (!names.isEmpty()) {
        final List<List<QualifiedName>> parts = Lists.partition(names,
                config.getUserMetadataMaxInClauseItems());
        return parts.stream().map(keys -> _getMetadataMap(keys, SQL.GET_DEFINITION_METADATAS))
                .flatMap(it -> it.entrySet().stream()).collect(Collectors
                        .toMap(it -> QualifiedName.fromString(it.getKey()).toString(), Map.Entry::getValue));
    } else {
        return Collections.emptyMap();
    }
}