Example usage for com.google.common.collect.Lists.partition


Introduction

On this page you can find usage examples for com.google.common.collect.Lists.partition.

Prototype

public static <T> List<List<T>> partition(List<T> list, int size) 

Document

Returns consecutive sublists (List.subList(int, int) views) of a list, each of the same size (the final list may be smaller).
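
For illustration, here is a minimal, self-contained sketch (not taken from any of the projects below) showing what the returned partitions look like. Each inner list is a List.subList view backed by the original list, not a copy.

import com.google.common.collect.Lists;

import java.util.List;

public class PartitionExample {
    public static void main(String[] args) {
        List<Integer> numbers = List.of(1, 2, 3, 4, 5, 6, 7);
        // Split into consecutive sublists of at most 3 elements each;
        // only the final sublist may be shorter.
        List<List<Integer>> batches = Lists.partition(numbers, 3);
        System.out.println(batches); // [[1, 2, 3], [4, 5, 6], [7]]
    }
}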

Usage

From source file: com.netflix.simianarmy.aws.janitor.crawler.edda.EddaImageJanitorCrawler.java

private void addLastReferenceInfo(List<Resource> resources, long since) {
    Validate.notNull(resources);
    LOGGER.info(String.format("Updating the latest reference info for %d images", resources.size()));
    Map<String, List<Resource>> regionToResources = Maps.newHashMap();
    for (Resource resource : resources) {
        List<Resource> regionalList = regionToResources.get(resource.getRegion());
        if (regionalList == null) {
            regionalList = Lists.newArrayList();
            regionToResources.put(resource.getRegion(), regionalList);
        }
        regionalList.add(resource);
    }
    //
    for (Map.Entry<String, List<Resource>> entry : regionToResources.entrySet()) {
        String region = entry.getKey();
        LOGGER.info(String.format("Updating the latest reference info for %d images in region %s",
                resources.size(), region));
        for (List<Resource> batch : Lists.partition(entry.getValue(), BATCH_SIZE)) {
            LOGGER.info(String.format("Processing batch of size %d", batch.size()));
            updateReferenceTimeByInstance(region, batch, since);
            updateReferenceTimeByLaunchConfig(region, batch, since);
        }
    }
}

From source file: com.google.cloud.dataflow.sdk.util.GcsUtil.java

List<BatchRequest> makeRemoveBatches(Collection<String> filenames) throws IOException {
    List<BatchRequest> batches = new LinkedList<>();
    for (List<String> filesToDelete : Lists.partition(Lists.newArrayList(filenames), MAX_REQUESTS_PER_BATCH)) {
        BatchRequest batch = storageClient.batch();
        for (String file : filesToDelete) {
            enqueueDelete(GcsPath.fromUri(file), batch);
        }
        batches.add(batch);
    }
    return batches;
}

From source file: com.netflix.metacat.metadata.mysql.MysqlUserMetadataService.java

@SuppressWarnings("checkstyle:methodname")
private Map<String, ObjectNode> _getPartitionDefinitionMetadata(final List<QualifiedName> names) {
    final List<List<QualifiedName>> parts = Lists.partition(names, config.getUserMetadataMaxInClauseItems());
    return parts.stream().map(keys -> _getMetadataMap(keys, SQL.GET_PARTITION_DEFINITION_METADATAS))
            .flatMap(it -> it.entrySet().stream()).collect(Collectors
                    .toMap(it -> QualifiedName.fromString(it.getKey()).toString(), Map.Entry::getValue));
}

From source file: org.eclipse.hawkbit.repository.jpa.JpaControllerManagement.java

private Void updateLastTargetQueries(final String tenant, final List<TargetPoll> polls) {
    LOG.debug("Persist {} targetqueries.", polls.size());

    final List<List<String>> pollChunks = Lists.partition(
            polls.stream().map(TargetPoll::getControllerId).collect(Collectors.toList()),
            Constants.MAX_ENTRIES_IN_STATEMENT);

    pollChunks.forEach(chunk -> {
        setLastTargetQuery(tenant, System.currentTimeMillis(), chunk);
        chunk.forEach(controllerId -> afterCommit.afterCommit(
                () -> eventPublisher.publishEvent(new TargetPollEvent(controllerId, tenant, bus.getId()))));
    });

    return null;
}

From source file: org.wso2.carbon.analytics.datasource.rdbms.RDBMSAnalyticsRecordStore.java

public AnalyticsIterator<Record> getRecords(int tenantId, String tableName, List<String> columns,
        List<String> ids) throws AnalyticsException, AnalyticsTableNotAvailableException {
    if (ids.isEmpty()) {
        return new EmptyResultSetAnalyticsIterator();
    }
    if (ids.size() > this.rdbmsQueryConfigurationEntry.getRecordBatchSize()) {
        List<List<String>> idsSubLists = Lists.partition(ids,
                this.rdbmsQueryConfigurationEntry.getRecordBatchSize());
        RDBMSIDsRecordGroup[] rdbmsIDsRecordGroups = new RDBMSIDsRecordGroup[idsSubLists.size()];
        int index = 0;
        for (List<String> idSubList : idsSubLists) {
            rdbmsIDsRecordGroups[index] = new RDBMSIDsRecordGroup(tenantId, tableName, columns, idSubList);
            index++;
        }
        return new RDBMSRecordIDListIterator(this, rdbmsIDsRecordGroups);
    }
    String recordGetSQL = this.generateGetRecordRetrievalWithIdQuery(tenantId, tableName, ids.size());
    Connection conn = null;
    PreparedStatement stmt = null;
    ResultSet rs = null;
    try {
        conn = this.getConnection();
        stmt = conn.prepareStatement(recordGetSQL);
        for (int i = 0; i < ids.size(); i++) {
            stmt.setString(i + 1, ids.get(i));
        }
        rs = stmt.executeQuery();
        return new RDBMSResultSetIterator(tenantId, tableName, columns, conn, stmt, rs);
    } catch (SQLException e) {
        if (conn != null && !this.tableExists(conn, tenantId, tableName)) {
            RDBMSUtils.cleanupConnection(rs, stmt, conn);
            throw new AnalyticsTableNotAvailableException(tenantId, tableName);
        } else {
            RDBMSUtils.cleanupConnection(rs, stmt, conn);
            throw new AnalyticsException("Error in retrieving records: " + e.getMessage(), e);
        }
    }
}

From source file: io.bitsquare.gui.main.disputes.trader.TraderDisputeView.java

@Override
protected void activate() {
    filterTextField.textProperty().addListener(filterTextFieldListener);
    disputeManager.cleanupDisputes();

    filteredList = new FilteredList<>(disputeManager.getDisputesAsObservableList());
    applyFilteredListPredicate(filterTextField.getText());

    sortedList = new SortedList<>(filteredList);
    sortedList.comparatorProperty().bind(tableView.comparatorProperty());
    tableView.setItems(sortedList);

    // sortedList.setComparator((o1, o2) -> o2.getOpeningDate().compareTo(o1.getOpeningDate()));
    selectedDisputeSubscription = EasyBind.subscribe(tableView.getSelectionModel().selectedItemProperty(),
            this::onSelectDispute);

    Dispute selectedItem = tableView.getSelectionModel().getSelectedItem();
    if (selectedItem != null)
        tableView.getSelectionModel().select(selectedItem);

    scrollToBottom();

    scene = root.getScene();
    if (scene != null)
        scene.addEventHandler(KeyEvent.KEY_RELEASED, keyEventEventHandler);

    // If doPrint=true we print out an HTML page which opens tabs with all deposit txs
    // (Firefox needs an about:config change to allow > 20 tabs).
    // Useful to check if there are any funds in unfinished trades (no payout tx done).
    // Last check 10.02.2017 found 8 trades and we contacted all traders as far as possible
    // (email if available, otherwise via in-app private notification).
    boolean doPrint = false;
    if (doPrint) {
        try {
            DateFormat formatter = new SimpleDateFormat("dd/MM/yy");
            Date startDate = formatter.parse("10/02/17");
            startDate = new Date(0); // print all from start

            HashMap<String, Dispute> map = new HashMap<>();
            disputeManager.getDisputesAsObservableList().stream().forEach(dispute -> {
                map.put(dispute.getDepositTxId(), dispute);
            });

            final Date finalStartDate = startDate;
            List<Dispute> disputes = new ArrayList<>(map.values());
            disputes.sort((o1, o2) -> o1.getOpeningDate().compareTo(o2.getOpeningDate()));
            List<List<Dispute>> subLists = Lists.partition(disputes, 1000);
            StringBuilder sb = new StringBuilder();
            subLists.stream().forEach(list -> {
                StringBuilder sb1 = new StringBuilder(
                        "\n<html><head><script type=\"text/javascript\">function load(){\n");
                StringBuilder sb2 = new StringBuilder("\n}</script></head><body onload=\"load()\">\n");
                list.stream().forEach(dispute -> {
                    if (dispute.getOpeningDate().after(finalStartDate)) {
                        String txId = dispute.getDepositTxId();
                        sb1.append("window.open(\"https://blockchain.info/tx/").append(txId)
                                .append("\", '_blank');\n");

                        sb2.append("Dispute ID: ").append(dispute.getId()).append(" Tx ID: ")
                                .append("<a href=\"https://blockchain.info/tx/").append(txId).append("\">")
                                .append(txId).append("</a> ").append("Opening date: ")
                                .append(formatter.format(dispute.getOpeningDate())).append("<br/>\n");
                    }
                });
                sb2.append("</body></html>");
                String res = sb1.toString() + sb2.toString();

                sb.append(res).append("\n\n\n");
            });
            log.info(sb.toString());
        } catch (ParseException ignore) {
        }
    }
}

From source file: org.sonar.css.checks.RuleDescriptionsGenerator.java

private String generateHtmlCssFunctionTable(List<StandardFunction> standardFunctions) {
    StringBuilder html = new StringBuilder("<table style=\"border: 0;\">\n");
    List<List<StandardFunction>> subLists = Lists.partition(standardFunctions, 3);
    for (List<StandardFunction> subList : subLists) {
        html.append("<tr>");
        for (StandardFunction standardCssFunction : subList) {
            List<String> links = standardCssFunction.getLinks().stream().filter(f -> !f.contains("lesscss.org"))
                    .collect(Collectors.toList());
            html.append("<td style=\"border: 0; \">");
            if (!links.isEmpty()) {
                html.append("<a target=\"_blank\" href=\"").append(links.get(0)).append("\">");
            }
            html.append("<code>").append(standardCssFunction.getName()).append("</code>");
            if (!links.isEmpty()) {
                html.append("</a>");
            }
            html.append("</code>");
            for (int i = 1; i < links.size(); i++) {
                html.append("&nbsp;&nbsp;<a target=\"_blank\" href=\"").append(links.get(i)).append("\">#")
                        .append(i + 1).append("</a>");
            }
            html.append("</td>\n");
        }
        html.append("</tr>");
    }
    html.append("</table>\n");
    return html.toString();
}

From source file: com.navercorp.pinpoint.common.hbase.HbaseTemplate2.java

@Override
public <T> List<T> findParallel(final TableName tableName, final List<Scan> scans,
        final ResultsExtractor<T> action) {
    assertAccessAvailable();
    if (!this.enableParallelScan || scans.size() == 1) {
        return find(tableName, scans, action);
    }
    List<T> results = new ArrayList<>(scans.size());
    List<Callable<T>> callables = new ArrayList<>(scans.size());
    for (final Scan scan : scans) {
        callables.add(new Callable<T>() {
            @Override
            public T call() throws Exception {
                return execute(tableName, new TableCallback<T>() {
                    @Override
                    public T doInTable(Table table) throws Throwable {
                        final ResultScanner scanner = table.getScanner(scan);
                        try {
                            return action.extractData(scanner);
                        } finally {
                            scanner.close();
                        }
                    }
                });
            }
        });
    }
    List<List<Callable<T>>> callablePartitions = Lists.partition(callables, this.maxThreadsPerParallelScan);
    for (List<Callable<T>> callablePartition : callablePartitions) {
        try {
            List<Future<T>> futures = this.executor.invokeAll(callablePartition);
            for (Future<T> future : futures) {
                results.add(future.get());
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            logger.warn("interrupted while findParallel [{}].", tableName);
            return Collections.emptyList();
        } catch (ExecutionException e) {
            logger.warn("findParallel [{}], error : {}", tableName, e);
            return Collections.emptyList();
        }
    }
    return results;
}

From source file: com.netflix.spinnaker.cats.redis.cache.RedisCache.java

private Map<String, byte[]> getHashes(String type, Collection<CacheData> items) {
    if (isHashingDisabled(type)) {
        return Collections.emptyMap();
    }

    final List<String> hashKeys = getKeys(type, items);
    if (hashKeys.isEmpty()) {
        return Collections.emptyMap();
    }

    final List<byte[]> hashValues = new ArrayList<>(hashKeys.size());
    final byte[] hashesId = hashesId(type);

    try (Jedis jedis = source.getJedis()) {
        for (List<String> hashPart : Lists.partition(hashKeys, options.getMaxHmgetSize())) {
            hashValues.addAll(jedis.hmget(hashesId, stringsToBytes(hashPart)));
        }
    }
    if (hashValues.size() != hashKeys.size()) {
        throw new RuntimeException("Expected same size result as request");
    }
    final Map<String, byte[]> hashes = new HashMap<>(hashKeys.size());
    for (int i = 0; i < hashValues.size(); i++) {
        final byte[] hashValue = hashValues.get(i);
        if (hashValue != null) {
            hashes.put(hashKeys.get(i), hashValue);
        }
    }

    return isHashingDisabled(type) ? Collections.emptyMap() : hashes;
}

From source file: org.apache.jackrabbit.oak.plugins.document.rdb.RDBBlobStore.java

@Override
public long countDeleteChunks(List<String> chunkIds, long maxLastModifiedTime) throws Exception {
    long count = 0;

    for (List<String> chunk : Lists.partition(chunkIds, RDBJDBCTools.MAX_IN_CLAUSE)) {
        Connection con = this.ch.getRWConnection();
        PreparedStatement prepMeta = null;
        PreparedStatement prepData = null;

        try {
            PreparedStatementComponent inClause = RDBJDBCTools.createInStatement("ID", chunk, false);

            StringBuilder metaStatement = new StringBuilder("delete from " + this.tnMeta + " where ")
                    .append(inClause.getStatementComponent());
            StringBuilder dataStatement = new StringBuilder("delete from " + this.tnData + " where ")
                    .append(inClause.getStatementComponent());

            if (maxLastModifiedTime > 0) {
                // delete only if the last modified is OLDER than x
                metaStatement.append(" and LASTMOD <= ?");
                // delete if there is NO entry where the last modified of
                // the meta is YOUNGER than x
                dataStatement.append(" and not exists(select * from " + this.tnMeta + " where " + this.tnMeta
                        + ".ID = " + this.tnData + ".ID and LASTMOD > ?)");
            }

            prepMeta = con.prepareStatement(metaStatement.toString());
            prepData = con.prepareStatement(dataStatement.toString());

            int mindex = 1, dindex = 1;
            mindex = inClause.setParameters(prepMeta, mindex);
            dindex = inClause.setParameters(prepData, dindex);

            if (maxLastModifiedTime > 0) {
                prepMeta.setLong(mindex, maxLastModifiedTime);
                prepData.setLong(dindex, maxLastModifiedTime);
            }

            int deletedMeta = prepMeta.executeUpdate();
            LOG.trace("delete-meta rows={}", deletedMeta);
            int deletedData = prepData.executeUpdate();
            LOG.trace("delete-data rows={}", deletedData);

            if (deletedMeta != deletedData) {
                String message = String.format(
                        "chunk deletion affected different numbers of DATA records (%s) and META records (%s)",
                        deletedData, deletedMeta);
                LOG.info(message);
            }

            count += deletedMeta;
        } finally {
            closeStatement(prepMeta);
            closeStatement(prepData);
            con.commit();
            this.ch.closeConnection(con);
        }
    }

    return count;
}