List of usage examples for com.google.common.collect.Lists.partition
public static <T> List<List<T>> partition(List<T> list, int size)
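Before the real-world usages below, here is a minimal sketch of the method's behavior (the list contents and the chunk size 3 are illustrative): partition returns consecutive sublists of the given list, each of the requested size, with the final sublist possibly smaller; the result is a view backed by the original list.

    import com.google.common.collect.Lists;

    import java.util.Arrays;
    import java.util.List;

    public class PartitionDemo {
        public static void main(String[] args) {
            List<Integer> numbers = Arrays.asList(1, 2, 3, 4, 5, 6, 7);
            // Consecutive chunks of 3; the last chunk holds the remainder.
            List<List<Integer>> chunks = Lists.partition(numbers, 3);
            System.out.println(chunks); // [[1, 2, 3], [4, 5, 6], [7]]
        }
    }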
From source file:com.netflix.metacat.main.services.search.ElasticSearchMetacatRefresh.java
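This job pages table partitions 10,000 at a time and uses Lists.partition to split each page into sub-lists of 1,000 before submitting them for indexing.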
@SuppressWarnings("checkstyle:methodname")
private ListenableFuture<Void> _processPartitions(final List<QualifiedName> qNames) {
    final List<QualifiedName> excludeQualifiedNames = config.getElasticSearchRefreshExcludeQualifiedNames();
    final List<String> tables = elasticSearchUtil.getTableIdsByCatalogs(ElasticSearchDoc.Type.table.name(),
            qNames, excludeQualifiedNames);
    final List<ListenableFuture<ListenableFuture<Void>>> futures = tables.stream()
            .map(s -> service.submit(() -> {
                final QualifiedName tableName = QualifiedName.fromString(s, false);
                final List<ListenableFuture<Void>> indexFutures = Lists.newArrayList();
                int offset = 0;
                int count;
                Sort sort;
                if ("s3".equals(tableName.getCatalogName())
                        || "aegisthus".equals(tableName.getCatalogName())) {
                    sort = new Sort("id", SortOrder.ASC);
                } else {
                    sort = new Sort("part_id", SortOrder.ASC);
                }
                final Pageable pageable = new Pageable(10000, offset);
                do {
                    final List<PartitionDto> partitionDtos = partitionService.list(tableName, null, null,
                            sort, pageable, true, true, true);
                    count = partitionDtos.size();
                    if (!partitionDtos.isEmpty()) {
                        final List<List<PartitionDto>> partitionedPartitionDtos =
                                Lists.partition(partitionDtos, 1000);
                        partitionedPartitionDtos.forEach(subPartitionsDtos ->
                                indexFutures.add(indexPartitionDtos(tableName, subPartitionsDtos)));
                        offset = offset + count;
                        pageable.setOffset(offset);
                    }
                } while (count == 10000);
                return Futures.transform(Futures.successfulAsList(indexFutures),
                        Functions.constant((Void) null));
            })).collect(Collectors.toList());
    final ListenableFuture<Void> processPartitionsFuture = Futures
            .transformAsync(Futures.successfulAsList(futures), input -> {
                final List<ListenableFuture<Void>> inputFuturesWithoutNulls = input.stream()
                        .filter(NOT_NULL)
                        .collect(Collectors.toList());
                return Futures.transform(Futures.successfulAsList(inputFuturesWithoutNulls),
                        Functions.constant(null));
            });
    return Futures.transformAsync(processPartitionsFuture, input -> {
        elasticSearchUtil.refresh();
        final List<ListenableFuture<Void>> cleanUpFutures = tables.stream()
                .map(s -> service.submit(
                        () -> partitionsCleanUp(QualifiedName.fromString(s, false), excludeQualifiedNames)))
                .collect(Collectors.toList());
        return Futures.transform(Futures.successfulAsList(cleanUpFutures), Functions.constant(null));
    });
}
From source file:com.cloudant.sync.replication.BasicPullStrategy.java
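Uses Lists.partition to break the ids of missing revisions into batches of config.insertBatchSize; each batch is fetched concurrently before insertion.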
private int processOneChangesBatch(ChangesResultWrapper changeFeeds)
        throws ExecutionException, InterruptedException {
    String feed = String.format("Change feed: { last_seq: %s, change size: %s}",
            changeFeeds.getLastSeq(), changeFeeds.getResults().size());
    Log.d(this.name, feed);

    Multimap<String, String> openRevs = changeFeeds.openRevisions(0, changeFeeds.size());
    Map<String, Collection<String>> missingRevisions = this.targetDb.getDbCore().revsDiff(openRevs);

    int changesProcessed = 0;

    // Process the changes in batches
    List<String> ids = Lists.newArrayList(missingRevisions.keySet());
    List<List<String>> batches = Lists.partition(ids, this.config.insertBatchSize);
    for (List<String> batch : batches) {
        if (this.cancel) {
            break;
        }

        List<Callable<DocumentRevsList>> tasks = createTasks(batch, missingRevisions);
        try {
            List<Future<DocumentRevsList>> futures = executor.invokeAll(tasks);
            for (Future<DocumentRevsList> future : futures) {
                DocumentRevsList result = future.get();

                // We promise not to insert documents after cancel is set
                if (this.cancel) {
                    break;
                }

                HashMap<String, List<PreparedAttachment>> atts =
                        new HashMap<String, List<PreparedAttachment>>();

                // now put together a list of attachments we need to download
                if (!config.pullAttachmentsInline) {
                    try {
                        for (DocumentRevs documentRevs : result) {
                            Map<String, Object> attachments = documentRevs.getAttachments();
                            // keep track of attachments we are going to prepare
                            ArrayList<PreparedAttachment> preparedAtts = new ArrayList<PreparedAttachment>();
                            atts.put(documentRevs.getId(), preparedAtts);

                            for (String attachmentName : attachments.keySet()) {
                                int revpos = (Integer) ((Map<String, Object>) attachments
                                        .get(attachmentName)).get("revpos");
                                // do we already have the attachment @ this revpos?
                                // look back up the tree for this document and see:
                                // if we already have it, then we don't need to fetch it
                                DocumentRevs.Revisions revs = documentRevs.getRevisions();
                                int offset = revs.getStart() - revpos;
                                if (offset >= 0 && offset < revs.getIds().size()) {
                                    String revId = String.valueOf(revpos) + "-" + revs.getIds().get(offset);
                                    DocumentRevision dr = this.targetDb.getDbCore()
                                            .getDocument(documentRevs.getId(), revId);
                                    if (dr != null) {
                                        Attachment a = this.targetDb.getDbCore()
                                                .getAttachment(dr, attachmentName);
                                        if (a != null) {
                                            // skip attachment, already got it
                                            continue;
                                        }
                                    }
                                }
                                String contentType = ((Map<String, String>) attachments
                                        .get(attachmentName)).get("content_type");
                                String encoding = (String) ((Map<String, Object>) attachments
                                        .get(attachmentName)).get("encoding");
                                UnsavedStreamAttachment usa = this.sourceDb.getAttachmentStream(
                                        documentRevs.getId(), documentRevs.getRev(), attachmentName,
                                        contentType, encoding);
                                DocumentRevision doc = this.targetDb.getDbCore()
                                        .getDocument(documentRevs.getId());
                                // by preparing the attachment here, it is downloaded outside of the
                                // database transaction
                                preparedAtts.add(this.targetDb.prepareAttachment(usa, doc));
                            }
                        }
                    } catch (Exception e) {
                        Log.e(LOG_TAG, "There was a problem downloading an attachment to the datastore, "
                                + "terminating replication");
                        Log.e(LOG_TAG, "Exception was: " + e);
                        this.cancel = true;
                    }
                }

                if (this.cancel) {
                    break;
                }

                boolean ok = true;

                // start tx
                this.targetDb.getDbCore().getSQLDatabase().beginTransaction();
                this.targetDb.bulkInsert(result, config.pullAttachmentsInline);

                // now add the attachments we have just downloaded
                try {
                    for (String id : atts.keySet()) {
                        DocumentRevision doc = this.targetDb.getDbCore().getDocument(id);
                        for (PreparedAttachment att : atts.get(id)) {
                            this.targetDb.addAttachment(att, doc);
                        }
                    }
                } catch (Exception e) {
                    Log.e(LOG_TAG, "There was a problem adding an attachment to the datastore, "
                            + "terminating replication");
                    Log.e(LOG_TAG, "Exception was: " + e);
                    this.cancel = true;
                    ok = false;
                }
                if (ok) {
                    this.targetDb.getDbCore().getSQLDatabase().setTransactionSuccessful();
                }
                // end tx
                this.targetDb.getDbCore().getSQLDatabase().endTransaction();

                changesProcessed++;
            }
        } catch (InterruptedException ex) {
            // invokeAll() or future.get() was interrupted, expected on
            // cancelling as shutdownNow is called in setCancel()
            if (this.cancel) {
                break;
            } else {
                throw ex;
            }
        }
    }

    if (!this.cancel) {
        this.targetDb.putCheckpoint(this.getReplicationId(), changeFeeds.getLastSeq());
    }

    return changesProcessed;
}
From source file:com.netflix.spinnaker.cats.redis.cache.RedisCache.java
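Uses Lists.partition twice so that each pipelined Redis del/hdel/srem command operates on at most options.getMaxDelSize() keys.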
private void evictItems(String type, List<String> identifiers, Collection<String> allRelationships) {
    List<String> delKeys = new ArrayList<>((allRelationships.size() + 1) * identifiers.size());
    for (String id : identifiers) {
        for (String relationship : allRelationships) {
            delKeys.add(relationshipId(type, id, relationship));
        }
        delKeys.add(attributesId(type, id));
    }

    int delOperations = 0;
    int hdelOperations = 0;
    int sremOperations = 0;
    try (Jedis jedis = source.getJedis()) {
        Pipeline pipe = jedis.pipelined();
        for (List<String> delPartition : Lists.partition(delKeys, options.getMaxDelSize())) {
            pipe.del(delPartition.toArray(new String[delPartition.size()]));
            delOperations++;
            pipe.hdel(hashesId(type), stringsToBytes(delPartition));
            hdelOperations++;
        }
        for (List<String> idPartition : Lists.partition(identifiers, options.getMaxDelSize())) {
            String[] ids = idPartition.toArray(new String[idPartition.size()]);
            pipe.srem(allOfTypeId(type), ids);
            sremOperations++;
            pipe.srem(allOfTypeReindex(type), ids);
            sremOperations++;
        }
        pipe.sync();
    }

    cacheMetrics.evict(prefix, type, identifiers.size(), delKeys.size(), delKeys.size(), delOperations,
            hdelOperations, sremOperations);
}
From source file:eu.mondo.driver.fourstore.FourStoreGraphDriverReadWrite.java
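Uses Lists.partition to delete vertices in chunks of PARTITION_SIZE URIs, one call to deleteVertexPartition per chunk.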
@Override
public void deleteVertices(final List<String> uris) throws IOException {
    if (uris.isEmpty()) {
        return;
    }
    final List<List<String>> partitions = Lists.partition(uris, PARTITION_SIZE);
    for (final List<String> partition : partitions) {
        deleteVertexPartition(partition);
    }
}
From source file:com.sk89q.worldguard.protection.managers.storage.sql.RegionUpdater.java
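Uses Lists.partition to group regions into batches of StatementBatch.MAX_BATCH_SIZE, executing the batched DELETE statement once per group.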
private void replaceDomainGroups() throws SQLException {
    // Remove groups
    Closer closer = Closer.create();
    try {
        PreparedStatement stmt = closer.register(conn.prepareStatement(
                "DELETE FROM " + config.getTablePrefix() + "region_groups "
                        + "WHERE region_id = ? "
                        + "AND world_id = " + worldId));

        for (List<ProtectedRegion> partition : Lists.partition(domainsToReplace,
                StatementBatch.MAX_BATCH_SIZE)) {
            for (ProtectedRegion region : partition) {
                stmt.setString(1, region.getId());
                stmt.addBatch();
            }
            stmt.executeBatch();
        }
    } finally {
        closer.closeQuietly();
    }

    // Add groups
    closer = Closer.create();
    try {
        PreparedStatement stmt = closer.register(conn.prepareStatement(
                "INSERT INTO " + config.getTablePrefix() + "region_groups "
                        + "(region_id, world_id, group_id, owner) "
                        + "VALUES (?, " + worldId + ", ?, ?)"));

        StatementBatch batch = new StatementBatch(stmt, StatementBatch.MAX_BATCH_SIZE);
        for (ProtectedRegion region : domainsToReplace) {
            insertDomainGroups(stmt, batch, region, region.getMembers(), false); // owner = false
            insertDomainGroups(stmt, batch, region, region.getOwners(), true); // owner = true
        }
        batch.executeRemaining();
    } finally {
        closer.closeQuietly();
    }
}
From source file:org.sonar.core.rule.RuleDao.java
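Uses Lists.partition to query rule tags in chunks of 1,000 rule ids, accumulating the mapper results across chunks.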
public List<RuleRuleTagDto> selectTagsByRuleIds(List<Integer> ruleIds, SqlSession session) {
    List<RuleRuleTagDto> dtos = newArrayList();
    List<List<Integer>> partitionList = Lists.partition(newArrayList(ruleIds), 1000);
    for (List<Integer> partition : partitionList) {
        dtos.addAll(getMapper(session).selectTagsByRuleIds(partition));
    }
    return dtos;
}
From source file:com.imaginea.kodebeagle.base.tasks.QueryKBServerTask.java
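Uses Lists.partition to fetch file contents in sub-lists of CHUNK_SIZE file names per request.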
private void putChunkedFileContentInMap(final List<String> fileNamesList) {
    List<List<String>> subLists = Lists.partition(fileNamesList, CHUNK_SIZE);
    for (List<String> subList : subLists) {
        searchUtils.fetchContentsAndUpdateMap(subList);
    }
}
From source file:com.imaginea.kodebeagle.tasks.QueryKBServerTask.java
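A near-identical variant of the previous example; each CHUNK_SIZE sub-list is delegated to esUtils instead of searchUtils.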
private void putChunkedFileContentInMap(final List<String> fileNamesList) {
    List<List<String>> subLists = Lists.partition(fileNamesList, CHUNK_SIZE);
    for (List<String> subList : subLists) {
        esUtils.fetchContentsAndUpdateMap(subList);
    }
}
From source file:com.google.jstestdriver.FileUploader.java
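Uses Lists.partition to upload loaded test-case deltas in groups of 50, serializing each group to JSON and posting it to the server.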
public void uploadToServer(final Collection<JstdTestCaseDelta> deltas) {
    if (deltas.isEmpty()) {
        return;
    }
    List<JstdTestCaseDelta> loadedDeltas = Lists.newArrayListWithCapacity(deltas.size());
    for (JstdTestCaseDelta delta : deltas) {
        loadedDeltas.add(delta.loadFiles(fileLoader));
    }
    for (List<JstdTestCaseDelta> partition : Lists.partition(loadedDeltas, 50)) {
        Map<String, String> uploadFileParams = new LinkedHashMap<String, String>();
        uploadFileParams.put("action", DeltaUpload.ACTION);
        uploadFileParams.put("data", gson.toJson(partition));
        server.post(baseUrl + "/fileSet", uploadFileParams);
    }
}
From source file:com.falcon.orca.actors.NodeManager.java
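In the SEND_DATA case below, Lists.partition splits the recorded response times into chunks of 1,000, sending each chunk as a separate TAKE_DATA message before signalling DATA_SEND_COMPLETE.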
@Override
@SuppressWarnings(value = "unchecked")
public void onReceive(Object message) {
    if (message instanceof NodeManagerCommand) {
        switch (((NodeManagerCommand) message).getType()) {
        case REGISTER_TO_MASTER: {
            clusterManager = getSender();
            ClusterManagerCommand clusterManagerCommand = new ClusterManagerCommand();
            clusterManagerCommand.setType(ClustermanagerCommandType.REGISTER_NODE);
            clusterManager.tell(clusterManagerCommand, getSelf());
            break;
        }
        case UNREGISTER_FROM_MASTER: {
            if (manager != null && !manager.isTerminated()) {
                ManagerCommand managerCommand = new ManagerCommand();
                managerCommand.setType(ManagerCommandType.STOP);
                manager.tell(managerCommand, manager);
            }
            ClusterManagerCommand clusterManagerCommand = new ClusterManagerCommand();
            clusterManagerCommand.setType(ClustermanagerCommandType.UNREGISTER_NODE);
            clusterManager.tell(clusterManagerCommand, getSelf());
            cluster.leave(cluster.selfAddress());
            context().stop(getSelf());
            break;
        }
        case START_LOAD:
            if (manager != null && !manager.isTerminated()) {
                printOnCmd("Already running a job, please wait!!");
            } else {
                NodeManagerCommand nodeManagerCommand = (NodeManagerCommand) message;
                RunDetails runDetails = (RunDetails) nodeManagerCommand.getFromContext("runDetails");
                manager = getContext().actorOf(Manager.props(runDetails, dynDataStore));
                ManagerCommand managerCommand = new ManagerCommand();
                managerCommand.setType(ManagerCommandType.START);
                manager.tell(managerCommand, getSelf());
                ClusterManagerCommand clusterManagerCommand = new ClusterManagerCommand();
                clusterManagerCommand.setType(ClustermanagerCommandType.LOAD_GENERATION_START);
                clusterManager.tell(clusterManagerCommand, getSelf());
            }
            break;
        case PAUSE_LOAD: {
            if (manager != null && !manager.isTerminated()) {
                ManagerCommand managerCommand = new ManagerCommand();
                managerCommand.setType(ManagerCommandType.PAUSE);
                manager.tell(managerCommand, getSelf());
                ClusterManagerCommand clusterManagerCommand = new ClusterManagerCommand();
                clusterManagerCommand.setType(ClustermanagerCommandType.LOAD_GENERATION_PAUSED);
                clusterManager.tell(clusterManagerCommand, getSelf());
            } else {
                printOnCmd("No active jobs to pause.");
            }
            break;
        }
        case RESUME_LOAD: {
            if (manager != null && !manager.isTerminated()) {
                ManagerCommand managerCommand = new ManagerCommand();
                managerCommand.setType(ManagerCommandType.RESUME);
                manager.tell(managerCommand, manager);
                ClusterManagerCommand clusterManagerCommand = new ClusterManagerCommand();
                clusterManagerCommand.setType(ClustermanagerCommandType.LOAD_GENERATION_RESUMED);
                clusterManager.tell(clusterManagerCommand, getSelf());
            } else {
                printOnCmd("No paused jobs to resume.");
            }
            break;
        }
        case STOP_LOAD: {
            if (manager != null && !manager.isTerminated()) {
                ManagerCommand managerCommand = new ManagerCommand();
                managerCommand.setType(ManagerCommandType.STOP);
                manager.tell(managerCommand, manager);
                ClusterManagerCommand clusterManagerCommand = new ClusterManagerCommand();
                clusterManagerCommand.setType(ClustermanagerCommandType.LOAD_GENERATION_COMPLETE);
                clusterManager.tell(clusterManagerCommand, getSelf());
                printOnCmd("Job killed successfully.");
            } else {
                printOnCmd("No job running.");
            }
            break;
        }
        case EXIT: {
            if (manager != null && !manager.isTerminated()) {
                ManagerCommand managerCommand = new ManagerCommand();
                managerCommand.setType(ManagerCommandType.STOP);
                manager.tell(managerCommand, manager);
            }
            ClusterManagerCommand clusterManagerCommand = new ClusterManagerCommand();
            clusterManagerCommand.setType(ClustermanagerCommandType.UNREGISTER_NODE);
            clusterManager.tell(clusterManagerCommand, getSelf());
            cluster.leave(cluster.selfAddress());
            context().stop(getSelf());
            break;
        }
        case REMOTE_EXIT: {
            if (manager != null && !manager.isTerminated()) {
                ManagerCommand managerCommand = new ManagerCommand();
                managerCommand.setType(ManagerCommandType.STOP);
                manager.tell(managerCommand, manager);
            }
            cluster.leave(cluster.selfAddress());
            context().stop(getSelf());
            break;
        }
        case LOAD_COMPLETE: {
            ClusterManagerCommand clusterManagerCommand = new ClusterManagerCommand();
            clusterManagerCommand.setType(ClustermanagerCommandType.LOAD_GENERATION_COMPLETE);
            clusterManagerCommand.putOnContext("runResult",
                    ((NodeManagerCommand) message).getFromContext("runResult"));
            clusterManager.tell(clusterManagerCommand, getSelf());
            responseTimes = (List<Long>) ((NodeManagerCommand) message).getFromContext("responseTimes");
            break;
        }
        case SEND_DATA: {
            List<List<Long>> responseTimePartitions = Lists.partition(responseTimes, 1000);
            for (List<Long> responseTimePartition : responseTimePartitions) {
                ClusterManagerCommand clusterManagerCommand = new ClusterManagerCommand();
                clusterManagerCommand.setType(ClustermanagerCommandType.TAKE_DATA);
                clusterManagerCommand.putOnContext("responseTimes",
                        new ArrayList<>(responseTimePartition));
                getSender().tell(clusterManagerCommand, getSelf());
            }
            ClusterManagerCommand clusterManagerCommand = new ClusterManagerCommand();
            clusterManagerCommand.setType(ClustermanagerCommandType.DATA_SEND_COMPLETE);
            getSender().tell(clusterManagerCommand, getSelf());
            break;
        }
        case CLEAR_LOAD_DATA: {
            this.bodyParams.clear();
            this.urlParams.clear();
            this.bodyParamsUseType.clear();
            this.urlParamsUseType.clear();
            this.dataGenerators.clear();
            break;
        }
        case TAKE_LOAD_DATA: {
            String template = (String) ((NodeManagerCommand) message).getFromContext("template");
            if (!StringUtils.isBlank(template)) {
                this.template = template;
            }
            String urlTemplate = (String) ((NodeManagerCommand) message).getFromContext("urlTemplate");
            if (!StringUtils.isBlank(urlTemplate)) {
                this.urlTemplate = urlTemplate;
            }
            String key = (String) ((NodeManagerCommand) message).getFromContext("key");
            String dataType = (String) ((NodeManagerCommand) message).getFromContext("dataType");
            if (dataType != null && dataType.equalsIgnoreCase("body")) {
                List<Object> data = (List<Object>) ((NodeManagerCommand) message).getFromContext("data");
                DynVarUseType varUseType = (DynVarUseType) ((NodeManagerCommand) message)
                        .getFromContext("dataUseType");
                if (!bodyParams.containsKey(key)) {
                    if (varUseType.equals(DynVarUseType.USE_MULTIPLE)) {
                        bodyParams.put(key, new ArrayList<>());
                        bodyParamsUseType.put(key, DynVarUseType.USE_MULTIPLE);
                    } else if (varUseType.equals(DynVarUseType.USE_ONCE)) {
                        bodyParams.put(key, new LinkedList<>());
                        bodyParamsUseType.put(key, DynVarUseType.USE_ONCE);
                    }
                }
                bodyParams.get(key).addAll(data);
            } else if (dataType != null && dataType.equalsIgnoreCase("url")) {
                List<Object> data = (List<Object>) ((NodeManagerCommand) message).getFromContext("data");
                DynVarUseType varUseType = (DynVarUseType) ((NodeManagerCommand) message)
                        .getFromContext("dataUseType");
                if (!urlParams.containsKey(key)) {
                    if (varUseType.equals(DynVarUseType.USE_MULTIPLE)) {
                        urlParams.put(key, new ArrayList<>());
                        urlParamsUseType.put(key, DynVarUseType.USE_MULTIPLE);
                    } else if (varUseType.equals(DynVarUseType.USE_ONCE)) {
                        urlParams.put(key, new LinkedList<>());
                        urlParamsUseType.put(key, DynVarUseType.USE_ONCE);
                    }
                }
                urlParams.get(key).addAll(data);
            } else if (dataType != null && dataType.equalsIgnoreCase("generator")) {
                DynGenerator generator = (DynGenerator) ((NodeManagerCommand) message).getFromContext("data");
                dataGenerators.put(key, generator);
            }
            break;
        }
        case LOAD_DATA_COMPLETE: {
            this.dynDataStore = new DynDataStore(bodyParams, urlParams, bodyParamsUseType,
                    urlParamsUseType, dataGenerators, template, urlTemplate);
            break;
        }
        default:
            unhandled(message);
        }
    }
}