List of usage examples for com.google.common.collect Lists partition
public static <T> List<List<T>> partition(List<T> list, int size)
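Lists.partition returns consecutive sublists of the given list, each of the requested size (the final sublist may be smaller); the returned sublists are views of the original list. Before the real-world usages below, here is a minimal self-contained sketch of the call (class name and sample data are illustrative, not taken from any of the projects listed):

import com.google.common.collect.Lists;
import java.util.Arrays;
import java.util.List;

public class PartitionExample {
    public static void main(String[] args) {
        // Hypothetical input list used only to demonstrate the chunking behavior.
        List<Integer> ids = Arrays.asList(1, 2, 3, 4, 5, 6, 7);
        // Split into consecutive chunks of at most 3 elements; the last chunk holds the remainder.
        List<List<Integer>> chunks = Lists.partition(ids, 3);
        System.out.println(chunks); // [[1, 2, 3], [4, 5, 6], [7]]
    }
}

The examples that follow use the same pattern to process large id or document lists in fixed-size batches.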
From source file:org.apache.jackrabbit.oak.plugins.document.rdb.RDBDocumentStore.java
@CheckForNull
private <T extends Document> void internalUpdate(Collection<T> collection, List<String> ids, UpdateOp update) {
    if (isAppendableUpdate(update, true) && !requiresPreviousState(update)) {
        Operation modOperation = update.getChanges().get(MODIFIEDKEY);
        long modified = getModifiedFromOperation(modOperation);
        boolean modifiedIsConditional = modOperation == null || modOperation.type != UpdateOp.Operation.Type.SET;
        String appendData = ser.asString(update);
        for (List<String> chunkedIds : Lists.partition(ids, CHUNKSIZE)) {
            if (collection == Collection.NODES) {
                for (String key : chunkedIds) {
                    nodesCache.invalidate(key);
                }
            }
            Connection connection = null;
            RDBTableMetaData tmd = getTable(collection);
            boolean success = false;
            try {
                Stopwatch watch = startWatch();
                connection = this.ch.getRWConnection();
                success = db.batchedAppendingUpdate(connection, tmd, chunkedIds, modified, modifiedIsConditional, appendData);
                connection.commit();
                // Internally 'db' would make multiple calls and number of those
                // remote calls would not be captured
                stats.doneUpdate(watch.elapsed(TimeUnit.NANOSECONDS), collection, chunkedIds.size());
            } catch (SQLException ex) {
                success = false;
                this.ch.rollbackConnection(connection);
            } finally {
                this.ch.closeConnection(connection);
            }
            if (success) {
                if (collection == Collection.NODES) {
                    for (String id : chunkedIds) {
                        nodesCache.invalidate(id);
                    }
                }
            } else {
                for (String id : chunkedIds) {
                    UpdateOp up = update.copy();
                    up = up.shallowCopy(id);
                    internalCreateOrUpdate(collection, up, false, true);
                }
            }
        }
    } else {
        for (String id : ids) {
            UpdateOp up = update.copy();
            up = up.shallowCopy(id);
            internalCreateOrUpdate(collection, up, false, true);
        }
    }
}
From source file:org.apache.jackrabbit.oak.plugins.document.rdb.RDBDocumentStore.java
private <T extends Document> int delete(Collection<T> collection, List<String> ids) {
    int numDeleted = 0;
    RDBTableMetaData tmd = getTable(collection);
    for (List<String> sublist : Lists.partition(ids, 64)) {
        Connection connection = null;
        try {
            connection = this.ch.getRWConnection();
            numDeleted += db.delete(connection, tmd, sublist);
            connection.commit();
        } catch (Exception ex) {
            throw new DocumentStoreException(ex);
        } finally {
            this.ch.closeConnection(connection);
        }
    }
    return numDeleted;
}
From source file:org.elasticsearch.test.ESIntegTestCase.java
/**
 * Indexes the given {@link IndexRequestBuilder} instances randomly. It shuffles the given builders and either
 * indexes them in a blocking or async fashion. This is very useful to catch problems that relate to internal document
 * ids or index segment creations. Some features might have bugs when a given document is the first or the last in a
 * segment or if only one document is in a segment etc. This method prevents issues like this by randomizing the index
 * layout.
 *
 * @param forceRefresh   if <tt>true</tt> all involved indices are refreshed once the documents are indexed.
 * @param dummyDocuments if <tt>true</tt> some empty dummy documents may be randomly inserted into the document list and deleted once
 *                       all documents are indexed. This is useful to produce deleted documents on the server side.
 * @param maybeFlush     if <tt>true</tt> this method may randomly execute full flushes after index operations.
 * @param builders       the documents to index.
 */
public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean maybeFlush,
        List<IndexRequestBuilder> builders) throws InterruptedException, ExecutionException {
    Random random = getRandom();
    Set<String> indicesSet = new HashSet<>();
    for (IndexRequestBuilder builder : builders) {
        indicesSet.add(builder.request().index());
    }
    Set<Tuple<String, String>> bogusIds = new HashSet<>();
    if (random.nextBoolean() && !builders.isEmpty() && dummyDocuments) {
        builders = new ArrayList<>(builders);
        final String[] indices = indicesSet.toArray(new String[indicesSet.size()]);
        // inject some bogus docs
        final int numBogusDocs = scaledRandomIntBetween(1, builders.size() * 2);
        final int unicodeLen = between(1, 10);
        for (int i = 0; i < numBogusDocs; i++) {
            String id = randomRealisticUnicodeOfLength(unicodeLen) + Integer.toString(dummmyDocIdGenerator.incrementAndGet());
            String index = RandomPicks.randomFrom(random, indices);
            bogusIds.add(new Tuple<>(index, id));
            builders.add(client().prepareIndex(index, RANDOM_BOGUS_TYPE, id).setSource("{}"));
        }
    }
    final String[] indices = indicesSet.toArray(new String[indicesSet.size()]);
    Collections.shuffle(builders, random);
    final CopyOnWriteArrayList<Tuple<IndexRequestBuilder, Throwable>> errors = new CopyOnWriteArrayList<>();
    List<CountDownLatch> inFlightAsyncOperations = new ArrayList<>();
    // If you are indexing just a few documents then frequently do it one at a time. If many then frequently in bulk.
    if (builders.size() < FREQUENT_BULK_THRESHOLD ? frequently()
            : builders.size() < ALWAYS_BULK_THRESHOLD ? rarely() : false) {
        if (frequently()) {
            logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), true, false);
            for (IndexRequestBuilder indexRequestBuilder : builders) {
                indexRequestBuilder.execute(new PayloadLatchedActionListener<IndexResponse, IndexRequestBuilder>(
                        indexRequestBuilder, newLatch(inFlightAsyncOperations), errors));
                postIndexAsyncActions(indices, inFlightAsyncOperations, maybeFlush);
            }
        } else {
            logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), false, false);
            for (IndexRequestBuilder indexRequestBuilder : builders) {
                indexRequestBuilder.execute().actionGet();
                postIndexAsyncActions(indices, inFlightAsyncOperations, maybeFlush);
            }
        }
    } else {
        List<List<IndexRequestBuilder>> partition = Lists.partition(builders,
                Math.min(MAX_BULK_INDEX_REQUEST_SIZE, Math.max(1, (int) (builders.size() * randomDouble()))));
        logger.info("Index [{}] docs async: [{}] bulk: [{}] partitions [{}]", builders.size(), false, true, partition.size());
        for (List<IndexRequestBuilder> segmented : partition) {
            BulkRequestBuilder bulkBuilder = client().prepareBulk();
            for (IndexRequestBuilder indexRequestBuilder : segmented) {
                bulkBuilder.add(indexRequestBuilder);
            }
            BulkResponse actionGet = bulkBuilder.execute().actionGet();
            assertThat(actionGet.hasFailures() ? actionGet.buildFailureMessage() : "", actionGet.hasFailures(), equalTo(false));
        }
    }
    for (CountDownLatch operation : inFlightAsyncOperations) {
        operation.await();
    }
    final List<Throwable> actualErrors = new ArrayList<>();
    for (Tuple<IndexRequestBuilder, Throwable> tuple : errors) {
        if (ExceptionsHelper.unwrapCause(tuple.v2()) instanceof EsRejectedExecutionException) {
            tuple.v1().execute().actionGet(); // re-index if rejected
        } else {
            actualErrors.add(tuple.v2());
        }
    }
    assertThat(actualErrors, emptyIterable());
    if (!bogusIds.isEmpty()) {
        // delete the bogus types again - it might trigger merges or at least holes in the segments and enforces deleted docs!
        for (Tuple<String, String> doc : bogusIds) {
            // see https://github.com/elasticsearch/elasticsearch/issues/8706
            final DeleteResponse deleteResponse = client().prepareDelete(doc.v1(), RANDOM_BOGUS_TYPE, doc.v2()).get();
            if (deleteResponse.isFound() == false) {
                logger.warn("failed to delete a dummy doc [{}][{}]", doc.v1(), doc.v2());
            }
        }
    }
    if (forceRefresh) {
        assertNoFailures(client().admin().indices().prepareRefresh(indices)
                .setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute().get());
    }
}
From source file:de.thm.arsnova.dao.CouchDBDao.java
@Override
public int deleteInactiveGuestVisitedSessionLists(long lastActivityBefore) {
    try {
        NovaView view = new NovaView("logged_in/by_last_activity_for_guests");
        view.setEndKey(lastActivityBefore);
        List<Document> results = this.getDatabase().view(view).getResults();
        int count = 0;
        List<List<Document>> partitions = Lists.partition(results, BULK_PARTITION_SIZE);
        for (List<Document> partition : partitions) {
            final List<Document> newDocs = new ArrayList<>();
            for (final Document oldDoc : partition) {
                final Document newDoc = new Document();
                newDoc.setId(oldDoc.getId());
                newDoc.setRev(oldDoc.getJSONObject("value").getString("_rev"));
                newDoc.put("_deleted", true);
                newDocs.add(newDoc);
                LOGGER.debug("Marked logged_in document {} for deletion.", oldDoc.getId());
                /* Use log type 'user' since effectively the user is deleted in case of guests */
                log("delete", "type", "user", "id", oldDoc.getId());
            }
            if (newDocs.size() > 0) {
                if (getDatabase().bulkSaveDocuments(newDocs.toArray(new Document[newDocs.size()]))) {
                    count += newDocs.size();
                } else {
                    LOGGER.error("Could not bulk delete visited session lists");
                }
            }
        }
        if (count > 0) {
            LOGGER.info("Deleted {} visited session lists of inactive users.", count);
            log("cleanup", "type", "visitedsessions", "count", count);
        }
        return count;
    } catch (IOException e) {
        LOGGER.error("Could not delete visited session lists of inactive users.");
    }
    return 0;
}
From source file:org.apache.storm.daemon.ui.UIHelpers.java
/**
 * getBuildVisualization.
 * @param client client
 * @param config config
 * @param window window
 * @param id id
 * @param sys sys
 * @return getBuildVisualization
 * @throws TException TException
 */
public static Map<String, Object> getBuildVisualization(Nimbus.Iface client, Map<String, Object> config,
        String window, String id, boolean sys) throws TException {
    Map<String, Object> result = new HashMap();
    Map<String, Object> visualizationData = getVisualizationData(client, window, id, sys);
    List<Map> streamBoxes = visualizationData.entrySet().stream().map(UIHelpers::getStreamBox)
            .collect(Collectors.toList());
    result.put("visualizationTable", Lists.partition(streamBoxes, 4));
    return result;
}
From source file:de.thm.arsnova.dao.CouchDBDao.java
@Override
public int deleteInactiveUsers(long lastActivityBefore) {
    try {
        NovaView view = new NovaView("user/inactive_by_creation");
        view.setEndKey(lastActivityBefore);
        List<Document> results = this.getDatabase().view(view).getResults();
        int count = 0;
        final List<List<Document>> partitions = Lists.partition(results, BULK_PARTITION_SIZE);
        for (List<Document> partition : partitions) {
            final List<Document> newDocs = new ArrayList<>();
            for (Document oldDoc : partition) {
                final Document newDoc = new Document();
                newDoc.setId(oldDoc.getId());
                newDoc.setRev(oldDoc.getJSONObject("value").getString("_rev"));
                newDoc.put("_deleted", true);
                newDocs.add(newDoc);
                LOGGER.debug("Marked user document {} for deletion.", oldDoc.getId());
            }
            if (newDocs.size() > 0) {
                if (getDatabase().bulkSaveDocuments(newDocs.toArray(new Document[newDocs.size()]))) {
                    count += newDocs.size();
                }
            }
        }
        if (count > 0) {
            LOGGER.info("Deleted {} inactive users.", count);
            log("cleanup", "type", "user", "count", count);
        }
        return count;
    } catch (IOException e) {
        LOGGER.error("Could not delete inactive users.");
    }
    return 0;
}
From source file:org.ednovo.gooru.domain.service.resource.ResourceServiceImpl.java
@Override
public void updateViewsBulk(List<UpdateViewsDTO> updateViewsDTOs, User apiCaller) {
    int index = 30;
    for (List<UpdateViewsDTO> partition : Lists.partition(updateViewsDTOs, index)) {
        StringBuffer gooruOids = new StringBuffer();
        StringBuffer collectionIds = new StringBuffer();
        StringBuffer resourceIds = new StringBuffer();
        Map<String, Long> resourceMap = new HashMap<String, Long>();
        for (UpdateViewsDTO updateViewsDTO : partition) {
            if (gooruOids.toString().trim().length() > 0) {
                gooruOids.append(",");
            }
            gooruOids.append(updateViewsDTO.getGooruOid());
            resourceMap.put(updateViewsDTO.getGooruOid(), updateViewsDTO.getViews());
        }
        if (gooruOids.toString().trim().length() > 0) {
            List<Resource> resources = this.getResourceRepository().findAllResourcesByGooruOId(gooruOids.toString());
            for (Resource resource : resources) {
                if (resourceMap.containsKey(resource.getGooruOid())) {
                    resource.setViews(resourceMap.get(resource.getGooruOid()));
                }
                if (resourceIds.toString().trim().length() > 0) {
                    resourceIds.append(",");
                }
                if (collectionIds.toString().trim().length() > 0) {
                    collectionIds.append(",");
                }
                if (resource.getResourceType().getName().equalsIgnoreCase(ResourceType.Type.SCOLLECTION.getType())) {
                    collectionIds.append(resource.getGooruOid());
                } else {
                    resourceIds.append(resource.getGooruOid());
                }
            }
            this.getResourceRepository().saveAll(resources);
            if (collectionIds.toString().trim().length() > 0) {
                indexProcessor.index(collectionIds.toString(), IndexProcessor.INDEX, SCOLLECTION);
            } else if (resourceIds.toString().trim().length() > 0) {
                indexProcessor.index(resourceIds.toString(), IndexProcessor.INDEX, RESOURCE);
            }
        }
    }
}