List of usage examples for com.google.common.collect.Lists.partition
public static <T> List<List<T>> partition(List<T> list, int size)
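Before the per-project examples below, a minimal sketch of the method's contract: it splits a source list into consecutive sublists of the requested size (the final sublist may be shorter), and the result is a view backed by the source list rather than a copy. The class name PartitionDemo is illustrative.

import com.google.common.collect.Lists;
import java.util.Arrays;
import java.util.List;

public class PartitionDemo {
    public static void main(String[] args) {
        List<Integer> numbers = Arrays.asList(1, 2, 3, 4, 5, 6, 7);
        // Consecutive sublists of size 3; the last sublist holds the remainder.
        List<List<Integer>> batches = Lists.partition(numbers, 3);
        System.out.println(batches); // [[1, 2, 3], [4, 5, 6], [7]]
        // The partitions are views: a change to the source list is reflected.
        numbers.set(0, 99);
        System.out.println(batches.get(0)); // [99, 2, 3]
    }
}

This view semantics is why several of the examples below copy the result (or their inputs) before mutating anything; a common use is batching database work, as most of the following snippets show.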
From source file:eu.mondo.driver.fourstore.FourStoreGraphDriverReadWrite.java
@Override
public void insertEdges(final Multimap<String, String> edges, final String type) throws IOException {
    if (edges.isEmpty()) {
        return;
    }
    final ArrayList<String> sourceVertices = new ArrayList<>(edges.keySet());
    final List<List<String>> sourceVerticesPartitions = Lists.partition(sourceVertices, PARTITION_SIZE);
    for (final List<String> sourceVerticesPartition : sourceVerticesPartitions) {
        final Multimap<String, String> edgePartition = ArrayListMultimap.create();
        for (final String sourceVertexURI : sourceVerticesPartition) {
            final Collection<String> targetVertexURIs = edges.get(sourceVertexURI);
            edgePartition.putAll(sourceVertexURI, targetVertexURIs);
        }
        insertEdgesPartition(edgePartition, type);
    }
}
From source file:com.simiacryptus.mindseye.applications.ImageClassifierBase.java
/**
 * Predict list.
 *
 * @param network    the network
 * @param count      the count
 * @param categories the categories
 * @param batchSize  the batch size
 * @param asyncGC    the async gc
 * @param nullGC     the null gc
 * @param data       the data
 * @return the list
 */
public static List<LinkedHashMap<CharSequence, Double>> predict(@Nonnull Layer network, int count,
        @Nonnull List<CharSequence> categories, int batchSize, boolean asyncGC, boolean nullGC, Tensor[] data) {
    try {
        return Lists.partition(Arrays.asList(data), 1).stream().flatMap(batch -> {
            Tensor[][] input = { batch.stream().toArray(i -> new Tensor[i]) };
            Result[] inputs = ConstantResult.singleResultArray(input);
            @Nullable
            Result result = network.eval(inputs);
            // Fetch the data before releasing the result reference.
            TensorList resultData = result.getData();
            result.freeRef();
            //Arrays.stream(input).flatMap(Arrays::stream).forEach(ReferenceCounting::freeRef);
            //Arrays.stream(inputs).forEach(ReferenceCounting::freeRef);
            //Arrays.stream(inputs).map(Result::getData).forEach(ReferenceCounting::freeRef);
            List<LinkedHashMap<CharSequence, Double>> maps = resultData.stream().map(tensor -> {
                @Nullable
                double[] predictionSignal = tensor.getData();
                int[] order = IntStream.range(0, 1000).mapToObj(x -> x)
                        .sorted(Comparator.comparing(i -> -predictionSignal[i])).mapToInt(x -> x).toArray();
                assert categories.size() == predictionSignal.length;
                @Nonnull
                LinkedHashMap<CharSequence, Double> topN = new LinkedHashMap<>();
                for (int i = 0; i < count; i++) {
                    int index = order[i];
                    topN.put(categories.get(index), predictionSignal[index]);
                }
                tensor.freeRef();
                return topN;
            }).collect(Collectors.toList());
            resultData.freeRef();
            return maps.stream();
        }).collect(Collectors.toList());
    } finally {
    }
}
From source file:me.lucko.luckperms.common.storage.dao.legacy.LegacySqlMigration.java
@Override
public void run() {
    backing.getPlugin().getLog().warn("Collecting UUID data from the old tables.");

    Map<UUID, String> uuidData = new HashMap<>();
    try (Connection c = backing.getProvider().getConnection()) {
        try (PreparedStatement ps = c.prepareStatement("SELECT uuid, name FROM lp_uuid")) {
            try (ResultSet rs = ps.executeQuery()) {
                while (rs.next()) {
                    try {
                        uuidData.put(UUID.fromString(rs.getString("uuid")), rs.getString("name"));
                    } catch (IllegalArgumentException e) {
                        e.printStackTrace();
                    }
                }
            }
        }
    } catch (SQLException e) {
        e.printStackTrace();
    }

    backing.getPlugin().getLog().warn("Found " + uuidData.size() + " uuid data entries. Copying to new tables...");

    List<Map.Entry<UUID, String>> uuidEntries = new ArrayList<>(uuidData.entrySet());
    List<List<Map.Entry<UUID, String>>> partitionedUuidEntries = Lists.partition(uuidEntries, 100);

    for (List<Map.Entry<UUID, String>> l : partitionedUuidEntries) {
        try (Connection c = backing.getProvider().getConnection()) {
            try (PreparedStatement ps = c.prepareStatement(
                    backing.getPrefix().apply("INSERT INTO {prefix}players VALUES(?, ?, ?)"))) {
                for (Map.Entry<UUID, String> e : l) {
                    ps.setString(1, e.getKey().toString());
                    ps.setString(2, e.getValue().toLowerCase());
                    ps.setString(3, "default");
                    ps.addBatch();
                }
                ps.executeBatch();
            }
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    uuidData.clear();
    uuidEntries.clear();
    partitionedUuidEntries.clear();
    backing.getPlugin().getLog().warn("Migrated all uuid data.");

    backing.getPlugin().getLog().warn("Starting user data migration.");
    Set<UUID> users = new HashSet<>();
    try (Connection c = backing.getProvider().getConnection()) {
        try (PreparedStatement ps = c.prepareStatement("SELECT uuid FROM lp_users")) {
            try (ResultSet rs = ps.executeQuery()) {
                while (rs.next()) {
                    try {
                        users.add(UUID.fromString(rs.getString("uuid")));
                    } catch (IllegalArgumentException e) {
                        e.printStackTrace();
                    }
                }
            }
        }
    } catch (SQLException e) {
        e.printStackTrace();
    }

    backing.getPlugin().getLog().warn("Found " + users.size() + " user data entries. Copying to new tables...");

    AtomicInteger userCounter = new AtomicInteger(0);
    for (UUID uuid : users) {
        String permsJson = null;
        String primaryGroup = null;
        try (Connection c = backing.getProvider().getConnection()) {
            try (PreparedStatement ps = c.prepareStatement("SELECT primary_group, perms FROM lp_users WHERE uuid=?")) {
                ps.setString(1, uuid.toString());
                try (ResultSet rs = ps.executeQuery()) {
                    if (rs.next()) {
                        permsJson = rs.getString("perms");
                        primaryGroup = rs.getString("primary_group");
                    }
                }
            }
        } catch (SQLException e) {
            e.printStackTrace();
        }

        if (permsJson == null || primaryGroup == null) {
            new Throwable().printStackTrace();
            continue;
        }

        Map<String, Boolean> convertedPerms = backing.getGson().fromJson(permsJson, NODE_MAP_TYPE);
        if (convertedPerms == null) {
            new Throwable().printStackTrace();
            continue;
        }

        Set<NodeModel> nodes = convertedPerms.entrySet().stream()
                .map(e -> LegacyNodeFactory.fromLegacyString(e.getKey(), e.getValue()))
                .map(NodeModel::fromNode)
                .collect(Collectors.toSet());

        try (Connection c = backing.getProvider().getConnection()) {
            try (PreparedStatement ps = c.prepareStatement(backing.getPrefix().apply(
                    "INSERT INTO {prefix}user_permissions(uuid, permission, value, server, world, expiry, contexts) VALUES(?, ?, ?, ?, ?, ?, ?)"))) {
                for (NodeModel nd : nodes) {
                    ps.setString(1, uuid.toString());
                    ps.setString(2, nd.getPermission());
                    ps.setBoolean(3, nd.getValue());
                    ps.setString(4, nd.getServer());
                    ps.setString(5, nd.getWorld());
                    ps.setLong(6, nd.getExpiry());
                    ps.setString(7, backing.getGson()
                            .toJson(ContextSetJsonSerializer.serializeContextSet(nd.getContexts())));
                    ps.addBatch();
                }
                ps.executeBatch();
            }
        } catch (SQLException e) {
            e.printStackTrace();
        }

        if (!primaryGroup.equalsIgnoreCase("default")) {
            try (Connection c = backing.getProvider().getConnection()) {
                try (PreparedStatement ps = c.prepareStatement(
                        backing.getPrefix().apply("UPDATE {prefix}players SET primary_group=? WHERE uuid=?"))) {
                    ps.setString(1, primaryGroup);
                    ps.setString(2, uuid.toString());
                    ps.execute();
                }
            } catch (SQLException e) {
                e.printStackTrace();
            }
        }

        int i = userCounter.incrementAndGet();
        if (i % 100 == 0) {
            backing.getPlugin().getLog().warn("Migrated " + i + " users so far...");
        }
    }

    users.clear();
    backing.getPlugin().getLog().warn("Migrated all user data.");

    backing.getPlugin().getLog().warn("Starting group data migration.");
    Map<String, String> groupData = new HashMap<>();
    try (Connection c = backing.getProvider().getConnection()) {
        try (PreparedStatement ps = c.prepareStatement("SELECT name, perms FROM lp_groups")) {
            try (ResultSet rs = ps.executeQuery()) {
                while (rs.next()) {
                    groupData.put(rs.getString("name"), rs.getString("perms"));
                }
            }
        }
    } catch (SQLException e) {
        e.printStackTrace();
    }

    backing.getPlugin().getLog().warn("Found " + groupData.size() + " group data entries. Copying to new tables...");

    for (Map.Entry<String, String> e : groupData.entrySet()) {
        String name = e.getKey();
        String permsJson = e.getValue();

        try (Connection c = backing.getProvider().getConnection()) {
            try (PreparedStatement ps = c.prepareStatement(
                    backing.getPrefix().apply("INSERT INTO {prefix}groups VALUES(?)"))) {
                ps.setString(1, name);
                ps.execute();
            }
        } catch (SQLException ex) {
            ex.printStackTrace();
        }

        Map<String, Boolean> convertedPerms = backing.getGson().fromJson(permsJson, NODE_MAP_TYPE);
        if (convertedPerms == null) {
            new Throwable().printStackTrace();
            continue;
        }

        Set<NodeModel> nodes = convertedPerms.entrySet().stream()
                .map(ent -> LegacyNodeFactory.fromLegacyString(ent.getKey(), ent.getValue()))
                .map(NodeModel::fromNode)
                .collect(Collectors.toSet());

        try (Connection c = backing.getProvider().getConnection()) {
            try (PreparedStatement ps = c.prepareStatement(backing.getPrefix().apply(
                    "INSERT INTO {prefix}group_permissions(name, permission, value, server, world, expiry, contexts) VALUES(?, ?, ?, ?, ?, ?, ?)"))) {
                for (NodeModel nd : nodes) {
                    ps.setString(1, name);
                    ps.setString(2, nd.getPermission());
                    ps.setBoolean(3, nd.getValue());
                    ps.setString(4, nd.getServer());
                    ps.setString(5, nd.getWorld());
                    ps.setLong(6, nd.getExpiry());
                    ps.setString(7, backing.getGson()
                            .toJson(ContextSetJsonSerializer.serializeContextSet(nd.getContexts())));
                    ps.addBatch();
                }
                ps.executeBatch();
            }
        } catch (SQLException ex) {
            ex.printStackTrace();
        }
    }

    groupData.clear();
    backing.getPlugin().getLog().warn("Migrated all group data.");

    backing.getPlugin().getLog().warn("Renaming action and track tables.");
    try (Connection c = backing.getProvider().getConnection()) {
        try (PreparedStatement ps = c.prepareStatement(backing.getPrefix().apply("DROP TABLE {prefix}actions"))) {
            ps.execute();
        }
        try (PreparedStatement ps = c.prepareStatement(
                backing.getPrefix().apply("ALTER TABLE lp_actions RENAME TO {prefix}actions"))) {
            ps.execute();
        }
        try (PreparedStatement ps = c.prepareStatement(backing.getPrefix().apply("DROP TABLE {prefix}tracks"))) {
            ps.execute();
        }
        try (PreparedStatement ps = c.prepareStatement(
                backing.getPrefix().apply("ALTER TABLE lp_tracks RENAME TO {prefix}tracks"))) {
            ps.execute();
        }
    } catch (SQLException ex) {
        ex.printStackTrace();
    }

    backing.getPlugin().getLog().warn("Legacy schema migration complete.");
}
From source file:com.cloudant.sync.indexing.BasicQueryResult.java
@Override
public Iterator<DocumentRevision> iterator() {
    /**
     * Partitions a set of document IDs into batches of DocumentRevision
     * objects, and provides an iterator over the whole, un-partitioned set
     * of revision objects (as if they were not batched).
     */
    return new Iterator<DocumentRevision>() {

        /** List containing lists of partitioned document IDs */
        private final List<List<String>> subLists = this.partition(documentIds, batchSize);

        /** The current partition's iterator of document objects */
        private Iterator<DocumentRevision> subIterator = null;

        @Override
        public boolean hasNext() {
            if (subIterator == null) {
                return subLists.size() > 0;
            } else {
                return this.subIterator.hasNext() || subLists.size() > 0;
            }
        }

        @Override
        public DocumentRevision next() {
            if (subIterator == null || !subIterator.hasNext()) {
                List<String> ids = subLists.remove(0);
                subIterator = this.nextSubIterator(ids);
            }
            return subIterator.next();
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }

        /**
         * Partition a list of document IDs into batches of batchSize.
         *
         * Returns a mutable list of consecutive sublists. Same as Guava's
         * "Lists.partition" except the result list is mutable. This is needed
         * because this iterator removes sublists from the partitions as it goes.
         *
         * @see http://docs.guava-libraries.googlecode.com/git/javadoc/com/google/common/collect/Lists.html#partition(java.util.List, int)
         */
        private List<List<String>> partition(List<String> documentIds, int batchSize) {
            List<List<String>> partitions = Lists.partition(documentIds, batchSize);
            List<List<String>> res = new LinkedList<List<String>>();
            for (List<String> p : partitions) {
                res.add(p);
            }
            return res;
        }

        /**
         * Load the next partition of DocumentRevision objects for the iterator.
         *
         * @param ids the IDs of the revisions to load.
         * @return an iterator over the DocumentRevision objects for `ids`.
         */
        private Iterator<DocumentRevision> nextSubIterator(List<String> ids) {
            HashMap<String, DocumentRevision> map = new HashMap<String, DocumentRevision>();
            for (DocumentRevision revision : datastore.getDocumentsWithIds(ids)) {
                map.put(revision.getId(), revision);
            }
            List<DocumentRevision> revisions = new ArrayList<DocumentRevision>(ids.size());
            // Return the DocumentRevisions in the same order as the input "ids".
            for (String id : ids) {
                DocumentRevision revision = map.get(id);
                if (revision != null) {
                    revisions.add(revision);
                }
            }
            return revisions.iterator();
        }
    };
}
From source file:org.sonar.server.component.db.ComponentDao.java
public List<ComponentDto> getByIds(DbSession session, Collection<Long> ids) {
    if (ids.isEmpty()) {
        return Collections.emptyList();
    }
    List<ComponentDto> components = newArrayList();
    List<List<Long>> partitionList = Lists.partition(newArrayList(ids), 1000);
    for (List<Long> partition : partitionList) {
        List<ComponentDto> dtos = mapper(session).findByIds(partition);
        components.addAll(dtos);
    }
    return components;
}
From source file:org.locationtech.geogig.storage.postgresql.v9.PGObjectStoreObjectIterator.java
private @Nullable ObjectInfo<T> computeNext() {
    if (nextBatch != null && nextBatch.hasNext()) {
        return nextBatch.next();
    }
    if (!nodes.hasNext()) {
        return null;
    }
    {
        ObjectInfo<T> obj = tryNextCached();
        if (obj != null) {
            return obj;
        }
    }

    final int queryBatchSize = store.getAllBatchSize;
    final int superPartitionBatchSize = 10 * queryBatchSize;

    List<ObjectInfo<T>> hits = new LinkedList<>();
    List<NodeRef> cacheMisses = new ArrayList<>(superPartitionBatchSize);
    for (int i = 0; i < superPartitionBatchSize && nodes.hasNext(); i++) {
        NodeRef node = nodes.next();
        ObjectId id = node.getObjectId();
        RevObject cached = cache.getIfPresent(id);
        if (cached == null) {
            cacheMisses.add(node);
        } else {
            T obj = cacheHit(id, cached);
            if (obj != null) {
                hits.add(new ObjectInfo<T>(node, obj));
            }
        }
    }

    List<List<NodeRef>> partitions = Lists.partition(cacheMisses, queryBatchSize);
    List<Future<List<ObjectInfo<T>>>> futures = new ArrayList<>(partitions.size());
    for (List<NodeRef> partition : partitions) {
        Future<List<ObjectInfo<T>>> dbBatch;
        dbBatch = store.getObjects(partition, listener, type);
        futures.add(dbBatch);
    }

    final Function<Future<List<ObjectInfo<T>>>, List<ObjectInfo<T>>> futureGetter = (objs) -> {
        try {
            return objs.get();
        } catch (InterruptedException | ExecutionException e) {
            throw new RuntimeException(e);
        }
    };

    Iterable<List<ObjectInfo<T>>> lists = Iterables.transform(futures, futureGetter);
    Iterable<ObjectInfo<T>> concat = Iterables.concat(lists);
    Iterator<ObjectInfo<T>> iterator = concat.iterator();

    nextBatch = Iterators.concat(hits.iterator(), iterator);
    return computeNext();
}
From source file:org.asoem.greyfish.utils.concurrent.RecursiveActions.java
public static <T, S> RecursiveAction foldLeft(final T initialValue, final List<T> list,
        final Function<? super T, S> f, final int size) {
    checkNotNull(list);
    checkNotNull(f);
    if (list.isEmpty()) {
        return NULL_ACTION;
    } else {
        return new RecursiveAction() {
            @Override
            protected void compute() {
                checkState(inForkJoinPool(),
                        "This action is executed from outside of a ForkJoinPool, which is forbidden");
                if (list.size() < size) {
                    applyFunction(list);
                } else {
                    invokeAll(partitionAndFork(list));
                }
            }

            private List<RecursiveAction> partitionAndFork(final List<T> list) {
                // copyOf prevents deadlock!
                return ImmutableList.copyOf(
                        Lists.transform(Lists.partition(list, size), new Function<List<T>, RecursiveAction>() {
                            @Nullable
                            @Override
                            public RecursiveAction apply(@Nullable final List<T> input) {
                                return new RecursiveAction() {
                                    @Override
                                    protected void compute() {
                                        applyFunction(input);
                                    }
                                };
                            }
                        }));
            }

            private void applyFunction(final Iterable<T> elements) {
                for (final T element : elements) {
                    f.apply(element);
                }
            }
        };
    }
}
From source file:com.sk89q.worldguard.protection.managers.storage.sql.RegionUpdater.java
private void setParents() throws SQLException {
    Closer closer = Closer.create();
    try {
        PreparedStatement stmt = closer.register(conn.prepareStatement(
                "UPDATE " + config.getTablePrefix() + "region " +
                "SET parent = ? " +
                "WHERE id = ? AND world_id = " + worldId));

        for (List<ProtectedRegion> partition : Lists.partition(parentsToSet, StatementBatch.MAX_BATCH_SIZE)) {
            for (ProtectedRegion region : partition) {
                ProtectedRegion parent = region.getParent();
                if (parent != null) { // Parent would be null due to a race condition
                    stmt.setString(1, parent.getId());
                    stmt.setString(2, region.getId());
                    stmt.addBatch();
                }
            }
            stmt.executeBatch();
        }
    } finally {
        closer.closeQuietly();
    }
}
From source file:org.sonar.db.debt.CharacteristicDao.java
public List<CharacteristicDto> selectCharacteristicsByIds(Collection<Integer> ids, SqlSession session) {
    List<CharacteristicDto> dtos = newArrayList();
    List<List<Integer>> partitionList = Lists.partition(newArrayList(ids), 1000);
    for (List<Integer> partition : partitionList) {
        dtos.addAll(session.getMapper(CharacteristicMapper.class).selectCharacteristicsByIds(partition));
    }
    return dtos;
}
From source file:org.sonar.core.qualityprofile.db.ActiveRuleDao.java
public List<ActiveRuleDto> selectByIds(Collection<Integer> ids, SqlSession session) {
    if (ids.isEmpty()) {
        return Collections.emptyList();
    }
    List<ActiveRuleDto> dtosList = newArrayList();
    List<List<Integer>> idsPartitionList = Lists.partition(newArrayList(ids), 1000);
    for (List<Integer> idsPartition : idsPartitionList) {
        List<ActiveRuleDto> dtos = session.selectList(
                "org.sonar.core.qualityprofile.db.ActiveRuleMapper.selectByIds", newArrayList(idsPartition));
        dtosList.addAll(dtos);
    }
    return dtosList;
}