List of usage examples for com.google.common.collect.Lists.partition
public static <T> List<List<T>> partition(List<T> list, int size)
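For orientation, here is a minimal, self-contained sketch of the method's contract (the class and variable names below are illustrative): partition returns consecutive sublists of the requested size, the final sublist may be shorter, and the result is a view backed by the original list.

import com.google.common.collect.Lists;
import java.util.Arrays;
import java.util.List;

public class PartitionDemo {
    public static void main(String[] args) {
        List<Integer> numbers = Arrays.asList(1, 2, 3, 4, 5, 6, 7);
        // Consecutive chunks of size 3; the last chunk holds the remainder.
        List<List<Integer>> chunks = Lists.partition(numbers, 3);
        System.out.println(chunks); // [[1, 2, 3], [4, 5, 6], [7]]
    }
}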
From source file:com.palantir.common.collect.IterableView.java
public IterableView<List<T>> partition(final int size) {
    if (delegate() instanceof List) {
        /*
         * Use the more efficient Lists.partition which utilizes sublists
         * without allocating new lists for the returned partitions.
         */
        return of(Lists.partition(castAsList(), size));
    }
    return of(Iterables.partition(castAsIterable(), size));
}
From source file:com.sk89q.worldguard.protection.managers.storage.sql.TableCache.java
/**
 * Fetch from the database rows that match the given entries, otherwise
 * create new entries and assign them an ID.
 *
 * @param entries a list of entries
 * @throws SQLException thrown on SQL error
 */
public void fetch(Collection<V> entries) throws SQLException {
    synchronized (LOCK) { // Lock across all cache instances
        checkNotNull(entries);

        // Get a list of missing entries
        List<V> fetchList = new ArrayList<>();
        for (V entry : entries) {
            if (!cache.containsKey(toKey(entry))) {
                fetchList.add(entry);
            }
        }

        if (fetchList.isEmpty()) {
            return; // Nothing to do
        }

        // Search for entries
        for (List<V> partition : Lists.partition(fetchList, MAX_NUMBER_PER_QUERY)) {
            Closer closer = Closer.create();
            try {
                PreparedStatement statement = closer.register(conn.prepareStatement(String.format(
                        "SELECT id, " + fieldName + " " +
                        "FROM `" + config.getTablePrefix() + tableName + "` " +
                        "WHERE " + fieldName + " IN (%s)",
                        StatementUtils.preparePlaceHolders(partition.size()))));

                String[] values = new String[partition.size()];
                int i = 0;
                for (V entry : partition) {
                    values[i] = fromType(entry);
                    i++;
                }

                StatementUtils.setValues(statement, values);

                ResultSet results = closer.register(statement.executeQuery());
                while (results.next()) {
                    cache.put(toKey(toType(results.getString(fieldName))), results.getInt("id"));
                }
            } finally {
                closer.closeQuietly();
            }
        }

        List<V> missing = new ArrayList<>();
        for (V entry : fetchList) {
            if (!cache.containsKey(toKey(entry))) {
                missing.add(entry);
            }
        }

        // Insert entries that are missing
        if (!missing.isEmpty()) {
            Closer closer = Closer.create();
            try {
                PreparedStatement statement = closer.register(conn.prepareStatement(
                        "INSERT INTO `" + config.getTablePrefix() + tableName + "` " +
                        "(id, " + fieldName + ") VALUES (null, ?)",
                        Statement.RETURN_GENERATED_KEYS));

                for (V entry : missing) {
                    statement.setString(1, fromType(entry));
                    statement.execute();
                    ResultSet generatedKeys = statement.getGeneratedKeys();
                    if (generatedKeys.next()) {
                        cache.put(toKey(entry), generatedKeys.getInt(1));
                    } else {
                        log.warning("Could not get the database ID for entry " + entry);
                    }
                }
            } finally {
                closer.closeQuietly();
            }
        }
    }
}
From source file:com.olacabs.fabric.processors.kafkawriter.KafkaWriter.java
@Override
protected EventSet consume(ProcessingContext processingContext, EventSet eventSet) throws ProcessingException {
    final List<KeyedMessage<String, String>> messages = Lists.newArrayList();
    try {
        eventSet.getEvents().forEach(event -> {
            KeyedMessage<String, String> convertedMessage = null;
            try {
                convertedMessage = convertEvent(event);
            } catch (ProcessingException e) {
                LOGGER.error("Error converting byte stream to event: ", e);
                throw new RuntimeException(e);
            }
            if (null != convertedMessage) {
                messages.add(convertedMessage);
            }
        });
    } catch (final Exception e) {
        LOGGER.error("Error converting byte stream to event: ", e);
        throw new ProcessingException(e);
    }
    Lists.partition(messages, ingestionPoolSize).forEach(messageList -> getProducer().send(messageList));
    return eventSet;
}
From source file:com.netflix.metacat.usermetadata.mysql.MysqlUserMetadataService.java
@Override
public void softDeleteDataMetadatas(final String user, @Nonnull final List<String> uris) {
    try {
        final Connection conn = poolingDataSource.getConnection();
        try {
            final List<List<String>> subLists = Lists.partition(uris, config.getUserMetadataMaxInClauseItems());
            for (List<String> subUris : subLists) {
                _softDeleteDataMetadatas(conn, user, subUris);
            }
            conn.commit();
        } catch (SQLException e) {
            conn.rollback();
            throw e;
        } finally {
            conn.close();
        }
    } catch (SQLException e) {
        log.error("Sql exception", e);
        throw new UserMetadataServiceException(
                String.format("Failed deleting the data metadata for %s", uris), e);
    }
}
From source file:com.sk89q.worldguard.protection.managers.storage.sql.RegionInserter.java
private void insertCuboids() throws SQLException {
    Closer closer = Closer.create();
    try {
        PreparedStatement stmt = closer.register(conn.prepareStatement(
                "INSERT INTO " + config.getTablePrefix() + "region_cuboid " +
                "(region_id, world_id, min_z, min_y, min_x, max_z, max_y, max_x ) " +
                "VALUES " +
                "(?, " + worldId + ", ?, ?, ?, ?, ?, ?)"));

        for (List<ProtectedCuboidRegion> partition : Lists.partition(cuboids, StatementBatch.MAX_BATCH_SIZE)) {
            for (ProtectedCuboidRegion region : partition) {
                BlockVector3 min = region.getMinimumPoint();
                BlockVector3 max = region.getMaximumPoint();

                stmt.setString(1, region.getId());
                stmt.setInt(2, min.getBlockZ());
                stmt.setInt(3, min.getBlockY());
                stmt.setInt(4, min.getBlockX());
                stmt.setInt(5, max.getBlockZ());
                stmt.setInt(6, max.getBlockY());
                stmt.setInt(7, max.getBlockX());
                stmt.addBatch();
            }

            stmt.executeBatch();
        }
    } finally {
        closer.closeQuietly();
    }
}
From source file:com.auditbucket.engine.service.MediationFacade.java
public Integer createHeaders(final Company company, final Fortress fortress, final List<MetaInputBean> inputBeans)
        throws DatagioException {
    fortress.setCompany(company);
    Long id = DateTime.now().getMillis();
    StopWatch watch = new StopWatch();
    watch.start();
    logger.info("Starting Batch [{}] - size [{}]", id, inputBeans.size());
    boolean newMode = true;
    if (newMode) {
        // Tune to balance against concurrency and batch transaction insert efficiency.
        List<List<MetaInputBean>> splitList = Lists.partition(inputBeans, 20);

        for (List<MetaInputBean> metaInputBeans : splitList) {
            class DLCommand implements Command {
                Iterable<MetaInputBean> headers = null;

                DLCommand(List<MetaInputBean> processList) {
                    this.headers = new CopyOnWriteArrayList<>(processList);
                }

                @Override
                public Command execute() throws DatagioException {
                    //fortressService.registerFortress(company, new FortressInputBean(headers.iterator().next().getFortress()), true);
                    Iterable<TrackResultBean> resultBeans = trackService.createHeaders(headers, company, fortress);
                    processLogs(company, resultBeans);
                    return this;
                }
            }
            DeadlockRetry.execute(new DLCommand(metaInputBeans), "creating headers", 20);
        }
    } else {
        logger.info("Processing in slow Transaction mode");
        for (MetaInputBean inputBean : inputBeans) {
            createHeader(company, fortress, inputBean);
        }
    }
    watch.stop();
    logger.info("Completed Batch [{}] - secs= {}, RPS={}", id, f.format(watch.getTotalTimeSeconds()),
            f.format(inputBeans.size() / watch.getTotalTimeSeconds()));

    return inputBeans.size();
}
From source file:com.opengamma.livedata.client.ActiveMQLiveDataClient.java
@Override
protected Map<String, Runnable> startReceivingTicks(List<String> specs, Session session,
        JmsByteArrayMessageDispatcher jmsDispatcher) {
    Map<String, Runnable> ret = new HashMap<String, Runnable>();
    if (specs.isEmpty()) {
        return ret;
    }
    for (String spec : specs) {
        ConsumerRecord record = _messageConsumersBySpec.get(spec);
        if (record != null) {
            //NOTE: could be on the wrong session, but we don't touch it
            record.getReceiving().add(spec);
            ret.put(spec, getCloseAction(spec, record));
        }
    }
    SetView<String> remaining = Sets.difference(new HashSet<String>(specs), ret.keySet());

    List<String> remainingList = new ArrayList<String>(remaining);
    for (List<String> partition : Lists.partition(remainingList, 100)) {
        String topicName = getCompositeTopicName(partition);
        try {
            Topic topic = session.createTopic(topicName);
            final MessageConsumer messageConsumer = session.createConsumer(topic);
            messageConsumer.setMessageListener(jmsDispatcher);
            ConsumerRecord record = new ConsumerRecord(messageConsumer, partition);
            for (String tickDistributionSpecification : partition) {
                _messageConsumersBySpec.put(tickDistributionSpecification, record);
                ret.put(tickDistributionSpecification, getCloseAction(tickDistributionSpecification, record));
            }
        } catch (JMSException e) {
            throw new OpenGammaRuntimeException("Failed to create subscription to JMS topics " + partition, e);
        }
    }
    return ret;
}
From source file:com.auditbucket.registration.service.TagService.java
/**
 *
 * @param company   who owns this collection
 * @param tagInputs tags to establish
 * @return tagInputs that failed processing
 */
@Async
public Future<Collection<TagInputBean>> makeTags(final Company company, final List<TagInputBean> tagInputs) {
    Collection<TagInputBean> failedInput = new ArrayList<>();

    class DLCommand implements Command {
        Collection<TagInputBean> failedInput;
        private final List<TagInputBean> inputs;

        public DLCommand(List<TagInputBean> tagInputBeans) {
            this.inputs = tagInputBeans;
        }

        @Override
        public Command execute() {
            // Creates the relationships
            failedInput = tagDao.save(company, inputs);
            return this;
        }
    }

    List<List<TagInputBean>> splitList = Lists.partition(tagInputs, 5);
    for (List<TagInputBean> tagInputBeans : splitList) {
        DLCommand c = new DLCommand(tagInputBeans);
        try {
            com.auditbucket.helper.DeadlockRetry.execute(c, "creating tags", 15);
        } catch (DatagioException e) {
            logger.error(" Tag errors detected");
        }
        failedInput.addAll(c.failedInput);
    }
    return new AsyncResult<>(failedInput);
}
From source file:de.bonn.limes.core.AbstractReposite.java
public TreeMap<String, ArrayList> getAbstracts(List<String> GeneList, Integer maxAbs, Integer perSec)
        throws InterruptedException {
    this.Glist = GeneList;
    this.noAbst = maxAbs;
    List<Integer> ids;
    try {
        TreeMap<String, ArrayList> geneWidAbstract = new TreeMap(); // holds the PubMedAbstract objects keyed by gene
        List<PubMedAbstract> abstracts; // holds the PubMedAbstract objects for a single gene

        for (String gene : Glist) {
            System.out.println(gene.length());
            if (gene.length() != 0 && gene.length() > 2) {
                abstracts = new ArrayList<>();

                if (noAbst != null) {
                    ids = new PubmedSearch().getPubMedIDs(gene, noAbst);
                } else {
                    ids = new PubmedSearch().getPubMedIDs(gene);
                }
                progressbarCount++;

                if (ids.get(0) != 0) {
                    if (ids.size() < 50) {
                        // Fewer than 50 PMIDs: fetch all records in a single request
                        List<PubMedRecord> records = new PubMedFetcher().getPubMedRecordForIDs(ids);
                        if (records != null) { // null means there were no abstracts to fetch
                            for (PubMedRecord record : records) {
                                PubMedAbstract abs = new PubMedAbstract();
                                abs.setAbstractText(record.getAbstract());
                                abs.setPMID(record.getPubMedID());
                                abs.setTitle(record.getTitle());
                                abs.setYear(String.valueOf(record.getYear()));
                                abstracts.add(abs);
                            }
                        }
                    } else {
                        // Otherwise fetch the records in batches of perSec PMIDs per request
                        int countabst = 0;
                        List<List<Integer>> parts = Lists.partition(ids, perSec);
                        for (List<Integer> Slist : parts) {
                            List<PubMedRecord> records = new PubMedFetcher().getPubMedRecordForIDs(Slist);
                            if (!records.isEmpty()) {
                                countabst++;
                                for (PubMedRecord record : records) {
                                    PubMedAbstract abs = new PubMedAbstract();
                                    abs.setAbstractText(record.getAbstract());
                                    abs.setPMID(record.getPubMedID());
                                    abs.setTitle(record.getTitle());
                                    abs.setYear(String.valueOf(record.getYear()));
                                    abstracts.add(abs);
                                }
                            } else {
                                System.err.println("Empty list returned.");
                            }
                        }
                    }
                }
                geneWidAbstract.put(gene, (ArrayList) abstracts);
            }
        }
        return geneWidAbstract;
    } catch (AxisFault ex) {
        Logger.getLogger(Main.class.getName()).log(Level.SEVERE, null, ex);
        return null;
    } catch (RemoteException ex) {
        Logger.getLogger(Main.class.getName()).log(Level.SEVERE, null, ex);
        return null;
    }
}
From source file:com.facebook.buck.distributed.build_slave.RuleKeyDivergenceRunnerFactory.java
/** Creates DistBuildModeRunner to be used for rule key divergence checks */
public static DistBuildModeRunner createRunner(StampedeId stampedeId, BuildSlaveRunId buildSlaveRunId,
        Clock clock, DistBuildService distBuildService, DelegateAndGraphsInitializer initializer,
        RuleKeyConfiguration ruleKeyConfiguration, RuleKeyCacheScope<RuleKey> ruleKeyCacheScope,
        WeightedListeningExecutorService executorService, BuckEventBus eventBus, DistBuildState state,
        Cell rootCell, UnconfiguredBuildTargetFactory unconfiguredBuildTargetFactory) {
    return new AbstractDistBuildModeRunner() {
        @Override
        public ListenableFuture<?> getAsyncPrepFuture() {
            return Futures.immediateFuture(null);
        }

        @Override
        public ExitCode runAndReturnExitCode(HeartbeatService heartbeatService)
                throws IOException, InterruptedException {
            try (Closer closer = Closer.create()) {
                closer.register(heartbeatService.addCallback("ReportCoordinatorAlive",
                        createHeartbeatCallback(stampedeId, distBuildService)));

                try {
                    List<Pair<BuildRule, RuleKey>> rulesAndKeys = calculateDefaultRuleKeys(
                            getTopLevelTargetsToBuild(state, rootCell, unconfiguredBuildTargetFactory),
                            initializer, ruleKeyConfiguration, ruleKeyCacheScope, executorService, eventBus);

                    List<BuildSlaveEvent> ruleKeyCalculatedEvents = rulesAndKeys.stream().map(rk -> {
                        RuleKeyCalculatedEvent event = new RuleKeyCalculatedEvent();
                        event.setBuildTarget(rk.getFirst().getFullyQualifiedName());
                        event.setDefaultRuleKey(rk.getSecond().getHashCode().toString());

                        BuildSlaveEvent buildSlaveEvent = new BuildSlaveEvent();
                        buildSlaveEvent.setEventType(BuildSlaveEventType.RULE_KEY_CALCULATED_EVENT);
                        buildSlaveEvent.setRuleKeyCalculatedEvent(event);
                        return buildSlaveEvent;
                    }).collect(Collectors.toList());

                    List<List<BuildSlaveEvent>> ruleKeyCalculationBatches = Lists.partition(
                            ruleKeyCalculatedEvents, RULE_CALCULATION_EVENTS_PER_FRONTEND_REQUEST);

                    for (List<BuildSlaveEvent> ruleKeyCalculateBatch : ruleKeyCalculationBatches) {
                        distBuildService.uploadBuildSlaveEvents(stampedeId, buildSlaveRunId, ruleKeyCalculateBatch);
                    }

                    // Ensure client doesn't wait for timeout before completing
                    distBuildService.sendAllBuildRulesPublishedEvent(stampedeId, buildSlaveRunId,
                            clock.currentTimeMillis());
                    distBuildService.setFinalBuildStatus(stampedeId, BuildStatus.FINISHED_SUCCESSFULLY,
                            "Rule key divergence check complete");

                    return ExitCode.SUCCESS;
                } catch (ExecutionException | IOException e) {
                    LOG.error(e, "Failed to calculate rule keys");
                    distBuildService.setFinalBuildStatus(stampedeId, BuildStatus.FAILED,
                            "Could not compute or publish rule keys");
                    return ExitCode.FATAL_GENERIC;
                }
            }
        }
    };
}