Usage examples for com.google.common.collect.Lists.partition
public static <T> List<List<T>> partition(List<T> list, int size)
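partition returns consecutive sublists of the given list, each of the requested size except possibly the last, which may be smaller. The outer list is an unmodifiable view backed by the source list, and the method throws IllegalArgumentException if size is not positive. A minimal, self-contained sketch of that behavior (the class and variable names are illustrative):

import com.google.common.collect.Lists;

import java.util.Arrays;
import java.util.List;

public class PartitionDemo {
    public static void main(String[] args) {
        List<Integer> numbers = Arrays.asList(1, 2, 3, 4, 5, 6, 7);

        // Consecutive sublists of size 3; the final sublist holds the remainder.
        List<List<Integer>> chunks = Lists.partition(numbers, 3);
        System.out.println(chunks);        // [[1, 2, 3], [4, 5, 6], [7]]
        System.out.println(chunks.size()); // 3

        // The result is a view: it reflects later changes to "numbers"
        // and does not support structural modification itself.
    }
}

The examples below, taken from real projects, use the same call for batching, chunked SQL, pagination, and distributing work across threads.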
From source file:com.pearson.eidetic.driver.threads.MonitorVolumeSyncValidator.java
@Override
public void run() {
    /**
     * Every runTimeInterval_ Mins it will run to see if it needs to take a
     * snapshot of something
     */
    ConcurrentHashMap<Region, ArrayList<Volume>> localVolumeSyncValidate;

    while (true) {
        try {
            try {
                localVolumeSyncValidate = awsAccount_.getVolumeSyncValidate_Copy();
            } catch (Exception e) {
                logger.error("Error=\"awsAccount pull failure.\" " + e.toString()
                        + System.lineSeparator() + StackTrace.getStringFromStackTrace(e));
                Threads.sleepSeconds(5);
                continue;
            }

            HashMap<Region, Integer> secsSlept = new HashMap<>();
            HashMap<Region, Boolean> allDead = new HashMap<>();
            HashMap<Region, Integer> timeLeftOver = new HashMap<>();

            for (Map.Entry<Region, ArrayList<Volume>> entry : localVolumeSyncValidate.entrySet()) {
                Region region = entry.getKey();
                if (localVolumeSyncValidate.get(region).isEmpty()) {
                    continue;
                }

                // Partition size equals the list size, so this yields a single sublist.
                List<List<Volume>> listOfLists = Lists.partition(localVolumeSyncValidate.get(region),
                        localVolumeSyncValidate.get(region).size());
                localVolumeSyncValidateList_.put(region, listsToArrayLists(listOfLists));

                ArrayList<SnapshotVolumeSyncValidator> threads = new ArrayList<>();
                for (ArrayList<Volume> vols : localVolumeSyncValidateList_.get(region)) {
                    threads.add(new SnapshotVolumeSyncValidator(awsAccount_.getAwsAccessKeyId(),
                            awsAccount_.getAwsSecretKey(), awsAccount_.getUniqueAwsAccountIdentifier(),
                            awsAccount_.getMaxApiRequestsPerSecond(),
                            ApplicationConfiguration.getAwsCallRetryAttempts(), region, vols));
                }

                // Initializing content
                secsSlept.put(region, 0);
                allDead.put(region, false);
                timeLeftOver.put(region, 0);
                EideticSubThreads_.put(region, threads);
            }

            // AND THEY'RE OFF
            for (Map.Entry<Region, ArrayList<Volume>> entry : localVolumeSyncValidate.entrySet()) {
                Region region = entry.getKey();
                if (localVolumeSyncValidate.get(region).isEmpty()) {
                    continue;
                }
                if (localVolumeSyncValidateList_.get(region) == null
                        || localVolumeSyncValidateList_.get(region).isEmpty()) {
                    continue;
                }
                Threads.threadExecutorFixedPool(EideticSubThreads_.get(region), 1, runTimeInterval_,
                        TimeUnit.SECONDS);
            }

            // LET'S SEE IF THEY'RE DEAD
            Boolean ejection = false;
            Boolean theyreDead;
            while (true) {
                for (Map.Entry<Region, ArrayList<Volume>> entry : localVolumeSyncValidate.entrySet()) {
                    Region region = entry.getKey();
                    if (localVolumeSyncValidate.get(region).isEmpty()) {
                        continue;
                    }
                    if (areAllThreadsDead(EideticSubThreads_.get(region))) {
                        allDead.put(region, true);
                    } else {
                        secsSlept.replace(region, secsSlept.get(region), secsSlept.get(region) + 1);
                        if (secsSlept.get(region) > runTimeInterval_) {
                            ejection = true;
                            break;
                        }
                    }
                }

                // I don't like this
                theyreDead = true;
                for (Map.Entry<Region, ArrayList<Volume>> entry : localVolumeSyncValidate.entrySet()) {
                    Region region = entry.getKey();
                    if (localVolumeSyncValidate.get(region).isEmpty()) {
                        continue;
                    }
                    // If any of them have false
                    if (!allDead.get(region)) {
                        theyreDead = false;
                    }
                }

                if (ejection || theyreDead) {
                    break;
                }
                Threads.sleepSeconds(1);
            }

            // See if decrease splitfactor
            for (Map.Entry<Region, ArrayList<Volume>> entry : localVolumeSyncValidate.entrySet()) {
                Region region = entry.getKey();
                if (localVolumeSyncValidate.get(region).isEmpty()) {
                    continue;
                }
                // Left over sleep time
                int timeRemaining = runTimeInterval_ - secsSlept.get(region);
                if (timeRemaining > 0) {
                    timeLeftOver.put(region, timeRemaining);
                }
            }

            // Sleep our remaining time
            Map.Entry<Region, Integer> maxEntry = null;
            for (Map.Entry<Region, Integer> entry : timeLeftOver.entrySet()) {
                if (maxEntry == null || entry.getValue().compareTo(maxEntry.getValue()) > 0) {
                    Threads.sleepSeconds(10);
                    maxEntry = entry;
                }
            }
            if (maxEntry != null && maxEntry.getValue() > 0) {
                Threads.sleepSeconds(maxEntry.getValue());
            } else {
                Threads.sleepSeconds(runTimeInterval_);
            }

            localVolumeSyncValidateList_.clear();
            EideticSubThreads_.clear();
        } catch (Exception e) {
            logger.error("awsAccountNickname=\"" + awsAccount_.getUniqueAwsAccountIdentifier()
                    + "\",Error=\"MonitorSnapshotVolumeNoTimeFailure\", stacktrace=\"" + e.toString()
                    + System.lineSeparator() + StackTrace.getStringFromStackTrace(e) + "\"");
            Threads.sleepSeconds(10);
        }
    }

    /*
    for (Region region : EideticSubThreads_.keySet()) {
        ArrayList<EideticSubThread> EideticSubThreads = EideticSubThreads_.get(region);
        EideticSubThreadMethods.areAllThreadsDead(EideticSubThreads);
    }
    */
}
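Note that the partition size here is the size of the source list itself, so Lists.partition yields a single sublist per region: all of a region's volumes are handed to one SnapshotVolumeSyncValidator thread.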
From source file:com.sk89q.worldguard.protection.managers.storage.sql.RegionInserter.java
private void insertRegionTypes() throws SQLException {
    Closer closer = Closer.create();
    try {
        PreparedStatement stmt = closer.register(conn.prepareStatement(
                "INSERT INTO " + config.getTablePrefix() + "region "
                        + "(id, world_id, type, priority, parent) "
                        + "VALUES (?, ?, ?, ?, NULL)"));

        for (List<ProtectedRegion> partition : Lists.partition(all, StatementBatch.MAX_BATCH_SIZE)) {
            for (ProtectedRegion region : partition) {
                stmt.setString(1, region.getId());
                stmt.setInt(2, worldId);
                stmt.setString(3, SQLRegionDatabase.getRegionTypeName(region));
                stmt.setInt(4, region.getPriority());
                stmt.addBatch();
            }
            stmt.executeBatch();
        }
    } finally {
        closer.closeQuietly();
    }
}
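Partitioning the regions by StatementBatch.MAX_BATCH_SIZE bounds how many statements accumulate before each executeBatch() call, so the JDBC batch stays a fixed size no matter how many regions are inserted.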
From source file:com.netflix.spinnaker.clouddriver.elasticsearch.ops.BulkUpsertEntityTagsAtomicOperation.java
public BulkUpsertEntityTagsAtomicOperationResult operate(List priorOutputs) {
    BulkUpsertEntityTagsAtomicOperationResult result = new BulkUpsertEntityTagsAtomicOperationResult();
    List<EntityTags> entityTags = bulkUpsertEntityTagsDescription.entityTags;

    addTagIdsIfMissing(entityTags, result);
    mergeTags(bulkUpsertEntityTagsDescription);
    Date now = new Date();

    Lists.partition(entityTags, 50).forEach(tags -> {
        getTask().updateStatus(BASE_PHASE, "Retrieving current entity tags");
        Map<String, EntityTags> existingTags = retrieveExistingTags(tags);

        getTask().updateStatus(BASE_PHASE, "Merging existing tags and metadata");
        tags.forEach(tag -> mergeExistingTagsAndMetadata(now, existingTags.get(tag.getId()), tag,
                bulkUpsertEntityTagsDescription.isPartial));

        getTask().updateStatus(BASE_PHASE, "Performing batch update to durable tagging service");
        Map<String, EntityTags> durableTags = front50Service.batchUpdate(new ArrayList<>(tags)).stream()
                .collect(Collectors.toMap(EntityTags::getId, Function.identity()));

        getTask().updateStatus(BASE_PHASE, "Pushing tags to Elastic Search");
        updateMetadataFromDurableTagsAndIndex(tags, durableTags, result);
        result.upserted.addAll(tags);
    });
    return result;
}
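The tags are processed in chunks of 50, so each round trip to the durable tagging service and each Elasticsearch push covers at most 50 entities.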
From source file:org.sonar.core.issue.db.IssueChangeDao.java
List<IssueChangeDto> selectByIssuesAndType(SqlSession session, Collection<String> issueKeys,
        String changeType) {
    if (issueKeys.isEmpty()) {
        return Collections.emptyList();
    }
    IssueChangeMapper mapper = session.getMapper(IssueChangeMapper.class);
    List<IssueChangeDto> dtosList = newArrayList();
    List<List<String>> keysPartition = Lists.partition(newArrayList(issueKeys), 1000);
    for (List<String> partition : keysPartition) {
        List<IssueChangeDto> dtos = mapper.selectByIssuesAndType(partition, changeType);
        dtosList.addAll(dtos);
    }
    return dtosList;
}
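Chunking the keys into groups of 1000 keeps each generated SQL IN clause within the limits some databases impose (Oracle, for example, caps an IN list at 1000 expressions).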
From source file:com.github.joshelser.YcsbBatchScanner.java
private void _run() throws Exception {
    log.info("Computing ranges");
    // numRanges
    List<Range> ranges = computeRanges();
    log.info("All ranges calculated: {} ranges found", ranges.size());

    for (int i = 0; i < numIterations; i++) {
        List<List<Range>> partitionedRanges = Lists.partition(ranges, numRangesPerPartition);
        log.info("Executing {} range partitions using a pool of {} threads", partitionedRanges.size(),
                threadPoolSize);
        List<Future<Integer>> results = new ArrayList<>();
        Stopwatch sw = new Stopwatch();
        sw.start();
        for (List<Range> partition : partitionedRanges) {
            // results.add(this.svc.submit(new BatchScannerQueryTask(conn, partition)));
            results.add(this.svc.submit(new ScannerQueryTask(conn, partition)));
        }
        for (Future<Integer> result : results) {
            log.debug("Found {} results", result.get());
        }
        sw.stop();
        log.info("Queries executed in {} ms", sw.elapsed(TimeUnit.MILLISECONDS));
    }
}
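Each chunk of numRangesPerPartition ranges becomes one task submitted to the pool, so chunks execute concurrently while the ranges inside a chunk are scanned sequentially by a single task.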
From source file:pt.ist.fenixedu.cmscomponents.ui.spring.UnitSiteManagementController.java
@RequestMapping(value = "manage/{page}", method = RequestMethod.GET) public String list(@PathVariable(value = "page") int page, Model model) { List<List<Site>> pages = Lists.partition(getSites(), ITEMS_PER_PAGE); int currentPage = normalize(page, pages); model.addAttribute("numberOfPages", pages.size()); model.addAttribute("currentPage", currentPage); model.addAttribute("sites", pages.isEmpty() ? Collections.emptyList() : pages.get(currentPage)); model.addAttribute("isManager", Group.managers().isMember(Authenticate.getUser())); return "fenix-learning/istSites"; }
From source file:com.pearson.eidetic.driver.threads.MonitorSnapshotVolumeNoTime.java
@Override
public void run() {
    /**
     * Every runTimeInterval_ Mins it will run to see if it needs to take a
     * snapshot of something
     */
    ConcurrentHashMap<Region, ArrayList<Volume>> localVolumeNoTime;
    localVolumeNoTime = awsAccount_.getVolumeNoTime_Copy();

    for (Map.Entry<Region, ArrayList<Volume>> entry : localVolumeNoTime.entrySet()) {
        Region region = entry.getKey();
        splitFactor_.put(region, 1);
    }

    while (true) {
        try {
            try {
                localVolumeNoTime = awsAccount_.getVolumeNoTime_Copy();
            } catch (Exception e) {
                logger.error("Error=\"awsAccount pull failure.\" " + e.toString()
                        + System.lineSeparator() + StackTrace.getStringFromStackTrace(e));
                Threads.sleepSeconds(5);
                continue;
            }

            HashMap<Region, Integer> secsSlept = new HashMap<>();
            HashMap<Region, Boolean> allDead = new HashMap<>();
            HashMap<Region, Integer> timeLeftOver = new HashMap<>();

            for (Map.Entry<Region, ArrayList<Volume>> entry : localVolumeNoTime.entrySet()) {
                Region region = entry.getKey();
                if (localVolumeNoTime.get(region).isEmpty()) {
                    continue;
                }

                // Chunk size is the region's current splitFactor_.
                List<List<Volume>> listOfLists = Lists.partition(localVolumeNoTime.get(region),
                        splitFactor_.get(region));
                localVolumeNoTimeList_.put(region, listsToArrayLists(listOfLists));
                //List<List<Volume>> lolz = Lists.partition(localVolumeNoTime.get(region), splitFactor_.get(region));
                //localVolumeNoTimeList_.put(region, splitArrayList(localVolumeNoTime.get(region), splitFactor_.get(region)));

                ArrayList<SnapshotVolumeNoTime> threads = new ArrayList<>();
                for (ArrayList<Volume> vols : localVolumeNoTimeList_.get(region)) {
                    threads.add(new SnapshotVolumeNoTime(awsAccount_.getAwsAccessKeyId(),
                            awsAccount_.getAwsSecretKey(), awsAccount_.getUniqueAwsAccountIdentifier(),
                            awsAccount_.getMaxApiRequestsPerSecond(),
                            ApplicationConfiguration.getAwsCallRetryAttempts(), region, vols));
                }

                // Initializing content
                secsSlept.put(region, 0);
                allDead.put(region, false);
                timeLeftOver.put(region, 0);
                EideticSubThreads_.put(region, threads);
            }

            // AND THEY'RE OFF
            for (Map.Entry<Region, ArrayList<Volume>> entry : localVolumeNoTime.entrySet()) {
                Region region = entry.getKey();
                if (localVolumeNoTime.get(region).isEmpty()) {
                    continue;
                }
                if (localVolumeNoTimeList_.get(region) == null
                        || localVolumeNoTimeList_.get(region).isEmpty()) {
                    continue;
                }
                Threads.threadExecutorFixedPool(EideticSubThreads_.get(region), splitFactor_.get(region),
                        runTimeInterval_, TimeUnit.SECONDS);
            }

            // LET'S SEE IF THEY'RE DEAD
            Boolean ejection = false;
            Boolean theyreDead;
            while (true) {
                for (Map.Entry<Region, ArrayList<Volume>> entry : localVolumeNoTime.entrySet()) {
                    Region region = entry.getKey();
                    if (localVolumeNoTime.get(region).isEmpty()) {
                        continue;
                    }
                    if (areAllThreadsDead(EideticSubThreads_.get(region))) {
                        allDead.put(region, true);
                    } else {
                        secsSlept.replace(region, secsSlept.get(region), secsSlept.get(region) + 1);
                        if (secsSlept.get(region) > runTimeInterval_) {
                            splitFactor_.replace(region, splitFactor_.get(region),
                                    splitFactor_.get(region) + 1);
                            logger.info("awsAccountNickname=\"" + awsAccount_.getUniqueAwsAccountIdentifier()
                                    + "\",Event=\"increasing_splitFactor\", Monitor=\"SnapshotVolumeNoTime\", splitFactor=\""
                                    + Integer.toString(splitFactor_.get(region)) + "\", VolumeNoTimeSize=\""
                                    + Integer.toString(localVolumeNoTime.get(region).size()) + "\"");
                            ejection = true;
                            break;
                        }
                    }
                }

                // I don't like this
                theyreDead = true;
                for (Map.Entry<Region, ArrayList<Volume>> entry : localVolumeNoTime.entrySet()) {
                    Region region = entry.getKey();
                    if (localVolumeNoTime.get(region).isEmpty()) {
                        continue;
                    }
                    // If any of them have false
                    if (!allDead.get(region)) {
                        theyreDead = false;
                    }
                }

                if (ejection || theyreDead) {
                    break;
                }
                Threads.sleepSeconds(1);
            }

            // See if decrease splitfactor
            for (Map.Entry<Region, ArrayList<Volume>> entry : localVolumeNoTime.entrySet()) {
                Region region = entry.getKey();
                if (localVolumeNoTime.get(region).isEmpty()) {
                    continue;
                }
                // Left over sleep time
                int timeRemaining = runTimeInterval_ - secsSlept.get(region);
                if (timeRemaining > 0) {
                    timeLeftOver.put(region, timeRemaining);
                }
                if ((splitFactor_.get(region) > 1) && (timeRemaining > 60)) {
                    splitFactor_.replace(region, splitFactor_.get(region), splitFactor_.get(region) - 1);
                    logger.info("awsAccountNickname=\"" + awsAccount_.getUniqueAwsAccountIdentifier()
                            + "\",Event=\"decreasing_splitFactor\", Monitor=\"SnapshotVolumeNoTime\", splitFactor=\""
                            + Integer.toString(splitFactor_.get(region)) + "\", VolumeNoTimeSize=\""
                            + Integer.toString(localVolumeNoTime.get(region).size()) + "\"");
                }
            }

            // Sleep our remaining time
            Map.Entry<Region, Integer> maxEntry = null;
            for (Map.Entry<Region, Integer> entry : timeLeftOver.entrySet()) {
                if (maxEntry == null || entry.getValue().compareTo(maxEntry.getValue()) > 0) {
                    Threads.sleepSeconds(10);
                    maxEntry = entry;
                }
            }
            if (maxEntry != null && maxEntry.getValue() > 0) {
                Threads.sleepSeconds(maxEntry.getValue());
            } else {
                Threads.sleepSeconds(runTimeInterval_);
            }

            localVolumeNoTimeList_.clear();
            EideticSubThreads_.clear();
        } catch (Exception e) {
            logger.error("awsAccountNickname=\"" + awsAccount_.getUniqueAwsAccountIdentifier()
                    + "\",Error=\"MonitorSnapshotVolumeNoTimeFailure\", stacktrace=\"" + e.toString()
                    + System.lineSeparator() + StackTrace.getStringFromStackTrace(e) + "\"");
            Threads.sleepSeconds(10);
        }
    }

    /*
    for (Region region : EideticSubThreads_.keySet()) {
        ArrayList<EideticSubThread> EideticSubThreads = EideticSubThreads_.get(region);
        EideticSubThreadMethods.areAllThreadsDead(EideticSubThreads);
    }
    */
}
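Unlike the first Eidetic example, the chunk size here is a per-region splitFactor_ that also sizes the thread pool; it is raised when a cycle overruns runTimeInterval_ and lowered again when a cycle finishes with more than a minute to spare.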
From source file:me.lucko.luckperms.common.storage.backing.utils.LegacySchemaMigration.java
@Override
public void run() {
    backing.getPlugin().getLog().info("Collecting UUID data from the old tables.");

    Map<UUID, String> uuidData = new HashMap<>();
    try (Connection c = backing.getProvider().getConnection()) {
        try (PreparedStatement ps = c.prepareStatement("SELECT uuid, name FROM lp_uuid")) {
            try (ResultSet rs = ps.executeQuery()) {
                while (rs.next()) {
                    try {
                        uuidData.put(UUID.fromString(rs.getString("uuid")), rs.getString("name"));
                    } catch (IllegalArgumentException e) {
                        e.printStackTrace();
                    }
                }
            }
        }
    } catch (SQLException e) {
        e.printStackTrace();
    }

    backing.getPlugin().getLog()
            .info("Found " + uuidData.size() + " uuid data entries. Copying to new tables...");

    List<Map.Entry<UUID, String>> uuidEntries = uuidData.entrySet().stream().collect(Collectors.toList());
    List<List<Map.Entry<UUID, String>>> partitionedUuidEntries = Lists.partition(uuidEntries, 100);

    for (List<Map.Entry<UUID, String>> l : partitionedUuidEntries) {
        try (Connection c = backing.getProvider().getConnection()) {
            try (PreparedStatement ps = c.prepareStatement(
                    backing.getPrefix().apply("INSERT INTO {prefix}players VALUES(?, ?, ?)"))) {
                for (Map.Entry<UUID, String> e : l) {
                    ps.setString(1, e.getKey().toString());
                    ps.setString(2, e.getValue().toLowerCase());
                    ps.setString(3, "default");
                    ps.addBatch();
                }
                ps.executeBatch();
            }
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    uuidData.clear();
    uuidEntries.clear();
    partitionedUuidEntries.clear();
    backing.getPlugin().getLog().info("Migrated all uuid data.");

    backing.getPlugin().getLog().info("Starting user data migration.");
    Set<UUID> users = new HashSet<>();
    try (Connection c = backing.getProvider().getConnection()) {
        try (PreparedStatement ps = c.prepareStatement("SELECT uuid FROM lp_users")) {
            try (ResultSet rs = ps.executeQuery()) {
                while (rs.next()) {
                    try {
                        users.add(UUID.fromString(rs.getString("uuid")));
                    } catch (IllegalArgumentException e) {
                        e.printStackTrace();
                    }
                }
            }
        }
    } catch (SQLException e) {
        e.printStackTrace();
    }

    backing.getPlugin().getLog()
            .info("Found " + users.size() + " user data entries. Copying to new tables...");

    AtomicInteger userCounter = new AtomicInteger(0);
    for (UUID uuid : users) {
        String permsJson = null;
        String primaryGroup = null;
        try (Connection c = backing.getProvider().getConnection()) {
            try (PreparedStatement ps = c.prepareStatement(
                    "SELECT primary_group, perms FROM lp_users WHERE uuid=?")) {
                ps.setString(1, uuid.toString());
                try (ResultSet rs = ps.executeQuery()) {
                    if (rs.next()) {
                        permsJson = rs.getString("perms");
                        primaryGroup = rs.getString("primary_group");
                    }
                }
            }
        } catch (SQLException e) {
            e.printStackTrace();
        }

        if (permsJson == null || primaryGroup == null) {
            new Throwable().printStackTrace();
            continue;
        }

        Map<String, Boolean> convertedPerms = backing.getGson().fromJson(permsJson, NODE_MAP_TYPE);
        if (convertedPerms == null) {
            new Throwable().printStackTrace();
            continue;
        }

        Set<NodeDataHolder> nodes = convertedPerms.entrySet().stream()
                .map(e -> NodeFactory.fromSerialisedNode(e.getKey(), e.getValue()))
                .map(NodeDataHolder::fromNode)
                .collect(Collectors.toSet());

        try (Connection c = backing.getProvider().getConnection()) {
            try (PreparedStatement ps = c.prepareStatement(backing.getPrefix().apply(
                    "INSERT INTO {prefix}user_permissions(uuid, permission, value, server, world, expiry, contexts) VALUES(?, ?, ?, ?, ?, ?, ?)"))) {
                for (NodeDataHolder nd : nodes) {
                    ps.setString(1, uuid.toString());
                    ps.setString(2, nd.getPermission());
                    ps.setBoolean(3, nd.isValue());
                    ps.setString(4, nd.getServer());
                    ps.setString(5, nd.getWorld());
                    ps.setLong(6, nd.getExpiry());
                    ps.setString(7, nd.serialiseContext());
                    ps.addBatch();
                }
                ps.executeBatch();
            }
        } catch (SQLException e) {
            e.printStackTrace();
        }

        if (!primaryGroup.equalsIgnoreCase("default")) {
            try (Connection c = backing.getProvider().getConnection()) {
                try (PreparedStatement ps = c.prepareStatement(
                        backing.getPrefix().apply("UPDATE {prefix}players SET primary_group=? WHERE uuid=?"))) {
                    ps.setString(1, primaryGroup);
                    ps.setString(2, uuid.toString());
                    ps.execute();
                }
            } catch (SQLException e) {
                e.printStackTrace();
            }
        }

        int i = userCounter.incrementAndGet();
        if (i % 100 == 0) {
            backing.getPlugin().getLog().info("Migrated " + i + " users so far...");
        }
    }
    users.clear();
    backing.getPlugin().getLog().info("Migrated all user data.");

    backing.getPlugin().getLog().info("Starting group data migration.");
    Map<String, String> groupData = new HashMap<>();
    try (Connection c = backing.getProvider().getConnection()) {
        try (PreparedStatement ps = c.prepareStatement("SELECT name, perms FROM lp_groups")) {
            try (ResultSet rs = ps.executeQuery()) {
                while (rs.next()) {
                    groupData.put(rs.getString("name"), rs.getString("perms"));
                }
            }
        }
    } catch (SQLException e) {
        e.printStackTrace();
    }

    backing.getPlugin().getLog()
            .info("Found " + groupData.size() + " group data entries. Copying to new tables...");

    for (Map.Entry<String, String> e : groupData.entrySet()) {
        String name = e.getKey();
        String permsJson = e.getValue();

        try (Connection c = backing.getProvider().getConnection()) {
            try (PreparedStatement ps = c.prepareStatement(
                    backing.getPrefix().apply("INSERT INTO {prefix}groups VALUES(?)"))) {
                ps.setString(1, name);
                ps.execute();
            }
        } catch (SQLException ex) {
            ex.printStackTrace();
        }

        Map<String, Boolean> convertedPerms = backing.getGson().fromJson(permsJson, NODE_MAP_TYPE);
        if (convertedPerms == null) {
            new Throwable().printStackTrace();
            continue;
        }

        Set<NodeDataHolder> nodes = convertedPerms.entrySet().stream()
                .map(ent -> NodeFactory.fromSerialisedNode(ent.getKey(), ent.getValue()))
                .map(NodeDataHolder::fromNode)
                .collect(Collectors.toSet());

        try (Connection c = backing.getProvider().getConnection()) {
            try (PreparedStatement ps = c.prepareStatement(backing.getPrefix().apply(
                    "INSERT INTO {prefix}group_permissions(name, permission, value, server, world, expiry, contexts) VALUES(?, ?, ?, ?, ?, ?, ?)"))) {
                for (NodeDataHolder nd : nodes) {
                    ps.setString(1, name);
                    ps.setString(2, nd.getPermission());
                    ps.setBoolean(3, nd.isValue());
                    ps.setString(4, nd.getServer());
                    ps.setString(5, nd.getWorld());
                    ps.setLong(6, nd.getExpiry());
                    ps.setString(7, nd.serialiseContext());
                    ps.addBatch();
                }
                ps.executeBatch();
            }
        } catch (SQLException ex) {
            ex.printStackTrace();
        }
    }
    groupData.clear();
    backing.getPlugin().getLog().info("Migrated all group data.");

    backing.getPlugin().getLog().info("Renaming action and track tables.");
    try (Connection c = backing.getProvider().getConnection()) {
        try (PreparedStatement ps = c.prepareStatement(
                backing.getPrefix().apply("DROP TABLE {prefix}actions"))) {
            ps.execute();
        }
        try (PreparedStatement ps = c.prepareStatement(
                backing.getPrefix().apply("ALTER TABLE lp_actions RENAME TO {prefix}actions"))) {
            ps.execute();
        }
        try (PreparedStatement ps = c.prepareStatement(
                backing.getPrefix().apply("DROP TABLE {prefix}tracks"))) {
            ps.execute();
        }
        try (PreparedStatement ps = c.prepareStatement(
                backing.getPrefix().apply("ALTER TABLE lp_tracks RENAME TO {prefix}tracks"))) {
            ps.execute();
        }
    } catch (SQLException ex) {
        ex.printStackTrace();
    }

    backing.getPlugin().getLog().info("Legacy schema migration complete.");
}
From source file:org.repodriller.RepositoryMining.java
private void processRepos(SCMRepository repo) {
    log.info("Git repository in " + repo.getPath());

    List<ChangeSet> allCs = range.get(repo.getScm());
    if (!reverseOrder)
        Collections.reverse(allCs);

    log.info("Total of commits: " + allCs.size());
    log.info("Starting threads: " + threads);

    ExecutorService exec = Executors.newFixedThreadPool(threads);
    List<List<ChangeSet>> partitions = Lists.partition(allCs, threads);
    for (List<ChangeSet> partition : partitions) {
        exec.submit(() -> {
            for (ChangeSet cs : partition) {
                try {
                    processChangeSet(repo, cs);
                } catch (OutOfMemoryError e) {
                    System.err.println("Commit " + cs.getId() + " in " + repo.getLastDir() + " caused OOME");
                    e.printStackTrace();
                    System.err.println("goodbye :/");
                    log.fatal("Commit " + cs.getId() + " in " + repo.getLastDir() + " caused OOME", e);
                    log.fatal("Goodbye! ;/");
                    System.exit(-1);
                } catch (Throwable t) {
                    log.error(t);
                }
            }
        });
    }
    try {
        exec.shutdown();
        exec.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);
    } catch (InterruptedException e) {
        log.error("error waiting for threads to terminate in " + repo.getLastDir(), e);
    }
}
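One subtlety worth noting: Lists.partition(allCs, threads) produces chunks of size threads, not threads chunks, so a long history yields many more tasks than pool workers; the fixed pool simply queues the surplus.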
From source file:com.pearson.eidetic.driver.threads.MonitorCopySnapshot.java
@Override
public void run() {
    /**
     * Every runTimeInterval_ Mins it will run to see if it needs to take a
     * snapshot of something
     */
    ConcurrentHashMap<Region, ArrayList<Volume>> localCopyVolumeSnapshots;
    localCopyVolumeSnapshots = awsAccount_.getCopyVolumeSnapshots_Copy();

    for (Map.Entry<Region, ArrayList<Volume>> entry : localCopyVolumeSnapshots.entrySet()) {
        Region region = entry.getKey();
        splitFactor_.put(region, 1);
    }

    while (true) {
        try {
            try {
                localCopyVolumeSnapshots = awsAccount_.getCopyVolumeSnapshots_Copy();
            } catch (Exception e) {
                logger.error("awsAccountNickname=\"" + awsAccount_.getUniqueAwsAccountIdentifier()
                        + "\",Error=\"awsAccount pull failure.\" " + e.toString()
                        + System.lineSeparator() + StackTrace.getStringFromStackTrace(e));
                Threads.sleepSeconds(5);
                continue;
            }

            HashMap<Region, Integer> secsSlept = new HashMap<>();
            HashMap<Region, Boolean> allDead = new HashMap<>();

            for (Map.Entry<Region, ArrayList<Volume>> entry : localCopyVolumeSnapshots.entrySet()) {
                Region region = entry.getKey();
                if (localCopyVolumeSnapshots.get(region).isEmpty()) {
                    continue;
                }

                List<List<Volume>> listOfLists = Lists.partition(localCopyVolumeSnapshots.get(region),
                        splitFactor_.get(region));
                localCopyVolumeSnapshotsList_.put(region, listsToArrayLists(listOfLists));

                ArrayList<CopySnapshot> threads = new ArrayList<>();
                for (ArrayList<Volume> vols : localCopyVolumeSnapshotsList_.get(region)) {
                    threads.add(new CopySnapshot(awsAccount_.getAwsAccessKeyId(),
                            awsAccount_.getAwsSecretKey(), awsAccount_.getUniqueAwsAccountIdentifier(),
                            awsAccount_.getMaxApiRequestsPerSecond(),
                            ApplicationConfiguration.getAwsCallRetryAttempts(), region, vols));
                }

                // Initializing content
                secsSlept.put(region, 0);
                allDead.put(region, false);
                EideticSubThreads_.put(region, threads);
            }

            // AND THEY'RE OFF
            for (Map.Entry<Region, ArrayList<Volume>> entry : localCopyVolumeSnapshots.entrySet()) {
                Region region = entry.getKey();
                if (localCopyVolumeSnapshots.get(region).isEmpty()) {
                    continue;
                }
                if (localCopyVolumeSnapshotsList_.get(region) == null
                        || localCopyVolumeSnapshotsList_.get(region).isEmpty()) {
                    continue;
                }
                Threads.threadExecutorFixedPool(EideticSubThreads_.get(region), splitFactor_.get(region),
                        runTimeInterval_, TimeUnit.SECONDS);
            }

            Boolean ejection = false;
            Boolean theyreDead;
            while (true) {
                for (Map.Entry<Region, ArrayList<Volume>> entry : localCopyVolumeSnapshots.entrySet()) {
                    Region region = entry.getKey();
                    if (localCopyVolumeSnapshots.get(region).isEmpty()) {
                        continue;
                    }
                    if (areAllThreadsDead(EideticSubThreads_.get(region))) {
                        allDead.put(region, true);
                    } else {
                        secsSlept.replace(region, secsSlept.get(region), secsSlept.get(region) + 1);
                        if (secsSlept.get(region) > runTimeInterval_) {
                            splitFactor_.replace(region, splitFactor_.get(region),
                                    splitFactor_.get(region) + 1);
                            logger.info("awsAccountNickname=\"" + awsAccount_.getUniqueAwsAccountIdentifier()
                                    + "\",Event=\"increasing_splitFactor\", Monitor=\"SnapshotVolumeNoTime\", splitFactor=\""
                                    + Integer.toString(splitFactor_.get(region)) + "\", VolumeNoTimeSize=\""
                                    + Integer.toString(localCopyVolumeSnapshots.get(region).size()) + "\"");
                            ejection = true;
                            break;
                        }
                    }
                }

                theyreDead = true;
                for (Map.Entry<Region, ArrayList<Volume>> entry : localCopyVolumeSnapshots.entrySet()) {
                    Region region = entry.getKey();
                    if (localCopyVolumeSnapshots.get(region).isEmpty()) {
                        continue;
                    }
                    // If any of them have false
                    if (!allDead.get(region)) {
                        theyreDead = false;
                    }
                }

                if (ejection || theyreDead) {
                    break;
                }
                Threads.sleepSeconds(1);
            }

            // See if decrease splitfactor
            for (Map.Entry<Region, ArrayList<Volume>> entry : localCopyVolumeSnapshots.entrySet()) {
                Region region = entry.getKey();
                if (localCopyVolumeSnapshots.get(region).isEmpty()) {
                    continue;
                }
                // Left over sleep time
                int timeRemaining = runTimeInterval_ - secsSlept.get(region);
                if ((splitFactor_.get(region) > 1) && (timeRemaining > 60)) {
                    splitFactor_.replace(region, splitFactor_.get(region), splitFactor_.get(region) - 1);
                    logger.info("awsAccountNickname=\"" + awsAccount_.getUniqueAwsAccountIdentifier()
                            + "\",Event=\"decreasing_splitFactor\", Monitor=\"CopySnapshot\", splitFactor=\""
                            + Integer.toString(splitFactor_.get(region)) + "\", CopyVolumeSnapshotsSize=\""
                            + Integer.toString(localCopyVolumeSnapshots.get(region).size()) + "\"");
                }
            }

            localCopyVolumeSnapshotsList_.clear();
            EideticSubThreads_.clear();
            Threads.sleepMinutes(10);
        } catch (Exception e) {
            logger.error("awsAccountNickname=\"" + awsAccount_.getUniqueAwsAccountIdentifier()
                    + "\",Error=\"MonitorCopySnapshotFailure\", stacktrace=\"" + e.toString()
                    + System.lineSeparator() + StackTrace.getStringFromStackTrace(e) + "\"");
            Threads.sleepSeconds(10);
        }
    }

    /*
    for (Region region : EideticSubThreads_.keySet()) {
        ArrayList<EideticSubThread> EideticSubThreads = EideticSubThreads_.get(region);
        EideticSubThreadMethods.areAllThreadsDead(EideticSubThreads);
    }
    */
}
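This monitor follows the same adaptive splitFactor_ pattern as MonitorSnapshotVolumeNoTime, but instead of sleeping the leftover interval it always sleeps a flat ten minutes between cycles.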