List of usage examples for com.google.common.collect.Iterables.size()
public static int size(Iterable<?> iterable)
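Before the project examples below, a minimal self-contained sketch of the call (class and data names are made up for illustration, not taken from any of the projects): when the argument is a Collection, size() simply delegates to Collection.size(); any other Iterable is walked and its elements counted.

import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;

import java.util.List;

public class IterablesSizeSketch {
    public static void main(String[] args) {
        // A Collection: Iterables.size() just delegates to List.size().
        List<String> names = ImmutableList.of("alpha", "beta", "gamma");
        System.out.println(Iterables.size(names)); // 3

        // A plain Iterable (Splitter returns a lazy view, not a Collection):
        // the elements are iterated and counted.
        Iterable<String> parts = Splitter.on(',').split("a,b,c,d");
        System.out.println(Iterables.size(parts)); // 4
    }
}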
From source file:org.apache.cassandra.streaming.StreamOut.java
private static List<PendingFile> createPendingFiles(Iterable<SSTableReader> sstables, Collection<Range<Token>> ranges, OperationType type)
{
    List<PendingFile> pending = new ArrayList<PendingFile>();
    for (SSTableReader sstable : sstables)
    {
        Descriptor desc = sstable.descriptor;
        List<Pair<Long, Long>> sections = sstable.getPositionsForRanges(ranges);
        if (sections.isEmpty())
        {
            // A reference was acquired on the sstable and we won't stream it
            sstable.releaseReference();
            continue;
        }
        pending.add(new PendingFile(sstable, desc, SSTable.COMPONENT_DATA, sections, type, sstable.estimatedKeysForRanges(ranges)));
    }
    logger.info("Stream context metadata {}, {} sstables.", pending, Iterables.size(sstables));
    return pending;
}
From source file:com.vmware.appfactory.notification.controller.NotificationApiController.java
/**
 * Get a consolidated status of all conversions' progress and a list of
 * alert actions.
 *
 * @return
 */
@RequestMapping(value = "/notify/alertAndProgress", method = RequestMethod.GET)
public @ResponseBody CaptureTaskSummary getCaptureAndAlertSummary() {
    long progressTotal = 0;
    int taskCount = 0;

    Iterable<TaskState> taskList = _conversionsQueue.getTasks(MetaStatusPredicate.NOT_FINISHED);
    Iterable<TaskState> captureStates = Iterables.filter(taskList,
            Predicates.instanceOf(AbstractCaptureState.class));
    for (TaskState state : captureStates) {
        progressTotal += state.getProgress();
        taskCount++;
    }
    Iterable<TaskState> runningTasks = Iterables.filter(taskList,
            Predicates.and(MetaStatusPredicate.RUNNING, Predicates.instanceOf(AbstractCaptureState.class)));

    // Compute the average progress
    if (taskCount != 0) {
        progressTotal /= taskCount;
    }

    // Create the dto and set the computed values and queued up capture tasks
    CaptureTaskSummary summary = new CaptureTaskSummary(Iterables.size(runningTasks), (int) progressTotal,
            getWaitingCaptureTaskCount());

    // Set the number of failed and wait-on-user alerts.
    List<ActionAlert> aaList = getWorkpoolAndImageFailedAlerts();
    aaList.addAll(getUserWaitAndFailedAlerts());
    aaList.addAll(getFeedFailedAlerts());
    aaList.addAll(getLowDiskSpaceAlerts());

    // Add the list of action alerts with the latest alert event appearing on top.
    Collections.sort(aaList);
    summary.setActionList(aaList);

    // Return the summary info for display.
    return summary;
}
From source file:com.twitter.common.application.modules.LocalServiceRegistry.java
/**
 * Launches the local services if not already launched, otherwise this is a no-op.
 */
void ensureLaunched() {
    if (primarySocket == null) {
        ImmutableList.Builder<LocalService> builder = ImmutableList.builder();

        for (ServiceRunner runner : runnerProvider.get()) {
            try {
                LocalService service = runner.launch();
                builder.add(service);
                shutdownRegistry.addAction(service.shutdownCommand);
            } catch (LaunchException e) {
                throw new IllegalStateException("Failed to launch " + runner, e);
            }
        }

        List<LocalService> localServices = builder.build();
        Iterable<LocalService> primaries = Iterables.filter(localServices, IS_PRIMARY);
        switch (Iterables.size(primaries)) {
            case 0:
                primarySocket = Optional.absent();
                break;

            case 1:
                primarySocket = Optional.of(SERVICE_TO_SOCKET.apply(Iterables.getOnlyElement(primaries)));
                break;

            default:
                throw new IllegalArgumentException("More than one primary local service: " + primaries);
        }

        Iterable<LocalService> auxSinglyNamed = Iterables.concat(FluentIterable.from(localServices)
                .filter(Predicates.not(IS_PRIMARY)).transform(AUX_NAME_BREAKOUT));

        Map<String, LocalService> byName;
        try {
            byName = Maps.uniqueIndex(auxSinglyNamed, GET_NAME);
        } catch (IllegalArgumentException e) {
            throw new IllegalArgumentException("Auxiliary services with identical names.", e);
        }

        auxiliarySockets = ImmutableMap.copyOf(Maps.transformValues(byName, SERVICE_TO_SOCKET));
    }
}
From source file:com.medvision360.medrecord.basex.BaseXLocatableStore.java
@Override
public Iterable<HierObjectID> list(EHR EHR, String rmEntity) throws IOException, NotFoundException {
    checkNotNull(EHR, "EHR cannot be null");
    checkNotNull(rmEntity, "rmEntity cannot be null");
    String path = fullPath(EHR) + "/" + rmEntity;
    Iterable<HierObjectID> result = list(path);
    if (Iterables.size(result) == 0) {
        // for throwing NotFoundException if the EHR does not exist
        list(EHR);
    }
    return result;
}
From source file:org.movsim.simulator.vehicles.lanechange.OvertakingViaPeer.java
private static boolean peerSegmentSuitedForOvertaking(RoadSegment peer) {
    if (Iterables.size(peer.trafficLights()) > 0) {
        return false;
    }
    if (peer.getSizeSourceRoadSegments() > 1) {
        return false;
    }
    return true;
}
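A side note on the emptiness check above (a general observation about the Guava API, not part of the movsim source): when the only question is "is there at least one element?", Iterables.isEmpty stops after checking the first element, whereas Iterables.size has to walk a non-Collection iterable to the end. A hypothetical helper:

// Hypothetical helper, not from OvertakingViaPeer: equivalent to
// Iterables.size(trafficLights) > 0, but without counting every element.
private static boolean hasAny(Iterable<?> trafficLights) {
    return !Iterables.isEmpty(trafficLights);
}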
From source file:com.proofpoint.event.monitor.Monitor.java
public void processEvents(Iterable<Event> events) {
    int count = Iterables.size(Iterables.filter(events, eventPredicate));
    counterStat.update(count);
}
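The Monitor snippet composes Iterables.filter with Iterables.size to count matching events. A standalone sketch of that counting pattern follows; the Event type and predicate below are invented for illustration and are not the classes used by Monitor.

import com.google.common.base.Predicate;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;

public class MatchCountSketch {
    // Illustrative event type, not the one referenced by the Monitor class above.
    static final class Event {
        final String type;
        Event(String type) { this.type = type; }
    }

    public static void main(String[] args) {
        Iterable<Event> events = ImmutableList.of(new Event("error"), new Event("info"), new Event("error"));
        Predicate<Event> isError = new Predicate<Event>() {
            @Override
            public boolean apply(Event e) {
                return "error".equals(e.type);
            }
        };
        // filter() is lazy; size() then walks the filtered view and counts the matches.
        int count = Iterables.size(Iterables.filter(events, isError));
        System.out.println(count); // 2
    }
}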
From source file:org.hbs.neo4j.importers.CsvImporter.java
private Iterable<String> getAndValidateHeader(String headerString, int minSize, String... keys) {
    boolean isHeaderValid = true;
    Iterable<String> header = Splitter.on(SEPARATOR).trimResults().split(headerString);
    if (Iterables.size(header) < minSize) {
        isHeaderValid = false;
    } else {
        for (int i = 0; i < keys.length; i++) {
            if (keys[i].compareTo(Iterables.get(header, i)) != 0) {
                isHeaderValid = false;
            }
        }
    }
    if (!isHeaderValid) {
        StringBuilder exMsg = new StringBuilder("Invalid Header for CSV file.");
        exMsg.append(" Header has to start with \"");
        for (int i = 0; i < keys.length; i++) {
            exMsg.append(keys[i]).append(SEPARATOR);
        }
        exMsg.append(" followed by a list of property names separated by tabs.");
        throw new IllegalArgumentException(exMsg.toString());
    }
    return header;
}
From source file:org.jclouds.aws.ec2.compute.extensions.AWSEC2SecurityGroupExtension.java
@Override
public SecurityGroup addIpPermission(IpProtocol protocol, int startPort, int endPort,
        Multimap<String, String> tenantIdGroupNamePairs, Iterable<String> ipRanges, Iterable<String> groupIds,
        SecurityGroup group) {
    String region = AWSUtils.getRegionFromLocationOrNull(group.getLocation());
    String id = group.getProviderId();

    IpPermission.Builder builder = IpPermission.builder();

    builder.ipProtocol(protocol);
    builder.fromPort(startPort);
    builder.toPort(endPort);

    if (Iterables.size(ipRanges) > 0) {
        for (String cidr : ipRanges) {
            builder.cidrBlock(cidr);
        }
    }

    if (tenantIdGroupNamePairs.size() > 0) {
        for (String userId : tenantIdGroupNamePairs.keySet()) {
            for (String groupString : tenantIdGroupNamePairs.get(userId)) {
                String[] parts = AWSUtils.parseHandle(groupString);
                String groupId = parts[1];
                builder.tenantIdGroupNamePair(userId, groupId);
            }
        }
    }

    client.getSecurityGroupApi().get().authorizeSecurityGroupIngressInRegion(region, id, builder.build());

    return getSecurityGroupById(group.getId());
}
From source file:org.apache.commons.math3.ml.clustering.Try_RDBSCANClusterer.java
/**
 * Recursively expands the cluster to include density-reachable items.
 *
 * @param point the point whose neighborhood is expanded
 * @return the expanded cluster
 */
private List<T> expand(final T point) {
    List<T> cluster = new ArrayList<T>();
    final Iterable<T> neighbors = getNeighbors(point, eps);
    if (Iterables.size(neighbors) >= minPts) {
        // BEGIN expansion
        cluster.add(point);
        visited.put(point, PointStatus.PART_OF_CLUSTER);
        Iterable<T> seeds = neighbors;
        for (final T current : seeds) {
            if (visited.get(current) == null) {
                // only check non-visited points
                cluster = merge(cluster, expand(current));
            }
            if (visited.get(current) != PointStatus.PART_OF_CLUSTER) {
                cluster.add(current);
                visited.put(current, PointStatus.PART_OF_CLUSTER);
            }
        }
        // END expansion
    } else {
        visited.put(point, PointStatus.NOISE);
    }
    return cluster;
}
From source file:org.apache.cassandra.db.SinglePartitionSliceCommand.java
protected UnfilteredRowIterator queryMemtableAndDiskInternal(ColumnFamilyStore cfs, boolean copyOnHeap)
{
    Tracing.trace("Acquiring sstable references");
    ColumnFamilyStore.ViewFragment view = cfs.select(View.select(SSTableSet.LIVE, partitionKey()));
    List<UnfilteredRowIterator> iterators = new ArrayList<>(Iterables.size(view.memtables) + view.sstables.size());
    ClusteringIndexSliceFilter filter = clusteringIndexFilter();

    try
    {
        for (Memtable memtable : view.memtables)
        {
            Partition partition = memtable.getPartition(partitionKey());
            if (partition == null)
                continue;

            @SuppressWarnings("resource") // 'iter' is added to iterators which is closed on exception, or through the closing of the final merged iterator
            UnfilteredRowIterator iter = filter.getUnfilteredRowIterator(columnFilter(), partition);
            @SuppressWarnings("resource") // same as above
            UnfilteredRowIterator maybeCopied = copyOnHeap ? UnfilteredRowIterators.cloningIterator(iter, HeapAllocator.instance) : iter;
            oldestUnrepairedTombstone = Math.min(oldestUnrepairedTombstone, partition.stats().minLocalDeletionTime);
            iterators.add(isForThrift() ? ThriftResultsMerger.maybeWrap(maybeCopied, nowInSec()) : maybeCopied);
        }

        /*
         * We can't eliminate full sstables based on the timestamp of what we've already read like
         * in collectTimeOrderedData, but we still want to eliminate sstables whose maxTimestamp < mostRecentTombstone
         * we've read. We still rely on the sstable ordering by maxTimestamp since if
         *   maxTimestamp_s1 > maxTimestamp_s0,
         * we're guaranteed that s1 cannot have a row tombstone such that
         *   timestamp(tombstone) > maxTimestamp_s0
         * since we necessarily have
         *   timestamp(tombstone) <= maxTimestamp_s1
         * In other words, iterating in maxTimestamp order allows us to do our mostRecentPartitionTombstone elimination
         * in one pass, and minimize the number of sstables for which we read a partition tombstone.
         */
        int sstablesIterated = 0;
        Collections.sort(view.sstables, SSTableReader.maxTimestampComparator);
        List<SSTableReader> skippedSSTables = null;
        long mostRecentPartitionTombstone = Long.MIN_VALUE;
        long minTimestamp = Long.MAX_VALUE;
        int nonIntersectingSSTables = 0;

        for (SSTableReader sstable : view.sstables)
        {
            minTimestamp = Math.min(minTimestamp, sstable.getMinTimestamp());
            // if we've already seen a partition tombstone with a timestamp greater
            // than the most recent update to this sstable, we can skip it
            if (sstable.getMaxTimestamp() < mostRecentPartitionTombstone)
                break;

            if (!filter.shouldInclude(sstable))
            {
                nonIntersectingSSTables++;
                // sstable contains no tombstone if maxLocalDeletionTime == Integer.MAX_VALUE, so we can safely skip those entirely
                if (sstable.getSSTableMetadata().maxLocalDeletionTime != Integer.MAX_VALUE)
                {
                    if (skippedSSTables == null)
                        skippedSSTables = new ArrayList<>();
                    skippedSSTables.add(sstable);
                }
                continue;
            }

            sstable.incrementReadCount();
            @SuppressWarnings("resource") // 'iter' is added to iterators which is closed on exception, or through the closing of the final merged iterator
            UnfilteredRowIterator iter = filter.filter(sstable.iterator(partitionKey(), columnFilter(), filter.isReversed(), isForThrift()));
            if (!sstable.isRepaired())
                oldestUnrepairedTombstone = Math.min(oldestUnrepairedTombstone, sstable.getMinLocalDeletionTime());

            iterators.add(isForThrift() ? ThriftResultsMerger.maybeWrap(iter, nowInSec()) : iter);
            mostRecentPartitionTombstone = Math.max(mostRecentPartitionTombstone, iter.partitionLevelDeletion().markedForDeleteAt());
            sstablesIterated++;
        }

        int includedDueToTombstones = 0;
        // Check for partition tombstones in the skipped sstables
        if (skippedSSTables != null)
        {
            for (SSTableReader sstable : skippedSSTables)
            {
                if (sstable.getMaxTimestamp() <= minTimestamp)
                    continue;

                sstable.incrementReadCount();
                @SuppressWarnings("resource") // 'iter' is either closed right away, or added to iterators which is closed on exception, or through the closing of the final merged iterator
                UnfilteredRowIterator iter = filter.filter(sstable.iterator(partitionKey(), columnFilter(), filter.isReversed(), isForThrift()));
                if (iter.partitionLevelDeletion().markedForDeleteAt() > minTimestamp)
                {
                    iterators.add(iter);
                    if (!sstable.isRepaired())
                        oldestUnrepairedTombstone = Math.min(oldestUnrepairedTombstone, sstable.getMinLocalDeletionTime());
                    includedDueToTombstones++;
                    sstablesIterated++;
                }
                else
                {
                    iter.close();
                }
            }
        }
        if (Tracing.isTracing())
            Tracing.trace("Skipped {}/{} non-slice-intersecting sstables, included {} due to tombstones",
                          nonIntersectingSSTables, view.sstables.size(), includedDueToTombstones);

        cfs.metric.updateSSTableIterated(sstablesIterated);

        if (iterators.isEmpty())
            return UnfilteredRowIterators.emptyIterator(cfs.metadata, partitionKey(), filter.isReversed());

        Tracing.trace("Merging data from memtables and {} sstables", sstablesIterated);

        @SuppressWarnings("resource") // Closed through the closing of the result of that method.
        UnfilteredRowIterator merged = UnfilteredRowIterators.merge(iterators, nowInSec());
        if (!merged.isEmpty())
        {
            DecoratedKey key = merged.partitionKey();
            cfs.metric.samplers.get(TableMetrics.Sampler.READS).addSample(key.getKey(), key.hashCode(), 1);
        }

        return merged;
    }
    catch (RuntimeException | Error e)
    {
        try
        {
            FBUtilities.closeAll(iterators);
        }
        catch (Exception suppressed)
        {
            e.addSuppressed(suppressed);
        }
        throw e;
    }
}