Usage examples for com.google.common.collect.Lists#newArrayListWithCapacity
@GwtCompatible(serializable = true) public static <E> ArrayList<E> newArrayListWithCapacity(int initialArraySize)
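Before the project examples, a minimal self-contained sketch of the method itself (the class and variable names here are illustrative, not taken from any of the sources below). The argument is used as the exact initial capacity of the backing array, so appending up to that many elements never triggers a resize:

import com.google.common.collect.Lists;

import java.util.ArrayList;

public class NewArrayListWithCapacityDemo {
    public static void main(String[] args) {
        // Pre-size the backing array for the expected element count,
        // avoiding intermediate array copies as the list grows.
        ArrayList<String> names = Lists.newArrayListWithCapacity(3);
        names.add("alpha");
        names.add("beta");
        names.add("gamma");
        System.out.println(names); // prints [alpha, beta, gamma]
    }
}

Note the contrast with Lists.newArrayListWithExpectedSize(int), which pads the given estimate rather than using it verbatim. On Java 7 and later, new ArrayList<>(3) is equivalent; the factory's main benefit was type inference before the diamond operator existed.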
From source file:com.github.steveash.jg2p.align.PathXTable.java
public PathXTable(int xSize, int maxBestPath) {
    this.table = Lists.newArrayListWithCapacity(xSize);
    init(xSize, maxBestPath);
}
From source file:org.apache.solr.handler.clustering.carrot2.EchoTokensClusteringAlgorithm.java
@Override
public void process() throws ProcessingException {
    final PreprocessingContext preprocessingContext =
            preprocessing.preprocess(documents, "", LanguageCode.ENGLISH);
    clusters = Lists.newArrayListWithCapacity(preprocessingContext.allTokens.image.length);
    for (char[] token : preprocessingContext.allTokens.image) {
        if (token != null) {
            clusters.add(new Cluster(new String(token)));
        }
    }
}
From source file:com.continuuity.loom.layout.change.AddServiceChangeIterator.java
public AddServiceChangeIterator(ClusterLayout clusterLayout, String service) {
    this.service = service;
    // cluster services are needed in order to prune the constraints to only use ones that
    // pertain to services on the cluster
    Set<String> expandedClusterServices = Sets.newHashSet(service);
    for (NodeLayout nodeLayout : clusterLayout.getLayout().elementSet()) {
        expandedClusterServices.addAll(nodeLayout.getServiceNames());
    }
    // first figure out which node layouts can add this service
    this.expandableNodeLayouts = Lists.newArrayListWithCapacity(clusterLayout.getLayout().elementSet().size());
    Multiset<NodeLayout> expandedCounts = HashMultiset.create();
    for (NodeLayout originalNodeLayout : clusterLayout.getLayout().elementSet()) {
        NodeLayout expandedNodeLayout = NodeLayout.addServiceToNodeLayout(originalNodeLayout, service);
        if (expandedNodeLayout.satisfiesConstraints(clusterLayout.getConstraints(), expandedClusterServices)) {
            expandableNodeLayouts.add(originalNodeLayout);
            expandedCounts.add(originalNodeLayout, clusterLayout.getLayout().count(originalNodeLayout));
        }
    }
    // sort expandable node layouts by preference order
    Collections.sort(this.expandableNodeLayouts, new NodeLayoutComparator(null, null));
    // need to pass this to the slotted iterator so we don't try to add the service to a node layout
    // more times than there are nodes for the node layout.
    this.nodeLayoutMaxCounts = new int[expandableNodeLayouts.size()];
    for (int i = 0; i < nodeLayoutMaxCounts.length; i++) {
        nodeLayoutMaxCounts[i] = expandedCounts.count(expandableNodeLayouts.get(i));
    }
    // figure out the max number of nodes we can add the service to. Start off by saying we can
    // add it to all nodes.
    this.nodesToAddTo = expandedCounts.size();
    // we always need to add the service to at least one node.
    this.minNodesToAddTo = 1;
    ServiceConstraint serviceConstraint = clusterLayout.getConstraints().getServiceConstraints().get(service);
    // if there is a max constraint on this service and it's less than the number of nodes in the
    // cluster, start there instead. Similarly, if there is a min constraint on this service higher
    // than 1, use that instead.
    if (serviceConstraint != null) {
        this.nodesToAddTo = Math.min(serviceConstraint.getMaxCount(), this.nodesToAddTo);
        this.minNodesToAddTo = Math.max(serviceConstraint.getMinCount(), this.minNodesToAddTo);
    }
    this.nodeLayoutCountIterator = (this.nodesToAddTo < 1) ? null
            : new SlottedCombinationIterator(expandableNodeLayouts.size(), nodesToAddTo, nodeLayoutMaxCounts);
}
From source file:org.gradle.internal.Actions.java
/**
 * Creates an action that will call each of the given actions in order.
 *
 * @param actions The actions to make a composite of.
 * @param <T> The type of the object that action is for
 * @return The composite action.
 */
public static <T> Action<T> composite(Action<? super T>... actions) {
    List<Action<? super T>> filtered = Lists.newArrayListWithCapacity(actions.length);
    for (Action<? super T> action : actions) {
        if (DOES_SOMETHING.apply(action)) {
            filtered.add(action);
        }
    }
    return composite(filtered);
}
From source file:kr.debop4j.timeperiod.timerange.YearTimeRange.java
/**
 * Gets halfyears.
 *
 * @return the halfyears
 */
public List<HalfyearRange> getHalfyears() {
    int startYear = getStartYear();
    List<HalfyearRange> halfyears = Lists.newArrayListWithCapacity(yearCount * TimeSpec.HalfyearsPerYear);
    for (int y = 0; y < yearCount; y++) {
        halfyears.add(new HalfyearRange(startYear + y, Halfyear.First, getTimeCalendar()));
        halfyears.add(new HalfyearRange(startYear + y, Halfyear.Second, getTimeCalendar()));
    }
    return halfyears;
}
From source file:nl.knaw.huygens.analysis.lucene.DiacriticsFilter.java
public static List<String> convert(List<String> items) {
    List<String> result = Lists.newArrayListWithCapacity(items.size());
    for (String item : items) {
        result.add(convert(item));
    }
    return result;
}
From source file:com.google.gerrit.httpd.rpc.SystemInfoServiceImpl.java
public void contributorAgreements(final AsyncCallback<List<ContributorAgreement>> callback) {
    Collection<ContributorAgreement> agreements =
            projectCache.getAllProjects().getConfig().getContributorAgreements();
    List<ContributorAgreement> cas = Lists.newArrayListWithCapacity(agreements.size());
    for (ContributorAgreement ca : agreements) {
        cas.add(ca.forUi());
    }
    callback.onSuccess(cas);
}
From source file:org.apache.mahout.ga.watchmaker.cd.CDMutation.java
@Override
public List<CDRule> apply(List<CDRule> selectedCandidates, Random rng) {
    List<CDRule> mutatedPopulation = Lists.newArrayListWithCapacity(selectedCandidates.size());
    for (CDRule ind : selectedCandidates) {
        mutatedPopulation.add(mutate(ind, rng));
    }
    return mutatedPopulation;
}
From source file:org.apache.druid.indexer.HadoopDruidDetermineConfigurationJob.java
@Override
public boolean run() {
    JobHelper.ensurePaths(config);
    if (config.isDeterminingPartitions()) {
        job = config.getPartitionsSpec().getPartitionJob(config);
        return JobHelper.runSingleJob(job, config);
    } else {
        int shardsPerInterval = config.getPartitionsSpec().getNumShards();
        Map<Long, List<HadoopyShardSpec>> shardSpecs = Maps.newTreeMap();
        int shardCount = 0;
        for (Interval segmentGranularity : config.getSegmentGranularIntervals().get()) {
            DateTime bucket = segmentGranularity.getStart();
            if (shardsPerInterval > 0) {
                List<HadoopyShardSpec> specs = Lists.newArrayListWithCapacity(shardsPerInterval);
                for (int i = 0; i < shardsPerInterval; i++) {
                    specs.add(new HadoopyShardSpec(
                            new HashBasedNumberedShardSpec(i, shardsPerInterval,
                                    config.getPartitionsSpec().getPartitionDimensions(),
                                    HadoopDruidIndexerConfig.JSON_MAPPER),
                            shardCount++));
                }
                shardSpecs.put(bucket.getMillis(), specs);
                log.info("DateTime[%s], spec[%s]", bucket, specs);
            } else {
                final HadoopyShardSpec spec = new HadoopyShardSpec(NoneShardSpec.instance(), shardCount++);
                shardSpecs.put(bucket.getMillis(), Collections.singletonList(spec));
                log.info("DateTime[%s], spec[%s]", bucket, spec);
            }
        }
        config.setShardSpecs(shardSpecs);
        return true;
    }
}
From source file:co.cask.tigon.data.transaction.queue.inmemory.InMemoryQueueService.java
/**
 * Drop either all streams or all queues.
 *
 * @param clearStreams if true, drops all streams, if false, clears all queues.
 * @param prefix if non-null, drops only queues with a name that begins with this prefix.
 */
private void resetAllQueuesOrStreams(boolean clearStreams, @Nullable String prefix) {
    List<String> toRemove = Lists.newArrayListWithCapacity(queues.size());
    for (String queueName : queues.keySet()) {
        if ((clearStreams && QueueName.isStream(queueName)) || (!clearStreams && QueueName.isQueue(queueName))) {
            if (prefix == null || queueName.startsWith(prefix)) {
                toRemove.add(queueName);
            }
        }
    }
    for (String queueName : toRemove) {
        queues.remove(queueName);
    }
}