List of usage examples for com.google.common.collect.Lists.newArrayListWithCapacity
@GwtCompatible(serializable = true) public static <E> ArrayList<E> newArrayListWithCapacity(int initialArraySize)
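Before the project examples below, a minimal self-contained sketch of the call itself (the class, variable names, and element type are illustrative, not taken from any of the projects): the method pre-sizes the backing array to the given capacity, so the returned list still starts empty and is filled with add.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import com.google.common.collect.Lists;

public class NewArrayListWithCapacityExample {
    public static void main(String[] args) {
        List<String> source = Arrays.asList("a", "b", "c");

        // Allocate the backing array for the known element count up front,
        // avoiding intermediate resizes while copying.
        ArrayList<String> copy = Lists.newArrayListWithCapacity(source.size());
        for (String s : source) {
            copy.add(s);
        }

        System.out.println(copy); // [a, b, c]
    }
}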
From source file:org.apache.shindig.protocol.multipart.DefaultMultipartFormParser.java
private Collection<FormDataItem> convertToFormData(List<FileItem> fileItems) {
    List<FormDataItem> formDataItems = Lists.newArrayListWithCapacity(fileItems.size());
    for (FileItem item : fileItems) {
        formDataItems.add(new CommonsFormDataItem(item));
    }
    return formDataItems;
}
From source file:com.opengamma.web.analytics.formatting.SnapshotDataBundleFormatter.java
private Map<String, Object> formatExpanded(final SnapshotDataBundle bundle, final ValueSpecification valueSpec) {
    final List<List<String>> results = Lists.newArrayListWithCapacity(bundle.size());
    final Map<String, Object> resultsMap = Maps.newHashMap();
    for (final Map.Entry<ExternalIdBundle, Double> entry : bundle.getDataPointSet()) {
        final ExternalId id = _orderConfig.getPreferred(entry.getKey());
        final String idStr = (id != null) ? id.toString() : "";
        final String formattedValue = _doubleFormatter.formatCell(entry.getValue(), valueSpec, null);
        results.add(ImmutableList.of(idStr, formattedValue));
    }
    resultsMap.put(DATA, results);
    resultsMap.put(LABELS, ImmutableList.of(ID, VALUE));
    return resultsMap;
}
From source file:org.apache.drill.exec.store.kafka.MessageIterator.java
public MessageIterator(final KafkaConsumer<byte[], byte[]> kafkaConsumer, final KafkaSubScanSpec subScanSpec,
        final long kafkaPollTimeOut) {
    this.kafkaConsumer = kafkaConsumer;
    this.kafkaPollTimeOut = kafkaPollTimeOut;
    List<TopicPartition> partitions = Lists.newArrayListWithCapacity(1);
    topicPartition = new TopicPartition(subScanSpec.getTopicName(), subScanSpec.getPartitionId());
    partitions.add(topicPartition);
    this.kafkaConsumer.assign(partitions);
    logger.info("Start offset of {}:{} is - {}", subScanSpec.getTopicName(), subScanSpec.getPartitionId(),
            subScanSpec.getStartOffset());
    this.kafkaConsumer.seek(topicPartition, subScanSpec.getStartOffset());
    this.endOffset = subScanSpec.getEndOffset();
}
From source file:ru.codeinside.gses.webui.supervisor.GroupsQuery.java
@Override
public List<Item> loadItems(int startIndex, int count) {
    final List<Group> groups;
    if (mode == GroupsQueryDefinition.Mode.ORG) {
        groups = Flash.flash().getAdminService().getControlledOrgGroupsOf(login, startIndex, count, sortProps,
                sortAsc, container == null ? null : container.sender);
    } else {
        groups = Flash.flash().getAdminService().getControlledEmpGroupsOf(login, startIndex, count, sortProps,
                sortAsc, container == null ? null : container.sender);
    }
    List<Item> itemsList = Lists.newArrayListWithCapacity(groups.size());
    for (Group g : groups) {
        final PropertysetItem item = new PropertysetItem();
        item.addItemProperty("name", new ObjectProperty<String>(g.getName()));
        item.addItemProperty("title", new ObjectProperty<String>(g.getTitle()));
        itemsList.add(item);
    }
    return itemsList;
}
From source file:com.ojuslabs.oct.data.RouteNode.java
/**
 * Each RouteNode instance should have an ID that is unique within its
 * route. It should, therefore, be provided by the route itself. Typical
 * usage is as follows.
 *
 * <pre>
 * Route route = Route.newInstance();
 * ...
 * RouteNode goal = route.goalNode();
 * ...
 * RouteNode node = goal.newChildNode();
 * ...
 * RouteNode node2 = node.newChildNode();
 * </pre>
 *
 * @param id
 */
RouteNode(int id) {
    _id = id;
    _children = Lists.newArrayListWithCapacity(Constants.LIST_SIZE_T);
    _byproducts = Lists.newArrayListWithCapacity(Constants.LIST_SIZE_T);
}
From source file:voldemort.store.routed.ReadRepairer.java
/**
 * Compute the repair set from the given values and nodes
 *
 * @param nodeValues The value found on each node
 * @return A set of repairs to perform
 */
public List<NodeValue<K, V>> getRepairs(List<NodeValue<K, V>> nodeValues) {
    int size = nodeValues.size();
    if (size <= 1)
        return Collections.emptyList();

    Map<K, List<NodeValue<K, V>>> keyToNodeValues = Maps.newHashMap();
    for (NodeValue<K, V> nodeValue : nodeValues) {
        List<NodeValue<K, V>> keyNodeValues = keyToNodeValues.get(nodeValue.getKey());
        if (keyNodeValues == null) {
            keyNodeValues = Lists.newArrayListWithCapacity(5);
            keyToNodeValues.put(nodeValue.getKey(), keyNodeValues);
        }
        keyNodeValues.add(nodeValue);
    }

    List<NodeValue<K, V>> result = Lists.newArrayList();
    for (List<NodeValue<K, V>> keyNodeValues : keyToNodeValues.values())
        result.addAll(singleKeyGetRepairs(keyNodeValues));
    return result;
}
From source file:org.apache.hadoop.hdfs.server.namenode.FSDirXAttrOp.java
/**
 * Set xattr for a file or directory.
 *
 * @param src
 *          - path on which it sets the xattr
 * @param xAttr
 *          - xAttr details to set
 * @param flag
 *          - xAttrs flags
 * @throws IOException
 */
static HdfsFileStatus setXAttr(FSDirectory fsd, String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
        boolean logRetryCache) throws IOException {
    checkXAttrsConfigFlag(fsd);
    checkXAttrSize(fsd, xAttr);
    FSPermissionChecker pc = fsd.getPermissionChecker();
    XAttrPermissionFilter.checkPermissionForApi(pc, xAttr, FSDirectory.isReservedRawName(src));
    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
    src = fsd.resolvePath(pc, src, pathComponents);
    List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
    xAttrs.add(xAttr);
    INodesInPath iip;
    fsd.writeLock();
    try {
        iip = fsd.getINodesInPath4Write(src);
        checkXAttrChangeAccess(fsd, iip, xAttr, pc);
        unprotectedSetXAttrs(fsd, src, xAttrs, flag);
    } finally {
        fsd.writeUnlock();
    }
    fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
    return fsd.getAuditFileInfo(iip);
}
From source file:org.apache.phoenix.iterate.UnionResultIterators.java
public UnionResultIterators(List<QueryPlan> plans, StatementContext parentStmtCtx) throws SQLException {
    this.parentStmtCtx = parentStmtCtx;
    this.plans = plans;
    int nPlans = plans.size();
    iterators = Lists.newArrayListWithExpectedSize(nPlans);
    splits = Lists.newArrayListWithExpectedSize(nPlans * 30);
    scans = Lists.newArrayListWithExpectedSize(nPlans * 10);
    readMetricsList = Lists.newArrayListWithCapacity(nPlans);
    overAllQueryMetricsList = Lists.newArrayListWithCapacity(nPlans);
    for (QueryPlan plan : this.plans) {
        readMetricsList.add(plan.getContext().getReadMetricsQueue());
        overAllQueryMetricsList.add(plan.getContext().getOverallQueryMetrics());
        iterators.add(LookAheadResultIterator.wrap(plan.iterator()));
        splits.addAll(plan.getSplits());
        scans.addAll(plan.getScans());
    }
}
From source file:com.sahlbach.maven.delivery.Delivery.java
/**
 * Creates a merged version of two deliveries.
 * Jobs are deeply merged.
 *
 * @param toMerge delivers the overwritten values of the delivery
 * @throws org.apache.maven.plugin.MojoExecutionException in case of merge conflicts
 * @return the merged instance (this) for call chaining
 */
public Delivery mergeWith(Delivery toMerge) throws MojoExecutionException {
    setId(toMerge.getId());
    if (toMerge.getDescription() != null)
        setDescription(toMerge.getDescription());
    Map<String, Job> mappedJobs = Maps.newHashMapWithExpectedSize(getJobs().size() + toMerge.getJobs().size());
    List<Job> resultJobs = Lists.newArrayListWithCapacity(getJobs().size() + toMerge.getJobs().size());
    for (Job job : getJobs()) {
        Job newJob = new Job();
        newJob.mergeWith(job);
        if (newJob.getId() != null)
            mappedJobs.put(newJob.getId(), newJob);
        resultJobs.add(newJob);
    }
    for (Job job : toMerge.getJobs()) {
        if ((job.getId() != null) && mappedJobs.get(job.getId()) != null) {
            Job existingJob = mappedJobs.get(job.getId());
            existingJob.mergeWith(job);
        } else {
            resultJobs.add(new Job().mergeWith(job));
        }
    }
    setJobs(resultJobs);
    return this;
}
From source file:net.derquinse.common.io.ChunkedHeapByteSource.java
@Override
public MemoryByteSource toDirect(boolean merge) {
    if (merge) {
        ByteBuffer buffer = ByteBuffer.allocateDirect(chunks.getTotalSize());
        writeTo(buffer);
        buffer.flip();
        return new SingleDirectByteSource(buffer);
    } else {
        List<SingleDirectByteSource> list = Lists.newArrayListWithCapacity(chunks.size());
        for (ByteArrayByteSource s : chunks) {
            list.add(s.toDirect(true));
        }
        return new ChunkedDirectByteSource(new Chunks<SingleDirectByteSource>(list));
    }
}