Usage examples for com.google.common.collect.Lists.newArrayListWithExpectedSize
@GwtCompatible(serializable = true)
public static <E> ArrayList<E> newArrayListWithExpectedSize(int estimatedSize)
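Per the Guava javadoc, this factory returns an ArrayList big enough to hold estimatedSize elements plus an "unspecified amount of padding", so a modest overshoot of the estimate does not force an immediate backing-array resize; when the element count is known exactly, newArrayListWithCapacity expresses that intent instead. A minimal self-contained sketch (the padding amount is an implementation detail, not part of the contract):

import java.util.ArrayList;

import com.google.common.collect.Lists;

public class ExpectedSizeDemo {
    public static void main(String[] args) {
        // The estimate need not be exact: the padding means a small
        // overshoot is typically absorbed without an immediate resize.
        ArrayList<String> names = Lists.newArrayListWithExpectedSize(3);
        names.add("a");
        names.add("b");
        names.add("c");
        names.add("d"); // one more than estimated is fine
        System.out.println(names); // [a, b, c, d]

        // A negative estimate is rejected up front.
        try {
            Lists.newArrayListWithExpectedSize(-1);
        } catch (IllegalArgumentException expected) {
            System.out.println("negative estimates throw IllegalArgumentException");
        }
    }
}

As the examples below show, the usual call site passes the size() of an input collection (or the length of an array), pre-sizing the output list before a copy or transform loop.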
From source file:com.android.builder.core.DexProcessBuilder.java
@NonNull
public DexProcessBuilder additionalParameters(@NonNull List<String> params) {
    if (mAdditionalParams == null) {
        mAdditionalParams = Lists.newArrayListWithExpectedSize(params.size());
    }
    mAdditionalParams.addAll(params);
    return this;
}
From source file:org.artifactory.ui.rest.model.artifacts.browse.treebrowser.nodes.JunctionNode.java
@Override
public Collection<? extends RestTreeNode> getChildren(AuthorizationService authService, boolean isCompact,
        ArtifactoryRestRequest request) {
    List<RestTreeNode> children;
    // create repo path
    RepoPath repositoryPath = InternalRepoPathFactory.create(getRepoKey(), getPath());
    RepositoryService repoService = getRepoService();
    BrowsableItemCriteria criteria;
    List<ItemInfo> items;
    // get children from the repo service
    RepositoryBrowsingService repositoryBrowsingService = ContextHelper.get()
            .beanForType(RepositoryBrowsingService.class);
    switch (repoType) {
    case "local":
        items = repoService.getChildren(repositoryPath);
        children = Lists.newArrayListWithExpectedSize(items.size());
        // populate child data
        populateChildData(children, repoService, items, isCompact);
        break;
    case "cache": {
        items = repoService.getChildren(repositoryPath);
        children = Lists.newArrayListWithExpectedSize(items.size());
        // populate child data
        populateChildData(children, repoService, items, isCompact);
        break;
    }
    case "remote": {
        RepoPath remoteRepoPath = InternalRepoPathFactory.create(getRepoKey(), getPath(), true);
        criteria = getBrowsableItemCriteria(remoteRepoPath);
        List<BaseBrowsableItem> remoteChildren = repositoryBrowsingService
                .getRemoteRepoBrowsableChildren(criteria);
        children = Lists.newArrayListWithExpectedSize(remoteChildren.size());
        Collections.sort(remoteChildren);
        populateRemoteData(children, remoteChildren, "remote");
        break;
    }
    case "virtual": {
        RepoPath virtualRepoPath = InternalRepoPathFactory.create(getRepoKey(), getPath(), true);
        criteria = getBrowsableItemCriteria(virtualRepoPath);
        List<BaseBrowsableItem> virtualChildren = repositoryBrowsingService
                .getVirtualRepoBrowsableChildren(criteria);
        children = Lists.newArrayListWithExpectedSize(virtualChildren.size());
        Collections.sort(virtualChildren);
        populateRemoteData(children, virtualChildren, "virtual");
        break;
    }
    default: {
        items = repoService.getChildren(repositoryPath);
        children = Lists.newArrayListWithExpectedSize(items.size());
        // populate child data
        populateChildData(children, repoService, items, isCompact);
        break;
    }
    }
    return children;
}
From source file:org.apache.hadoop.hdfs.server.namenode.XAttrFeature.java
public static List<StoredXAttr.PrimaryKey> getPrimaryKeys(long inodeId, List<XAttr> attrs) {
    List<StoredXAttr.PrimaryKey> pks = Lists.newArrayListWithExpectedSize(attrs.size());
    for (XAttr attr : attrs) {
        pks.add(getPrimaryKey(inodeId, attr));
    }
    return pks;
}
From source file:org.n52.sos.decode.json.JSONDecoder.java
protected <T> List<T> decodeJsonToObjectList(JsonNode node, Class<T> type) throws OwsExceptionReport {
    Decoder<T, JsonNode> decoder = getDecoder(type);
    if (node.isArray()) {
        List<T> filters = Lists.newArrayListWithExpectedSize(node.size());
        for (JsonNode n : node) {
            if (n.isObject()) {
                filters.add(decoder.decode(n));
            }
        }
        return filters;
    } else if (node.isObject()) {
        return Collections.singletonList(decoder.decode(node));
    } else {
        return Collections.emptyList();
    }
}
From source file:org.apache.phoenix.pig.hadoop.PhoenixRecordReader.java
@Override
public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
    final PhoenixInputSplit pSplit = (PhoenixInputSplit) split;
    final List<Scan> scans = pSplit.getScans();
    try {
        List<PeekingResultIterator> iterators = Lists.newArrayListWithExpectedSize(scans.size());
        for (Scan scan : scans) {
            final TableResultIterator tableResultIterator = new TableResultIterator(queryPlan.getContext(),
                    queryPlan.getTableRef(), scan);
            PeekingResultIterator peekingResultIterator = LookAheadResultIterator.wrap(tableResultIterator);
            iterators.add(peekingResultIterator);
        }
        ResultIterator iterator = ConcatResultIterator.newConcatResultIterator(iterators);
        if (queryPlan.getContext().getSequenceManager().getSequenceCount() > 0) {
            iterator = new SequenceResultIterator(iterator, queryPlan.getContext().getSequenceManager());
        }
        this.resultIterator = iterator;
        this.resultSet = new PhoenixResultSet(this.resultIterator, queryPlan.getProjector(),
                queryPlan.getContext().getStatement());
    } catch (SQLException e) {
        LOG.error(String.format("Error [%s] initializing PhoenixRecordReader.", e.getMessage()));
        Throwables.propagate(e);
    }
}
From source file:com.textocat.textokit.io.brat.BratCollectionReader.java
@Override
public void initialize(UimaContext ctx) throws ResourceInitializationException {
    super.initialize(ctx);
    // initialize mappingFactory
    mappingFactory = InitializableFactory.create(ctx, mappingFactoryClassName, BratUimaMappingFactory.class);
    // make bratDocIter
    File[] annFiles = bratCollectionDir
            .listFiles((FileFilter) FileFilterUtils.suffixFileFilter(BratDocument.ANN_FILE_SUFFIX));
    List<BratDocument> bratDocs = Lists.newArrayListWithExpectedSize(annFiles.length);
    for (File annFile : annFiles) {
        String docBaseName = FilenameUtils.getBaseName(annFile.getPath());
        BratDocument bratDoc = new BratDocument(bratCollectionDir, docBaseName);
        if (bratDoc.exists()) {
            bratDocs.add(bratDoc);
        } else {
            throw new IllegalStateException(String.format("Missing txt file for %s", annFile));
        }
    }
    totalDocsNum = bratDocs.size();
    bratDocIter = bratDocs.iterator();
}
From source file:org.eclipse.hawkbit.amqp.AmqpControllerAuthentication.java
private void addFilter() {
    filterChain = Lists.newArrayListWithExpectedSize(5);
    final ControllerPreAuthenticatedGatewaySecurityTokenFilter gatewaySecurityTokenFilter =
            new ControllerPreAuthenticatedGatewaySecurityTokenFilter(
                    tenantConfigurationManagement, tenantAware, systemSecurityContext);
    filterChain.add(gatewaySecurityTokenFilter);
    final ControllerPreAuthenticatedSecurityHeaderFilter securityHeaderFilter =
            new ControllerPreAuthenticatedSecurityHeaderFilter(
                    ddiSecruityProperties.getRp().getCnHeader(),
                    ddiSecruityProperties.getRp().getSslIssuerHashHeader(),
                    tenantConfigurationManagement, tenantAware, systemSecurityContext);
    filterChain.add(securityHeaderFilter);
    final ControllerPreAuthenticateSecurityTokenFilter securityTokenFilter =
            new ControllerPreAuthenticateSecurityTokenFilter(
                    tenantConfigurationManagement, controllerManagement, tenantAware, systemSecurityContext);
    filterChain.add(securityTokenFilter);
    final ControllerPreAuthenticatedAnonymousDownload anonymousDownloadFilter =
            new ControllerPreAuthenticatedAnonymousDownload(
                    tenantConfigurationManagement, tenantAware, systemSecurityContext);
    filterChain.add(anonymousDownloadFilter);
    filterChain.add(new ControllerPreAuthenticatedAnonymousFilter(ddiSecruityProperties));
}
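Here the expected size of 5 matches the five filters added, so the list never grows past its initial allocation; with a count this exact, Lists.newArrayListWithCapacity(5) would express the same intent, and newArrayListWithExpectedSize merely leaves padding for any filter added later.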
From source file:com.palantir.atlasdb.keyvalue.cassandra.jmx.CassandraJmxCompactionManager.java
private boolean deleteTombstone(String keyspace, String tableName, long timeoutInSeconds)
        throws InterruptedException, TimeoutException {
    List<TombstoneCompactionTask> compactionTasks = Lists.newArrayListWithExpectedSize(clients.size());
    for (CassandraJmxCompactionClient client : clients) {
        compactionTasks.add(new TombstoneCompactionTask(client, keyspace, tableName));
    }
    return executeInParallel(exec, compactionTasks, timeoutInSeconds);
}
From source file:com.splicemachine.ddl.ZooKeeperDDLWatchChecker.java
@Override
public void notifyProcessed(Collection<Pair<DDLMessage.DDLChange, String>> processedChanges) throws IOException {
    /*
     * Notify the relevant controllers that their change has been processed
     */
    List<Op> ops = Lists.newArrayListWithExpectedSize(processedChanges.size());
    List<DDLMessage.DDLChange> changeList = Lists.newArrayList();
    for (Pair<DDLMessage.DDLChange, String> pair : processedChanges) {
        DDLMessage.DDLChange change = pair.getFirst();
        String errorMessage = pair.getSecond();
        // Tag errors for handling on the client; this lets us understand which node failed and why
        String path = zkClient.changePath + "/" + change.getChangeId() + "/"
                + (errorMessage == null ? "" : DDLConfiguration.ERROR_TAG) + id;
        Op op = Op.create(path,
                (errorMessage == null ? new byte[] {}
                        : String.format("server [%s] failed with error [%s]", id, errorMessage).getBytes()),
                ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        ops.add(op);
        changeList.add(change);
    }
    try {
        List<OpResult> multi = ZkUtils.getRecoverableZooKeeper().getZooKeeper().multi(ops);
        for (int i = 0; i < multi.size(); i++) {
            OpResult result = multi.get(i);
            if (!(result instanceof OpResult.ErrorResult))
                processedChanges.remove(changeList.get(i));
            else {
                OpResult.ErrorResult err = (OpResult.ErrorResult) result;
                KeeperException.Code code = KeeperException.Code.get(err.getErr());
                switch (code) {
                case NODEEXISTS: // we may have already set the value, so ignore node-exists issues
                case NONODE: // someone already removed the notification; it is obsolete, so ignore
                    break;
                default:
                    throw Exceptions.getIOException(KeeperException.create(code));
                }
            }
        }
    } catch (InterruptedException e) {
        throw Exceptions.getIOException(e);
    } catch (KeeperException e) {
        switch (e.code()) {
        case NODEEXISTS: // we may have already set the value, so ignore node-exists issues
        case NONODE: // someone already removed the notification; it is obsolete, so ignore
            break;
        default:
            throw Exceptions.getIOException(e);
        }
    }
}
From source file:org.apache.kylin.cube.cuboid.algorithm.CuboidStatsUtil.java
public static Map<Long, List<Long>> createDirectChildrenCache(final Set<Long> cuboidSet) {
    /**
     * Sort the list in ascending order
     */
    final List<Long> cuboidList = Lists.newArrayList(cuboidSet);
    Collections.sort(cuboidList);
    /**
     * Sort the index list into layer order:
     * 1. fewer set bits sorts earlier
     * 2. for an equal bit count, the smaller value sorts earlier
     */
    List<Integer> layerIdxList = Lists.newArrayListWithExpectedSize(cuboidList.size());
    for (int i = 0; i < cuboidList.size(); i++) {
        layerIdxList.add(i);
    }
    Collections.sort(layerIdxList, new Comparator<Integer>() {
        @Override
        public int compare(Integer i1, Integer i2) {
            Long o1 = cuboidList.get(i1);
            Long o2 = cuboidList.get(i2);
            int nBitDiff = Long.bitCount(o1) - Long.bitCount(o2);
            if (nBitDiff != 0) {
                return nBitDiff;
            }
            return Long.compare(o1, o2);
        }
    });
    /**
     * Construct an index array mapping a cuboid's position in cuboidList
     * to its position in layerIdxList
     * (layerCuboidList speeds up continuous iteration)
     */
    int[] toLayerIdxArray = new int[layerIdxList.size()];
    final List<Long> layerCuboidList = Lists.newArrayListWithExpectedSize(cuboidList.size());
    for (int i = 0; i < layerIdxList.size(); i++) {
        int cuboidIdx = layerIdxList.get(i);
        toLayerIdxArray[cuboidIdx] = i;
        layerCuboidList.add(cuboidList.get(cuboidIdx));
    }

    int[] previousLayerLastIdxArray = new int[layerIdxList.size()];
    int currentBitCount = 0;
    int previousLayerLastIdx = -1;
    for (int i = 0; i < layerIdxList.size(); i++) {
        int cuboidIdx = layerIdxList.get(i);
        int nBits = Long.bitCount(cuboidList.get(cuboidIdx));
        if (nBits > currentBitCount) {
            currentBitCount = nBits;
            previousLayerLastIdx = i - 1;
        }
        previousLayerLastIdxArray[i] = previousLayerLastIdx;
    }

    Map<Long, List<Long>> directChildrenCache = Maps.newHashMap();
    for (int i = 0; i < cuboidList.size(); i++) {
        Long currentCuboid = cuboidList.get(i);
        LinkedList<Long> directChildren = Lists.newLinkedList();
        int lastLayerIdx = previousLayerLastIdxArray[toLayerIdxArray[i]];
        /**
         * Choose one of the two scan strategies:
         * 1. cuboids sorted by value, like 1,2,3,4,...
         * 2. cuboids layered and sorted, like 1,2,4,8,...,3,5,...
         */
        if (i - 1 <= lastLayerIdx) {
            /**
             * 1. Add cuboids in descending order
             */
            for (int j = i - 1; j >= 0; j--) {
                checkAndAddDirectChild(directChildren, currentCuboid, cuboidList.get(j));
            }
        } else {
            /**
             * 1. Add cuboids in descending order
             * 2. Check from the lower cuboid layer
             */
            for (int j = lastLayerIdx; j >= 0; j--) {
                checkAndAddDirectChild(directChildren, currentCuboid, layerCuboidList.get(j));
            }
        }
        directChildrenCache.put(currentCuboid, directChildren);
    }
    return directChildrenCache;
}
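To make the layer ordering concrete: for cuboidSet {1, 2, 3, 4, 5, 6, 7}, cuboidList is [1, 2, 3, 4, 5, 6, 7], while layerCuboidList comes out as [1, 2, 4, 3, 5, 6, 7], i.e. the one-bit cuboids (1, 2, 4), then the two-bit cuboids (3, 5, 6), then 7. The previousLayerLastIdxArray computed from this ordering lets each cuboid restrict its direct-children scan to cuboids in strictly lower layers.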