Example usage for com.google.common.collect.Lists.newArrayListWithExpectedSize

Introduction

On this page you can find example usage for com.google.common.collect.Lists.newArrayListWithExpectedSize.

Prototype

@GwtCompatible(serializable = true)
public static <E> ArrayList<E> newArrayListWithExpectedSize(int estimatedSize) 

Document

Creates an ArrayList instance to hold estimatedSize elements, plus an unspecified amount of padding; you almost certainly mean to call newArrayListWithCapacity(int) (see that method for further advice on usage).
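
For a quick sense of the distinction (a minimal, self-contained sketch; the values are made up for illustration): when the final size is known exactly, as in most of the examples below, newArrayListWithCapacity is the better fit, while newArrayListWithExpectedSize pads the estimate so that a slightly larger result does not force a resize.

import com.google.common.collect.Lists;
import java.util.ArrayList;

public class SizingSketch {
    public static void main(String[] args) {
        // Exact size known up front: prefer newArrayListWithCapacity.
        ArrayList<String> exact = Lists.newArrayListWithCapacity(3);
        exact.add("a");
        exact.add("b");
        exact.add("c");

        // Only a rough estimate available: newArrayListWithExpectedSize
        // adds unspecified padding, so a few extra elements are unlikely
        // to trigger a backing-array resize.
        ArrayList<Integer> estimated = Lists.newArrayListWithExpectedSize(10);
        for (int i = 0; i < 12; i++) {
            estimated.add(i);
        }
        System.out.println(exact.size() + " / " + estimated.size());
    }
}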

Usage

From source file:org.eclipse.xtext.common.types.access.binary.asm.BinaryGenericTypeSignature.java

@Override
public List<BinaryTypeArgumentSignature> getTypeArguments() {
    if (length < 2 || chars.charAt(offset + length - 2) != '>')
        // no type arguments; otherwise the signature would end with ">;"
        return Collections.emptyList();
    int count = 1; // start to count generic end/start peers
    int start = offset + length - 2;
    while (start >= offset && count > 0) {
        switch (chars.charAt(--start)) {
        case '<':
            count--;
            break;
        case '>':
            count++;
            break;
        }
    }
    if (start < 0) // invalid number of generic start/end
        throw new IllegalArgumentException(toString());
    List<BinaryTypeArgumentSignature> result = Lists.newArrayListWithExpectedSize(2);
    int p = start + 1;
    while (true) {
        if (p >= offset + length) {
            throw new IllegalArgumentException(toString());
        }
        char c = chars.charAt(p);
        if (c == '>') {
            return result;
        }
        int end = SignatureUtil.scanTypeSignature(chars, p);
        result.add(new BinaryTypeArgumentSignature(chars, p, end + 1 - p));
        p = end + 1;
    }
}
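
In the Xtext example above, the first loop walks backward from the trailing '>' of the signature, counting nested angle brackets until it reaches the matching '<'; the second loop then scans forward from that point, slicing off one type-argument signature at a time until it hits the closing '>'. The expected size of 2 is just a heuristic, presumably because most generic types carry one or two type arguments.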

From source file:org.apache.kylin.engine.mr.steps.CubingExecutableUtil.java

public static List<String> getMergingSegmentIds(Map<String, String> params) {
    final String ids = params.get(MERGING_SEGMENT_IDS);
    if (ids != null) {
        final String[] splitted = StringUtils.split(ids, ",");
        ArrayList<String> result = Lists.newArrayListWithExpectedSize(splitted.length);
        for (String id : splitted) {
            result.add(id);
        }
        return result;
    } else {
        return Collections.emptyList();
    }
}
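
Since the loop only copies the split array element by element, the same result could arguably be written with Guava's varargs factory, which sizes the backing ArrayList itself (a sketch, not what the Kylin source does):

ArrayList<String> result = Lists.newArrayList(StringUtils.split(ids, ","));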

From source file:org.n52.iceland.util.net.ProxyChain.java

/**
 * Creates a Proxy chain from the {@code X-Forwarded-For} HTTP header.
 *
 * @param header the {@code X-Forwarded-For} header
 *
 * @return a {@code ProxyChain} if the header is present, non-empty, and
 *         well-formed.
 */
public static Optional<ProxyChain> fromForwardedForHeader(String header) {
    try {
        if (Strings.emptyToNull(header) != null) {
            String[] split = header.split(",");
            List<IPAddress> chain = Lists.newArrayListWithExpectedSize(split.length);
            for (String splitted : split) {
                chain.add(getIPAddress(splitted));
            }
            return Optional.of(new ProxyChain(chain));
        }
    } catch (IllegalArgumentException e) {
        LOG.warn("Ignoring invalid IP address in X-Forwared-For header: " + header, e);
    }
    return Optional.absent();
}
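
A hedged usage sketch of the method above (the header value is invented for illustration; assumes the surrounding ProxyChain class and Guava's Optional, as in the source):

// Hypothetical X-Forwarded-For value: client first, then one proxy.
Optional<ProxyChain> chain =
        ProxyChain.fromForwardedForHeader("203.0.113.7, 198.51.100.23");
if (chain.isPresent()) {
    // First address is the originating client; later entries are proxies.
}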

From source file:org.apache.druid.query.Queries.java

/**
 * Returns decorated post-aggregators, based on original un-decorated post-aggregators. In addition, this method
 * also verifies that there are no output name collisions, and that all of the post-aggregators' required input
 * fields are present.
 *
 * @param otherOutputNames names of fields that will appear in the same output namespace as aggregators and
 *                         post-aggregators, and are also assumed to be valid inputs to post-aggregators. For most
 *                         built-in query types, this is either empty, or the list of dimension output names.
 * @param aggFactories     aggregator factories for this query
 * @param postAggs         post-aggregators for this query
 *
 * @return decorated post-aggregators
 *
 * @throws NullPointerException     if otherOutputNames or aggFactories is null
 * @throws IllegalArgumentException if there are any output name collisions or missing post-aggregator inputs
 */
public static List<PostAggregator> prepareAggregations(List<String> otherOutputNames,
        List<AggregatorFactory> aggFactories, List<PostAggregator> postAggs) {
    Preconditions.checkNotNull(otherOutputNames, "otherOutputNames cannot be null");
    Preconditions.checkNotNull(aggFactories, "aggregations cannot be null");

    final Set<String> combinedOutputNames = new HashSet<>();
    combinedOutputNames.addAll(otherOutputNames);

    final Map<String, AggregatorFactory> aggsFactoryMap = new HashMap<>();
    for (AggregatorFactory aggFactory : aggFactories) {
        Preconditions.checkArgument(combinedOutputNames.add(aggFactory.getName()), "[%s] already defined",
                aggFactory.getName());
        aggsFactoryMap.put(aggFactory.getName(), aggFactory);
    }

    if (postAggs != null && !postAggs.isEmpty()) {
        List<PostAggregator> decorated = Lists.newArrayListWithExpectedSize(postAggs.size());
        for (final PostAggregator postAgg : postAggs) {
            final Set<String> dependencies = postAgg.getDependentFields();
            final Set<String> missing = Sets.difference(dependencies, combinedOutputNames);

            Preconditions.checkArgument(missing.isEmpty(), "Missing fields [%s] for postAggregator [%s]",
                    missing, postAgg.getName());
            Preconditions.checkArgument(combinedOutputNames.add(postAgg.getName()), "[%s] already defined",
                    postAgg.getName());

            decorated.add(postAgg.decorate(aggsFactoryMap));
        }
        return decorated;
    }

    return postAggs;
}

From source file:com.cloudera.exhibit.javascript.JSCalculator.java

Obs toObs(Object obj, Exhibit exhibit) {
    List<Object> values = Lists.newArrayListWithExpectedSize(descriptor.size());
    if (obj instanceof Map) {
        Map mres = (Map) obj;
        for (ObsDescriptor.Field f : descriptor) {
            Object v = mres.get(f.name);
            values.add(v == null ? null : f.type.cast(v));
        }
    } else if (descriptor.size() == 1) {
        if (obj == null) {
            values.add(null);
        } else {
            values.add(descriptor.get(0).type.cast(obj));
        }
    } else {
        //TODO: log, provide default obs
        throw new IllegalStateException("Invalid javascript result: " + obj + " for exhibit: " + exhibit);
    }
    return new SimpleObs(descriptor, values);
}

From source file:com.twitter.distributedlog.config.ConfigurationSubscription.java

public ConfigurationSubscription(ConcurrentBaseConfiguration viewConfig,
        List<FileConfigurationBuilder> fileConfigBuilders, ScheduledExecutorService executorService,
        int reloadPeriod, TimeUnit reloadUnit) throws ConfigurationException {
    Preconditions.checkNotNull(fileConfigBuilders);
    Preconditions.checkArgument(!fileConfigBuilders.isEmpty());
    Preconditions.checkNotNull(executorService);
    Preconditions.checkNotNull(viewConfig);
    this.viewConfig = viewConfig;
    this.executorService = executorService;
    this.reloadPeriod = reloadPeriod;
    this.reloadUnit = reloadUnit;
    this.fileConfigBuilders = fileConfigBuilders;
    this.fileConfigs = Lists.newArrayListWithExpectedSize(this.fileConfigBuilders.size());
    this.confListeners = new CopyOnWriteArraySet<ConfigurationListener>();
    reload();
    scheduleReload();
}

From source file:org.apache.phoenix.mapreduce.PhoenixInputFormat.java

private List<InputSplit> generateSplits(final QueryPlan qplan, final List<KeyRange> splits) throws IOException {
    Preconditions.checkNotNull(qplan);
    Preconditions.checkNotNull(splits);
    final List<InputSplit> psplits = Lists.newArrayListWithExpectedSize(splits.size());
    for (List<Scan> scans : qplan.getScans()) {
        psplits.add(new PhoenixInputSplit(scans));
    }
    return psplits;
}

From source file:com.textocat.textokit.commons.util.CorpusUtils.java

/**
 * Partition corpus files specified by filters.
 *
 * @param corpusDir          corpus base directory
 * @param corpusFileFilter   filter for corpus files
 * @param corpusSubDirFilter filter for corpus subdirectories. If null subdirectories will
 *                           be ignored.
 * @param partitionsNumber   the number of partitions to create
 * @return list of file sets (partitions)
 */
public static List<Set<File>> partitionCorpusByFileSize(File corpusDir, IOFileFilter corpusFileFilter,
        IOFileFilter corpusSubDirFilter, int partitionsNumber) {
    log.info("Partitioning corpus {} with file filter {} and subdir filter {}...",
            new Object[] { corpusDir.getAbsolutePath(), corpusFileFilter, corpusSubDirFilter });
    // TODO implement an algorithm that is more robust to different file sizes
    // e.g. it should handle the case when there is no more files to include into the last partition
    if (partitionsNumber <= 0) {
        throw new IllegalArgumentException(String.format("Illegal number of partitions: %s", partitionsNumber));
    }
    if (!corpusDir.isDirectory()) {
        throw new IllegalArgumentException(String.format("%s is not existing directory", corpusDir));
    }
    final Deque<File> corpusFilesDeq;
    {
        List<File> corpusFiles = Lists
                .newArrayList(FileUtils.listFiles(corpusDir, corpusFileFilter, corpusSubDirFilter));
        // sort by decreasing size to smooth differences between parts
        Collections.sort(corpusFiles, SizeFileComparator.SIZE_REVERSE);
        corpusFilesDeq = Lists.newLinkedList(corpusFiles);
    }
    //
    int totalSize = 0;
    for (File cf : corpusFilesDeq) {
        totalSize += cf.length();
    }
    log.info("Corpus total size (bytes): {}", totalSize);
    List<FileBucket> buckets = Lists.newArrayListWithExpectedSize(partitionsNumber);
    // create empty parts
    for (int i = 0; i < partitionsNumber; i++) {
        buckets.add(new FileBucket());
    }
    while (!corpusFilesDeq.isEmpty()) {
        File cf = corpusFilesDeq.pop();
        buckets.get(0).add(cf);
        // resort: make the least bucket first
        Collections.sort(buckets);
    }
    // resort: make the largest bucket first
    Collections.sort(buckets, Collections.reverseOrder());
    // log
    log.info("Corpus {} has been partitioned by file sizes. Result partitions:\n{}", corpusDir,
            Joiner.on('\n').join(buckets));
    // transform
    List<Set<File>> result = Lists.newArrayList();
    for (FileBucket b : buckets) {
        result.add(b.getFiles());
    }
    // sanity checks
    if (result.size() != partitionsNumber || result.get(result.size() - 1).isEmpty()) {
        throw new IllegalStateException(
                "Illegal corpus partitioning result. Check previous log messages for details.");
    }
    return result;
}
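
The bucket-filling loop above is the classic longest-processing-time (LPT) greedy heuristic: files are taken in decreasing size order and each one goes to the currently least-loaded bucket. A hedged call sketch (the directory path and filters are made up; assumes commons-io on the classpath):

List<Set<File>> parts = CorpusUtils.partitionCorpusByFileSize(
        new File("/data/corpus"),
        FileFilterUtils.suffixFileFilter(".txt"),
        TrueFileFilter.INSTANCE,
        4);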

From source file:org.apache.distributedlog.common.config.ConfigurationSubscription.java

public ConfigurationSubscription(ConcurrentBaseConfiguration viewConfig,
        List<FileConfigurationBuilder> fileConfigBuilders, ScheduledExecutorService executorService,
        int reloadPeriod, TimeUnit reloadUnit) throws ConfigurationException {
    checkNotNull(fileConfigBuilders);
    checkArgument(!fileConfigBuilders.isEmpty());
    checkNotNull(executorService);
    checkNotNull(viewConfig);
    this.viewConfig = viewConfig;
    this.executorService = executorService;
    this.reloadPeriod = reloadPeriod;
    this.reloadUnit = reloadUnit;
    this.fileConfigBuilders = fileConfigBuilders;
    this.fileConfigs = Lists.newArrayListWithExpectedSize(this.fileConfigBuilders.size());
    this.confListeners = new CopyOnWriteArraySet<ConfigurationListener>();
    reload();
    scheduleReload();
}

From source file:org.apache.phoenix.iterate.ParallelIterators.java

@Override
protected void submitWork(List<List<Scan>> nestedScans,
        List<List<Pair<Scan, Future<PeekingResultIterator>>>> nestedFutures,
        final Queue<PeekingResultIterator> allIterators, int estFlattenedSize) {
    // Pre-populate nestedFutures lists so that we can shuffle the scans
    // and add the future to the right nested list. By shuffling the scans
    // we get better utilization of the cluster since our thread executor
    // will spray the scans across machines as opposed to targeting a
    // single one since the scans are in row key order.
    ExecutorService executor = context.getConnection().getQueryServices().getExecutor();
    List<ScanLocator> scanLocations = Lists.newArrayListWithExpectedSize(estFlattenedSize);
    for (int i = 0; i < nestedScans.size(); i++) {
        List<Scan> scans = nestedScans.get(i);
        List<Pair<Scan, Future<PeekingResultIterator>>> futures = Lists
                .newArrayListWithExpectedSize(scans.size());
        nestedFutures.add(futures);
        for (int j = 0; j < scans.size(); j++) {
            Scan scan = nestedScans.get(i).get(j);
            scanLocations.add(new ScanLocator(scan, i, j));
            futures.add(null); // placeholder
        }
    }
    // Shuffle so that we start execution across many machines
    // before we fill up the thread pool
    Collections.shuffle(scanLocations);
    ReadMetricQueue readMetrics = context.getReadMetricsQueue();
    final String physicalTableName = tableRef.getTable().getPhysicalName().getString();
    int numScans = scanLocations.size();
    context.getOverallQueryMetrics().updateNumParallelScans(numScans);
    GLOBAL_NUM_PARALLEL_SCANS.update(numScans);
    for (ScanLocator scanLocation : scanLocations) {
        final Scan scan = scanLocation.getScan();
        final CombinableMetric scanMetrics = readMetrics.allotMetric(MetricType.SCAN_BYTES, physicalTableName);
        final TaskExecutionMetricsHolder taskMetrics = new TaskExecutionMetricsHolder(readMetrics,
                physicalTableName);
        Future<PeekingResultIterator> future = executor
                .submit(Tracing.wrap(new JobCallable<PeekingResultIterator>() {

                    @Override
                    public PeekingResultIterator call() throws Exception {
                        long startTime = System.currentTimeMillis();
                        ResultIterator scanner = new TableResultIterator(context, tableRef, scan, scanMetrics);
                        if (logger.isDebugEnabled()) {
                            logger.debug(LogUtil.addCustomAnnotations("Id: " + scanId + ", Time: "
                                    + (System.currentTimeMillis() - startTime) + "ms, Scan: " + scan,
                                    ScanUtil.getCustomAnnotations(scan)));
                        }
                        PeekingResultIterator iterator = iteratorFactory.newIterator(context, scanner, scan,
                                physicalTableName);

                        // Fill the scanner's cache. This helps reduce latency since we are parallelizing the I/O needed.
                        iterator.peek();

                        allIterators.add(iterator);
                        return iterator;
                    }

                    /**
                     * Defines the grouping for round robin behavior.  All threads spawned to process
                     * this scan will be grouped together and time sliced with other simultaneously
                     * executing parallel scans.
                     */
                    @Override
                    public Object getJobId() {
                        return ParallelIterators.this;
                    }

                    @Override
                    public TaskExecutionMetricsHolder getTaskExecutionMetric() {
                        return taskMetrics;
                    }
                }, "Parallel scanner for table: " + tableRef.getTable().getName().getString()));
        // Add our future in the right place so that we can concatenate the
        // results of the inner futures versus merge sorting across all of them.
        nestedFutures.get(scanLocation.getOuterListIndex()).set(scanLocation.getInnerListIndex(),
                new Pair<Scan, Future<PeekingResultIterator>>(scan, future));
    }
}