Example usage for com.google.common.collect Lists newArrayListWithCapacity

List of usage examples for com.google.common.collect Lists newArrayListWithCapacity

Introduction

In this page you can find the example usage for com.google.common.collect Lists newArrayListWithCapacity.

Prototype

@GwtCompatible(serializable = true)
public static <E> ArrayList<E> newArrayListWithCapacity(int initialArraySize) 

Source Link

Document

Creates an ArrayList instance backed by an array with the specified initial size; simply delegates to ArrayList#ArrayList(int).

Usage

From source file:org.apache.giraph.edge.ArrayListEdges.java

@Override
public void initialize(int capacity) {
    // Pre-size the backing list so repeated edge add() calls avoid intermediate
    // array resizes when the final edge count is known up front.
    edgeList = Lists.newArrayListWithCapacity(capacity);
}

From source file:edu.byu.nlp.util.IntArrays.java

/**
 * Returns a new array containing the elements of {@code arr} in a random order.
 * The input array is not modified.
 *
 * @param arr the array to shuffle; may be empty
 * @param rnd the randomness source driving the shuffle
 * @return a freshly allocated, shuffled copy of {@code arr}
 */
public static int[] shuffled(int[] arr, RandomGenerator rnd) {
    // Box into a List so Collections.shuffle can be reused.
    List<Integer> tmp = Lists.newArrayListWithCapacity(arr.length);
    for (int value : arr) {
        tmp.add(value);
    }
    // Adapt the commons-math generator to java.util.Random for shuffle.
    Collections.shuffle(tmp, new RandomAdaptor(rnd));
    // Unbox back to int[] via a primitive stream instead of a manual copy loop.
    return tmp.stream().mapToInt(Integer::intValue).toArray();
}

From source file:com.palantir.atlasdb.table.description.NameMetadataDescription.java

/**
 * Builds a {@link NameMetadataDescription} for the given row components,
 * optionally prepending the fixed-long hash-of-first-component column.
 *
 * @param components            the declared row components
 * @param hasFirstComponentHash whether a hash row component should lead the key
 */
public static NameMetadataDescription create(List<NameComponentDescription> components,
        boolean hasFirstComponentHash) {
    if (hasFirstComponentHash) {
        // Reserve one extra slot for the synthetic hash component.
        List<NameComponentDescription> prefixed = Lists
                .newArrayListWithCapacity(components.size() + 1);
        prefixed.add(new NameComponentDescription(HASH_ROW_COMPONENT_NAME, ValueType.FIXED_LONG));
        prefixed.addAll(components);
        return new NameMetadataDescription(prefixed, true);
    }
    return new NameMetadataDescription(components, false);
}

From source file:com.indeed.imhotep.iql.Grouping.java

/**
 * Regroups the session and materializes per-group statistics for each stat reference.
 * Returns an empty iterator when there are no parent groups (e.g. everything was
 * filtered out upstream).
 *
 * NOTE(review): groups are iterated starting at index 1 — index 0 is presumably a
 * reserved "no group" slot in Imhotep; confirm against EZImhotepSession semantics.
 */
public Iterator<GroupStats> getGroupStats(EZImhotepSession session, Map<Integer, GroupKey> groupKeys,
        List<StatReference> statRefs, long timeoutTS) throws ImhotepOutOfMemoryException {
    if (groupKeys.isEmpty()) { // we don't have any parent groups probably because all docs were filtered out
        return Collections.<GroupStats>emptyList().iterator();
    }
    groupKeys = regroup(session, groupKeys);
    final int statCount = statRefs.size();
    // statGroupValues[stat][group]: one row of group values per stat reference.
    final double[][] statGroupValues = new double[statCount][];
    for (int i = 0; i < statCount; i++) {
        statGroupValues[i] = statRefs.get(i).getGroupStats();
    }
    final int groupCount = statGroupValues[0].length;
    final List<GroupStats> ret = Lists.newArrayListWithCapacity(groupCount);
    // Transpose: for each group, gather that group's value from every stat row.
    for (int group = 1; group < groupCount; group++) {
        final double[] groupStats = new double[statCount];
        for (int statNum = 0; statNum < groupStats.length; statNum++) {
            groupStats[statNum] = statGroupValues[statNum][group];
        }
        ret.add(new GroupStats(groupKeys.get(group), groupStats));
    }
    // Groups known to the key map but absent from the returned stats get all-zero
    // values; the same zero array instance is shared across them (never mutated).
    final double[] emptyGroupStats = new double[statCount];
    for (int group = groupCount; group < groupKeys.size() + 1; group++) {
        ret.add(new GroupStats(groupKeys.get(group), emptyGroupStats));
    }
    return ret.iterator();
}

From source file:org.terasology.persistence.typeHandling.coreTypes.LongTypeHandler.java

@Override
public List<Long> deserializeCollection(PersistedData data, DeserializationContext context) {
    // Non-array payloads deserialize to an empty, mutable list.
    if (!data.isArray()) {
        return Lists.newArrayList();
    }
    PersistedDataArray values = data.getAsArray();
    List<Long> longs = Lists.newArrayListWithCapacity(values.size());
    for (PersistedData element : values) {
        // Non-numeric entries are preserved positionally as nulls.
        longs.add(element.isNumber() ? element.getAsLong() : null);
    }
    return longs;
}

From source file:org.terasology.persistence.typeHandling.coreTypes.FloatTypeHandler.java

@Override
public List<Float> deserializeCollection(PersistedData data, DeserializationContext context) {
    // Non-array payloads deserialize to an empty, mutable list.
    if (!data.isArray()) {
        return Lists.newArrayList();
    }
    PersistedDataArray values = data.getAsArray();
    List<Float> floats = Lists.newArrayListWithCapacity(values.size());
    for (PersistedData element : values) {
        // Non-numeric entries are preserved positionally as nulls.
        floats.add(element.isNumber() ? element.getAsFloat() : null);
    }
    return floats;
}

From source file:org.terasology.persistence.typeHandling.coreTypes.IntTypeHandler.java

@Override
public List<Integer> deserializeCollection(PersistedData data, DeserializationContext context) {
    // Non-array payloads deserialize to an empty, mutable list.
    if (!data.isArray()) {
        return Lists.newArrayList();
    }
    PersistedDataArray values = data.getAsArray();
    List<Integer> ints = Lists.newArrayListWithCapacity(values.size());
    for (PersistedData element : values) {
        // Non-numeric entries are preserved positionally as nulls.
        ints.add(element.isNumber() ? element.getAsInteger() : null);
    }
    return ints;
}

From source file:org.eclipse.milo.opcua.sdk.server.api.AttributeHistoryManager.java

/**
 * Read history values from nodes belonging to this {@link AttributeHistoryManager}.
 * <p>
 * This default implementation rejects every request: it answers each
 * {@link HistoryReadValueId} with {@code Bad_NotSupported}. Implementations that
 * actually store history should override it.
 * <p>
 * Complete the operation with {@link HistoryReadContext#complete(List)}.
 *
 * @param context      the {@link HistoryReadContext}.
 * @param readDetails  the history read details describing the requested read.
 * @param timestamps   requested timestamp values.
 * @param readValueIds the values to read.
 */
default void historyRead(HistoryReadContext context, HistoryReadDetails readDetails,
        TimestampsToReturn timestamps, List<HistoryReadValueId> readValueIds) {

    List<HistoryReadResult> results = Lists.newArrayListWithCapacity(readValueIds.size());

    for (HistoryReadValueId readValueId : readValueIds) {
        // One Bad_NotSupported result per requested id, in request order.
        results.add(new HistoryReadResult(new StatusCode(StatusCodes.Bad_NotSupported), null, null));
    }

    context.complete(results);
}

From source file:org.terasology.persistence.typeHandling.coreTypes.DoubleTypeHandler.java

@Override
public List<Double> deserializeCollection(PersistedData data, DeserializationContext context) {
    if (data.isArray()) {
        PersistedDataArray array = data.getAsArray();
        List<Double> result = Lists.newArrayListWithCapacity(array.size());
        for (PersistedData item : array) {
            if (item.isNumber()) {
                result.add(item.getAsDouble());
            } else {
                result.add(null);/*from  ww w  .j ava2s  .co  m*/
            }
        }
        return result;
    }
    return Lists.newArrayList();
}

From source file:org.opendaylight.distributed.tx.impl.RollbackImpl.java

@Override
public CheckedFuture<Void, DTxException.RollbackFailedException> rollback(
        @Nonnull final Map<InstanceIdentifier<?>, ? extends TxCache> perNodeCachesByType,
        @Nonnull final Map<InstanceIdentifier<?>, ? extends ReadWriteTransaction> perNodeRollbackTxs) {

    // One submit future per node; all are aggregated into the returned future.
    final List<ListenableFuture<Void>> perNodeRollbackSubmitFutures = Lists
            .newArrayListWithCapacity(perNodeRollbackTxs.size());
    for (final Map.Entry<InstanceIdentifier<?>, ? extends TxCache> perNodeCacheEntry : perNodeCachesByType
            .entrySet()) {
        InstanceIdentifier<?> nodeId = perNodeCacheEntry.getKey();
        TxCache perNodeCache = perNodeCacheEntry.getValue();

        final ReadWriteTransaction perNodeRollbackTx = perNodeRollbackTxs.get(nodeId);
        for (CachedData cachedData : perNodeCache) {
            // Unchecked narrowing of the wildcard id; assumed safe because the cache
            // stored it from a DataObject write — TODO confirm at the cache producer.
            final InstanceIdentifier<DataObject> dataId = (InstanceIdentifier<DataObject>) cachedData.getId();

            ModifyAction revertAction = getRevertAction(cachedData.getOperation(), cachedData.getData());

            switch (revertAction) {
            case REPLACE: {
                // Restore the previously cached value. Any failure aborts the whole
                // rollback immediately — the node is then in an unknown state.
                try {
                    perNodeRollbackTx.put(cachedData.getDsType(), dataId, cachedData.getData().get());
                    break;
                } catch (Exception e) {
                    return Futures.immediateFailedCheckedFuture(new DTxException.RollbackFailedException(String
                            .format("Unable to rollback change for node: %s, %s data: %s. Node in unknown state.",
                                    perNodeCacheEntry.getKey(), revertAction, dataId),
                            e));
                }
            }
            case DELETE: {
                // Undo a create by deleting the written data; same fail-fast policy.
                try {
                    perNodeRollbackTx.delete(cachedData.getDsType(), dataId);
                    break;
                } catch (Exception e) {
                    return Futures.immediateFailedCheckedFuture(new DTxException.RollbackFailedException(String
                            .format("Unable to rollback change for node: %s, %s data: %s. Node in unknown state.",
                                    perNodeCacheEntry.getKey(), revertAction, dataId),
                            e));
                }
            }
            case NONE: {
                // Nothing to revert for this entry.
                break;
            }
            default: {
                return Futures.immediateFailedCheckedFuture(new DTxException.RollbackFailedException(
                        "Unable to handle rollback for node: " + perNodeCacheEntry.getKey()
                                + ", revert action: " + revertAction + ". Unknown operation type"));
            }
            }
        }
        CheckedFuture<Void, TransactionCommitFailedException> perNodeRollbackSumitFuture = null;
        try {
            perNodeRollbackSumitFuture = perNodeRollbackTx.submit();
        } catch (Exception submitException) {
            // A submit that throws synchronously is converted into a failed future so
            // it still participates in the aggregation below.
            perNodeRollbackSumitFuture = Futures.immediateFailedCheckedFuture(
                    new TransactionCommitFailedException("Rollback submit error occur", submitException));
        }
        perNodeRollbackSubmitFutures.add(perNodeRollbackSumitFuture);
        Futures.addCallback(perNodeRollbackSumitFuture,
                new LoggingRollbackCallback(perNodeCacheEntry.getKey()));
    }

    return aggregateRollbackFutures(perNodeRollbackSubmitFutures);
}