Example usage for com.google.common.collect Sets newHashSetWithExpectedSize

Introduction

This page collects example usages of com.google.common.collect Sets newHashSetWithExpectedSize, drawn from open-source projects.

Prototype

public static <E> HashSet<E> newHashSetWithExpectedSize(int expectedSize) 

Document

Creates a HashSet instance, with a high enough initial table size that it should hold expectedSize elements without resizing.
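
Before the project examples below, here is a minimal, self-contained sketch (the class and variable names are illustrative; it only assumes Guava on the classpath). Unlike new HashSet<>(n), which uses n directly as the initial table capacity and may still rehash before n elements have been inserted, this factory pads the capacity for the default load factor:

import com.google.common.collect.Sets;

import java.util.Arrays;
import java.util.List;
import java.util.Set;

public class ExpectedSizeExample {
    public static void main(String[] args) {
        final List<String> hosts = Arrays.asList("host-a", "host-b", "host-a");

        // The number of candidate elements is known up front, so size the set
        // for hosts.size() insertions; duplicates are still collapsed as usual.
        final Set<String> unique = Sets.newHashSetWithExpectedSize(hosts.size());
        unique.addAll(hosts);

        System.out.println(unique.size()); // 2
    }
}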

Usage

From source file: com.opengamma.financial.analytics.model.irfutureoption.IRFutureOptionSABRSensitivitiesFunction.java

@Override
protected Set<ComputedValue> getResult(final FunctionExecutionContext context,
        final Set<ValueRequirement> desiredValues, final FunctionInputs inputs, final ComputationTarget target,
        final InstrumentDerivative irFutureOption, final SABRInterestRateDataBundle data) {
    final PresentValueSABRSensitivityDataBundle sensitivities = irFutureOption.accept(CALCULATOR, data);
    final SurfaceValue alphaSurface = sensitivities.getAlpha();
    final SurfaceValue betaSurface = sensitivities.getBeta();
    final SurfaceValue rhoSurface = sensitivities.getRho();
    final SurfaceValue nuSurface = sensitivities.getNu();
    final ValueProperties properties = desiredValues.iterator().next().getConstraints().copy()
            .withoutAny(ValuePropertyNames.FUNCTION).with(ValuePropertyNames.FUNCTION, getUniqueId()).get();
    // Four results: one per SABR parameter surface (alpha, beta, rho, nu).
    final Set<ComputedValue> results = Sets.newHashSetWithExpectedSize(4);
    final String[] names = getValueRequirementNames();
    final ComputationTargetSpecification targetSpec = target.toSpecification();
    results.add(new ComputedValue(new ValueSpecification(names[0], targetSpec, properties),
            getMatrix(alphaSurface)));
    results.add(new ComputedValue(new ValueSpecification(names[1], targetSpec, properties),
            getMatrix(betaSurface)));
    results.add(
            new ComputedValue(new ValueSpecification(names[2], targetSpec, properties), getMatrix(rhoSurface)));
    results.add(
            new ComputedValue(new ValueSpecification(names[3], targetSpec, properties), getMatrix(nuSurface)));
    return results;
}

From source file: org.eclipse.xtext.xbase.typesystem.util.UnboundTypeParameterPreservingSubstitutor.java

@Override
protected Set<JvmTypeParameter> createVisiting() {
    return Sets.newHashSetWithExpectedSize(3);
}

From source file: com.pinterest.terrapin.controller.ControllerUtil.java

/**
 * Builds the helix ideal state for HDFS directory by finding the locations of HDFS blocks and
 * creating an ideal state assignment based on those.
 *
 * @param hdfsClient The HDFS client object.
 * @param hdfsDir The HDFS directory containing the various files.
 * @param resourceName The name of the Helix resource for which the ideal state is being created.
 * @param partitioner The partitioner type, used for extracting helix partition names from
 *                    HDFS files.
 * @param numReplicas The number of replicas for each partition.
 * @param enableZkCompression Whether data in zk is kept compressed.
 * @return The ideal state as computed based on HDFS block placement.
 * @throws ControllerException
 */
public static IdealState buildIdealStateForHdfsDir(DFSClient hdfsClient, String hdfsDir, String resourceName,
        PartitionerType partitioner, int numReplicas, boolean enableZkCompression) throws ControllerException {
    List<HdfsFileStatus> fileList;
    try {
        fileList = TerrapinUtil.getHdfsFileList(hdfsClient, hdfsDir);
    } catch (IOException e) {
        throw new ControllerException("Exception while listing files in " + hdfsDir,
                ControllerErrorCode.HDFS_ERROR);
    }
    // Mapping from file to HDFS block locations.
    Map<Integer, Set<String>> hdfsBlockMapping = Maps.newHashMapWithExpectedSize(fileList.size());
    for (HdfsFileStatus fileStatus : fileList) {
        Integer partitionName = TerrapinUtil.extractPartitionName(fileStatus.getLocalName(), partitioner);
        if (partitionName == null) {
            LOG.info("Skipping " + fileStatus.getLocalName() + " for " + hdfsDir);
            continue;
        }
        String fullName = fileStatus.getFullName(hdfsDir);
        BlockLocation[] locations = null;
        try {
            locations = hdfsClient.getBlockLocations(fullName, 0, fileStatus.getLen());
        } catch (Exception e) {
            throw new ControllerException("Exception while getting block locations " + e.getMessage(),
                    ControllerErrorCode.HDFS_ERROR);
        }
        Set<String> instanceSet = Sets.newHashSetWithExpectedSize(3);
        BlockLocation firstLocation = locations[0];
        String[] hosts = null;
        try {
            hosts = firstLocation.getHosts();
        } catch (IOException e) {
            throw new ControllerException("Exception while getting hosts " + e.getMessage(),
                    ControllerErrorCode.HDFS_ERROR);
        }
        for (String host : hosts) {
            instanceSet.add(host);
        }
        hdfsBlockMapping.put(partitionName, instanceSet);
    }
    // Assign helix partitions for the resource - which is the HDFS directory.
    int bucketSize = TerrapinUtil.getBucketSize(hdfsBlockMapping.size(), enableZkCompression);
    CustomModeISBuilder idealStateBuilder = new CustomModeISBuilder(resourceName);
    for (Map.Entry<Integer, Set<String>> mapping : hdfsBlockMapping.entrySet()) {
        // Make partitions globally unique
        String partitionName = null;
        // This is needed because of the way helix parses partition numbers for buckets.
        if (bucketSize > 0) {
            partitionName = resourceName + "_" + mapping.getKey();
        } else {
            partitionName = resourceName + "$" + mapping.getKey();
        }
        Set<String> instanceSet = mapping.getValue();
        for (String instance : instanceSet) {
            idealStateBuilder.assignInstanceAndState(partitionName,
                    TerrapinUtil.getHelixInstanceFromHDFSHost(instance), "ONLINE");
        }
    }
    idealStateBuilder.setStateModel("OnlineOffline");
    idealStateBuilder.setNumReplica(numReplicas);
    idealStateBuilder.setNumPartitions(hdfsBlockMapping.size());
    IdealState is = idealStateBuilder.build();
    if (bucketSize > 0) {
        is.setBucketSize(bucketSize);
    }
    is.setRebalanceMode(IdealState.RebalanceMode.CUSTOMIZED);
    if (enableZkCompression) {
        TerrapinUtil.compressIdealState(is);
    }
    return is;
}

From source file: com.torodb.torod.db.backends.query.processors.ProcessorTestUtils.java

private static HashSet<QueryCriteriaWrapper> convertQueryCriteria(Set<QueryCriteria> queries) {
    HashSet<QueryCriteriaWrapper> result = Sets.newHashSetWithExpectedSize(queries.size());

    Iterables.addAll(result, Iterables.transform(queries, new Function<QueryCriteria, QueryCriteriaWrapper>() {

        @Override
        public QueryCriteriaWrapper apply(QueryCriteria input) {
            return new QueryCriteriaWrapper(input);
        }
    }));

    return result;
}

From source file: com.opengamma.engine.fudgemsg.ComputedValueResultFudgeBuilder.java

@Override
public ComputedValueResult buildObject(final FudgeDeserializer deserializer, final FudgeMsg msg) {
    final ValueSpecification valueSpec = ComputedValueFudgeBuilder.getValueSpecification(deserializer, msg);
    final Object valueObject = ComputedValueFudgeBuilder.getValueObject(deserializer, msg);
    final AggregatedExecutionLog aggregatedExecutionLog = deserializer.fieldValueToObject(
            DefaultAggregatedExecutionLog.class, msg.getByName(AGGREGATED_EXECUTION_LOG_FIELD));
    final String computeNodeId = msg.getString(COMPUTE_NODE_ID_FIELD);
    final FudgeMsg missingInputsMsg = msg.getMessage(MISSING_INPUTS_FIELD_NAME);
    final Set<ValueSpecification> missingInputs;
    if (missingInputsMsg != null) {
        missingInputs = Sets.newHashSetWithExpectedSize(missingInputsMsg.getNumFields());
        for (final FudgeField missingInput : missingInputsMsg) {
            missingInputs.add(deserializer.fieldValueToObject(ValueSpecification.class, missingInput));
        }
    } else {
        missingInputs = null;
    }
    final String invocationResultName = msg.getString(INVOCATION_RESULT_FIELD_NAME);
    final InvocationResult invocationResult = invocationResultName != null
            ? InvocationResult.valueOf(invocationResultName)
            : null;
    return new ComputedValueResult(valueSpec, valueObject, aggregatedExecutionLog, computeNodeId, missingInputs,
            invocationResult);
}

From source file: com.opengamma.engine.fudgemsg.ViewComputationResultModelFudgeBuilder.java

private Set<ValueRequirement> getRequirements(final FudgeDeserializer deserializer,
        final FudgeField requirements) {
    final FudgeMsg msg = (FudgeMsg) requirements.getValue();
    final Set<ValueRequirement> result = Sets.newHashSetWithExpectedSize(msg.getNumFields());
    for (final FudgeField requirement : msg) {
        result.add(deserializer.fieldValueToObject(ValueRequirement.class, requirement));
    }
    return result;
}

From source file: com.android.ide.common.layout.relative.DeletionHandler.java

/**
 * Creates a new {@link DeletionHandler}
 *
 * @param deleted the deleted nodes
 * @param moved nodes that were moved (e.g. deleted, but also inserted elsewhere)
 * @param layout the parent layout of the deleted nodes
 */
public DeletionHandler(@NonNull List<INode> deleted, @NonNull List<INode> moved, @NonNull INode layout) {
    mDeleted = deleted;
    mMoved = moved;
    mLayout = layout;

    mChildren = mLayout.getChildren();
    mNodeMap = Maps.newHashMapWithExpectedSize(mChildren.length);
    for (INode child : mChildren) {
        String id = child.getStringAttr(ANDROID_URI, ATTR_ID);
        if (id != null) {
            mNodeMap.put(stripIdPrefix(id), child);
        }
    }

    mDeletedIds = Sets.newHashSetWithExpectedSize(mDeleted.size());
    for (INode node : mDeleted) {
        String id = node.getStringAttr(ANDROID_URI, ATTR_ID);
        if (id != null) {
            mDeletedIds.add(stripIdPrefix(id));
        }
    }

    // Any widgets that remain (e.g. typically because they were moved) should
    // keep their incoming dependencies
    for (INode node : mMoved) {
        String id = node.getStringAttr(ANDROID_URI, ATTR_ID);
        if (id != null) {
            mDeletedIds.remove(stripIdPrefix(id));
        }
    }
}

From source file: com.opengamma.language.install.HostConfigurationScanner.java

@Override
public void run() {
    final URI uri = URI.create("http://" + getHost() + ":" + getPort() + getBase());
    try {
        FudgeMsg configurations = new RemoteConfiguration(uri).getConfigurationMsg();
        if (configurations == null) {
            s_logger.info("No configuration document at {}", uri);
            return;
        }
        final Collection<Configuration> found = Sets.newHashSetWithExpectedSize(configurations.getNumFields());
        for (FudgeField field : configurations) {
            final FudgeMsg configuration = configurations.getFieldValue(FudgeMsg.class, field);
            final String description = configuration.getString(DESCRIPTION_FIELD);
            if (description != null) {
                s_logger.debug("Found {}/{}", field.getName(), description);
                found.add(new Configuration(uri.resolve(field.getName()), description));
            } else {
                s_logger.debug("Ignoring {} - no description", field.getName());
            }
        }
        addConfigurations(found);

    } catch (RuntimeException e) {
        s_logger.info("Couldn't fetch configuration from {}", uri);
        s_logger.debug("Caught exception", e);
    } finally {
        complete();
    }
}

From source file: com.palantir.lock.ForwardingLockService.java

@Override
public Set<HeldLocksToken> refreshTokens(Iterable<HeldLocksToken> tokens) {
    Set<LockRefreshToken> refreshTokens = ImmutableSet
            .copyOf(Iterables.transform(tokens, HeldLocksTokens.getRefreshTokenFun()));
    Set<LockRefreshToken> goodTokens = delegate().refreshLockRefreshTokens(refreshTokens);
    Set<HeldLocksToken> ret = Sets.newHashSetWithExpectedSize(refreshTokens.size());
    Map<LockRefreshToken, HeldLocksToken> tokenMap = Maps.uniqueIndex(tokens,
            HeldLocksTokens.getRefreshTokenFun());
    // Map each token the delegate successfully refreshed back to its held lock.
    for (LockRefreshToken goodToken : goodTokens) {
        HeldLocksToken lock = tokenMap.get(goodToken);
        ret.add(goodToken.refreshTokenWithExpriationDate(lock));
    }
    return ret;
}

From source file: com.opengamma.integration.viewer.status.impl.SimpleViewStatusModel.java

@Override
public Set<String> getComputationTargetTypes() {
    Set<String> result = Sets.newHashSetWithExpectedSize(_viewStatusResult.size());
    for (ViewStatusKey key : _viewStatusResult.keySet()) {
        result.add(key.getTargetType());
    }
    return result;
}