Example usage for com.google.common.collect Sets newHashSetWithExpectedSize

Introduction

This page collects example usages of com.google.common.collect.Sets#newHashSetWithExpectedSize from open source projects.

Prototype

public static <E> HashSet<E> newHashSetWithExpectedSize(int expectedSize) 

Document

Creates a HashSet instance, with a high enough initial table size that it should hold expectedSize elements without resizing.
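
Before the project examples, here is a minimal, self-contained sketch of the pattern they all share (the class and variable names here are illustrative): size the set from a source collection whose size is already known, so that copying into it should not trigger a rehash. Unlike new HashSet<>(n), where n is the raw initial table capacity, this factory accounts for the load factor when choosing the table size.

import com.google.common.collect.Sets;

import java.util.Arrays;
import java.util.List;
import java.util.Set;

public class ExpectedSizeDemo {
    public static void main(String[] args) {
        List<String> names = Arrays.asList("alpha", "beta", "gamma");

        // Pre-sized so that adding names.size() elements should not
        // resize the backing table; the factory derives a table size
        // from the expected element count and the default load factor.
        Set<String> copy = Sets.newHashSetWithExpectedSize(names.size());
        copy.addAll(names);

        System.out.println(copy);
    }
}

Each example below follows this shape: the expected size is taken from an input whose size is known up front.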

Usage

From source file: co.cask.cdap.internal.app.runtime.webapp.WebappProgramRunner.java

public static Set<String> getServingHostNames(InputSupplier<? extends InputStream> inputSupplier)
        throws Exception {
    try (JarInputStream jarInput = new JarInputStream(inputSupplier.getInput())) {
        Set<String> hostNames = Sets.newHashSet();
        JarEntry jarEntry;
        String webappDir = Constants.Webapp.WEBAPP_DIR + "/";
        while ((jarEntry = jarInput.getNextJarEntry()) != null) {
            if (!jarEntry.isDirectory() && jarEntry.getName().startsWith(webappDir)
                    && jarEntry.getName().contains(ServePathGenerator.SRC_PATH)) {
                // Format is - webapp/host:port/[path/]src/files
                String webappHostName = Iterables.get(Splitter.on("/src/").split(jarEntry.getName()), 0);
                String hostName = Iterables.get(Splitter.on('/').limit(2).split(webappHostName), 1);

                hostNames.add(hostName);
            }
        }

        Set<String> registerNames = Sets.newHashSetWithExpectedSize(hostNames.size());
        for (String hostName : hostNames) {
            if (hostName.equals(ServePathGenerator.DEFAULT_DIR_NAME)) {
                LOG.warn("Not registering default service name; default service needs to have a routable path");
                continue;
            } else if (hostName.startsWith(DEFAULT_DIR_NAME_COLON)) {
                LOG.warn("Not registering default service name with explicit port - {}", hostName);
                continue;
            }

            registerNames.add(Networks.normalizeWebappDiscoveryName(hostName));
        }

        return registerNames;
    }
}

From source file: monasca.common.hibernate.db.AlarmDefinitionDb.java

public AlarmDefinitionDb setAlarms(final Collection<AlarmDb> alarms) {
    final AlarmDefinitionDb self = this;
    this.alarms = Sets.newHashSetWithExpectedSize(alarms.size());
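    // Point each alarm back at this definition, then copy the alarms into the pre-sized set.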
    FluentIterable.from(alarms).transform(new Function<AlarmDb, AlarmDb>() {
        @Nullable
        @Override
        public AlarmDb apply(@Nullable final AlarmDb input) {
            assert input != null;
            input.setAlarmDefinition(self);
            return input;
        }
    }).copyInto(this.alarms);
    return this;
}

From source file: com.cinchapi.concourse.server.storage.db.SearchBlock.java

/**
 * Calculate all possible substrings for {@code term} and submit a task to
 * the {@link #indexer} that will store a revision for the {@code term} at
 * {@code position} for {@code key} in {@code record} at {@code version}.
 *
 * @param key
 * @param term
 * @param position
 * @param record
 * @param version
 * @param type
 * @return {@link Future Futures} that can be used to wait for all the
 *         submitted tasks to complete
 */
private List<Future<?>> process(final Text key, final String term, final int position, final PrimaryKey record,
        final long version, final Action type) {
    if (!STOPWORDS.contains(term)) {
        int upperBound = (int) Math.pow(term.length(), 2);
        List<Future<?>> futures = Lists.newArrayListWithCapacity(upperBound);

        // The set of substrings that have been indexed from {@code term} at
        // {@code position} for {@code key} in {@code record} at {@code
        // version}. This is used to ensure that we do not add duplicate
        // indexes (i.e. 'abrakadabra')
        Set<String> indexed = Sets.newHashSetWithExpectedSize(upperBound);

        for (int i = 0; i < term.length(); ++i) {
            for (int j = i + 1; j < term.length() + 1; ++j) {
                final String substring = term.substring(i, j).trim();
                if (!Strings.isNullOrEmpty(substring) && !STOPWORDS.contains(substring)
                        && !indexed.contains(substring)) {
                    indexed.add(substring);
                    futures.add(indexer.submit(new Runnable() {

                        @Override
                        public void run() {
                            doInsert(key, Text.wrap(substring), Position.wrap(record, position), version, type);
                        }

                    }));
                }

            }
        }
        indexed = null; // make eligible for immediate GC
        return futures;
    } else {
        return Collections.emptyList();
    }
}

From source file: com.google.cloud.pubsub.spi.DefaultPubSubRpc.java

private static <V> ListenableFuture<V> translate(ListenableFuture<V> from, final boolean idempotent,
        int... returnNullOn) {
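    // Pre-size from the varargs length and collect the status codes that
    // should yield a null result instead of an exception.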
    final Set<Integer> returnNullOnSet = Sets.newHashSetWithExpectedSize(returnNullOn.length);
    for (int value : returnNullOn) {
        returnNullOnSet.add(value);
    }
    return Futures.catching(from, ApiException.class, new Function<ApiException, V>() {
        @Override
        public V apply(ApiException exception) {
            if (returnNullOnSet.contains(exception.getStatusCode().value())) {
                return null;
            }
            throw new PubSubException(exception, idempotent);
        }
    });
}

From source file: com.opengamma.engine.view.calcnode.CalculationJobResultItem.java

@Override
public void convertIdentifiers(final Long2ObjectMap<ValueSpecification> identifiers) {
    if (_missingInputs == null) {
        if (_missingInputIdentifiers == null) {
            _missingInputs = Collections.emptySet();
        } else {
            _missingInputs = Sets.newHashSetWithExpectedSize(_missingInputIdentifiers.length);
            for (long identifier : _missingInputIdentifiers) {
                _missingInputs.add(identifiers.get(identifier));
            }
        }
    }
    if (_missingOutputs == null) {
        if (_missingOutputIdentifiers == null) {
            _missingOutputs = Collections.emptySet();
        } else {
            _missingOutputs = Sets.newHashSetWithExpectedSize(_missingOutputIdentifiers.length);
            for (long identifier : _missingOutputIdentifiers) {
                _missingOutputs.add(identifiers.get(identifier));
            }
        }
    }
}

From source file: com.opengamma.financial.analytics.MissingInputsFunction.java

@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context,
        final ComputationTarget target) {
    final Set<ValueSpecification> underlyingResults = getUnderlyingCompiled().getResults(context, target);
    if (underlyingResults == null) {
        s_logger.debug("Underlying returned null for target {}", target);
        return null;
    }
    final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(underlyingResults.size());
    for (final ValueSpecification underlyingResult : underlyingResults) {
        final ValueProperties underlyingProperties = underlyingResult.getProperties();
        final ValueProperties.Builder properties = underlyingProperties.copy();
        if (underlyingProperties.getProperties().isEmpty()) {
            // Got the infinite or nearly infinite property set
            properties.withAny(ValuePropertyNames.AGGREGATION);
            results.add(new ValueSpecification(underlyingResult.getValueName(),
                    underlyingResult.getTargetSpecification(), properties.get()));
        } else {
            // Got a finite property set; republish with both aggregation modes
            properties.withoutAny(ValuePropertyNames.AGGREGATION).with(ValuePropertyNames.AGGREGATION,
                    getAggregationStyleFull());
            results.add(new ValueSpecification(underlyingResult.getValueName(),
                    underlyingResult.getTargetSpecification(), properties.get()));
            properties.withoutAny(ValuePropertyNames.AGGREGATION).with(ValuePropertyNames.AGGREGATION,
                    getAggregationStyleMissing());
            results.add(new ValueSpecification(underlyingResult.getValueName(),
                    underlyingResult.getTargetSpecification(), properties.get()));
        }
    }
    s_logger.debug("Returning results {}", results);
    return results;
}

From source file: org.summer.dsl.xbase.typesystem.override.RawResolvedFeatures.java

protected List<JvmFeature> computeAllFeatures(String simpleName) {
    JvmType rawType = getRawType();
    if (!(rawType instanceof JvmDeclaredType)) {
        return Collections.emptyList();
    }
    List<JvmFeature> result = Lists.newArrayList();
    Multimap<String, AbstractResolvedOperation> processed = HashMultimap.create();
    Set<String> processedFields = Sets.newHashSetWithExpectedSize(5);
    computeAllFeatures((JvmDeclaredType) rawType, simpleName, processed, processedFields, result);
    return Collections.unmodifiableList(result);
}

From source file: com.github.jcustenborder.kafka.connect.utils.data.Parser.java

public Object parseJsonNode(Schema schema, JsonNode input) {
    checkSchemaAndInput(schema, input);

    if (null == input || input.isNull()) {
        return null;
    }

    log.trace("parseJsonNode() - schema.type() = {}", schema.type());

    Object result;

    if (Schema.Type.STRUCT == schema.type()) {
        Struct struct = new Struct(schema);
        Preconditions.checkState(input.isObject(),
                "struct schemas require a ObjectNode to be supplied for input.");
        log.trace("parseJsonNode() - Processing as struct.");
        final Set<String> processedFields = Sets.newHashSetWithExpectedSize(schema.fields().size());
        for (Field field : schema.fields()) {
            log.trace("parseJsonNode() - Processing field '{}:{}'", schema.name(), field.name());
            JsonNode fieldInput = input.findValue(field.name());
            try {
                Object convertedValue = parseJsonNode(field.schema(), fieldInput);
                struct.put(field, convertedValue);
            } catch (Exception ex) {
                throw new DataException(
                        String.format("Exception thrown while processing %s:%s", schema.name(), field.name()),
                        ex);
            }
            processedFields.add(field.name());
        }

        if (log.isTraceEnabled()) {
            final Set<String> jsonFieldNames = Sets.newLinkedHashSet(ImmutableList.copyOf(input.fieldNames()));
            Sets.SetView<String> difference = Sets.difference(jsonFieldNames, processedFields);
            if (!difference.isEmpty()) {
                log.trace("parseJsonNode() - Unprocessed fields for {}:\n{}", schema.name(),
                        Joiner.on('\n').join(difference));
            }
        }

        result = struct;
    } else if (Schema.Type.ARRAY == schema.type()) {
        Preconditions.checkState(input.isArray(),
                "array schemas require a ArrayNode to be supplied for input.");
        log.trace("parseJsonNode() - Processing as array.");
        List<Object> array = new ArrayList<>();
        Iterator<JsonNode> arrayIterator = input.iterator();
        int index = 0;
        while (arrayIterator.hasNext()) {
            log.trace("parseJsonNode() - Processing index {}", index);
            JsonNode arrayInput = arrayIterator.next();
            try {
                Object arrayResult = parseJsonNode(schema.valueSchema(), arrayInput);
                array.add(arrayResult);
            } catch (Exception ex) {
                throw new DataException(String.format("Exception thrown while processing index %s", index), ex);
            }
            index++;
        }
        result = array;
    } else if (Schema.Type.MAP == schema.type()) {
        Preconditions.checkState(input.isObject(),
                "map schemas require a ObjectNode to be supplied for input.");
        log.trace("parseJsonNode() - Processing as map.");
        Map<Object, Object> map = new LinkedHashMap<>();
        Iterator<String> fieldNameIterator = input.fieldNames();

        while (fieldNameIterator.hasNext()) {
            final String fieldName = fieldNameIterator.next();
            final JsonNode fieldInput = input.findValue(fieldName);
            log.trace("parseJsonNode() - Processing key. Key='{}'", fieldName);
            final Object mapKey;
            try {
                mapKey = parseString(schema.keySchema(), fieldName);
            } catch (Exception ex) {
                throw new DataException(
                        String.format("Exception thrown while parsing key. Key='%s'", fieldName), ex);
            }
            log.trace("parseJsonNode() - Processing value. Key='{}'", fieldName);
            final Object mapValue;
            try {
                mapValue = parseJsonNode(schema.valueSchema(), fieldInput); // map values are parsed with the value schema
            } catch (Exception ex) {
                throw new DataException(
                        String.format("Exception thrown while parsing value. Key='%s'", fieldName), ex);
            }
            map.put(mapKey, mapValue);
        }

        result = map;
    } else {
        TypeParser parser = findParser(schema);

        try {
            result = parser.parseJsonNode(input, schema);
        } catch (Exception ex) {
            String message = String.format("Could not parse '%s' to %s", input,
                    parser.expectedClass().getSimpleName());
            throw new DataException(message, ex);
        }
    }

    return result;
}

From source file: com.github.nethad.clustermeister.provisioning.ec2.AmazonConfigurationLoader.java

/**
 * Returns a set of configured {@link Credentials}.
 *
 * @return the configured credentials.
 */
public Set<Credentials> getConfiguredCredentials() {
    List<Object> keypairList = configuration.getList(KEYPAIRS, Collections.EMPTY_LIST);
    Map<String, Map<String, String>> keypairSpecifications = ConfigurationUtil.reduceObjectList(keypairList,
            "Keypairs must be specified as a list of objects.");
    Set<Credentials> credentials = Sets.newHashSetWithExpectedSize(keypairSpecifications.size());
    for (Map.Entry<String, Map<String, String>> entry : keypairSpecifications.entrySet()) {
        String keyPairName = entry.getKey();
        Map<String, String> keyPairValues = entry.getValue();
        String user = ConfigurationUtil.getCheckedConfigValue(USER, keyPairValues, "keypair", keyPairName);
        String privateKeyPath = ConfigurationUtil.getCheckedConfigValue(PRIVATE_KEY, keyPairValues, "keypair",
                keyPairName);
        File privateKey = ConfigurationUtil.getCheckedFile(privateKeyPath, PRIVATE_KEY, "keypair", keyPairName);

        String publicKeyPath = keyPairValues.get(PUBLIC_KEY);
        if (publicKeyPath != null) {
            File publicKey = ConfigurationUtil.getCheckedFile(publicKeyPath, PUBLIC_KEY, "keypair",
                    keyPairName);
            credentials.add(new KeyPairCredentials(keyPairName, user, privateKey, publicKey));
        } else {
            credentials.add(new AmazonConfiguredKeyPairCredentials(keyPairName, user, privateKey));
        }
    }

    return credentials;
}

From source file: org.apache.phoenix.execute.UnionPlan.java

@Override
public Set<TableRef> getSourceRefs() {
    // TODO is this correct?
    Set<TableRef> sources = Sets.newHashSetWithExpectedSize(plans.size());
    for (QueryPlan plan : plans) {
        sources.addAll(plan.getSourceRefs());
    }
    return sources;
}