Example usage for com.google.common.collect BiMap keySet

List of usage examples for com.google.common.collect BiMap keySet

Introduction

On this page you can find example usages of com.google.common.collect.BiMap#keySet.

Prototype

Set<K> keySet();

Source Link

Document

Returns a Set view of the keys contained in this map.

Usage

From source file:net.derquinse.common.collect.ImmutableIndexedHierarchy.java

/**
 * Precondition checks for an indexed hierarchy's backing structures.
 * @param map Backing map.
 * @param hierarchy Keys hierarchy.
 * @throws NullPointerException if either argument is null.
 * @throws IllegalArgumentException if the bimap keys and hierarchy elements disagree.
 */
static void check(BiMap<?, ?> map, Hierarchy<?> hierarchy) {
    checkNotNull(map, "The backing bimap must be provided.");
    // Bug fix: the original checked `map` twice, so a null hierarchy slipped
    // through and NPE'd below instead of failing with the intended message.
    checkNotNull(hierarchy, "The backing hierarchy must be provided.");
    checkArgument(map.keySet().equals(hierarchy.elementSet()), "Inconsistent bimap and hierarchy");
}

From source file:com.example.app.support.address.AddressParser.java

/**
 * This method takes a map containing fill values that were temporarily
 * inserted into the address, mapped to the strings that they originally
 * replaced. It then iterates through the split-up street fields and
 * replaces instances of the fill values with their originals.
 *
 * @param results parse results; its non-null values are updated in place.
 * @param codeMaps map of the form fill-value -&gt; original string.
 *
 * @return the (mutated) parse results.
 */
private static Map<AddressComponent, String> replaceOriginalStringsInSplitUpFields(
        Map<AddressComponent, String> results, BiMap<String, String> codeMaps) {
    for (Map.Entry<String, String> code : codeMaps.entrySet()) {
        String fillValue = code.getKey();
        String original = code.getValue();
        for (Map.Entry<AddressComponent, String> field : results.entrySet()) {
            String value = field.getValue();
            // Only update fields that are not null.
            if (value != null) {
                // Bug fix: replaceAll() interprets the fill value as a regex, which
                // corrupts output (or throws) when it contains metacharacters such
                // as '.' or '('. The documented intent is literal substitution, so
                // use replace() instead.
                field.setValue(value.replace(fillValue, original));
            }
        }
    }
    return results;
}

From source file:com.linkedin.pinot.controller.api.restlet.resources.ServerTableSizeReader.java

/**
 * Fetches per-segment size details for {@code table} from every server endpoint,
 * issuing the HTTP GETs concurrently and collecting the parsed responses.
 *
 * @param serverEndPoints bimap of server instance name to "host:port" endpoint.
 * @param table table name to query.
 * @param timeoutMsec overall timeout for the multi-get.
 * @return map of server instance name to that server's segment size list; servers
 *         that errored or timed out are simply absent from the result.
 */
public Map<String, List<SegmentSizeInfo>> getSizeDetailsFromServers(BiMap<String, String> serverEndPoints,
        String table, int timeoutMsec) {

    List<String> serverUrls = new ArrayList<>(serverEndPoints.size());
    BiMap<String, String> endpointsToServers = serverEndPoints.inverse();
    for (String endpoint : endpointsToServers.keySet()) {
        String tableSizeUri = "http://" + endpoint + "/table/" + table + "/size";
        serverUrls.add(tableSizeUri);
    }

    MultiGetRequest mget = new MultiGetRequest(executor, connectionManager);
    LOGGER.info("Reading segment sizes from servers for table: {}, timeoutMsec: {}", table, timeoutMsec);
    CompletionService<GetMethod> completionService = mget.execute(serverUrls, timeoutMsec);

    Map<String, List<SegmentSizeInfo>> serverSegmentSizes = new HashMap<>(serverEndPoints.size());

    for (int i = 0; i < serverUrls.size(); i++) {
        GetMethod getMethod = null;
        try {
            getMethod = completionService.take().get();
            URI uri = getMethod.getURI();
            // Responses complete in arbitrary order; map the endpoint back to its
            // server instance name via the inverse bimap.
            String instance = endpointsToServers.get(uri.getHost() + ":" + uri.getPort());
            if (getMethod.getStatusCode() >= 300) {
                LOGGER.error("Server: {} returned error: {}", instance, getMethod.getStatusCode());
                continue;
            }
            TableSizeInfo tableSizeInfo = new ObjectMapper().readValue(getMethod.getResponseBodyAsString(),
                    TableSizeInfo.class);
            serverSegmentSizes.put(instance, tableSizeInfo.segments);
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers can observe the interruption.
            Thread.currentThread().interrupt();
            LOGGER.warn("Interrupted exception while reading segment size for table: {}", table, e);
        } catch (ExecutionException e) {
            if (Throwables.getRootCause(e) instanceof SocketTimeoutException) {
                LOGGER.warn("Server request to read table size was timed out for table: {}", table, e);
            } else if (Throwables.getRootCause(e) instanceof ConnectTimeoutException) {
                LOGGER.warn("Server request to read table size timed out waiting for connection. table: {}",
                        table, e);
            } else if (Throwables.getRootCause(e) instanceof ConnectionPoolTimeoutException) {
                LOGGER.warn(
                        "Server request to read table size timed out on getting a connection from pool, table: {}",
                        table, e);
            } else {
                LOGGER.warn("Execution exception while reading segment sizes for table: {}", table, e);
            }
        } catch (Exception e) {
            // Bug fix: the exception was previously dropped from the log call.
            LOGGER.warn("Error while reading segment sizes for table: {}", table, e);
        } finally {
            // Bug fix: release the pooled HTTP connection. Without this, every
            // request leaked a connection from the pool and eventually starved it.
            if (getMethod != null) {
                getMethod.releaseConnection();
            }
        }
    }
    LOGGER.info("Finished reading segment sizes for table: {}", table);
    return serverSegmentSizes;
}

From source file:com.linkedin.pinot.controller.api.resources.ServerTableSizeReader.java

/**
 * Fetches per-segment size details for {@code table} from every server endpoint,
 * issuing the HTTP GETs concurrently and collecting the parsed responses.
 *
 * @param serverEndPoints bimap of server instance name to "host:port" endpoint.
 * @param table table name to query.
 * @param timeoutMsec overall timeout for the multi-get.
 * @return map of server instance name to that server's segment size list; servers
 *         that errored or timed out are simply absent from the result.
 */
public Map<String, List<SegmentSizeInfo>> getSizeDetailsFromServers(BiMap<String, String> serverEndPoints,
        String table, int timeoutMsec) {

    List<String> serverUrls = new ArrayList<>(serverEndPoints.size());
    BiMap<String, String> endpointsToServers = serverEndPoints.inverse();
    for (String endpoint : endpointsToServers.keySet()) {
        String tableSizeUri = "http://" + endpoint + "/table/" + table + "/size";
        serverUrls.add(tableSizeUri);
    }

    MultiGetRequest mget = new MultiGetRequest(executor, connectionManager);
    LOGGER.info("Reading segment sizes from servers for table: {}, timeoutMsec: {}", table, timeoutMsec);
    CompletionService<GetMethod> completionService = mget.execute(serverUrls, timeoutMsec);

    Map<String, List<SegmentSizeInfo>> serverSegmentSizes = new HashMap<>(serverEndPoints.size());

    for (int i = 0; i < serverUrls.size(); i++) {
        GetMethod getMethod = null;
        try {
            getMethod = completionService.take().get();
            URI uri = getMethod.getURI();
            // Responses complete in arbitrary order; map the endpoint back to its
            // server instance name via the inverse bimap.
            String instance = endpointsToServers.get(uri.getHost() + ":" + uri.getPort());
            if (getMethod.getStatusCode() >= 300) {
                LOGGER.error("Server: {} returned error: {}", instance, getMethod.getStatusCode());
                continue;
            }
            TableSizeInfo tableSizeInfo = new ObjectMapper().readValue(getMethod.getResponseBodyAsString(),
                    TableSizeInfo.class);
            serverSegmentSizes.put(instance, tableSizeInfo.segments);
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers can observe the interruption.
            Thread.currentThread().interrupt();
            LOGGER.warn("Interrupted exception while reading segment size for table: {}", table, e);
        } catch (ExecutionException e) {
            if (Throwables.getRootCause(e) instanceof SocketTimeoutException) {
                LOGGER.warn("Server request to read table size was timed out for table: {}", table, e);
            } else if (Throwables.getRootCause(e) instanceof ConnectTimeoutException) {
                LOGGER.warn("Server request to read table size timed out waiting for connection. table: {}",
                        table, e);
            } else if (Throwables.getRootCause(e) instanceof ConnectionPoolTimeoutException) {
                LOGGER.warn(
                        "Server request to read table size timed out on getting a connection from pool, table: {}",
                        table, e);
            } else {
                LOGGER.warn("Execution exception while reading segment sizes for table: {}", table, e);
            }
        } catch (Exception e) {
            // Bug fix: the exception was previously dropped from the log call.
            LOGGER.warn("Error while reading segment sizes for table: {}", table, e);
        } finally {
            // Always release the pooled HTTP connection back to the pool.
            if (getMethod != null) {
                getMethod.releaseConnection();
            }
        }
    }
    LOGGER.info("Finished reading segment sizes for table: {}", table);
    return serverSegmentSizes;
}

From source file:com.opengamma.bbg.loader.BloombergFXForwardScaleResolver.java

/**
 * Looks up the Bloomberg FX forward scale for each identifier bundle.
 *
 * @param identifiers identifier bundles to resolve, not null.
 * @return map of bundle to parsed FWD_SCALE; bundles with no Bloomberg key,
 *         no reference data, or an unparseable scale are omitted.
 */
public Map<ExternalIdBundle, Integer> getBloombergFXForwardScale(
        final Collection<ExternalIdBundle> identifiers) {
    ArgumentChecker.notNull(identifiers, "identifiers");
    final Map<ExternalIdBundle, Integer> result = Maps.newHashMap();
    final BiMap<String, ExternalIdBundle> bundle2Bbgkey = getSubIds(identifiers);

    // Resolve the forward-scale field for all Bloomberg keys in one bulk call.
    final Map<String, FudgeMsg> fwdScaleResult = ReferenceDataProviderUtils.getFields(bundle2Bbgkey.keySet(),
            BBG_FIELD, _referenceDataProvider);

    for (final ExternalIdBundle identifierBundle : identifiers) {
        final String bbgKey = bundle2Bbgkey.inverse().get(identifierBundle);
        if (bbgKey == null) {
            continue; // No Bloomberg key resolved for this bundle.
        }
        final FudgeMsg fudgeMsg = fwdScaleResult.get(bbgKey);
        if (fudgeMsg == null) {
            continue; // No reference data returned for this key.
        }
        final String bbgFwdScale = fudgeMsg.getString(BloombergConstants.BBG_FIELD_FWD_SCALE);
        try {
            result.put(identifierBundle, Integer.parseInt(bbgFwdScale));
        } catch (NumberFormatException e) {
            s_logger.warn("Could not parse FWD_SCALE with value {}", bbgFwdScale);
        }
    }
    return result;
}

From source file:com.github.ferstl.maven.pomenforcers.PedanticPluginManagementOrderEnforcer.java

/**
 * Verifies that the project's plugin management section is ordered according to
 * the configured plugin ordering, reporting a diff of actual vs. required order
 * when it is not.
 */
@Override
protected void doEnforce(ErrorReport report) {
    MavenProject project = EnforcerRuleUtils.getMavenProject(getHelper());

    // Correlate the plugins declared in the POM with the resolved plugin management.
    BiMap<PluginModel, PluginModel> matchedPlugins =
            matchPlugins(getProjectModel().getManagedPlugins(), project.getPluginManagement().getPlugins());
    Set<PluginModel> resolvedPlugins = matchedPlugins.keySet();

    if (this.pluginOrdering.isOrdered(resolvedPlugins)) {
        return;
    }

    Collection<PluginModel> sortedPlugins = this.pluginOrdering.immutableSortedCopy(resolvedPlugins);
    report.addLine("Your plugin management has to be ordered this way:").emptyLine()
            .addDiffUsingToString(resolvedPlugins, sortedPlugins, "Actual Order", "Required Order");
}

From source file:org.apache.usergrid.tools.Metrics.java

/**
 * Collects request-count metrics for every application in the given organization.
 *
 * @param orgId organization whose applications are processed.
 * @throws Exception propagated from metric collection or entity-manager access.
 */
private void applicationsFor(UUID orgId) throws Exception {
    BiMap<UUID, String> applications = managementService.getApplicationsForOrganization(orgId);

    for (UUID uuid : applications.keySet()) {
        // Hoist the name lookup: the original called applications.get(uuid) twice
        // per iteration.
        String appName = applications.get(uuid);
        logger.info("Checking app: {}", appName);

        orgApps.put(orgId, new ApplicationInfo(uuid, appName));

        // Daily request counts over [startDate, endDate] for this application.
        collect(MetricQuery.getInstance(uuid, MetricSort.APP_REQ_COUNT).resolution(CounterResolution.DAY)
                .startDate(startDate).endDate(endDate).execute(emf.getEntityManager(uuid)));
    }
}

From source file:com.github.ferstl.maven.pomenforcers.AbstractPedanticDependencyOrderEnforcer.java

/**
 * Verifies that the project's dependencies are ordered according to the
 * configured artifact ordering, reporting the required order when they are not.
 */
@Override
protected final void doEnforce(ErrorReport report) {
    MavenProject mavenProject = EnforcerRuleUtils.getMavenProject(getHelper());

    // Correlate the resolved Maven dependencies with the ones declared in the POM.
    BiMap<DependencyModel, DependencyModel> matchedDependencies =
            new DependencyMatcher(getHelper()).match(getMavenDependencies(mavenProject), getDeclaredDependencies());
    Set<DependencyModel> resolvedDependencies = matchedDependencies.keySet();

    if (this.artifactOrdering.isOrdered(resolvedDependencies)) {
        return;
    }
    reportError(report, resolvedDependencies,
            this.artifactOrdering.immutableSortedCopy(resolvedDependencies));
}

From source file:org.eclipse.emf.compare.match.update.Updater.java

/**
 * Recursively updates the elements from oldValues which have a matching element using that matching
 * elements as a reference./*w w  w .  j a va 2 s .  c o  m*/
 */
private void updateMatchingElements(EList<EObject> oldValues, BiMap<EObject, EObject> matches) {
    for (EObject obj : matches.keySet()) {
        EObject ref = matches.get(obj);
        recursiveUpdate(obj, ref);
    }
}

From source file:de.sep2011.funckit.model.graphmodel.implementations.ComponentImpl.java

@Override
public Pair<Brick, Map<AccessPoint, AccessPoint>> getUnconnectedCopy() {
    // Builds a copy of this component with no wire connections, plus a mapping
    // from each of this component's access points to its counterpart on the copy.
    ComponentImpl copy = new ComponentImpl(getType());
    Map<AccessPoint, AccessPoint> oldNewMap = new LinkedHashMap<AccessPoint, AccessPoint>();

    // copy.accessPointMap = null; //
    copy.boundingRect = new Rectangle(boundingRect);
    copy.delay = delay;
    // copy.inputs = null; //
    copy.name = name;
    copy.orientation = orientation;
    // copy.outputs = null; //

    // NOTE(review): the inverse views are used to correlate old and new access
    // points through a shared intermediate (presumably the type's access points,
    // since both components were built from getType()) — confirm the key/value
    // direction of accessPointMap against its declaration.
    BiMap<AccessPoint, AccessPoint> oldApMapInv = accessPointMap.inverse();
    BiMap<AccessPoint, AccessPoint> newApMapInv = copy.accessPointMap.inverse();

    // For each shared point, record old access point -> corresponding new one.
    for (AccessPoint ap : oldApMapInv.keySet()) {
        oldNewMap.put(oldApMapInv.get(ap), newApMapInv.get(ap));
    }

    /* Adapt properties of copied inputs and outputs. */
    // Each copied input/output takes its name and position from the original
    // access point it corresponds to.
    for (Input input : copy.getInputs()) {
        AccessPoint other = oldApMapInv.get(copy.accessPointMap.get(input));
        input.setName(other.getName());
        input.setPosition(other.getPosition());
    }
    for (Output output : copy.getOutputs()) {
        AccessPoint other = oldApMapInv.get(copy.accessPointMap.get(output));
        output.setName(other.getName());
        output.setPosition(other.getPosition());
    }

    return new Pair<Brick, Map<AccessPoint, AccessPoint>>(copy, oldNewMap);
}