Example usage for com.google.common.collect SetMultimap get

List of usage examples for com.google.common.collect SetMultimap get

Introduction

In this page you can find the example usage for com.google.common.collect SetMultimap get.

Prototype

@Override
Set<V> get(@Nullable K key);

Source Link

Document

Because a SetMultimap has unique values for a given key, this method returns a Set, instead of the java.util.Collection specified in the Multimap interface.

Usage

From source file:com.publictransitanalytics.scoregenerator.environment.StoredGrid.java

/**
 * Collects the grid points lying on the given latitude into a multimap keyed and
 * sorted by longitude, with points on the same longitude ordered by identifier.
 *
 * @param latitude         the latitude whose points are requested
 * @param latitudePointMap mapping from latitude to the grid points on that latitude
 * @return a longitude-sorted multimap of the matching points; empty if the latitude
 *         has no points
 */
private static TreeMultimap<GeoLongitude, GridPoint> getLongitudeSortedPointsForLatitude(
        final GeoLatitude latitude, final SetMultimap<GeoLatitude, GridPoint> latitudePointMap) {
    // Natural order on longitudes; ties between points broken by identifier.
    final TreeMultimap<GeoLongitude, GridPoint> map = TreeMultimap.create(Comparator.naturalOrder(),
            (p1, p2) -> p1.getIdentifier().compareTo(p2.getIdentifier()));

    // Multimap.get never returns null — it yields an empty collection for an absent
    // key — so the previous containsKey guard was redundant.
    for (final GridPoint point : latitudePointMap.get(latitude)) {
        map.put(point.getLocation().getLongitude(), point);
    }
    return map;
}

From source file:com.publictransitanalytics.scoregenerator.environment.StoredGrid.java

/**
 * Collects the grid points lying on the given longitude into a multimap keyed and
 * sorted by latitude, with points on the same latitude ordered by identifier.
 *
 * @param longitude         the longitude whose points are requested
 * @param longitudePointMap mapping from longitude to the grid points on that longitude
 * @return a latitude-sorted multimap of the matching points; empty if the longitude
 *         has no points
 */
private static TreeMultimap<GeoLatitude, GridPoint> getLatitudeSortedPointsForLongitude(
        final GeoLongitude longitude, final SetMultimap<GeoLongitude, GridPoint> longitudePointMap) {

    // Natural order on latitudes; ties between points broken by identifier.
    final TreeMultimap<GeoLatitude, GridPoint> map = TreeMultimap.create(Comparator.naturalOrder(),
            (p1, p2) -> p1.getIdentifier().compareTo(p2.getIdentifier()));

    // Multimap.get never returns null — it yields an empty collection for an absent
    // key — so the previous containsKey guard was redundant.
    for (final GridPoint point : longitudePointMap.get(longitude)) {
        map.put(point.getLocation().getLatitude(), point);
    }
    return map;
}

From source file:com.facebook.presto.orc.checkpoint.Checkpoints.java

/**
 * Computes the stream checkpoints for one row group of an ORC file: for every
 * requested column, consumes that column's row-group index positions and produces
 * a checkpoint per stream, dispatched on the column's ORC type.
 *
 * @param columns         the column ordinals to produce checkpoints for
 * @param columnTypes     ORC type of each column, indexed by column ordinal
 * @param compressionKind compression used by the file, passed through to the helpers
 * @param rowGroupId      which row group's index positions to read
 * @param columnEncodings encoding of each column, indexed by column ordinal
 * @param streams         all streams in the stripe, used to learn which stream kinds
 *                        exist per column
 * @param columnIndexes   row-group index entries per column
 * @return map from stream id to its checkpoint for this row group
 * @throws InvalidCheckpointException if a column's position list contains more
 *         offsets than its streams consumed (see the DWRF note below)
 * @throws IllegalArgumentException for column types this reader does not support
 *         (DECIMAL, CHAR, VARCHAR, UNION)
 */
public static Map<StreamId, StreamCheckpoint> getStreamCheckpoints(Set<Integer> columns,
        List<OrcType> columnTypes, CompressionKind compressionKind, int rowGroupId,
        List<ColumnEncoding> columnEncodings, Map<StreamId, Stream> streams,
        Map<Integer, List<RowGroupIndex>> columnIndexes) throws InvalidCheckpointException {
    // Index which stream kinds are actually present for each column, so each
    // per-type helper knows which checkpoints it must emit.
    ImmutableSetMultimap.Builder<Integer, StreamKind> streamKindsBuilder = ImmutableSetMultimap.builder();
    for (Stream stream : streams.values()) {
        streamKindsBuilder.put(stream.getColumn(), stream.getStreamKind());
    }
    SetMultimap<Integer, StreamKind> streamKinds = streamKindsBuilder.build();

    ImmutableMap.Builder<StreamId, StreamCheckpoint> checkpoints = ImmutableMap.builder();
    for (int column : columns) {
        // Raw offset positions recorded in the index for this column and row group.
        List<Integer> positionsList = columnIndexes.get(column).get(rowGroupId).getPositions();

        ColumnEncodingKind columnEncoding = columnEncodings.get(column).getColumnEncodingKind();
        OrcTypeKind columnType = columnTypes.get(column).getOrcTypeKind();
        Set<StreamKind> availableStreams = streamKinds.get(column);

        // Cursor over positionsList; each helper consumes the offsets it needs in order.
        ColumnPositionsList columnPositionsList = new ColumnPositionsList(column, columnType, positionsList);
        // NOTE(review): no default branch — an OrcTypeKind not listed here would be
        // skipped silently; confirm the enum is fully covered by these cases.
        switch (columnType) {
        case BOOLEAN:
            checkpoints.putAll(getBooleanColumnCheckpoints(column, compressionKind, availableStreams,
                    columnPositionsList));
            break;
        case BYTE:
            checkpoints.putAll(
                    getByteColumnCheckpoints(column, compressionKind, availableStreams, columnPositionsList));
            break;
        case SHORT:
        case INT:
        case LONG:
        case DATE:
            checkpoints.putAll(getLongColumnCheckpoints(column, columnEncoding, compressionKind,
                    availableStreams, columnPositionsList));
            break;
        case FLOAT:
            checkpoints.putAll(
                    getFloatColumnCheckpoints(column, compressionKind, availableStreams, columnPositionsList));
            break;
        case DOUBLE:
            checkpoints.putAll(
                    getDoubleColumnCheckpoints(column, compressionKind, availableStreams, columnPositionsList));
            break;
        case TIMESTAMP:
            checkpoints.putAll(getTimestampColumnCheckpoints(column, columnEncoding, compressionKind,
                    availableStreams, columnPositionsList));
            break;
        case BINARY:
        case STRING:
            checkpoints.putAll(getSliceColumnCheckpoints(column, columnEncoding, compressionKind,
                    availableStreams, columnPositionsList));
            break;
        case LIST:
        case MAP:
            checkpoints.putAll(getListOrMapColumnCheckpoints(column, columnEncoding, compressionKind,
                    availableStreams, columnPositionsList));
            break;
        case STRUCT:
            checkpoints.putAll(
                    getStructColumnCheckpoints(column, compressionKind, availableStreams, columnPositionsList));
            break;
        case DECIMAL:
        case CHAR:
        case VARCHAR:
        case UNION:
            throw new IllegalArgumentException("Unsupported column type " + columnType);
        }

        // The DWRF code is not meticulous in the handling of checkpoints.  It appears that for the first row group
        // it will write checkpoints for all streams, but in other cases it will write only the streams that exist.
        // We detect this case by checking that all offsets in the initial position list are zero, and if so, we
        // clear the extra offsets
        if (columnPositionsList.hasNextPosition() && !Iterables.all(positionsList, equalTo(0))) {
            throw new InvalidCheckpointException(format(
                    "Column %s, of type %s, contains %s offset positions, but only %s positions were consumed",
                    column, columnType, positionsList.size(), columnPositionsList.getIndex()));
        }
    }
    return checkpoints.build();
}

From source file:com.splicemachine.orc.checkpoint.Checkpoints.java

/**
 * Computes the stream checkpoints for one row group of an ORC file: for every
 * requested column, consumes that column's row-group index positions and produces
 * a checkpoint per stream, dispatched on the column's ORC type.
 *
 * Unlike the Presto variant of this method, this version treats VARCHAR and CHAR
 * as slice columns and supports DECIMAL via its own helper; only UNION is rejected.
 *
 * @param columns         the column ordinals to produce checkpoints for
 * @param columnTypes     ORC type of each column, indexed by column ordinal
 * @param compressionKind compression used by the file, passed through to the helpers
 * @param rowGroupId      which row group's index positions to read
 * @param columnEncodings encoding of each column, indexed by column ordinal
 * @param streams         all streams in the stripe, used to learn which stream kinds
 *                        exist per column
 * @param columnIndexes   row-group index entries per column
 * @return map from stream id to its checkpoint for this row group
 * @throws InvalidCheckpointException if a column's position list contains more
 *         offsets than its streams consumed (see the DWRF note below)
 * @throws IllegalArgumentException for the unsupported UNION column type
 */
public static Map<StreamId, StreamCheckpoint> getStreamCheckpoints(Set<Integer> columns,
        List<OrcType> columnTypes, CompressionKind compressionKind, int rowGroupId,
        List<ColumnEncoding> columnEncodings, Map<StreamId, Stream> streams,
        Map<Integer, List<RowGroupIndex>> columnIndexes) throws InvalidCheckpointException {
    // Index which stream kinds are actually present for each column, so each
    // per-type helper knows which checkpoints it must emit.
    ImmutableSetMultimap.Builder<Integer, StreamKind> streamKindsBuilder = ImmutableSetMultimap.builder();
    for (Stream stream : streams.values()) {
        streamKindsBuilder.put(stream.getColumn(), stream.getStreamKind());
    }
    SetMultimap<Integer, StreamKind> streamKinds = streamKindsBuilder.build();

    ImmutableMap.Builder<StreamId, StreamCheckpoint> checkpoints = ImmutableMap.builder();
    for (int column : columns) {
        // Raw offset positions recorded in the index for this column and row group.
        List<Integer> positionsList = columnIndexes.get(column).get(rowGroupId).getPositions();

        ColumnEncodingKind columnEncoding = columnEncodings.get(column).getColumnEncodingKind();
        OrcTypeKind columnType = columnTypes.get(column).getOrcTypeKind();
        Set<StreamKind> availableStreams = streamKinds.get(column);

        // Cursor over positionsList; each helper consumes the offsets it needs in order.
        ColumnPositionsList columnPositionsList = new ColumnPositionsList(column, columnType, positionsList);
        // NOTE(review): no default branch — an OrcTypeKind not listed here would be
        // skipped silently; confirm the enum is fully covered by these cases.
        switch (columnType) {
        case BOOLEAN:
            checkpoints.putAll(getBooleanColumnCheckpoints(column, compressionKind, availableStreams,
                    columnPositionsList));
            break;
        case BYTE:
            checkpoints.putAll(
                    getByteColumnCheckpoints(column, compressionKind, availableStreams, columnPositionsList));
            break;
        case SHORT:
        case INT:
        case LONG:
        case DATE:
            checkpoints.putAll(getLongColumnCheckpoints(column, columnEncoding, compressionKind,
                    availableStreams, columnPositionsList));
            break;
        case FLOAT:
            checkpoints.putAll(
                    getFloatColumnCheckpoints(column, compressionKind, availableStreams, columnPositionsList));
            break;
        case DOUBLE:
            checkpoints.putAll(
                    getDoubleColumnCheckpoints(column, compressionKind, availableStreams, columnPositionsList));
            break;
        case TIMESTAMP:
            checkpoints.putAll(getTimestampColumnCheckpoints(column, columnEncoding, compressionKind,
                    availableStreams, columnPositionsList));
            break;
        case BINARY:
        case STRING:
        case VARCHAR:
        case CHAR:
            checkpoints.putAll(getSliceColumnCheckpoints(column, columnEncoding, compressionKind,
                    availableStreams, columnPositionsList));
            break;
        case LIST:
        case MAP:
            checkpoints.putAll(getListOrMapColumnCheckpoints(column, columnEncoding, compressionKind,
                    availableStreams, columnPositionsList));
            break;
        case STRUCT:
            checkpoints.putAll(
                    getStructColumnCheckpoints(column, compressionKind, availableStreams, columnPositionsList));
            break;
        case DECIMAL:
            checkpoints.putAll(getDecimalColumnCheckpoints(column, columnEncoding, compressionKind,
                    availableStreams, columnPositionsList));
            break;
        case UNION:
            throw new IllegalArgumentException("Unsupported column type " + columnType);
        }

        // The DWRF code is not meticulous in the handling of checkpoints.  It appears that for the first row group
        // it will write checkpoints for all streams, but in other cases it will write only the streams that exist.
        // We detect this case by checking that all offsets in the initial position list are zero, and if so, we
        // clear the extra offsets
        if (columnPositionsList.hasNextPosition() && !Iterables.all(positionsList, equalTo(0))) {
            throw new InvalidCheckpointException(format(
                    "Column %s, of type %s, contains %s offset positions, but only %s positions were consumed",
                    column, columnType, positionsList.size(), columnPositionsList.getIndex()));
        }
    }
    return checkpoints.build();
}

From source file:org.apache.kylin.source.hive.HiveSourceTableLoader.java

/**
 * Validates and loads the given Hive tables into Kylin's metadata store.
 *
 * @param hiveTables fully qualified table names (database.table)
 * @param config     the Kylin configuration used to reach metadata and cube managers
 * @return the set of table names that were loaded
 * @throws IOException if extraction from Hive fails
 */
public static Set<String> loadHiveTables(String[] hiveTables, KylinConfig config) throws IOException {

    // Group requested tables by database, preserving first-seen order.
    final SetMultimap<String, String> tablesByDatabase = LinkedHashMultimap.create();
    for (final String fullTableName : hiveTables) {
        final String[] dbAndTable = HadoopUtil.parseHiveTableName(fullTableName);
        tablesByDatabase.put(dbAndTable[0], dbAndTable[1]);
    }

    final IHiveClient hiveClient = HiveClientFactory.getHiveClient();
    final SchemaChecker checker = new SchemaChecker(hiveClient, MetadataManager.getInstance(config),
            CubeManager.getInstance(config));
    // Reject the whole request before loading anything if any table fails validation.
    for (final Map.Entry<String, String> pair : tablesByDatabase.entries()) {
        checker.allowReload(pair.getKey(), pair.getValue()).raiseExceptionWhenInvalid();
    }

    // extract from hive
    final Set<String> loadedTables = Sets.newHashSet();
    for (final String database : tablesByDatabase.keySet()) {
        loadedTables.addAll(extractHiveTables(database, tablesByDatabase.get(database), hiveClient));
    }

    return loadedTables;
}

From source file:org.mskcc.cbio.portal.mut_diagram.Pileup.java

/**
 * Builds the pileups for the given mutations, one per protein-change location.
 * The returned list may be empty but is never null.
 *
 * @param mutations list of mutations, must not be null
 * @return an immutable list of pileups, one for each distinct location that could
 *         be parsed from a mutation's protein change
 */
public static List<Pileup> pileup(final List<ExtendedMutation> mutations) {
    checkNotNull(mutations, "mutations must not be null");

    final List<Pileup> pileups = Lists.newArrayList();
    final SetMultimap<Integer, String> labels = HashMultimap.create();
    final ListMultimap<Integer, ExtendedMutation> mutationsByLocation = ArrayListMultimap.create();
    for (final ExtendedMutation mutation : mutations) {
        final String proteinChange = mutation.getProteinChange();
        if (proteinChange == null) {
            continue;
        }
        try {
            // Strip letters, dots, and '*' to leave the numeric location, e.g. "V600E" -> 600.
            final int location = Integer.valueOf(proteinChange.replaceAll("[A-Za-z\\.*]+", ""));
            labels.put(location, proteinChange);
            mutationsByLocation.put(location, mutation);
        } catch (NumberFormatException e) {
            logger.warn("ignoring extended mutation " + proteinChange + ", no location information");
        }
    }

    for (Map.Entry<Integer, Collection<ExtendedMutation>> entry : mutationsByLocation.asMap().entrySet()) {
        final int location = entry.getKey();
        // Combine all distinct labels at this location into one sorted, slash-joined label.
        final List<String> sortedLocationLabels = new ArrayList<String>(labels.get(location));
        Collections.sort(sortedLocationLabels);
        final String label = Joiner.on("/").join(sortedLocationLabels);

        int missenseCount = 0;
        final Set<String> caseIds = Sets.newHashSet();
        for (final ExtendedMutation mutation : entry.getValue()) {
            caseIds.add(mutation.getSampleId() + ":" + mutation.getProteinChange());

            final String mutationType = mutation.getMutationType();
            if (mutationType != null && mutationType.toLowerCase().contains("missense")) {
                missenseCount++;
            }
        }

        pileups.add(new Pileup(label, location, caseIds.size(), missenseCount));
    }

    return ImmutableList.copyOf(pileups);
}

From source file:es.usc.citius.composit.core.composition.search.CompositSearch.java

/**
 * Maps every candidate provider operation to the set of inputs it can supply.
 * Providers below the immediately previous layer are replaced by per-input dummy
 * "no-input" operations so they cannot be selected.
 *
 * @param inputs       the inputs that must be provided
 * @param currentLayer the layer being resolved; only providers at layer
 *                     {@code currentLayer - 1} or later are eligible directly
 * @param network      the match network queried for candidate providers
 * @return multimap from provider operation to the inputs it covers
 * @throws RuntimeException if some input has no candidate provider at all
 */
private static <E, T extends Comparable<T>> SetMultimap<Operation<E>, E> calculateProviders(Set<E> inputs,
        int currentLayer, ServiceMatchNetwork<E, T> network) {
    //Provider -> inputs
    SetMultimap<Operation<E>, E> map = HashMultimap.create();
    // Hoisted out of the loop: this set was previously rebuilt from
    // network.getOperations() once per input, which is loop-invariant work.
    Set<Operation<E>> validOperations = Sets.newHashSet(network.getOperations());
    for (E input : inputs) {
        Set<Operation<E>> providers = network.getSourceOperationsThatMatch(input).keySet();

        providers.retainAll(validOperations);
        if (providers.isEmpty()) {
            throw new RuntimeException("No candidates for input " + input + ". Unsolvable request");
        }
        for (Operation<E> provider : providers) {
            // If provider is not in the immediately previous layer, it cannot be selected.
            // Replace it with a noInputOp
            if (network.levelOf(provider) < currentLayer - 1) {
                DummyOperation noInputOp = new DummyOperation("no-" + input.toString(),
                        new SignatureIO<E>(Collections.singleton(input), Collections.singleton(input)));
                // Multimap.put is the idiomatic form of get(key).add(value).
                map.put(noInputOp, input);
            } else {
                map.put(provider, input);
            }
        }
    }
    return map;
}

From source file:org.gwt4e.mvp4g.processor.steps.DebugProcessingStep.java

/**
 * Validates elements annotated with {@code @Debug}: the annotation is only
 * accepted on an element that also carries {@code @EventBus}. Processing stops
 * as soon as one correctly placed {@code @Debug} is found; otherwise an error
 * is reported for each misplaced annotation.
 *
 * @param elementsByAnnotation elements grouped by their annotation class
 * @return always an empty set; this step produces no deferred elements
 */
public Set<Element> process(SetMultimap<Class<? extends Annotation>, Element> elementsByAnnotation) {
    for (Element debugElement : elementsByAnnotation.get(Debug.class)) {
        // A correctly placed @Debug ends processing immediately.
        if (Utils.hasAnnotation(debugElement, EventBus.class)) {
            return ImmutableSet.of();
        }
        final String errorMessage = String.format("%s is not applied to class which is annotated with %s",
                ((TypeElement) debugElement).getQualifiedName(), EventBus.class.getName());
        messager.printMessage(Diagnostic.Kind.ERROR, errorMessage);
    }
    return ImmutableSet.of();
}

From source file:com.farseer.compiler.FsComponentProcessingStep.java

/**
 * Validates each element annotated with {@code @FsComponent}, reports any
 * validation messages, and generates code only for elements whose validation
 * report is clean.
 *
 * @param elementsByAnnotation elements grouped by their annotation class
 * @return always an empty set; this step produces no deferred elements
 */
@Override
public Set<Element> process(SetMultimap<Class<? extends Annotation>, Element> elementsByAnnotation) {
    for (Element annotatedElement : elementsByAnnotation.get(FsComponent.class)) {
        ValidationReport<Element> report = fsComponentValidator.validate(annotatedElement);
        report.printMessagesTo(messager);

        // Only generate output for elements that validated without errors.
        if (report.isClean()) {
            fsComponentGenerator.write(annotatedElement);
        }
    }
    return ImmutableSet.of();
}

From source file:org.gwt4e.mvp4g.processor.steps.ModuleProcessingStep.java

/**
 * Processes each element annotated with {@code @Module}: builds its module
 * context, registers it under the element's simple name, and writes the
 * generated module source. Stops early if a context cannot be created (the
 * factory has already reported the error in that case).
 *
 * @param elementsByAnnotation elements grouped by their annotation class
 * @return always an empty set; this step produces no deferred elements
 */
public Set<Element> process(SetMultimap<Class<? extends Annotation>, Element> elementsByAnnotation) {
    for (Element moduleElement : elementsByAnnotation.get(Module.class)) {
        final ModuleContext moduleContext = ModuleContext.create(messager, types, elements, moduleElement);
        if (moduleContext == null) {
            return ImmutableSet.of(); // error message already emitted
        }
        processorContext.put(moduleElement.getSimpleName().toString(), moduleContext);
        try {
            ModuleWriter.builder().messenger(super.messager).context(super.processorContext)
                    .elements(super.elements).filer(super.filer).types(super.types)
                    .moduleContext(moduleContext).build().write();
        } catch (IOException ioe) {
            createErrorMessage("Error generating source file for type "
                    + moduleContext.getInterfaceType().getQualifiedName(), ioe);
        }
    }
    return ImmutableSet.of();
}