Example usage for com.google.common.collect Multimap asMap

Introduction

On this page you can find example usages of com.google.common.collect.Multimap.asMap().

Prototype

Map<K, Collection<V>> asMap();

Document

Returns a view of this multimap as a Map from each distinct key to the nonempty collection of that key's associated values.
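
Because asMap() returns a view rather than a copy, later changes to the multimap show through the map, and removals made through the map write through to the multimap; the view does not support put or putAll. A minimal self-contained sketch of this behaviour (class name and data are illustrative):

import java.util.Collection;
import java.util.Map;

import com.google.common.collect.LinkedListMultimap;
import com.google.common.collect.Multimap;

public class AsMapViewDemo {
    public static void main(String[] args) {
        Multimap<String, Integer> multimap = LinkedListMultimap.create();
        multimap.put("a", 1);
        multimap.put("a", 2);
        multimap.put("b", 3);

        Map<String, Collection<Integer>> map = multimap.asMap();
        System.out.println(map);              // {a=[1, 2], b=[3]}

        multimap.put("b", 4);                 // change the multimap ...
        System.out.println(map.get("b"));     // [3, 4] ... the view reflects it

        map.remove("a");                      // removal through the view ...
        System.out.println(multimap.containsKey("a")); // ... writes through: false
    }
}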

Usage

From source file:de.hzi.helmholtz.Compare.PathwayComparisonWithModules.java

public Multimap<Double, String> SubsetsMatching(final PathwayWithModules firstPathway,
        final PathwayWithModules secondPathway, BiMap<Integer, Integer> newSourceGeneIdToPositionMap,
        BiMap<Integer, Integer> newTargetGeneIdToPositionMap, int Yes) {
    Multimap<Double, String> resultPerfect = TreeMultimap.create(Ordering.natural().reverse(),
            Ordering.natural());
    PathwayWithModules firstPathwayCopy = new PathwayWithModules(firstPathway);// Copy of the Query pathway
    PathwayWithModules secondPathwayCopy = new PathwayWithModules(secondPathway);// Copy of the Target pathway
    // PathwayWithModules secondPathwayCopy1 = new PathwayWithModules(secondPathway);
    int currentQueryGene = 0;
    Iterator<ModuleGene> sourceGeneIt = firstPathway.moduleGeneIterator();
    List<Integer> QueryToRemove = new ArrayList<Integer>();
    List<Integer> TargetToRemove = new ArrayList<Integer>();
    while (sourceGeneIt.hasNext()) {
        currentQueryGene++;
        ModuleGene queryGene = sourceGeneIt.next();

        int currentTargetGene = 0;
        Multiset<String> qfunction = LinkedHashMultiset.create();
        List<String> qfunctionList = new ArrayList<String>();
        List<String> qactivity = new ArrayList<String>();
        List<Set<String>> qsubstrate = new ArrayList<Set<String>>();
        for (Module m : queryGene.getModule()) {
            for (Domain d : m.getDomains()) {
                qfunction.add(d.getDomainFunctionString());
                qfunctionList.add(d.getDomainFunctionString());
                qactivity.add(d.getStatus().toString());
                qsubstrate.add(d.getSubstrates());
            }
        }
        Iterator<ModuleGene> targetGeneIt = secondPathway.moduleGeneIterator();

        while (targetGeneIt.hasNext()) {
            currentTargetGene++;
            ModuleGene targetGene = targetGeneIt.next();
            Multiset<String> tfunction = LinkedHashMultiset.create();
            List<String> tfunctionList = new ArrayList<String>();
            List<String> tactivity = new ArrayList<String>();
            List<Set<String>> tsubstrate = new ArrayList<Set<String>>();
            for (Module m : targetGene.getModule()) {
                for (Domain d : m.getDomains()) {
                    tfunctionList.add(d.getDomainFunctionString());
                    tfunction.add(d.getDomainFunctionString());
                    tactivity.add(d.getStatus().toString());
                    tsubstrate.add(d.getSubstrates());
                }
            }
            Multiset<String> DomainsCovered = Multisets.intersection(qfunction, tfunction);
            if (DomainsCovered.size() == qfunction.size() && DomainsCovered.size() == tfunction.size()) {
                Multimap<Double, Multimap<String, Integer>> activityscores = myFunction.calculate(qactivity,
                        tactivity);
                Multimap<String, Integer> Functionscores = ArrayListMultimap.create();

                int TranspositionDomains = LevenshteinDistance.computeLevenshteinDistance(qfunctionList,
                        tfunctionList);
                if (TranspositionDomains > 0) {
                    TranspositionDomains = 1;
                }

                Functionscores.put(qfunction.size() + "-0", TranspositionDomains);
                Multimap<Double, Multimap<String, Integer>> substratescore = myFunction
                        .calculate(getSubstrateList(qsubstrate), getSubstrateList(tsubstrate));
                Object activityScore = activityscores.asMap().keySet().toArray()[0];
                Object substrateScore = substratescore.asMap().keySet().toArray()[0];
                double finalScore = Math
                        .round((((2.9 * 1.0) + (0.05 * Double.parseDouble(activityScore.toString().trim()))
                                + (0.05 * Double.parseDouble(substrateScore.toString().trim()))) / 3) * 100.0)
                        / 100.0;
                String ConvertedGeneIDs = "";
                if (Yes == 0) {
                    ConvertedGeneIDs = reconstructWithGeneId(Integer.toString(currentQueryGene),
                            newSourceGeneIdToPositionMap) + "->"
                            + reconstructWithGeneId(Integer.toString(currentTargetGene),
                                    newTargetGeneIdToPositionMap);
                } else {
                    ConvertedGeneIDs = reconstructWithGeneId(Integer.toString(currentTargetGene),
                            newTargetGeneIdToPositionMap) + "->"
                            + reconstructWithGeneId(Integer.toString(currentQueryGene),
                                    newSourceGeneIdToPositionMap);
                }
                resultPerfect.put(finalScore, ConvertedGeneIDs);
                ScoreFunctionMatchMisMatch.put(ConvertedGeneIDs, Functionscores);
                ScoreStatusMatchMisMatch.putAll(ConvertedGeneIDs, activityscores.values());
                ScoreSubstrateMatchMisMatch.putAll(ConvertedGeneIDs, substratescore.values());

                TargetToRemove.add(currentTargetGene);
                QueryToRemove.add(currentQueryGene);
            }
        }

    }
    for (int i : TargetToRemove) {
        secondPathwayCopy.removeGene(i);
    }
    for (int i : QueryToRemove) {
        firstPathwayCopy.removeGene(i);
    }
    if (firstPathwayCopy.size() > 0 && secondPathwayCopy.size() > 0) {
        // Re-construct the bimaps
        newSourceGeneIdToPositionMap = HashBiMap.create();
        int temp = 0;
        for (ModuleGene e : firstPathwayCopy.getModulegenes()) {
            temp = temp + 1;
            newSourceGeneIdToPositionMap.put(e.getGeneId(), temp);
        }
        newTargetGeneIdToPositionMap = HashBiMap.create();
        temp = 0;
        for (ModuleGene e : secondPathwayCopy.getModulegenes()) {
            temp = temp + 1;
            newTargetGeneIdToPositionMap.put(e.getGeneId(), temp);
        }
        resultPerfect.putAll(SubsetIdentification(firstPathwayCopy, secondPathwayCopy,
                newSourceGeneIdToPositionMap, newTargetGeneIdToPositionMap, Yes));
    }
    System.out.println(resultPerfect);
    return resultPerfect;
}
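
The method above accumulates its results in a TreeMultimap created with a reversed key ordering, so that when resultPerfect is printed or iterated via asMap(), the highest-scoring gene mappings come first. A minimal sketch of that ordering, with hypothetical scores and the usual Guava imports assumed:

Multimap<Double, String> scores = TreeMultimap.create(Ordering.natural().reverse(), Ordering.natural());
scores.put(0.75, "g1->g2");
scores.put(0.92, "g3->g1");
scores.put(0.92, "g2->g4");
// Keys iterate descending, values for each key ascending:
System.out.println(scores.asMap()); // {0.92=[g2->g4, g3->g1], 0.75=[g1->g2]}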

From source file:de.unidue.inf.is.ezdl.dlservices.log.store.DBUserLogStore.java

@Override
public void storeUserLog(UserLogNotify logNotify) {
    Connection connection = null;
    try {
        connection = connectionProvider.connection();

        int sequenceNumber = logNotify.getSequenceNumber();
        String sessionId = logNotify.getSessionId();
        long timestamp = logNotify.getBackendTimestamp();
        long localTimestamp = logNotify.getClientTimestamp();
        String eventName = logNotify.getEventName();
        Multimap<String, String> parameters = logNotify.getParameters();

        PreparedStatement preparedStatement = connection.prepareStatement("insert into " + TABLE_NAME_EVENT
                + " (sessionid, sequencenumber, eventtimestamp, eventtimestampms, eventlocaltimestamp, eventlocaltimestampms, name) values (?, ?, ?, ?, ?, ?, ?)",
                Statement.RETURN_GENERATED_KEYS);
        preparedStatement.setString(1, sessionId);
        preparedStatement.setInt(2, sequenceNumber);
        preparedStatement.setTimestamp(3, new Timestamp(timestamp));
        preparedStatement.setLong(4, timestamp);
        preparedStatement.setTimestamp(5, new Timestamp(localTimestamp));
        preparedStatement.setLong(6, localTimestamp);
        preparedStatement.setString(7, eventName);
        preparedStatement.execute();

        String eventid;
        ResultSet rs2 = preparedStatement.getGeneratedKeys();
        if (rs2.next()) {
            eventid = rs2.getString(1);
        } else {
            throw new SQLException();
        }

        PreparedStatement preparedStatement2 = connection.prepareStatement("insert into " + TABLE_NAME_PARAMS
                + " (eventid, paramname, paramvalue, sequence) values (?, ?, ?, ?)");
        for (Entry<String, Collection<String>> entry : parameters.asMap().entrySet()) {
            List<String> values = (List<String>) entry.getValue();
            for (int i = 0; i < values.size(); i++) {
                String value = values.get(i);
                preparedStatement2.setString(1, eventid);
                preparedStatement2.setString(2, entry.getKey());
                preparedStatement2.setString(3, value);
                preparedStatement2.setInt(4, i);

                preparedStatement2.addBatch();
            }

        }
        preparedStatement2.executeBatch();
        connection.commit();
    } catch (SQLException e) {
        logger.error(e.getMessage(), e);
        rollback(connection);
    } finally {
        ClosingUtils.close(connection);
    }
}
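
Note the cast of entry.getValue() to List<String> above: Multimap.asMap() types its values as Collection<String>, and the cast only succeeds because getParameters() is evidently backed by a ListMultimap (such as ArrayListMultimap), whose asMap() values really are Lists. A minimal sketch with hypothetical parameter data, assuming the usual Guava and java.util imports; the Multimaps.asMap(ListMultimap) overload avoids the cast entirely:

ListMultimap<String, String> parameters = ArrayListMultimap.create();
parameters.put("q", "first");
parameters.put("q", "second");

// Multimaps.asMap(ListMultimap) returns Map<String, List<String>>, so no cast is needed:
for (Map.Entry<String, List<String>> entry : Multimaps.asMap(parameters).entrySet()) {
    List<String> values = entry.getValue();
    for (int i = 0; i < values.size(); i++) {
        System.out.println(entry.getKey() + "[" + i + "] = " + values.get(i));
    }
}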

From source file:models.documentStore.AspectOpinionMinedCorpusModel.java

private Map<Object, Collection<AspectOpinionMinedDocumentModel>> groupDocuments(
        Iterable<AspectOpinionMinedDocumentModel> documents, DocumentGrouping grouping) {
    Multimap<Object, AspectOpinionMinedDocumentModel> indexedDocuments = null;

    switch (grouping) {
    case orientation:
        indexedDocuments = Multimaps.index(documents, new Function<AspectOpinionMinedDocumentModel, Object>() {
            @Override
            @Nullable
            public Object apply(@Nullable AspectOpinionMinedDocumentModel document) {
                double polarity = ObjectUtils.defaultIfNull(document.polarity, 0.0);
                if (polarity < 0) {
                    return NEGATIVE;
                } else if (polarity == 0) {
                    return NEUTRAL;
                }
                return POSITIVE;
            }
        });

        break;
    case aspect:
        indexedDocuments = ArrayListMultimap.create();
        for (AspectOpinionMinedDocumentModel document : documents) {
            if (document.aspectPolarities == null || document.aspectPolarities.size() == 0) {
                indexedDocuments.put(null, document);
            } else {
                for (AspectLexiconModel aspect : document.aspectPolarities.keySet()) {
                    indexedDocuments.put(aspect, document);
                }
            }
        }

        break;
    }

    return indexedDocuments.asMap();
}
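
The orientation branch above builds its multimap with Multimaps.index, which groups an Iterable into an ImmutableListMultimap by a key function; asMap() then exposes the grouping as a Map. A minimal sketch with hypothetical data, assuming the usual Guava imports:

ImmutableListMultimap<Integer, String> byLength =
        Multimaps.index(Arrays.asList("one", "two", "three"), String::length);
// Keys appear in order of first occurrence:
System.out.println(byLength.asMap()); // {3=[one, two], 5=[three]}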

From source file:org.obiba.mica.dataset.search.rest.harmonization.PublishedDataschemaDatasetVariableResource.java

private Mica.DatasetVariableContingenciesDto getDatasetVariableContingenciesDto(DatasetVariable var,
        DatasetVariable crossVar) {
    HarmonizationDataset dataset = getDataset(HarmonizationDataset.class, datasetId);
    Mica.DatasetVariableContingenciesDto.Builder crossDto = Mica.DatasetVariableContingenciesDto.newBuilder();

    List<Future<Search.QueryResultDto>> results = Lists.newArrayList();
    dataset.getBaseStudyTables()
            .forEach(table -> results.add(helper.getContingencyTable(dataset, var, crossVar, table)));

    Multimap<String, Mica.DatasetVariableAggregationDto> termAggregations = LinkedListMultimap.create();

    for (int i = 0; i < dataset.getBaseStudyTables().size(); i++) {
        BaseStudyTable opalTable = dataset.getBaseStudyTables().get(i);
        Future<Search.QueryResultDto> futureResult = results.get(i);

        try {
            Mica.DatasetVariableContingencyDto studyTableCrossDto = dtos
                    .asContingencyDto(opalTable, var, crossVar, futureResult.get()).build();
            termAggregations.put(null, studyTableCrossDto.getAll());
            studyTableCrossDto.getAggregationsList()
                    .forEach(termAggDto -> termAggregations.put(termAggDto.getTerm(), termAggDto));
            crossDto.addContingencies(studyTableCrossDto);
        } catch (Exception e) {
            log.warn("Unable to retrieve contingency table: " + e.getMessage(), e);
            crossDto.addContingencies(dtos.asContingencyDto(opalTable, var, crossVar, null));
        }
    }

    // Merge aggregations by term (=variable category) + all terms aggregation.
    Mica.DatasetVariableContingencyDto.Builder allContingencies = Mica.DatasetVariableContingencyDto
            .newBuilder();
    termAggregations.asMap().entrySet().forEach(entry -> {
        Mica.DatasetVariableAggregationDto merged = CombinedStatistics.mergeAggregations(entry.getValue());
        if (entry.getKey() == null) {
            allContingencies.setAll(merged);
        } else {
            allContingencies.addAggregations(merged);
        }
    });

    crossDto.setAll(allContingencies);

    return crossDto.build();
}

From source file:org.glowroot.ui.JvmJsonService.java

@GET(path = "/backend/jvm/thread-dump", permission = "agent:jvm:threadDump")
String getThreadDump(@BindAgentId String agentId) throws Exception {
    checkNotNull(liveJvmService);
    ThreadDump threadDump;
    try {
        threadDump = liveJvmService.getThreadDump(agentId);
    } catch (AgentNotConnectedException e) {
        logger.debug(e.getMessage(), e);
        return "{\"agentNotConnected\":true}";
    }
    List<ThreadDump.Thread> allThreads = Lists.newArrayList();
    StringBuilder sb = new StringBuilder();
    JsonGenerator jg = mapper.getFactory().createGenerator(CharStreams.asWriter(sb));
    try {
        jg.writeStartObject();
        jg.writeArrayFieldStart("transactions");
        List<Transaction> transactions = new TransactionOrderingByTotalTimeDesc()
                .sortedCopy(threadDump.getTransactionList());
        for (ThreadDump.Transaction transaction : transactions) {
            writeTransactionThread(transaction, jg);
            allThreads.addAll(transaction.getThreadList());
        }
        jg.writeEndArray();

        List<ThreadDump.Thread> unmatchedThreads = new ThreadOrderingByStackTraceSizeDesc()
                .sortedCopy(threadDump.getUnmatchedThreadList());
        Multimap<ThreadDump.Thread, ThreadDump.Thread> unmatchedThreadsGroupedByStackTrace = LinkedListMultimap
                .create();
        List<ThreadDump.Thread> glowrootThreads = Lists.newArrayList();
        for (ThreadDump.Thread thread : unmatchedThreads) {
            if (thread.getName().startsWith("Glowroot-")) {
                glowrootThreads.add(thread);
            } else {
                unmatchedThreadsGroupedByStackTrace.put(getGrouping(thread), thread);
            }
            allThreads.add(thread);
        }
        jg.writeArrayFieldStart("unmatchedThreadsByStackTrace");
        for (Map.Entry<ThreadDump.Thread, Collection<ThreadDump.Thread>> entry : unmatchedThreadsGroupedByStackTrace
                .asMap().entrySet()) {
            jg.writeStartArray();
            for (ThreadDump.Thread thread : entry.getValue()) {
                writeThread(thread, jg);
            }
            jg.writeEndArray();
        }
        jg.writeStartArray();
        for (ThreadDump.Thread thread : glowrootThreads) {
            writeThread(thread, jg);
        }
        jg.writeEndArray();
        jg.writeEndArray();

        jg.writeFieldName("threadDumpingThread");
        writeThread(threadDump.getThreadDumpingThread(), jg);
        allThreads.add(threadDump.getThreadDumpingThread());
        writeDeadlockedCycles(allThreads, jg);
        jg.writeEndObject();
    } finally {
        jg.close();
    }
    return sb.toString();
}

From source file:com.ikanow.aleph2.management_db.services.DataBucketCrudService.java

/** Worker function for storeObject
 * @param new_object - the bucket to create
 * @param old_bucket - the version of the bucket being overwritten, if an update
 * @param validation_info - validation info to be presented to the user
 * @param replace_if_present - update mode: whether to replace an existing bucket
 * @return - the user return value
 * @throws Exception
 */
public ManagementFuture<Supplier<Object>> storeValidatedObject(final DataBucketBean new_object,
        final Optional<DataBucketBean> old_bucket, final Collection<BasicMessageBean> validation_info,
        boolean replace_if_present) throws Exception {
    final MethodNamingHelper<DataBucketStatusBean> helper = BeanTemplateUtils.from(DataBucketStatusBean.class);

    // Error if a bucket status doesn't exist - must create a bucket status before creating the bucket
    // (note the above validation ensures the bucket has an _id)
    // (obviously need to block here until we're sure..)

    final CompletableFuture<Optional<DataBucketStatusBean>> corresponding_status = _underlying_data_bucket_status_db
            .get().getObjectById(new_object._id(),
                    Arrays.asList(helper.field(DataBucketStatusBean::_id),
                            helper.field(DataBucketStatusBean::node_affinity),
                            helper.field(DataBucketStatusBean::confirmed_master_enrichment_type),
                            helper.field(DataBucketStatusBean::confirmed_suspended),
                            helper.field(DataBucketStatusBean::confirmed_multi_node_enabled),
                            helper.field(DataBucketStatusBean::suspended),
                            helper.field(DataBucketStatusBean::quarantined_until)),
                    true);

    if (!corresponding_status.get().isPresent()) {
        return FutureUtils.createManagementFuture(
                FutureUtils.returnError(new RuntimeException(
                        ErrorUtils.get(ManagementDbErrorUtils.BUCKET_CANNOT_BE_CREATED_WITHOUT_BUCKET_STATUS,
                                new_object.full_name()))),
                CompletableFuture.completedFuture(Collections.emptyList()));
    }

    // Some fields like multi-node, you can only change if the bucket status is set to suspended, to make
    // the control logic easy
    old_bucket.ifPresent(ob -> {
        validation_info.addAll(checkForInactiveOnlyUpdates(new_object, ob, corresponding_status.join().get()));
        // (corresponding_status present and completed because of above check) 
    });
    if (!validation_info.isEmpty() && validation_info.stream().anyMatch(m -> !m.success())) {
        return FutureUtils.createManagementFuture(
                FutureUtils.returnError(new RuntimeException("Bucket not valid, see management channels")),
                CompletableFuture.completedFuture(validation_info));
    }
    // Made it this far, try to set the next_poll_time in the status object
    if (null != new_object.poll_frequency()) {
        //get the next poll time
        final Date next_poll_time = TimeUtils
                .getForwardSchedule(new_object.poll_frequency(), Optional.of(new Date())).success();
        //update the status
        _underlying_data_bucket_status_db.get().updateObjectById(new_object._id(), CrudUtils
                .update(DataBucketStatusBean.class).set(DataBucketStatusBean::next_poll_date, next_poll_time));
    }

    // Create the directories

    try {
        createFilePaths(new_object, _storage_service.get());
        //if logging is enabled, create the logging filepath also
        if (Optionals.of(() -> new_object.management_schema().logging_schema().enabled()).orElse(false)) {
            createFilePaths(BucketUtils.convertDataBucketBeanToLogging(new_object), _storage_service.get());
        }
    } catch (Exception e) { // Error creating directory, haven't created object yet so just back out now

        return FutureUtils.createManagementFuture(FutureUtils.returnError(e));
    }
    // OK if the bucket is validated we can store it (and create a status object)

    final CompletableFuture<Supplier<Object>> ret_val = _underlying_data_bucket_db.get().storeObject(new_object,
            replace_if_present);
    final boolean is_suspended = DataBucketStatusCrudService
            .bucketIsSuspended(corresponding_status.get().get());

    // Register the bucket update with any applicable data services      

    final Multimap<IDataServiceProvider, String> data_service_info = DataServiceUtils
            .selectDataServices(new_object.data_schema(), _service_context);
    final Optional<Multimap<IDataServiceProvider, String>> old_data_service_info = old_bucket
            .map(old -> DataServiceUtils.selectDataServices(old.data_schema(), _service_context));

    final List<CompletableFuture<Collection<BasicMessageBean>>> ds_update_results = data_service_info.asMap()
            .entrySet().stream()
            .map(kv -> kv.getKey().onPublishOrUpdate(new_object, old_bucket, is_suspended,
                    kv.getValue().stream().collect(Collectors.toSet()),
                    old_data_service_info.map(old_map -> old_map.get(kv.getKey()))
                            .map(old_servs -> old_servs.stream().collect(Collectors.toSet()))
                            .orElse(Collections.emptySet())))
            .collect(Collectors.toList());

    // Process old data services that are no longer in use
    final List<CompletableFuture<Collection<BasicMessageBean>>> old_ds_update_results = old_data_service_info
            .map(old_ds_info -> {
                return old_ds_info.asMap().entrySet().stream()
                        .filter(kv -> !data_service_info.containsKey(kv.getKey()))
                        .<CompletableFuture<Collection<BasicMessageBean>>>map(
                                kv -> kv.getKey().onPublishOrUpdate(new_object, old_bucket, is_suspended,
                                        Collections.emptySet(),
                                        kv.getValue().stream().collect(Collectors.toSet())))
                        .collect(Collectors.toList());
            }).orElse(Collections.emptyList());

    //(combine)
    @SuppressWarnings("unchecked")
    CompletableFuture<Collection<BasicMessageBean>> all_service_registration_complete[] = Stream
            .concat(ds_update_results.stream(), old_ds_update_results.stream())
            .toArray(CompletableFuture[]::new);

    // Get the status and then decide whether to broadcast out the new/update message

    final CompletableFuture<Collection<BasicMessageBean>> mgmt_results = CompletableFuture
            .allOf(all_service_registration_complete)
            .thenCombine(
                    old_bucket.isPresent()
                            ? requestUpdatedBucket(new_object, old_bucket.get(),
                                    corresponding_status.get().get(), _actor_context,
                                    _underlying_data_bucket_status_db.get(), _bucket_action_retry_store.get())
                            : requestNewBucket(new_object, is_suspended,
                                    _underlying_data_bucket_status_db.get(), _actor_context),
                    (__, harvest_results) -> {
                        return (Collection<BasicMessageBean>) Stream
                                .concat(Arrays.stream(all_service_registration_complete)
                                        .flatMap(s -> s.join().stream()), harvest_results.stream())
                                .collect(Collectors.toList());
                    })
            .exceptionally(t -> Arrays.asList(ErrorUtils.buildErrorMessage(this.getClass().getSimpleName(),
                    "storeValidatedObject", ErrorUtils.get("{0}", t))));

    // Update the status depending on the results of the management channels

    return FutureUtils.createManagementFuture(ret_val,
            MgmtCrudUtils
                    .handleUpdatingStatus(new_object, corresponding_status.get().get(), is_suspended,
                            mgmt_results, _underlying_data_bucket_status_db.get())
                    .thenApply(msgs -> Stream.concat(msgs.stream(), validation_info.stream())
                            .collect(Collectors.toList())));
}

From source file:co.cask.cdap.data2.metadata.dataset.MetadataDataset.java

/**
 * Returns metadata for a given set of entities
 *
 * @param targetIds entities for which metadata is required
 * @return map of entityId to set of metadata for that entity
 */
public Set<Metadata> getMetadata(Set<? extends Id.NamespacedId> targetIds) {
    if (targetIds.isEmpty()) {
        return Collections.emptySet();
    }

    List<ImmutablePair<byte[], byte[]>> fuzzyKeys = new ArrayList<>();
    for (Id.NamespacedId targetId : targetIds) {
        fuzzyKeys.add(getFuzzyKeyFor(targetId));
    }

    // Sort fuzzy keys
    Collections.sort(fuzzyKeys, FUZZY_KEY_COMPARATOR);

    // Scan using fuzzy filter. Scan returns one row per property.
    // Group the rows on entityId
    Multimap<Id.NamespacedId, MetadataEntry> metadataMap = HashMultimap.create();
    byte[] start = fuzzyKeys.get(0).getFirst();
    byte[] end = Bytes.stopKeyForPrefix(fuzzyKeys.get(fuzzyKeys.size() - 1).getFirst());
    Scanner scan = indexedTable.scan(new Scan(start, end, new FuzzyRowFilter(fuzzyKeys)));
    try {
        Row next;
        while ((next = scan.next()) != null) {
            MetadataEntry metadataEntry = convertRow(next);
            if (metadataEntry != null) {
                metadataMap.put(metadataEntry.getTargetId(), metadataEntry);
            }
        }
    } finally {
        scan.close();
    }

    // Create metadata objects for each entity from grouped rows
    Set<Metadata> metadataSet = new HashSet<>();
    for (Map.Entry<Id.NamespacedId, Collection<MetadataEntry>> entry : metadataMap.asMap().entrySet()) {
        Map<String, String> properties = new HashMap<>();
        Set<String> tags = Collections.emptySet();
        for (MetadataEntry metadataEntry : entry.getValue()) {
            if (TAGS_KEY.equals(metadataEntry.getKey())) {
                tags = splitTags(metadataEntry.getValue());
            } else {
                properties.put(metadataEntry.getKey(), metadataEntry.getValue());
            }
        }
        metadataSet.add(new Metadata(entry.getKey(), properties, tags));
    }
    return metadataSet;
}

From source file:org.eclipse.sirius.ui.business.internal.viewpoint.ViewpointSelectionDialog.java

/**
 * Get missing dependencies from the current selection
 *
 * @return missing dependencies
 */
private Map<String, Collection<String>> getMissingDependencies() {
    Set<Viewpoint> selected = Maps.filterValues(selection, Predicates.equalTo(Boolean.TRUE)).keySet();

    Multimap<String, String> result = HashMultimap.create();
    for (Viewpoint viewpoint : selected) {
        for (RepresentationExtensionDescription extension : new ViewpointQuery(viewpoint)
                .getAllRepresentationExtensionDescriptions()) {
            String extended = extension.getViewpointURI();
            final Pattern pattern = Pattern.compile(extended);

            // Is there at least one available selected viewpoint URI ?
            if (!Iterables.any(selected, new Predicate<Viewpoint>() {
                @Override
                public boolean apply(Viewpoint vp) {
                    Option<URI> uri = new ViewpointQuery(vp).getViewpointURI();
                    if (uri.some()) {
                        Matcher matcher = pattern.matcher(uri.get().toString());
                        return matcher.matches();
                    } else {
                        return false;
                    }
                }
            })) {
                result.put(viewpoint.getName(), extended.trim().replaceFirst("^viewpoint:/[^/]+/", "")); //$NON-NLS-1$ //$NON-NLS-2$
            }
        }
    }
    return result.asMap();
}

From source file:com.foundationdb.server.store.OnlineHelper.java

public void checkTableConstraints(final Session session, QueryContext context) {
    LOG.debug("Checking constraints");
    txnService.beginTransaction(session);
    try {
        Collection<ChangeSet> changeSets = schemaManager.getOnlineChangeSets(session);
        assert (commonChangeLevel(changeSets) == ChangeLevel.METADATA_CONSTRAINT) : changeSets;
        // Gather all tables that need to be scanned, keyed by group
        AkibanInformationSchema oldAIS = schemaManager.getAis(session);
        Schema oldSchema = SchemaCache.globalSchema(oldAIS);
        Multimap<Group, RowType> groupMap = HashMultimap.create();
        for (ChangeSet cs : changeSets) {
            RowType rowType = oldSchema.tableRowType(cs.getTableId());
            groupMap.put(rowType.table().getGroup(), rowType);
        }
        // Scan all affected groups
        StoreAdapter adapter = store.createAdapter(session);
        final TransformCache transformCache = getTransformCache(session, null);
        for (Entry<Group, Collection<RowType>> entry : groupMap.asMap().entrySet()) {
            Operator plan = API.filter_Default(API.groupScan_Default(entry.getKey()), entry.getValue());
            runPlan(session, contextIfNull(context, adapter), schemaManager, txnService, plan,
                    new RowHandler() {
                        @Override
                        public void handleRow(Row row) {
                            simpleCheckConstraints(session, transformCache, row);
                        }
                    });
        }
    } finally {
        txnService.rollbackTransactionIfOpen(session);
    }
}

From source file:grakn.core.graql.reasoner.atom.binary.RelationAtom.java

/**
 * infer {@link RelationType}s that this {@link RelationAtom} can potentially have
 * NB: {@link EntityType}s and {@link Role}s are treated separately as they behave differently:
 * {@link EntityType}s only play the explicitly defined {@link Role}s (not the relevant part of the hierarchy of the specified {@link Role}) and the {@link Role} inherited from parent
 * NB: Not using Memoized as memoized methods can't have parameters
 * @return list of {@link RelationType}s this atom can have ordered by the number of compatible {@link Role}s
 */
private ImmutableList<Type> inferPossibleTypes(ConceptMap sub) {
    if (possibleTypes == null) {
        if (getSchemaConcept() != null)
            return ImmutableList.of(getSchemaConcept().asType());

        Multimap<RelationType, Role> compatibleConfigurations = inferPossibleRelationConfigurations(sub);
        Set<Variable> untypedRoleplayers = Sets.difference(getRolePlayers(),
                getParentQuery().getVarTypeMap().keySet());
        Set<RelationAtom> untypedNeighbours = getNeighbours(RelationAtom.class)
                .filter(at -> !Sets.intersection(at.getVarNames(), untypedRoleplayers).isEmpty())
                .collect(toSet());

        ImmutableList.Builder<Type> builder = ImmutableList.builder();
        //prioritise relations with higher chance of yielding answers
        compatibleConfigurations.asMap().entrySet().stream()
                //prioritise relations with more allowed roles
                .sorted(Comparator.comparing(e -> -e.getValue().size()))
                //prioritise relations with number of roles equal to arity
                .sorted(Comparator.comparing(e -> e.getKey().roles().count() != getRelationPlayers().size()))
                //prioritise relations having more instances
                .sorted(Comparator.comparing(e -> -tx().getShardCount(e.getKey())))
                //prioritise relations with highest number of possible types played by untyped role players
                .map(e -> {
                    if (untypedNeighbours.isEmpty())
                        return new Pair<>(e.getKey(), 0L);

                    Iterator<RelationAtom> neighbourIterator = untypedNeighbours.iterator();
                    Set<Type> typesFromNeighbour = neighbourIterator.next().inferPossibleEntityTypePlayers(sub);
                    while (neighbourIterator.hasNext()) {
                        typesFromNeighbour = Sets.intersection(typesFromNeighbour,
                                neighbourIterator.next().inferPossibleEntityTypePlayers(sub));
                    }

                    Set<Role> rs = e.getKey().roles().collect(toSet());
                    rs.removeAll(e.getValue());
                    return new Pair<>(e.getKey(),
                            rs.stream().flatMap(Role::players).filter(typesFromNeighbour::contains).count());
                }).sorted(Comparator.comparing(p -> -p.getValue()))
                //prioritise non-implicit relations
                .sorted(Comparator.comparing(e -> e.getKey().isImplicit())).map(Pair::getKey)
                //retain super types only
                .filter(t -> Sets.intersection(ConceptUtils.nonMetaSups(t), compatibleConfigurations.keySet())
                        .isEmpty())
                .forEach(builder::add);

        //TODO need to add THING and meta relation type as well to make it complete
        this.possibleTypes = builder.build();
    }
    return possibleTypes;
}