Example usage for org.apache.commons.collections.map MultiKeyMap get

Introduction

On this page you can find example usages of the get method of org.apache.commons.collections.map.MultiKeyMap, collected from open-source projects.

Prototype

public Object get(Object key1, Object key2) 

Document

Gets the value mapped to the specified multi-key.
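
Before the project examples below, here is a minimal, self-contained sketch of the two-key lookup. The class name, keys, and fare values are made up for illustration; note that MultiKeyMap in Commons Collections 3.x is not generified, so the result of get(key1, key2) must be cast.

import org.apache.commons.collections.map.MultiKeyMap;

public class MultiKeyMapGetDemo {
    public static void main(String[] args) {
        MultiKeyMap fares = new MultiKeyMap();

        // put(key1, key2, value) stores a value under the composite key
        fares.put("JFK", "LHR", Double.valueOf(420.0));
        fares.put("JFK", "CDG", Double.valueOf(380.0));

        // get(key1, key2) returns the mapped value, or null if no mapping exists
        Double fare = (Double) fares.get("JFK", "LHR");
        System.out.println(fare);                             // 420.0
        System.out.println(fares.get("JFK", "NRT"));          // null

        // containsKey(key1, key2) distinguishes a missing key from a stored null
        System.out.println(fares.containsKey("JFK", "CDG"));  // true
    }
}

A pattern that recurs in the examples below is get-or-create: look up a composite key, and when get returns null, build the value and put it back under the same key pair so later iterations can find it.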

Usage

From source file:com.navercorp.pinpoint.collector.dao.hbase.stat.HbaseDataSourceListDao.java

private List<DataSourceListBo> reorderDataSourceListBos(List<DataSourceListBo> dataSourceListBos) {
    // reorder dataSourceBo using id and timeSlot
    MultiKeyMap dataSourceListBoMap = new MultiKeyMap();

    for (DataSourceListBo dataSourceListBo : dataSourceListBos) {
        for (DataSourceBo dataSourceBo : dataSourceListBo.getList()) {
            int id = dataSourceBo.getId();
            long timestamp = dataSourceBo.getTimestamp();
            long timeSlot = AgentStatUtils.getBaseTimestamp(timestamp);

            DataSourceListBo mappedDataSourceListBo = (DataSourceListBo) dataSourceListBoMap.get(id, timeSlot);
            if (mappedDataSourceListBo == null) {
                mappedDataSourceListBo = new DataSourceListBo();
                mappedDataSourceListBo.setAgentId(dataSourceBo.getAgentId());
                mappedDataSourceListBo.setStartTimestamp(dataSourceBo.getStartTimestamp());
                mappedDataSourceListBo.setTimestamp(dataSourceBo.getTimestamp());

                dataSourceListBoMap.put(id, timeSlot, mappedDataSourceListBo);
            }

            // keep the earliest timestamp seen for this (id, timeSlot) bucket
            if (mappedDataSourceListBo.getTimestamp() > dataSourceBo.getTimestamp()) {
                mappedDataSourceListBo.setTimestamp(dataSourceBo.getTimestamp());
            }

            mappedDataSourceListBo.add(dataSourceBo);
        }
    }

    Collection values = dataSourceListBoMap.values();
    return new ArrayList<DataSourceListBo>(values);
}

From source file:com.gs.obevo.db.apps.reveng.DbFileMerger.java

public void generateDiffs(DbPlatform dialect, MutableCollection<DbMergeInfo> dbNameLocationPairs,
        File outputDir) {
    System.out.println("Generating diffs for " + dbNameLocationPairs);
    MultiKeyMap objectMap = new MultiKeyMap();
    for (DbMergeInfo dbNameLocationPair : dbNameLocationPairs) {
        FileObject mainDir = FileRetrievalMode.FILE_SYSTEM
                .resolveSingleFileObject(dbNameLocationPair.getInputDir().getAbsolutePath());
        for (FileObject schemaDir : mainDir.getChildren()) {
            if (schemaDir.getType() != FileType.FOLDER) {
                continue;
            }
            for (ChangeType changeType : dialect.getChangeTypes()) {
                FileObject changeTypeDir = schemaDir.getChild(changeType.getDirectoryName());
                if (changeTypeDir != null && changeTypeDir.isReadable()
                        && changeTypeDir.getType() == FileType.FOLDER) {
                    FileObject[] childFiles = changeTypeDir.getChildren();
                    for (FileObject objectFile : childFiles) {
                        if (objectFile.getType() == FileType.FILE) {
                            FileComparison fileComparison = (FileComparison) objectMap.get(changeType,
                                    objectFile.getName().getBaseName());
                            if (fileComparison == null) {
                                fileComparison = new FileComparison(schemaDir.getName().getBaseName(),
                                        changeType, objectFile.getName().getBaseName());
                                objectMap.put(changeType, objectFile.getName().getBaseName(), fileComparison);
                            }

                            fileComparison.addFilePair(Tuples.pair(dbNameLocationPair.getName(), objectFile));
                            String fileContent = objectFile.getStringContent();
                            String normalizedContent = DAStringUtil
                                    .normalizeWhiteSpaceFromStringOld(fileContent);
                            // modify the content here if needed
                            fileComparison.addContentValues(fileContent);
                            fileComparison.addDistinctValue(normalizedContent);
                            fileComparison.incrementCount();
                        }
                    }
                }
            }
        }
    }

    for (FileComparison fileComparison : (Collection<FileComparison>) objectMap.values()) {
        File fileComparisonFileRoot = new File(new File(outputDir, fileComparison.getSchemaName()),
                fileComparison.getChangeType().getDirectoryName());
        if (fileComparison.getDistinctValues().size() == 1) {
            File outputFile;
            if (fileComparison.getCount() == dbNameLocationPairs.size()) {
                outputFile = new File(fileComparisonFileRoot, fileComparison.getName());
            } else {
                MutableList<String> dbNames = fileComparison.getFilePairs()
                        .collect(Functions.<String>firstOfPair());
                String dbNameString = "only-" + dbNames.sortThis().makeString("-");
                File dbDir = new File(fileComparisonFileRoot, dbNameString);
                outputFile = new File(dbDir, fileComparison.getName());

                File packageInfoFile = new File(dbDir, "package-info.txt");
                FileUtilsCobra.writeStringToFile(packageInfoFile, "//// METADATA includeEnvs=\""
                        + dbNames.sortThis().collect(StringFunctions.append("*")).makeString(",") + "\"");
            }
            FileUtilsCobra.writeStringToFile(outputFile, fileComparison.getContentValues().getFirst());
        } else {
            for (Pair<String, FileObject> dbNameFileObjectPair : fileComparison.getFilePairs()) {
                String dbName = dbNameFileObjectPair.getOne();
                File outputFile = new File(new File(fileComparisonFileRoot, dbName), fileComparison.getName());
                File packageInfoFile = new File(new File(fileComparisonFileRoot, dbName), "package-info.txt");

                String fileContent = dbNameFileObjectPair.getTwo().getStringContent();
                FileUtilsCobra.writeStringToFile(outputFile, fileContent);
                FileUtilsCobra.writeStringToFile(packageInfoFile,
                        "//// METADATA includeEnvs=\"" + StringFunctions.append("*").valueOf(dbName) + "\"");
            }
        }
    }
}

From source file:edu.cmu.tetrad.search.SearchGraphUtils.java

/**
 * Returns the set of nodes reachable from the given set of initial nodes in the given graph according to the
 * criteria in the given legal pairs object.
 * <p/>
 * A variable V is reachable from initialNodes iff for some variable X in initialNodes there is a path U = [X, Y1,
 * ..., V] such that legalPairs.isLegalFirstEdge(X, Y1) and, for each subpath [H1, H2, H3] of U,
 * legalPairs.isLegalPair(H1, H2, H3).
 * <p/>
 * The algorithm used is a variant of Algorithm 1 from Geiger, Verma, & Pearl (1990).
 *
 * @param initialNodes  The nodes that reachability paths start from.
 * @param legalPairs    Specifies initial edges (given initial nodes) and legal edge pairs.
 * @param c             a set of vertices (intuitively, the set of variables to be conditioned on).
 * @param d             a set of vertices (intuitively to be used in tests of legality, for example, the set of
 *                      ancestors of c).
 * @param graph         the graph with respect to which reachability is determined.
 * @param maxPathLength the maximum path length to consider before the search is cut off.
 */
public static Set<Node> getReachableNodes(List<Node> initialNodes, LegalPairs legalPairs, List<Node> c,
        List<Node> d, Graph graph, int maxPathLength) {
    HashSet<Node> reachable = new HashSet<Node>();
    MultiKeyMap visited = new MultiKeyMap();
    List<ReachabilityEdge> nextEdges = new LinkedList<ReachabilityEdge>();

    for (Node x : initialNodes) {
        List<Node> adjX = graph.getAdjacentNodes(x);

        for (Node y : adjX) {
            if (legalPairs.isLegalFirstEdge(x, y)) {
                reachable.add(y);
                nextEdges.add(new ReachabilityEdge(x, y));
                visited.put(x, y, Boolean.TRUE);
            }
        }
    }

    int pathLength = 1;

    while (nextEdges.size() > 0) {
        //            System.out.println("Path length = " + pathLength);
        if (++pathLength > maxPathLength)
            return reachable;

        List<ReachabilityEdge> currEdges = nextEdges;
        nextEdges = new LinkedList<ReachabilityEdge>();

        for (ReachabilityEdge edge : currEdges) {
            Node x = edge.getFrom();
            Node y = edge.getTo();
            List<Node> adjY = graph.getAdjacentNodes(y);

            for (Node z : adjY) {
                if (Boolean.TRUE.equals(visited.get(y, z))) {
                    continue;
                }

                if (legalPairs.isLegalPair(x, y, z, c, d)) {
                    reachable.add(z);

                    nextEdges.add(new ReachabilityEdge(y, z));
                    visited.put(y, z, Boolean.TRUE);
                }
            }
        }
    }

    return reachable;
}

From source file:org.cbio.portal.pipelines.foundation.CnaDataReader.java

/**
 * Generates the CNA data rows (one per gene) from the given case map.
 * @param fmiCaseTypeMap map of case id to CaseType
 * @return the CNA rows, tab-delimited, one per gene
 */
private List<String> generateCnaRowData(Map<String, CaseType> fmiCaseTypeMap) {
    Set<String> geneList = new HashSet<>();
    MultiKeyMap cnaMap = new MultiKeyMap();

    int noCnaCount = 0; // keep track of how many cases don't have copy number data
    for (CaseType ct : fmiCaseTypeMap.values()) {
        List<CopyNumberAlterationType> cnaTypeList = ct.getVariantReport().getCopyNumberAlterations()
                .getCopyNumberAlteration();
        if (cnaTypeList != null) {
            for (CopyNumberAlterationType cnaType : cnaTypeList) {
                cnaMap.put(cnaType.getGene(), ct.getCase(), FoundationUtils.resolveCnaType(cnaType));
                geneList.add(cnaType.getGene());
            }
        } else {
            noCnaCount++;
        }
    }
    if (noCnaCount > 0) {
        LOG.info("Number of cases without CNA data: " + noCnaCount);
    }

    // format row data for CNA file                
    List<String> cnaRowData = new ArrayList<>();
    for (String gene : geneList) {
        List<String> geneCnaData = new ArrayList<>();
        geneCnaData.add(gene);
        for (String caseId : fmiCaseTypeMap.keySet()) {
            if (cnaMap.containsKey(gene, caseId)) {
                geneCnaData.add((String) cnaMap.get(gene, caseId));
            } else {
                geneCnaData.add("0");
            }

        }
        cnaRowData.add(StringUtils.join(geneCnaData, "\t"));
    }

    return cnaRowData;
}

From source file:org.mskcc.cbio.portal.scripts.ImportClinicalData.java

private boolean addDatum(String[] fields, List<ClinicalAttribute> columnAttrs, MultiKeyMap attributeMap)
        throws Exception {
    int sampleIdIndex = findSampleIdColumn(columnAttrs);
    String stableSampleId = (sampleIdIndex >= 0) ? fields[sampleIdIndex] : "";
    stableSampleId = StableIdUtil.getSampleId(stableSampleId);
    int patientIdIndex = findPatientIdColumn(columnAttrs);
    String stablePatientId = (patientIdIndex >= 0) ? fields[patientIdIndex] : "";
    stablePatientId = StableIdUtil.getPatientId(stablePatientId);
    int internalSampleId = -1;
    int internalPatientId = -1;

    //check if sample is not already added:
    Sample sample = DaoSample.getSampleByCancerStudyAndSampleId(cancerStudy.getInternalId(), stableSampleId,
            false);
    if (sample != null) {
        //this should be a WARNING in case of TCGA studies (see https://github.com/cBioPortal/cbioportal/issues/839#issuecomment-203452415)
        //and an ERROR in other studies. I.e. a sample should occur only once in clinical file!
        if (stableSampleId.startsWith("TCGA-")) {
            ProgressMonitor.logWarning("Sample " + stableSampleId
                    + " found to be duplicated in your file. Only data of the first sample will be processed.");
            return false;
        }
        //give error or warning if sample is already in DB and this is NOT expected (i.e. not supplemental data):
        if (!this.isSupplementalData()) {
            throw new RuntimeException(
                    "Error: Sample " + stableSampleId + " found to be duplicated in your file.");
        } else {
            internalSampleId = sample.getInternalId();
        }
    } else {
        Patient patient = DaoPatient.getPatientByCancerStudyAndPatientId(cancerStudy.getInternalId(),
                stablePatientId);
        if (patient != null) {
            //patient exists, get internal id:
            internalPatientId = patient.getInternalId();
        } else {
            //add patient:
            internalPatientId = (patientIdIndex >= 0) ? addPatientToDatabase(fields[patientIdIndex]) : -1;
        }
        // sample is new, so attempt to add to DB
        internalSampleId = (stableSampleId.length() > 0)
                ? addSampleToDatabase(stableSampleId, fields, columnAttrs)
                : -1;

    }

    //validate and count:
    if (internalSampleId != -1) {
        //some minimal validation/fail safe for now: only continue if patientId is same as patient id in 
        //existing sample (can occur in case of this.isSupplementalData or in case of parsing bug in addSampleToDatabase):
        internalPatientId = DaoPatient
                .getPatientByCancerStudyAndPatientId(cancerStudy.getInternalId(), stablePatientId)
                .getInternalId();
        if (internalPatientId != DaoSample.getSampleById(internalSampleId).getInternalPatientId()) {
            throw new RuntimeException("Error: Sample " + stableSampleId
                    + " was previously linked to another patient, and not to " + stablePatientId);
        }
        numSamplesProcessed++;
    }

    // this will happen when clinical file contains sample id, but not patient id
    //TODO - this part, and the dummy patient added in addSampleToDatabase, can be removed as the field PATIENT_ID is now
    //always required (as validated at start of importData() ). Probably kept here for "old" studies, but Ben's tests did not find anything...
    // --> alternative would be to be less strict in validation at importData() and allow for missing PATIENT_ID when type is MIXED... 
    if (internalPatientId == -1 && internalSampleId != -1) {
        sample = DaoSample.getSampleById(internalSampleId);
        internalPatientId = sample.getInternalPatientId();
    }

    for (int lc = 0; lc < fields.length; lc++) {
        //if lc is sampleIdIndex or patientIdIndex, skip as well since these are the relational fields:
        if (lc == sampleIdIndex || lc == patientIdIndex) {
            continue;
        }
        //if the value matches one of the missing values, skip this attribute:
        if (MissingAttributeValues.has(fields[lc])) {
            numEmptyClinicalAttributesSkipped++;
            continue;
        }
        boolean isPatientAttribute = columnAttrs.get(lc).isPatientAttribute();
        if (isPatientAttribute && internalPatientId != -1) {
            // The attributeMap keeps track of which patient/attribute-to-value pairs have been added to the DB. If there are
            // duplicates (which can happen in a MIXED_ATTRIBUTES type clinical file), we need to make sure that the values
            // for the same attribute are consistent. This prevents duplicate entries in the temp file that the MySqlBulkLoader uses.
            if (!attributeMap.containsKey(internalPatientId, columnAttrs.get(lc).getAttrId())) {
                addDatum(internalPatientId, columnAttrs.get(lc).getAttrId(), fields[lc],
                        ClinicalAttribute.PATIENT_ATTRIBUTE);
                attributeMap.put(internalPatientId, columnAttrs.get(lc).getAttrId(), fields[lc]);
            } else if (!relaxed) {
                throw new RuntimeException("Error: Duplicated patient in file");
            } else if (!attributeMap.get(internalPatientId, columnAttrs.get(lc).getAttrId())
                    .equals(fields[lc])) {
                ProgressMonitor.logWarning("Error: Duplicated patient " + stablePatientId
                        + " with different values for patient attribute " + columnAttrs.get(lc).getAttrId()
                        + "\n\tValues: " + attributeMap.get(internalPatientId, columnAttrs.get(lc).getAttrId())
                        + " " + fields[lc]);
            }
        } else if (internalSampleId != -1) {
            if (!attributeMap.containsKey(internalSampleId, columnAttrs.get(lc).getAttrId())) {
                addDatum(internalSampleId, columnAttrs.get(lc).getAttrId(), fields[lc],
                        ClinicalAttribute.SAMPLE_ATTRIBUTE);
                attributeMap.put(internalSampleId, columnAttrs.get(lc).getAttrId(), fields[lc]);
            } else if (!relaxed) {
                throw new RuntimeException("Error: Duplicated sample in file");
            } else if (!attributeMap.get(internalSampleId, columnAttrs.get(lc).getAttrId())
                    .equals(fields[lc])) {
                ProgressMonitor.logWarning("Error: Duplicated sample " + stableSampleId
                        + " with different values for sample attribute " + columnAttrs.get(lc).getAttrId()
                        + "\n\tValues: " + attributeMap.get(internalSampleId, columnAttrs.get(lc).getAttrId())
                        + " " + fields[lc]);
            }
        }
    }
    return true;
}

From source file:org.talend.repository.ui.wizards.exportjob.scriptsmanager.JobJavaScriptsManager.java

protected void addModuleNeededsInMap(MultiKeyMap modulesMap, String processId, String processVersion,
        ModuleNeeded module) {
    if (modulesMap != null && module != null) {
        Set<ModuleNeeded> modulesSet = (Set<ModuleNeeded>) modulesMap.get(processId, processVersion);
        if (modulesSet == null) {
            modulesSet = new LinkedHashSet<ModuleNeeded>(50);
            modulesMap.put(processId, processVersion, modulesSet);
        }
        modulesSet.add(module);
    }
}

From source file:piecework.engine.concrete.ProcessEngineConcreteFacade.java

@Override
public TaskResults findTasks(TaskCriteria... criterias) throws ProcessEngineException {
    String keyword = null;
    TaskResults.Builder builder = null;
    Set<String> allowedProcessDefinitionKeys = new HashSet<String>();
    Set<String> engineProcessInstanceIds = new HashSet<String>();

    if (criterias != null && criterias.length > 0) {
        for (TaskCriteria criteria : criterias) {
            if (StringUtils.isNotEmpty(criteria.getKeyword()))
                keyword = criteria.getKeyword();

            if (criteria.getProcesses() != null && !criteria.getProcesses().isEmpty()) {
                Set<String> engineSet = new HashSet<String>();
                for (Process process : criteria.getProcesses()) {
                    ProcessDeployment deployment = processDeployment(process);
                    allowedProcessDefinitionKeys.add(process.getProcessDefinitionKey());
                    if (deployment.getEngine() == null || engineSet.contains(deployment.getEngine()))
                        continue;
                    engineSet.add(deployment.getEngine());
                    ProcessEngineProxy proxy = registry.retrieve(ProcessEngineProxy.class,
                            deployment.getEngine());

                    TaskResults localResults = proxy.findTasks(criteria);
                    if (localResults == null)
                        continue;

                    engineProcessInstanceIds.addAll(localResults.getEngineProcessInstanceIds());

                    if (builder == null)
                        builder = new TaskResults.Builder(localResults);
                    else {
                        builder.tasks(localResults.getTasks());
                        builder.addToTotal(localResults.getTotal());
                    }
                }
            }
        }
    } else {
        builder = new TaskResults.Builder();
    }

    long time = 0;
    if (LOG.isDebugEnabled())
        time = System.currentTimeMillis();

    TaskResults.Builder resultsBuilder = new TaskResults.Builder();

    List<Task> taskInstances = builder != null ? builder.build().getTasks() : null;
    List<Task> tasks;
    int count = 0;

    if (taskInstances != null && !taskInstances.isEmpty()) {
        tasks = new ArrayList<Task>(taskInstances.size());

        List<ProcessInstance> processInstances;

        if (StringUtils.isNotEmpty(keyword))
            processInstances = processInstanceRepository
                    .findByProcessDefinitionKeyInAndEngineProcessInstanceIdInAndKeyword(
                            allowedProcessDefinitionKeys, engineProcessInstanceIds, keyword);
        else
            processInstances = processInstanceRepository
                    .findByProcessDefinitionKeyInAndEngineProcessInstanceIdIn(allowedProcessDefinitionKeys,
                            engineProcessInstanceIds);

        if (processInstances != null && !processInstances.isEmpty()) {

            MultiKeyMap processInstanceMap = new MultiKeyMap();
            for (ProcessInstance processInstance : processInstances) {
                if (processInstance == null)
                    continue;
                if (org.apache.cxf.common.util.StringUtils.isEmpty(processInstance.getProcessDefinitionKey()))
                    continue;
                if (org.apache.cxf.common.util.StringUtils
                        .isEmpty(processInstance.getEngineProcessInstanceId()))
                    continue;

                processInstanceMap.put(processInstance.getProcessDefinitionKey(),
                        processInstance.getEngineProcessInstanceId(), processInstance);
            }
            for (Task taskInstance : taskInstances) {
                ProcessInstance instance = ProcessInstance.class.cast(processInstanceMap.get(
                        taskInstance.getProcessDefinitionKey(), taskInstance.getEngineProcessInstanceId()));
                if (instance == null)
                    continue;

                tasks.add(new Task.Builder(taskInstance, new PassthroughSanitizer())
                        .processInstanceId(instance.getProcessInstanceId())
                        .processInstanceAlias(instance.getAlias())
                        .processInstanceLabel(instance.getProcessInstanceLabel()).build());
                count++;
            }
        }
    } else {
        tasks = Collections.emptyList();
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Searching for process instances took " + (System.currentTimeMillis() - time) + " ms");
    }

    resultsBuilder.firstResult(1);
    resultsBuilder.maxResults(count);
    resultsBuilder.total(count);
    resultsBuilder.tasks(tasks);

    return resultsBuilder.build();
}