Example usage for org.apache.commons.collections MultiMap put

List of usage examples for org.apache.commons.collections MultiMap put

Introduction

In this page you can find the example usage for org.apache.commons.collections MultiMap put.

Prototype

Object put(Object key, Object value);

Source Link

Document

Adds the value to the collection associated with the specified key.

Usage

From source file:fr.gouv.culture.thesaurus.service.rdf.Concept.java

/**
 * Groups the given concepts by the SKOS collection they belong to.
 *
 * @param concepts
 *            the concepts to group
 * @return a multimap from SKOS collection to the concepts it contains;
 *         concepts belonging to no collection are grouped under the
 *         {@code null} key
 */
private MultiMap getConceptsByCollection(Collection<Concept> concepts) {
    final MultiMap conceptsByCollection = new MultiValueMap();

    for (final Concept concept : concepts) {
        Collection<ConceptCollection> memberships = concept.getCollections();
        if (memberships.isEmpty()) {
            // No SKOS collection: file the concept under the null key so it
            // still appears in the result.
            memberships = new ArrayList<ConceptCollection>();
            memberships.add(null);
        }

        for (final ConceptCollection membership : memberships) {
            conceptsByCollection.put(membership, concept);
        }
    }

    return conceptsByCollection;
}

From source file:edu.harvard.med.screensaver.service.cherrypicks.CherryPickRequestPlateMapFilesBuilder.java

/**
 * Builds a multimap from assay plate name to the set of source plate types
 * required by that plate, considering only allocated and mapped cherry picks.
 *
 * @param cherryPickRequest the request whose lab cherry picks are inspected
 * @return multimap of assay plate name to a HashSet of source plate types
 */
private MultiMap getSourcePlateTypesForEachAssayPlate(CherryPickRequest cherryPickRequest) {
    // Decorated map lazily creates a HashSet of plate types per assay plate name.
    final MultiMap plateTypesByAssayPlate = MultiValueMap.decorate(new HashMap(), new Factory() {
        public Object create() {
            return new HashSet();
        }
    });

    for (final LabCherryPick pick : cherryPickRequest.getLabCherryPicks()) {
        if (!pick.isAllocated() || !pick.isMapped()) {
            continue;
        }
        final String assayPlateName = pick.getAssayPlate().getName();
        final Object sourcePlateType = pick.getSourceCopy()
                .findPlate(pick.getSourceWell().getPlateNumber()).getPlateType();
        plateTypesByAssayPlate.put(assayPlateName, sourcePlateType);
    }
    return plateTypesByAssayPlate;
}

From source file:net.jadler.stubbing.Request.java

/**
 * Parses an application/x-www-form-urlencoded parameter string (for example
 * a query string or form body) into a multimap of names to values.
 * <p>
 * Pairs without an {@code '='} separator are stored with an empty string
 * value. Decoding failures are skipped silently: the encoding comes from
 * {@link #getEncodingInternal()}, so {@link UnsupportedEncodingException}
 * cannot occur for a valid encoding name.
 *
 * @param parametersString raw parameter string; may be blank
 * @return multimap of decoded parameter names to decoded values, never null
 */
private MultiMap readParametersFromString(final String parametersString) {
    final MultiMap res = new MultiValueMap();

    if (StringUtils.isBlank(parametersString)) {
        return res;
    }

    final String enc = this.getEncodingInternal();

    for (final String pair : parametersString.split("&")) {
        final int idx = pair.indexOf('=');
        // A pair without '=' is a name-only parameter with an empty value;
        // handling both cases in one place avoids the previous duplicated
        // decode/try-catch logic.
        final String rawName = idx > -1 ? pair.substring(0, idx) : pair;
        final String rawValue = idx > -1 ? pair.substring(idx + 1) : "";

        try {
            res.put(URLDecoder.decode(rawName, enc), URLDecoder.decode(rawValue, enc));
        } catch (final UnsupportedEncodingException ex) {
            // Best effort: cannot happen for a valid encoding name; skip the pair.
        }
    }

    return res;
}

From source file:fr.in2p3.cc.storage.treqs.persistence.mysql.dao.MySQLConfigurationDAO.java

/**
 * Loads the media-type resource allocations from the database.
 * <p>
 * Each selected row maps a media type id to a (user, share) pair, stored as
 * a {@link PersistenceHelperResourceAllocation} under the media type key.
 *
 * @return multimap from media type id (Byte) to allocation helpers; may be
 *         empty if the table has no entries (a warning is logged)
 * @throws TReqSException if the select or result processing fails
 */
@Override
public MultiMap getResourceAllocation() throws TReqSException {
    LOGGER.trace("> getResourceAllocation");

    // Allocations maps a media type to a pair (user,share)
    final MultiMap allocations = new MultiValueMap();

    final Object[] objects = MySQLBroker.getInstance().executeSelect(MySQLStatements.SQL_ALLOCATIONS_SELECT);

    // objects[1] holds the ResultSet produced by the broker.
    final ResultSet result = (ResultSet) objects[1];
    try {
        while (result.next()) {
            int index = 1;
            final byte id = result.getByte(index++);
            final String userName = result.getString(index++);
            final float share = result.getFloat(index++);
            final PersistenceHelperResourceAllocation helper = new PersistenceHelperResourceAllocation(userName,
                    share);
            // Byte.valueOf reuses cached instances; new Byte(..) is deprecated.
            allocations.put(Byte.valueOf(id), helper);
            // Parameterized logging avoids string concatenation when DEBUG is off.
            LOGGER.debug("Allocation on mediatype: '{}', user: '{}', share: {}", id, userName, share);
        }
    } catch (final SQLException exception) {
        throw new MySQLExecuteException(exception);
    } finally {
        // Always release the statement/result resources held by the broker.
        MySQLBroker.getInstance().terminateExecution(objects);
    }
    if (allocations.isEmpty()) {
        // No entry in table, something wrong with configuration.
        LOGGER.warn("No media type allocations found. Please define them " + "in the database.");
    }

    LOGGER.trace("< getResourceAllocation");

    return allocations;
}

From source file:com.pactera.edg.am.metamanager.extractor.adapter.mapping.impl.RecordExtractMappingServiceImpl.java

/**
 * Caches the from/to id pair of each non-SQL record relationship found in
 * the given row into {@code depReferences}, creating the per-relationship
 * multimap on first use.
 * <p>
 * NOTE(review): original javadoc was mojibake-damaged; description
 * reconstructed from the code — confirm against the extractor design docs.
 *
 * @param row the extracted record (column name to value)
 * @param depList relationship definitions to process; may be null or empty
 */
private void prepareDependencyRelation(Map<String, Object> row, List<TRecordRelationship> depList) {
    if (depList == null || depList.isEmpty()) {
        return;
    }
    for (final TRecordRelationship relation : depList) {
        // Relationships resolved through SQL are handled elsewhere.
        if (relation.useSql()) {
            continue;
        }
        MultiMap references = this.depReferences.get(relation);
        if (references == null) {
            references = new MultiValueMap();
            this.depReferences.put(relation, references);
        }
        final String fromId = extractValue(row, relation.getFromColumns());
        final String toId = extractValue(row, relation.getToColumns());
        references.put(fromId, toId);
    }
}

From source file:edu.uci.ics.jung.algorithms.blockmodel.GraphCollapser.java

/**
 * INTERNAL METHOD.
 * For a set of vertices, finds all edges connecting them to vertices outside
 * the set, indexed (in a MultiMap) by the outside vertex each edge reaches.
 * Thus, in the graph with edges (A-C, A-D, B-C), with input (A, B), the
 * result will be ( C {A-C, B-C}; D {A-D} ).
 *
 * @param rootSet the vertices whose incident edges are collected
 * @return multimap from outside vertex to the edges linking it to the root set
 */
protected MultiMap findEdgesAndVerticesConnectedToRootSet(Set rootSet) {
    final MultiMap edgesByOutsideVertex = new MultiHashMap();

    for (Iterator rootIter = rootSet.iterator(); rootIter.hasNext();) {
        final Vertex root = (Vertex) rootIter.next();
        for (Iterator edgeIter = root.getIncidentEdges().iterator(); edgeIter.hasNext();) {
            final Edge edge = (Edge) edgeIter.next();
            final Vertex opposite = edge.getOpposite(root);
            // Edges whose far end is also in the root set are internal; skip them.
            if (!rootSet.contains(opposite)) {
                edgesByOutsideVertex.put(opposite, edge);
            }
        }
    }
    return edgesByOutsideVertex;
}

From source file:com.zimbra.cs.store.file.BlobDeduper.java

/**
 * De-duplicates a set of blobs with identical content by hard-linking all of
 * their paths to a single source file.
 * <p>
 * The source is chosen as: (1) the path of any already-processed blob whose
 * file info is readable, otherwise (2) the path belonging to the inode shared
 * by the most paths. A temporary "_HOLD" link pins the source file so it
 * cannot disappear mid-operation; each duplicate is then re-linked via a
 * "_TEMP" link renamed over the original path, so every path stays valid at
 * all times. Per-path I/O errors are logged (or ignored) and skipped —
 * deduplication is best effort.
 *
 * @param blobs references to blobs known to share identical content
 * @return pair of (number of hard links created, total bytes saved)
 * @throws ServiceException if marking a blob as processed fails
 */
private Pair<Integer, Long> deDupe(List<BlobReference> blobs) throws ServiceException {
    int linksCreated = 0;
    long sizeSaved = 0;
    long srcInodeNum = 0;
    String srcPath = null;
    // check if there is any processed blob
    for (BlobReference blob : blobs) {
        if (blob.isProcessed()) {
            String path = FileBlobStore.getBlobPath(blob.getMailboxId(), blob.getItemId(), blob.getRevision(),
                    blob.getVolumeId());
            try {
                IO.FileInfo fileInfo = IO.fileInfo(path);
                if (fileInfo != null) {
                    srcInodeNum = fileInfo.getInodeNum();
                    srcPath = path;
                    break;
                }
            } catch (IOException e) {
                // ignore — an unreadable path is simply not usable as the source
            }
        }
    }
    if (srcInodeNum == 0) {
        // check the path with maximum links
        // organize the paths based on inode
        MultiMap inodeMap = new MultiValueMap();
        for (BlobReference blob : blobs) {
            String path = FileBlobStore.getBlobPath(blob.getMailboxId(), blob.getItemId(), blob.getRevision(),
                    blob.getVolumeId());
            try {
                IO.FileInfo fileInfo = IO.fileInfo(path);
                if (fileInfo != null) {
                    inodeMap.put(fileInfo.getInodeNum(), path);
                    // cache the file info so the linking pass below can reuse it
                    blob.setFileInfo(fileInfo);
                }
            } catch (IOException e) {
                // ignore — unreadable paths are left out of the inode census
            }
        }
        // find inode which has maximum paths
        int maxPaths = 0;
        @SuppressWarnings("unchecked")
        Iterator<Map.Entry<Long, Collection<String>>> iter = inodeMap.entrySet().iterator();
        while (iter.hasNext()) {
            Map.Entry<Long, Collection<String>> entry = iter.next();
            if (entry.getValue().size() > maxPaths) {
                maxPaths = entry.getValue().size();
                srcInodeNum = entry.getKey();
                srcPath = entry.getValue().iterator().next();
            }
        }
    }
    if (srcInodeNum == 0) {
        // no usable source file at all; nothing to dedupe
        return new Pair<Integer, Long>(0, Long.valueOf(0));
    }
    // First create a hard link for the source path, so that the file
    // doesn't get deleted in the middle.
    String holdPath = srcPath + "_HOLD";
    File holdFile = new File(holdPath);
    try {
        IO.link(srcPath, holdPath);
        // Now link the other paths to source path
        for (BlobReference blob : blobs) {
            if (blob.isProcessed()) {
                continue;
            }
            String path = FileBlobStore.getBlobPath(blob.getMailboxId(), blob.getItemId(), blob.getRevision(),
                    blob.getVolumeId());
            try {
                if (blob.getFileInfo() == null) {
                    blob.setFileInfo(IO.fileInfo(path));
                }
            } catch (IOException e) {
                // ignore — handled by the null check just below
            }
            if (blob.getFileInfo() == null) {
                continue;
            }
            if (srcInodeNum == blob.getFileInfo().getInodeNum()) {
                // already hard-linked to the source; just record it as done
                markBlobAsProcessed(blob);
                continue;
            }
            // create the links for paths in two steps.
            // first create a temp link and then rename it to actual path
            // this guarantees that the file is always available.
            String tempPath = path + "_TEMP";
            File tempFile = new File(tempPath);
            try {
                IO.link(holdPath, tempPath);
                File destFile = new File(path);
                tempFile.renameTo(destFile);
                markBlobAsProcessed(blob);
                linksCreated++;
                sizeSaved += blob.getFileInfo().getSize();
            } catch (IOException e) {
                ZimbraLog.misc.warn("Ignoring the error while deduping " + path, e);
            } finally {
                // clean up the temp link if the rename did not consume it
                if (tempFile.exists()) {
                    tempFile.delete();
                }
            }
        }
    } catch (IOException e) {
        ZimbraLog.misc.warn("Ignoring the error while creating a link for " + srcPath, e);
    } finally { // delete the hold file
        if (holdFile.exists()) {
            holdFile.delete();
        }
    }
    return new Pair<Integer, Long>(linksCreated, sizeSaved);
}

From source file:edu.wustl.geneconnect.metadata.MetadataCalculator.java

/**
 * Records a direct (single-hop) path from one node to another.
 *
 * @param fromNodeID source node id
 * @param toNodeID destination node id
 */
private void updateDirectPath(int fromNodeID, int toNodeID) {
    // A direct path consists of the destination node alone.
    List nodeList = new ArrayList();
    // Integer.valueOf reuses cached instances; new Integer(..) is deprecated.
    nodeList.add(Integer.valueOf(toNodeID));

    // Register the path under the destination id in the source node's path map.
    MultiMap pathMap = getPathsForSrc(fromNodeID);
    pathMap.put(Integer.valueOf(toNodeID), nodeList);
}

From source file:com.manydesigns.portofino.pageactions.AbstractPageAction.java

/**
 * Lazily builds the multimap of embedded page actions for this page, keyed
 * by the layout container each child page is embedded in. Actions within a
 * container are sorted in their natural order. Child pages that are not
 * active are skipped with a warning. The result is cached in
 * {@code embeddedPageActions} after the first call.
 *
 * @return multimap from layout container name to embedded page actions
 */
public MultiMap initEmbeddedPageActions() {
    if (embeddedPageActions != null) {
        return embeddedPageActions;
    }
    MultiMap actionsByContainer = new MultiHashMap();
    for (ChildPage childPage : pageInstance.getLayout().getChildPages()) {
        String container = childPage.getContainer();
        if (container == null) {
            // Child page is not embedded in any layout container; skip it.
            continue;
        }
        String newPath = context.getActionPath() + "/" + childPage.getName();
        File pageDir = new File(pageInstance.getChildrenDirectory(), childPage.getName());
        try {
            Page page = DispatcherLogic.getPage(pageDir);
            actionsByContainer.put(container, new EmbeddedPageAction(
                    childPage.getName(), childPage.getActualOrder(), newPath, page));
        } catch (PageNotActiveException e) {
            logger.warn("Embedded page action is not active, skipping! " + pageDir, e);
        }
    }
    // Order the actions inside each container.
    for (Object entryObj : actionsByContainer.entrySet()) {
        Collections.sort((List) ((Map.Entry) entryObj).getValue());
    }
    embeddedPageActions = actionsByContainer;
    return embeddedPageActions;
}

From source file:fr.in2p3.cc.storage.treqs.control.controller.QueuesController.java

/**
 * Does house cleaning on done queues. Iterates over the queues map and
 * removes every queue whose status is ENDED.
 * <p>
 * Removal happens in two passes, both under the {@code queuesMap} lock:
 * ended queues are first collected into a temporary multimap (removing while
 * iterating would invalidate the live iterators), then removed from
 * {@code queuesMap} key by key.
 *
 * @return Quantity of done queues that were cleaned.
 */
@SuppressWarnings("unchecked")
public int cleanDoneQueues() {
    LOGGER.trace("> cleanDoneQueues");

    int cleaned = 0;
    final MultiMap toRemove = new MultiValueMap();
    synchronized (this.queuesMap) {
        Iterator<String> iterName = this.queuesMap.keySet().iterator();
        // Checks the references to ended queues.
        while (iterName.hasNext()) {
            final String key = iterName.next();
            final Iterator<Queue> queues = ((Collection<Queue>) this.queuesMap.get(key)).iterator();
            while (queues.hasNext()) {
                final Queue queue = queues.next();

                if (queue.getStatus() == QueueStatus.ENDED) {
                    LOGGER.debug("Queue {} is ended. Cleanup starting.", key);
                    toRemove.put(key, queue);
                    cleaned++;
                } else {
                    LOGGER.debug("Queue {} is not ended.", key);
                }
            }
        }

        // Removes ended queues.
        iterName = toRemove.keySet().iterator();
        while (iterName.hasNext()) {
            final String key = iterName.next();

            final Iterator<Queue> queues = ((Collection<Queue>) toRemove.get(key)).iterator();
            while (queues.hasNext()) {
                final Queue queue = queues.next();

                LOGGER.debug("Deleting {} {}", key, queue.toString());
                // MultiValueMap.remove(key, value) removes this single queue
                // from the key's collection, not the whole key.
                this.queuesMap.remove(key, queue);

            }

        }
    }

    LOGGER.trace("< cleanDoneQueues");

    return cleaned;
}