Example usage for com.google.common.collect HashBasedTable create

Introduction

On this page you can find example usages of com.google.common.collect.HashBasedTable.create(), collected from open-source projects.

Prototype

public static <R, C, V> HashBasedTable<R, C, V> create() 

Document

Creates an empty HashBasedTable.
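
As a minimal, self-contained sketch of the factory method above (the class name, place names, and distances are invented for illustration), a table created this way maps a (row key, column key) pair to a value:

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;

public class HashBasedTableExample {
    public static void main(String[] args) {
        // Row, column, and value types are inferred from the assignment target.
        Table<String, String, Integer> distances = HashBasedTable.create();

        // Each cell is addressed by a (row key, column key) pair.
        distances.put("London", "Paris", 344);
        distances.put("Paris", "Berlin", 878);

        System.out.println(distances.get("London", "Paris"));      // 344
        System.out.println(distances.contains("Berlin", "Paris")); // false
        System.out.println(distances.row("Paris"));                // {Berlin=878}
    }
}

The no-argument create() shown here is the form used in every example below; Guava also provides create(expectedRows, expectedCellsPerRow) for presizing and create(Table) for copying an existing table.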

Usage

From source file:org.mousephenotype.dcc.exportlibrary.traverser.CommandImpl.java

public CommandImpl(HibernateManager hibernateManager) {
    this.hibernateManager = hibernateManager;
    this.validationSet = new ValidationSet();
    this.experimentValidations = HashBasedTable.create();
    this.lineValidations = HashBasedTable.create();
    this.housingValidations = HashBasedTable.create();
    this.validExperiments = ArrayListMultimap.create();
    this.validLines = ArrayListMultimap.create();
    this.validHousings = ArrayListMultimap.create();
    this.centreProcedureValidations = ArrayListMultimap.create();
    this.validCentreProcedures = new ArrayList<>();
}

From source file:edu.washington.cs.cupid.CapabilityExecutor.java

private CapabilityExecutor() {
    resultCaches = CacheBuilder.newBuilder().build();
    running = HashBasedTable.create();
    canceling = Sets.newIdentityHashSet();

    IPreferenceStore preferences = CupidActivator.getDefault().getPreferenceStore();
    logJobStatus = preferences.getBoolean(PreferenceConstants.P_JOB_STATUS_LOGGING);
    logCacheStatus = preferences.getBoolean(PreferenceConstants.P_CACHE_STATUS_LOGGING);
    preferences.addPropertyChangeListener(this);
}

From source file:net.librec.data.splitter.LOOCVDataSplitter.java

/**
 * Split ratings into two parts: for each user, the most recent rating
 * (by date) is held out as the test set, and the remaining data forms
 * the training set.
 */
public void getLOOByUserDate() {
    trainMatrix = new SparseMatrix(preferenceMatrix);
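    // dataTable collects the held-out (user, item, rating) cells that become the test matrix.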
    Table<Integer, Integer, Double> dataTable = HashBasedTable.create();
    Multimap<Integer, Integer> colMap = HashMultimap.create();

    for (int u = 0, um = preferenceMatrix.numRows(); u < um; u++) {
        List<Integer> items = preferenceMatrix.getColumns(u);
        int i = -1;

        List<RatingContext> rcs = new ArrayList<>();
        for (int j : items) {
            rcs.add(new RatingContext(u, j, (long) datetimeMatrix.get(u, j)));
        }
        Collections.sort(rcs);
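        // The last (most recent) rating in the sorted list is held out for testing.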
        i = rcs.get(rcs.size() - 1).getItem();
        trainMatrix.set(u, i, 0);
        dataTable.put(u, i, preferenceMatrix.get(u, i));
        colMap.put(i, u);
    }
    SparseMatrix.reshape(trainMatrix);
    testMatrix = new SparseMatrix(preferenceMatrix.numRows(), preferenceMatrix.numColumns(), dataTable, colMap);
}

From source file:com.shazam.fork.reporter.FlakinessSorter.java

private Table<TestLabel, Build, TestInstance> getOrCreateTable(
        HashMap<String, Table<TestLabel, Build, TestInstance>> poolToFlakinessTableMap, String poolName) {
    Table<TestLabel, Build, TestInstance> table = poolToFlakinessTableMap.get(poolName);
    if (table == null) {
        table = HashBasedTable.create();
        poolToFlakinessTableMap.put(poolName, table);
    }
    return table;
}
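
Note that on Java 8 and later this get-or-create idiom can be collapsed to poolToFlakinessTableMap.computeIfAbsent(poolName, k -> HashBasedTable.create()).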

From source file:es.usc.citius.composit.core.composition.network.AbstractServiceMatchNetwork.java

public Map<Operation<E>, Table<E, E, T>> getTargetOperationsMatchedBy(Operation<E> source) {
    // First, compute the target elements matched by op.outputs
    Map<Operation<E>, Table<E, E, T>> matchMap = new HashMap<Operation<E>, Table<E, E, T>>();
    for (E sourceOutput : source.getSignature().getOutputs()) {
        Map<E, T> targetMatch = getTargetElementsMatchedBy(sourceOutput);
        // Find the providers
        for (Map.Entry<E, T> targetMatchEntry : targetMatch.entrySet()) {
            E targetInput = targetMatchEntry.getKey();
            Set<Operation<E>> targetOps = Sets.newHashSet(getOperationsWithInput(targetInput));
            // Annotate output -> input
            for (Operation<E> op : targetOps) {
                Table<E, E, T> matchTable = matchMap.get(op);
                if (matchTable == null) {
                    matchTable = HashBasedTable.create();
                    matchMap.put(op, matchTable);
                }
                // Add match entry
                matchTable.put(sourceOutput, targetInput, targetMatchEntry.getValue());
            }
        }
    }
    return matchMap;
}

From source file:edu.cmu.lti.oaqa.baseqa.passage.rerank.scorers.LuceneInMemoryPassageScorer.java

@Override
public void prepare(JCas jcas) throws AnalysisEngineProcessException {
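    // (passage URI, search-configuration name) -> score/rank; see columnKeySet() at the end.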
    uri2conf2score = HashBasedTable.create();
    uri2conf2rank = HashBasedTable.create();
    // build an in-memory Lucene index over the ranked passages
    List<Passage> passages = TypeUtil.getRankedPassages(jcas);
    RAMDirectory index = new RAMDirectory();
    try (IndexWriter writer = new IndexWriter(index, new IndexWriterConfig(analyzer))) {
        for (Passage passage : passages) {
            Document doc = new Document();
            doc.add(new StringField("uri", TypeUtil.getUriOffsets(passage, ":"), Field.Store.YES));
            doc.add(new TextField("text", passage.getText(), Field.Store.NO));
            writer.addDocument(doc);
        }
        writer.close();
        reader = DirectoryReader.open(index);
        searcher = new IndexSearcher(reader);
    } catch (IOException e) {
        throw new AnalysisEngineProcessException(e);
    }
    // queries
    List<String> tokens = TypeUtil.getOrderedTokens(jcas).stream().map(Token::getCoveredText)
            .map(QueryParser::escape).filter(name -> !name.isEmpty() && !stoplist.contains(name.toLowerCase()))
            .collect(toList());
    Multimap<String, String> ctype2names = HashMultimap.create();
    for (Concept concept : TypeUtil.getConcepts(jcas)) {
        Set<String> ctypes = TypeUtil.getConceptTypes(concept).stream().map(ConceptType::getAbbreviation)
                .collect(toSet());
        String cnames = TypeUtil.getConceptNames(concept).stream()
                .map(LuceneInMemoryPassageScorer::normalizeQuoteName).distinct().collect(joining(" "));
        ctypes.stream().filter(t -> !FORBIDDEN_CTYPES.contains(t))
                .forEach(ctype -> ctype2names.put(ctype, cnames));
    }
    Multimap<String, String> ctypepre2names = HashMultimap.create();
    ctype2names.asMap().entrySet().forEach(e -> ctypepre2names.putAll(e.getKey().split(":")[0], e.getValue()));
    Multimap<String, String> ctype2mentions = HashMultimap.create();
    for (Concept concept : TypeUtil.getConcepts(jcas)) {
        Set<String> ctypes = TypeUtil.getConceptTypes(concept).stream().map(ConceptType::getAbbreviation)
                .collect(toSet());
        String cmentions = TypeUtil.getConceptMentions(concept).stream().map(ConceptMention::getMatchedName)
                .map(LuceneInMemoryPassageScorer::normalizeQuoteName).distinct().collect(joining(" "));
        ctypes.stream().filter(t -> !FORBIDDEN_CTYPES.contains(t))
                .forEach(ctype -> ctype2mentions.put(ctype, cmentions));
    }
    Multimap<String, String> ctypepre2mentions = HashMultimap.create();
    ctypepre2mentions.asMap().entrySet()
            .forEach(e -> ctypepre2mentions.putAll(e.getKey().split(":")[0], e.getValue()));
    LOG.debug("Query strings");
    ExecutorService service = Executors.newCachedThreadPool();
    // execute against all tokens
    service.submit(() -> {
        String concatTokens = String.join(" ", tokens);
        LOG.debug(" - Concatenated tokens: {}", concatTokens);
        search(concatTokens, "tokens_concatenated@all");
    });
    // execute against concatenated concept names
    service.submit(() -> {
        String concatCnames = String.join(" ", ctype2names.values());
        LOG.debug(" - Concatenated concept names: {}", concatCnames);
        search(concatCnames, "cnames_concatenated@all");
    });
    // execute against concatenated concept mentions
    service.submit(() -> {
        String concatCmentions = String.join(" ", ctype2mentions.values());
        LOG.debug(" - Concatenated concept mentions: {}", concatCmentions);
        search(concatCmentions, "cmentions_concatenated@all");
    });
    // execute against concept names for each concept
    service.submit(() -> {
        for (String cnames : ImmutableSet.copyOf(ctype2names.values())) {
            LOG.debug(" - Concatenated concept names: {}", cnames);
            search(cnames, "cnames_individual@all");
        }
    });
    // execute against concept names for each concept type
    service.submit(() -> {
        for (String ctype : ctype2names.keySet()) {
            String concatCnames = String.join(" ", ctype2names.get(ctype));
            LOG.debug(" - Concatenated concept names for {}: {}", ctype, concatCnames);
            search(concatCnames, "cnames@" + ctype + "@all");
        }
    });
    // execute against concept names for each concept type prefix
    service.submit(() -> {
        for (String ctypepre : ctypepre2names.keySet()) {
            String concatCnames = String.join(" ", ctypepre2names.get(ctypepre));
            LOG.debug(" - Concatenated concept names for {}: {}", ctypepre, concatCnames);
            search(concatCnames, "cnames@" + ctypepre + "@all");
        }
    });
    // execute against concept mentions for each concept
    service.submit(() -> {
        for (String cmentions : ImmutableSet.copyOf(ctype2mentions.values())) {
            LOG.debug(" - Concatenated concept mentions: {}", cmentions);
            search(cmentions, "cmentions_individual@all");
        }
    });
    // execute against concept mentions for each concept type
    service.submit(() -> {
        for (String ctype : ctype2mentions.keySet()) {
            String concatCmentions = String.join(" ", ctype2mentions.get(ctype));
            LOG.debug(" - Concatenated concept mentions for {}: {}", ctype, concatCmentions);
            search(concatCmentions, "cmentions@" + ctype + "@all");
        }
    });
    // execute against concept mentions for each concept type prefix
    service.submit(() -> {
        for (String ctypepre : ctypepre2mentions.keySet()) {
            String concatCmentions = String.join(" ", ctypepre2mentions.get(ctypepre));
            LOG.debug(" - Concatenated concept mentions for {}: {}", ctypepre, concatCmentions);
            search(concatCmentions, "cmentions@" + ctypepre + "@all");
        }
    });
    service.shutdown();
    try {
        service.awaitTermination(1, TimeUnit.MINUTES);
    } catch (InterruptedException e) {
        throw new AnalysisEngineProcessException(e);
    }
    confs = uri2conf2score.columnKeySet();
}

From source file:org.eclipse.tracecompass.internal.lttng2.kernel.core.analysis.vm.module.VirtualMachineStateProvider.java

/**
 * Constructor
 *
 * @param experiment
 *            The virtual machine experiment
 */
public VirtualMachineStateProvider(TmfExperiment experiment) {
    super(experiment, "Virtual Machine State Provider"); //$NON-NLS-1$

    fModel = new QemuKvmVmModel(experiment);
    Table<ITmfTrace, String, @Nullable Integer> table = HashBasedTable.create();
    fEventNames = table;
    fLayouts = new HashMap<>();
}

From source file:i5.las2peer.services.recommender.librec.data.TagDataSplitter.java

/**
 * Split ratings into k folds.
 * 
 * @param kfold
 *            number of folds
 * @param trainRatio
 *            ratio of training data, e.g. ratio of 0.8 means a training/test split of 80/20 (time-based)
 * @param foldSize
 *            portion of the dataset used for each fold
 */
private void splitFolds(int kfold, double trainRatio, double foldSize) {
    assert kfold > 0;

    numFold = kfold;

    // Find min and max timestamps
    startTimestamp = new long[kfold];
    endTimestamp = new long[kfold];
    splitTimestamp = new long[kfold];

    long minTimestamp = Long.MAX_VALUE;
    long maxTimestamp = Long.MIN_VALUE;
    for (MatrixEntry e : timeMatrix) {
        long timestamp = (long) e.get();
        if (minTimestamp > timestamp)
            minTimestamp = timestamp;
        if (maxTimestamp < timestamp)
            maxTimestamp = timestamp;
    }

    long timeRange = maxTimestamp - minTimestamp;
    long foldLength = (long) (foldSize * timeRange);

    // earliest and latest possible fold start timestamps
    long earliestStart = minTimestamp;
    long latestStart = maxTimestamp - foldLength;

    if (kfold == 1) {
        startTimestamp[0] = latestStart;
        endTimestamp[0] = maxTimestamp;
        splitTimestamp[0] = startTimestamp[0] + (long) (trainRatio * foldLength);
    } else {
        // time between fold start timestamps
        long foldStep = (latestStart - earliestStart) / (kfold - 1);

        // start, end and split timestamps for each fold
        for (int i = 0; i < kfold; i++) {
            startTimestamp[i] = earliestStart + i * foldStep;
            endTimestamp[i] = startTimestamp[i] + foldLength;
            splitTimestamp[i] = startTimestamp[i] + (long) (trainRatio * foldLength);
        }
    }

    // initialize training tables for each fold
    trainUserTagTable = new ArrayList<Table<Integer, Integer, Set<Long>>>(numFold);
    trainItemTagTable = new ArrayList<Table<Integer, Integer, Set<Long>>>(numFold);
    for (int i = 0; i < kfold; i++) {
        trainUserTagTable.add(i, HashBasedTable.create());
        trainItemTagTable.add(i, HashBasedTable.create());
    }

    // split user tagging data
    Set<Cell<Integer, Integer, Set<Long>>> userTaggingCells = userTagTable.cellSet();
    for (Cell<Integer, Integer, Set<Long>> c : userTaggingCells) {
        int user = c.getRowKey();
        int tag = c.getColumnKey();
        Set<Long> times = c.getValue();
        for (long time : times) {
            for (int fold = 0; fold < numFold; fold++) {
                if (time >= startTimestamp[fold] && time <= splitTimestamp[fold]) {
                    if (!trainUserTagTable.get(fold).contains(user, tag)) {
                        trainUserTagTable.get(fold).put(user, tag, new HashSet<Long>());
                    }
                    trainUserTagTable.get(fold).get(user, tag).add(time);
                }
            }
        }
    }

    // split item tagging data
    Set<Cell<Integer, Integer, Set<Long>>> itemTaggingCells = itemTagTable.cellSet();
    for (Cell<Integer, Integer, Set<Long>> c : itemTaggingCells) {
        int item = c.getRowKey();
        int tag = c.getColumnKey();
        Set<Long> times = c.getValue();
        for (long time : times) {
            for (int fold = 0; fold < numFold; fold++) {
                if (time >= startTimestamp[fold] && time <= splitTimestamp[fold]) {
                    if (!trainItemTagTable.get(fold).contains(item, tag)) {
                        trainItemTagTable.get(fold).put(item, tag, new HashSet<Long>());
                    }
                    trainItemTagTable.get(fold).get(item, tag).add(time);
                }
            }
        }
    }
    debugInfo();
}

From source file:co.cask.cdap.data2.dataset2.InMemoryDatasetFramework.java

@Inject
public InMemoryDatasetFramework(DatasetDefinitionRegistryFactory registryFactory,
        @Named("defaultDatasetModules") Map<String, DatasetModule> defaultModules,
        CConfiguration configuration) {
    this.registryFactory = registryFactory;
    this.allowDatasetUncheckedUpgrade = configuration.getBoolean(Constants.Dataset.DATASET_UNCHECKED_UPGRADE);

    this.namespaces = Sets.newHashSet();
    this.nonDefaultTypes = HashMultimap.create();
    this.instances = HashBasedTable.create();
    this.registries = Maps.newHashMap();
    // the order in which module classes are inserted is important,
    // so we use a table where Map<Id.DatasetModule, String> is a LinkedHashMap
    Map<Id.Namespace, Map<Id.DatasetModule, String>> backingMap = Maps.newHashMap();
    this.moduleClasses = Tables.newCustomTable(backingMap, new Supplier<Map<Id.DatasetModule, String>>() {
        @Override
        public Map<Id.DatasetModule, String> get() {
            return Maps.newLinkedHashMap();
        }
    });

    // add default dataset modules to system namespace
    namespaces.add(Constants.SYSTEM_NAMESPACE_ID);
    DatasetDefinitionRegistry systemRegistry = registryFactory.create();
    for (Map.Entry<String, DatasetModule> entry : defaultModules.entrySet()) {
        LOG.info("Adding Default module {} to system namespace", entry.getKey());
        String moduleName = entry.getKey();
        DatasetModule module = entry.getValue();
        entry.getValue().register(systemRegistry);
        // keep track of default module classes. These are used when creating registries for other namespaces,
        // which need to register system classes too.
        String moduleClassName = DatasetModules.getDatasetModuleClass(module).getName();
        Id.DatasetModule moduleId = Id.DatasetModule.from(Constants.SYSTEM_NAMESPACE_ID, moduleName);
        moduleClasses.put(Constants.SYSTEM_NAMESPACE_ID, moduleId, moduleClassName);
    }
    registries.put(Constants.SYSTEM_NAMESPACE_ID, systemRegistry);

    ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
    readLock = readWriteLock.readLock();
    writeLock = readWriteLock.writeLock();
}

From source file:net.librec.recommender.cf.ranking.ItemBigramRecommender.java

@Override
protected void setup() throws LibrecException {
    super.setup();
    numTopics = conf.getInt("rec.topic.number", 10);

    initAlpha = conf.getFloat("rec.user.dirichlet.prior", 0.01f);
    initBeta = conf.getFloat("rec.topic.dirichlet.prior", 0.01f);

    timeMatrix = (SparseMatrix) getDataModel().getDatetimeDataSet();

    // build the training data, sorting by date
    userItemsMap = new HashMap<>();
    for (int userIdx = 0; userIdx < numUsers; userIdx++) {
        List<Integer> unsortedItems = trainMatrix.getColumns(userIdx);
        int size = unsortedItems.size();

        List<RatingContext> rcs = new ArrayList<>(size);
        for (Integer itemIdx : unsortedItems) {
            rcs.add(new RatingContext(userIdx, itemIdx, (long) timeMatrix.get(userIdx, itemIdx)));
        }
        Collections.sort(rcs);

        List<Integer> sortedItems = new ArrayList<>(size);
        for (RatingContext rc : rcs) {
            sortedItems.add(rc.getItem());
        }

        userItemsMap.put(userIdx, sortedItems);
    }

    // initialize count variables
    userTopicNumbers = new DenseMatrix(numUsers, numTopics);
    userTokenNumbers = new DenseVector(numUsers);

    topicPreItemCurItemNum = new int[numTopics][numItems + 1][numItems];
    topicItemProbs = new DenseMatrix(numTopics, numItems + 1);

    // Logs.debug("topicPreItemCurItemNum consumes {} bytes", Strings.toString(Memory.bytes(topicPreItemCurItemNum)));

    // parameters
    userTopicProbsSum = new DenseMatrix(numUsers, numTopics);
    topicPreItemCurItemSumProbs = new double[numTopics][numItems + 1][numItems];
    topicPreItemCurItemProbs = new double[numTopics][numItems + 1][numItems];

    // hyper-parameters
    alpha = new DenseVector(numTopics);
    alpha.setAll(initAlpha);

    beta = new DenseMatrix(numTopics, numItems + 1);
    beta.setAll(initBeta);

    // initialization
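    // topicAssignments maps (user index, item index) -> sampled topic index.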
    topicAssignments = HashBasedTable.create();
    for (Map.Entry<Integer, List<Integer>> userItemEntry : userItemsMap.entrySet()) {
        int userIdx = userItemEntry.getKey();
        List<Integer> itemIdxList = userItemEntry.getValue();

        for (int itemIdxIndex = 0; itemIdxIndex < itemIdxList.size(); itemIdxIndex++) {
            int itemIdx = itemIdxList.get(itemIdxIndex);

            int topicIdx = (int) (Math.random() * numTopics);
            topicAssignments.put(userIdx, itemIdx, topicIdx);

            userTopicNumbers.add(userIdx, topicIdx, 1.0);
            userTokenNumbers.add(userIdx, 1.0);

            int preItemIdx = itemIdxIndex > 0 ? itemIdxList.get(itemIdxIndex - 1) : numItems;
            topicPreItemCurItemNum[topicIdx][preItemIdx][itemIdx]++;
            topicItemProbs.add(topicIdx, preItemIdx, 1);
        }
    }
}