Example usage for com.google.common.collect HashBasedTable create

Introduction

This page lists example usages of com.google.common.collect.HashBasedTable.create().

Prototype

public static <R, C, V> HashBasedTable<R, C, V> create() 

Document

Creates an empty HashBasedTable.
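
For orientation, here is a minimal, self-contained sketch of creating and populating a table returned by this factory method; the class name and the sample keys and values are illustrative only, not taken from the usages below.

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;

public class HashBasedTableDemo {
    public static void main(String[] args) {
        // The row/column/value type parameters are inferred from the declared variable type.
        Table<String, String, Integer> distances = HashBasedTable.create();

        // put(rowKey, columnKey, value)
        distances.put("Berlin", "Munich", 584);
        distances.put("Berlin", "Hamburg", 289);

        // get(rowKey, columnKey) returns null when no mapping exists
        System.out.println(distances.get("Berlin", "Munich"));     // 584
        System.out.println(distances.contains("Berlin", "Paris")); // false

        // rowMap() exposes the table as a Map<rowKey, Map<columnKey, value>>
        System.out.println(distances.rowMap());
    }
}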

Usage

From source file:i5.las2peer.services.recommender.librec.rating.URP.java

@Override
protected void initModel() throws Exception {

    // cumulative parameters
    PukSum = new DenseMatrix(numUsers, numFactors);
    PkirSum = new double[numFactors][numItems][numLevels];

    // initialize count variables
    Nuk = new DenseMatrix(numUsers, numFactors);
    Nu = new DenseVector(numUsers);

    Nkir = new int[numFactors][numItems][numLevels];
    Nki = new DenseMatrix(numFactors, numItems);

    alpha = new DenseVector(numFactors);
    alpha.setAll(initAlpha);

    beta = new DenseVector(numLevels);
    beta.setAll(initBeta);

    // initialize topics
    z = HashBasedTable.create();
    for (MatrixEntry me : trainMatrix) {

        int u = me.row();
        int i = me.column();
        double rui = me.get();

        int r = ratingScale.indexOf(rui); // rating level index: 0 ~ numLevels-1
        int t = (int) (Math.random() * numFactors); // 0 ~ k-1

        // assign a topic t to pair (u, i)
        z.put(u, i, t);
        // number of pairs (u, t) in (u, i, t)
        Nuk.add(u, t, 1);
        // total number of items of user u
        Nu.add(u, 1);

        // number of pairs (t, i, r)
        Nkir[t][i][r]++;
        // total number of words assigned to topic t
        Nki.add(t, i, 1);
    }

}

From source file:org.splevo.vpm.analyzer.semantic.lucene.finder.SharedTermFinder.java

@Override
public Table<String, String, Set<String>> findSimilarEntries() {

    Table<String, String, Set<String>> sharedTermTable = HashBasedTable.create();

    try {
        IndexSearcher indexSearcher = new IndexSearcher(reader);

        // Iterate over all documents (VariationPoints).
        for (int i = 0; i < reader.maxDoc(); i++) {
            Document referenceDoc = indexSearcher.doc(i);

            if (referenceDoc.getField(Indexer.INDEX_CONTENT) != null) {
                Table<String, String, Set<String>> sharedTerms = buildQueryAndExecuteSearch(indexSearcher,
                        Indexer.INDEX_CONTENT, i, referenceDoc);
                sharedTermTable.putAll(sharedTerms);
            }

            if (matchComments && referenceDoc.getField(Indexer.INDEX_COMMENT) != null) {
                Table<String, String, Set<String>> sharedTerms = buildQueryAndExecuteSearch(indexSearcher,
                        Indexer.INDEX_COMMENT, i, referenceDoc);
                sharedTermTable.putAll(sharedTerms);
            }
        }
    } catch (IOException e) {
        logger.error("Failure while searching Lucene index.", e);
    }

    return sharedTermTable;
}

From source file:com.c4om.xsdfriendlyvalidator.ValidationResultsBuilder.java

/**
 * Constructor.
 */
public ValidationResultsBuilder() {
    this.currentlyCollectedErrorMessages = new ArrayList<>();
    this.currentDocumentSystemId = null;
    this.currentPath = new LinkedList<>();
    this.knownRepetitionsCache = HashBasedTable.create();
}

From source file:com.rackspacecloud.blueflood.io.datastax.DEnumIO.java

/**
 * Read the metrics_enum column family for the specified locators and organize
 * the data as a table of (locator, enum value hash, enum value). The table
 * mirrors how the data is laid out in the column family.
 *
 * @param locators the locators to look up
 * @return a table mapping each locator and enum value hash to the enum value
 */
@Override
public Table<Locator, Long, String> getEnumHashValuesForLocators(final List<Locator> locators) {

    Timer.Context ctx = Instrumentation.getReadTimerContext(CassandraModel.CF_METRICS_ENUM_NAME);
    Session session = DatastaxIO.getSession();

    Table<Locator, Long, String> locatorEnumHashValues = HashBasedTable.create();
    List<ResultSetFuture> resultsFuture = new ArrayList<ResultSetFuture>();

    try {
        for (String locatorStr : LocatorsUtils.toStringList(locators)) {
            resultsFuture.add(session.executeAsync(selectFromMetricsEnumStatement.bind(locatorStr)));
        }

        for (ResultSetFuture future : resultsFuture) {
            try {
                List<Row> results = future.getUninterruptibly().all();
                for (Row row : results) {
                    String key = row.getString(metricsCFPreparedStatements.KEY);
                    Locator locator = Locator.createLocatorFromDbKey(key);

                    Long hash = row.getLong(metricsCFPreparedStatements.COLUMN1);
                    String enumValue = row.getString(metricsCFPreparedStatements.VALUE);
                    locatorEnumHashValues.put(locator, hash, enumValue);
                }
            } catch (Exception ex) {
                Instrumentation.markReadError();
                LOG.error("error querying enum from " + CassandraModel.CF_METRICS_ENUM_NAME, ex);
            }
        }
    } finally {
        ctx.stop();
    }
    return locatorEnumHashValues;
}

From source file:org.opendaylight.groupbasedpolicy.util.SubjectResolverUtils.java

/**
 * Choose the set of subjects that are in scope for each possible set of
 * endpoint conditions.
 */
// TODO Li msunal do we really need contractMatches to be a type Table<EgKey, EgKey, List<ContractMatch>>
// it should be sufficient to be just List<ContractMatch>
static Table<EgKey, EgKey, Policy> selectSubjects(Table<EgKey, EgKey, List<ContractMatch>> contractMatches,
        Map<EgKey, Set<ConditionSet>> egConditions) {
    // TODO: Note that it's possible to further simplify the resulting
    // policy
    // in the case of things like repeated rules, condition sets that
    // cover other condition sets, etc. This would be a good thing to do
    // at some point
    Table<EgKey, EgKey, Policy> policy = HashBasedTable.create();

    for (List<ContractMatch> matches : contractMatches.values()) {
        for (ContractMatch match : matches) {
            List<Clause> clauses = match.contract.getClause();
            if (clauses == null)
                continue;

            List<Subject> subjectList = match.contract.getSubject();
            if (subjectList == null)
                continue;

            EgKey ckey = new EgKey(match.consumerTenant.getId(), match.consumer.getId());
            EgKey pkey = new EgKey(match.providerTenant.getId(), match.provider.getId());
            Policy existing = policy.get(ckey, pkey);

            HashMap<SubjectName, Subject> subjects = new HashMap<>();
            for (Subject s : subjectList) {
                subjects.put(s.getName(), s);
            }

            Table<EndpointConstraint, EndpointConstraint, List<Subject>> subjectMap = HashBasedTable.create();

            for (Clause clause : clauses) {
                if (clause.getSubjectRefs() != null
                        && clauseMatchesByGroupReqAndCapConstraints(clause, match)) {
                    ConditionSet consCSet = buildConsConditionSet(clause);
                    addConditionSet(ckey, consCSet, egConditions);
                    EndpointConstraint consEpConstraint = new EndpointConstraint(consCSet,
                            clause.getConsumerMatchers() == null ? null
                                    : clause.getConsumerMatchers().getEndpointIdentificationConstraints());
                    ConditionSet provCSet = buildProvConditionSet(clause);
                    addConditionSet(pkey, provCSet, egConditions);
                    EndpointConstraint provEpConstraint = new EndpointConstraint(provCSet,
                            clause.getProviderMatchers() == null ? null
                                    : clause.getProviderMatchers().getEndpointIdentificationConstraints());
                    List<Subject> clauseSubjects = subjectMap.get(consEpConstraint, provEpConstraint);
                    if (clauseSubjects == null) {
                        clauseSubjects = new ArrayList<>();
                        subjectMap.put(consEpConstraint, provEpConstraint, clauseSubjects);
                    }
                    for (SubjectName sn : clause.getSubjectRefs()) {
                        Subject s = subjects.get(sn);
                        if (s != null)
                            clauseSubjects.add(s);
                    }
                }
            }

            policy.put(ckey, pkey, resolvePolicy(match.contractTenant, match.contract, existing, subjectMap));
        }
    }

    return policy;
}

From source file:com.netease.flume.taildirSource.ReliableTaildirEventReader.java

/**
 * Create a ReliableTaildirEventReader to watch the given directory.
 */
private ReliableTaildirEventReader(Map<String, String> filePaths, Map<String, String> targetFilenames,
        Table<String, String, String> headerTable, String positionFilePath, boolean skipToEnd,
        boolean addByteOffset) throws IOException {
    // Sanity checks
    Preconditions.checkNotNull(filePaths);
    Preconditions.checkNotNull(positionFilePath);

    if (logger.isDebugEnabled()) {
        logger.debug("Initializing {} with directory={}, metaDir={}",
                new Object[] { ReliableTaildirEventReader.class.getSimpleName(), filePaths });
    }

    Table<String, File, Pattern> tailFileTable = HashBasedTable.create();
    for (Entry<String, String> e : filePaths.entrySet()) {
        File f = new File(e.getValue());
        File parentDir = f.getParentFile();
        Preconditions.checkState(parentDir.exists(),
                "Directory does not exist: " + parentDir.getAbsolutePath());
        Pattern fileNamePattern = Pattern.compile(f.getName());
        tailFileTable.put(e.getKey(), parentDir, fileNamePattern);
    }
    logger.info("tailFileTable: " + tailFileTable.toString());
    logger.info("headerTable: " + headerTable.toString());

    this.targetFilenames = targetFilenames;
    this.tailFileTable = tailFileTable;
    this.headerTable = headerTable;
    this.addByteOffset = addByteOffset;
    updateTailFiles(skipToEnd);

    logger.info("Updating position from position file: " + positionFilePath);
    loadPositionFile(positionFilePath);
}

From source file:com.eucalyptus.entities.Interceptors.java

private static final Table stringify(Object[] state, final String[] propertyNames, final Type[] types) {
    return HashBasedTable.create();
}

From source file:net.librec.data.convertor.feature.SocialDataFeature.java

/**
 * Read data from the data file. Note that duplicated lines are not handled.
 *
 * @param inputDataPath
 *            the path of the data file
 * @throws IOException if I/O error occurs during reading
 */
private void readData(String inputDataPath) throws IOException {
    // Table {row-id, col-id, rate}
    Table<Integer, Integer, Double> dataTable = HashBasedTable.create();
    // Map {col-id, multiple row-id}: used to fast build a rating matrix
    Multimap<Integer, Integer> colMap = HashMultimap.create();
    // BiMap {raw id, inner id} userIds, itemIds
    final List<File> files = new ArrayList<File>();
    final ArrayList<Long> fileSizeList = new ArrayList<Long>();
    SimpleFileVisitor<Path> finder = new SimpleFileVisitor<Path>() {
        @Override
        public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
            fileSizeList.add(file.toFile().length());
            files.add(file.toFile());
            return super.visitFile(file, attrs);
        }
    };
    Files.walkFileTree(Paths.get(inputDataPath), finder);
    long allFileSize = 0;
    for (Long everyFileSize : fileSizeList) {
        allFileSize = allFileSize + everyFileSize.longValue();
    }
    // loop every dataFile collecting from walkFileTree
    for (File dataFile : files) {
        FileInputStream fis = new FileInputStream(dataFile);
        FileChannel fileRead = fis.getChannel();
        ByteBuffer buffer = ByteBuffer.allocate(BSIZE);
        int len;
        String bufferLine = "";
        byte[] bytes = new byte[BSIZE];
        while ((len = fileRead.read(buffer)) != -1) {
            buffer.flip();
            buffer.get(bytes, 0, len);
            bufferLine = bufferLine.concat(new String(bytes, 0, len)).replaceAll("\r", "\n");
            String[] bufferData = bufferLine.split("(\n)+");
            boolean isComplete = bufferLine.endsWith("\n");
            int loopLength = isComplete ? bufferData.length : bufferData.length - 1;
            for (int i = 0; i < loopLength; i++) {
                String line = bufferData[i];
                String[] data = line.trim().split("[ \t,]+");
                String userA = data[0];
                String userB = data[1];
                Double rate = (data.length >= 3) ? Double.valueOf(data[2]) : 1.0;
                if (userIds.containsKey(userA) && userIds.containsKey(userB)) {
                    int row = userIds.get(userA);
                    int col = userIds.get(userB);
                    dataTable.put(row, col, rate);
                    colMap.put(col, row);
                }
            }
            if (!isComplete) {
                bufferLine = bufferData[bufferData.length - 1];
            }
            buffer.clear();
        }
        fileRead.close();
        fis.close();
    }
    int numRows = userIds.size(), numCols = userIds.size();
    // build rating matrix
    userSocialMatrix = new SparseMatrix(numRows, numCols, dataTable, colMap);
    // release memory of data table
    dataTable = null;
}

From source file:co.cask.cdap.app.mapreduce.LocalMRJobInfoFetcher.java

/**
 * @param runId for which information will be returned.
 * @return a {@link MRJobInfo} containing information about a particular MapReduce program run.
 */
public MRJobInfo getMRJobInfo(Id.Run runId) throws Exception {
    Preconditions.checkArgument(ProgramType.MAPREDUCE.equals(runId.getProgram().getType()));

    // baseTags has tag keys: ns.app.mr.runid
    Map<String, String> baseTags = Maps.newHashMap();
    baseTags.put(Constants.Metrics.Tag.NAMESPACE, runId.getNamespace().getId());
    baseTags.put(Constants.Metrics.Tag.APP, runId.getProgram().getApplicationId());
    baseTags.put(Constants.Metrics.Tag.MAPREDUCE, runId.getProgram().getId());
    baseTags.put(Constants.Metrics.Tag.RUN_ID, runId.getId());

    Map<String, String> mapTags = Maps.newHashMap(baseTags);
    mapTags.put(Constants.Metrics.Tag.MR_TASK_TYPE, MapReduceMetrics.TaskType.Mapper.getId());

    Map<String, String> reduceTags = Maps.newHashMap(baseTags);
    reduceTags.put(Constants.Metrics.Tag.MR_TASK_TYPE, MapReduceMetrics.TaskType.Reducer.getId());

    // map from TaskId -> (CounterName -> CounterValue)
    Table<String, String, Long> mapTaskMetrics = HashBasedTable.create();
    Table<String, String, Long> reduceTaskMetrics = HashBasedTable.create();

    // Populate mapTaskMetrics and reduceTaskMetrics via MetricStore. Used to construct MRTaskInfos below.
    Map<String, String> metricNamesToCounters = Maps.newHashMap();
    metricNamesToCounters.put(prependSystem(MapReduceMetrics.METRIC_TASK_INPUT_RECORDS),
            TaskCounter.MAP_INPUT_RECORDS.name());
    metricNamesToCounters.put(prependSystem(MapReduceMetrics.METRIC_TASK_OUTPUT_RECORDS),
            TaskCounter.MAP_OUTPUT_RECORDS.name());
    metricNamesToCounters.put(prependSystem(MapReduceMetrics.METRIC_TASK_BYTES),
            TaskCounter.MAP_OUTPUT_BYTES.name());
    metricNamesToCounters.put(prependSystem(MapReduceMetrics.METRIC_TASK_COMPLETION),
            MapReduceMetrics.METRIC_TASK_COMPLETION);

    // get metrics grouped by instance-id for the map tasks
    queryGroupedAggregates(mapTags, mapTaskMetrics, metricNamesToCounters);

    Map<String, Long> mapProgress = Maps.newHashMap();
    if (mapTaskMetrics.columnMap().containsKey(MapReduceMetrics.METRIC_TASK_COMPLETION)) {
        mapProgress = Maps
                .newHashMap(mapTaskMetrics.columnMap().remove(MapReduceMetrics.METRIC_TASK_COMPLETION));
    }

    Map<String, String> reduceMetricsToCounters = Maps.newHashMap();
    reduceMetricsToCounters.put(prependSystem(MapReduceMetrics.METRIC_TASK_INPUT_RECORDS),
            TaskCounter.REDUCE_INPUT_RECORDS.name());
    reduceMetricsToCounters.put(prependSystem(MapReduceMetrics.METRIC_TASK_OUTPUT_RECORDS),
            TaskCounter.REDUCE_OUTPUT_RECORDS.name());
    reduceMetricsToCounters.put(prependSystem(MapReduceMetrics.METRIC_TASK_COMPLETION),
            MapReduceMetrics.METRIC_TASK_COMPLETION);

    // get metrics grouped by instance-id for the reduce tasks
    queryGroupedAggregates(reduceTags, reduceTaskMetrics, reduceMetricsToCounters);

    Map<String, Long> reduceProgress = Maps.newHashMap();
    if (reduceTaskMetrics.columnMap().containsKey(MapReduceMetrics.METRIC_TASK_COMPLETION)) {
        reduceProgress = Maps
                .newHashMap(reduceTaskMetrics.columnMap().remove(MapReduceMetrics.METRIC_TASK_COMPLETION));
    }

    // Construct MRTaskInfos from the information we can get from Metric system.
    List<MRTaskInfo> mapTaskInfos = Lists.newArrayList();
    for (Map.Entry<String, Map<String, Long>> taskEntry : mapTaskMetrics.rowMap().entrySet()) {
        String mapTaskId = taskEntry.getKey();
        mapTaskInfos.add(new MRTaskInfo(mapTaskId, null, null, null, mapProgress.get(mapTaskId) / 100.0F,
                taskEntry.getValue()));
    }

    List<MRTaskInfo> reduceTaskInfos = Lists.newArrayList();
    for (Map.Entry<String, Map<String, Long>> taskEntry : reduceTaskMetrics.rowMap().entrySet()) {
        String reduceTaskId = taskEntry.getKey();
        reduceTaskInfos.add(new MRTaskInfo(reduceTaskId, null, null, null,
                reduceProgress.get(reduceTaskId) / 100.0F, taskEntry.getValue()));
    }

    return getJobCounters(mapTags, reduceTags, mapTaskInfos, reduceTaskInfos);
}

From source file:librec.rating.TimeSVD.java

@Override
protected void initModel() throws Exception {
    super.initModel();

    numDays = days(maxTimestamp, minTimestamp) + 1;

    userBias = new DenseVector(numUsers);
    userBias.init();

    itemBias = new DenseVector(numItems);
    itemBias.init();

    Alpha = new DenseVector(numUsers);
    Alpha.init();

    Bit = new DenseMatrix(numItems, numBins);
    Bit.init();

    Y = new DenseMatrix(numItems, numFactors);
    Y.init();

    Auk = new DenseMatrix(numUsers, numFactors);
    Auk.init();

    But = HashBasedTable.create();
    Pukt = new HashMap<>();

    Cu = new DenseVector(numUsers);
    Cu.init();

    Cut = new DenseMatrix(numUsers, numDays);
    Cut.init();

    // cache
    userItemsCache = trainMatrix.rowColumnsCache(cacheSpec);

    // global average date
    double sum = 0;
    int cnt = 0;
    for (MatrixEntry me : trainMatrix) {
        int u = me.row();
        int i = me.column();
        double rui = me.get();

        if (rui <= 0)
            continue;

        sum += days((long) timeMatrix.get(u, i), minTimestamp);
        cnt++;
    }
    double globalMeanDate = sum / cnt;

    // compute user's mean of rating timestamps
    userMeanDate = new DenseVector(numUsers);
    List<Integer> Ru = null;
    for (int u = 0; u < numUsers; u++) {

        sum = 0;
        Ru = userItemsCache.get(u);
        for (int i : Ru) {
            sum += days((long) timeMatrix.get(u, i), minTimestamp);
        }

        double mean = (Ru.size() > 0) ? (sum + 0.0) / Ru.size() : globalMeanDate;
        userMeanDate.set(u, mean);
    }
}