Example usage for org.apache.mahout.cf.taste.model DataModel getUserIDs

List of usage examples for org.apache.mahout.cf.taste.model DataModel getUserIDs

Introduction

On this page you can find example usage for org.apache.mahout.cf.taste.model DataModel getUserIDs.

Prototype

LongPrimitiveIterator getUserIDs() throws TasteException;
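
For orientation, the following is a minimal, self-contained sketch of calling getUserIDs(). It loads a FileDataModel from a CSV ratings file and prints how many preferences each user has; the file name "ratings.csv" and the printed output are illustrative assumptions, not taken from the examples below.

import java.io.File;
import java.io.IOException;

import org.apache.mahout.cf.taste.common.TasteException;
import org.apache.mahout.cf.taste.impl.common.LongPrimitiveIterator;
import org.apache.mahout.cf.taste.impl.model.file.FileDataModel;
import org.apache.mahout.cf.taste.model.DataModel;

public class GetUserIDsSketch {

    public static void main(String[] args) throws IOException, TasteException {
        // Load ratings from a CSV file with lines of the form userID,itemID,rating
        // ("ratings.csv" is a hypothetical path used only for this sketch)
        DataModel model = new FileDataModel(new File("ratings.csv"));

        // getUserIDs() returns a primitive long iterator, so nextLong() avoids boxing
        LongPrimitiveIterator userIDs = model.getUserIDs();
        while (userIDs.hasNext()) {
            long userID = userIDs.nextLong();
            int numPrefs = model.getPreferencesFromUser(userID).length();
            System.out.println("User " + userID + " has " + numPrefs + " preferences");
        }
    }
}

The examples below follow the same pattern: obtain the iterator from a DataModel, then perform per-user work such as recommend() or estimatePreference() inside the loop.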

Usage

From source file:be.ugent.tiwi.sleroux.newsrec.newsreccollaborativefiltering.MahoutTermRecommender.java

public Map<Long, List<RecommendedItem>> makeRecommendations(int n) throws IOException, TasteException {

    DataModel model = new FileDataModel(new File(mahoutInputFile), ";");
    UserSimilarity similarity = new TanimotoCoefficientSimilarity(model);
    UserNeighborhood neighborhood = new NearestNUserNeighborhood(2, similarity, model);
    Recommender recommender = new GenericUserBasedRecommender(model, neighborhood, similarity);

    // Iterate over every user ID in the model and compute up to n recommendations for each
    LongPrimitiveIterator it = model.getUserIDs();
    Map<Long, List<RecommendedItem>> output = new HashMap<>(model.getNumUsers());

    while (it.hasNext()) {
        long user = it.nextLong();
        List<RecommendedItem> items = recommender.recommend(user, n);
        output.put(user, items);
    }

    return output;
}

From source file:com.msiiplab.recsys.rwr.GLRecommenderIRStatsEvaluator.java

License:Apache License

public void predict(RecommenderBuilder recommenderBuilder, List<DataModel> trainingDataModels,
        List<DataModel> testingDataModels, List<File> outputFileList) throws TasteException {

    // num of train/test pair: num of cross validation folds
    int numFolds = trainingDataModels.size();

    for (int i_folds = 0; i_folds < numFolds; i_folds++) {
        DataModel trainDataModel = trainingDataModels.get(i_folds);
        DataModel testDataModel = testingDataModels.get(i_folds);

        // Build recommender
        Recommender recommender = recommenderBuilder.buildRecommender(trainDataModel);

        LongPrimitiveIterator it_item;
        // Collect the union of item IDs appearing in the training and test models
        FastIDSet allItems = new FastIDSet();
        it_item = trainDataModel.getItemIDs();
        while (it_item.hasNext()) {
            allItems.add(it_item.nextLong());
        }
        it_item = testDataModel.getItemIDs();
        while (it_item.hasNext()) {
            allItems.add(it_item.nextLong());
        }

        if (outputFileList != null) {
            File file = outputFileList.get(i_folds);
            log.info("Writing recommender output to file: " + file.getPath());
            try {
                FileOutputStream out = new FileOutputStream(file);
                LongPrimitiveIterator it_user = testDataModel.getUserIDs();
                while (it_user.hasNext()) {
                    long userID = it_user.nextLong();
                    FastIDSet ratedItems = trainDataModel.getItemIDsFromUser(userID);
                    StringBuilder sb = new StringBuilder();
                    Iterator<Long> it = allItems.iterator();
                    while (it.hasNext()) {
                        long itemID = it.next();
                        if (!ratedItems.contains(itemID)) {
                            float pref = 0;
                            try {
                                pref = recommender.estimatePreference(userID, itemID);
                            } catch (NoSuchItemException e) {
                                pref = Float.NaN;
                            }
                            sb.append(userID + "," + itemID + "," + pref + "\n");
                        }
                    }
                    out.write(sb.toString().getBytes());
                    out.flush();
                }
                out.close();
            } catch (IOException e) {
                // Report the write failure and continue with the next fold
                e.printStackTrace();
            }
        }
    }
}

From source file:com.msiiplab.recsys.rwr.GLRecommenderIRStatsEvaluator.java

License:Apache License

public GLIRStatisticsImpl evaluate(RecommenderBuilder recommenderBuilder, List<DataModel> trainingDataModels,
        List<DataModel> testingDataModels, IDRescorer rescorer, int at, double relevanceThreshold,
        double evaluationPercentage) throws TasteException {

    Preconditions.checkArgument(recommenderBuilder != null, "recommenderBuilder is null");
    Preconditions.checkArgument(trainingDataModels != null, "trainingDataModels is null");
    Preconditions.checkArgument(testingDataModels != null, "testingDataModels is null");
    Preconditions.checkArgument(testingDataModels.size() == trainingDataModels.size(),
            "trainingDataModels.size must equals testingDataModels.size");
    Preconditions.checkArgument(at >= 1, "at must be at least 1");
    Preconditions.checkArgument(evaluationPercentage > 0.0 && evaluationPercentage <= 1.0,
            "Invalid evaluationPercentage: %s", evaluationPercentage);

    // num of train/test pair: num of cross validation folds
    int numFolds = trainingDataModels.size();

    RunningAverage CrossValidationPrecision = new GLRunningAverage();
    RunningAverage CrossValidationRPrecision = new GLRunningAverage();
    RunningAverage CrossValidationRecall = new GLRunningAverage();
    RunningAverage CrossValidationFallOut = new GLRunningAverage();
    RunningAverage CrossValidationNDCG = new GLRunningAverage();
    RunningAverage CrossValidationRNDCG = new GLRunningAverage();//rating-nDCG
    RunningAverage CrossValidationReach = new GLRunningAverage();
    RunningAverage CrossValidationMacroDOA = new GLRunningAverage();
    RunningAverage CrossValidationMicroDOA = new GLRunningAverage();
    RunningAverage CrossValidationMacroInnerDOA = new GLRunningAverage();
    RunningAverage CrossValidationMicroInnerDOA = new GLRunningAverage();

    for (int i_folds = 0; i_folds < numFolds; i_folds++) {
        log.info("fold {}", i_folds);
        DataModel trainDataModel = trainingDataModels.get(i_folds);
        DataModel testDataModel = testingDataModels.get(i_folds);

        FastIDSet MovieIDs = new FastIDSet();
        LongPrimitiveIterator it_train_temp = trainDataModel.getItemIDs();
        LongPrimitiveIterator it_test_temp = testDataModel.getItemIDs();
        while (it_train_temp.hasNext()) {
            MovieIDs.add(it_train_temp.nextLong());
        }
        while (it_test_temp.hasNext()) {
            MovieIDs.add(it_test_temp.nextLong());
        }

        int numTrainItems = trainDataModel.getNumItems();
        int numTestItems = testDataModel.getNumItems();
        int numItems = numTestItems + numTrainItems;

        RunningAverage precision = new GLRunningAverage();
        RunningAverage rPrecision = new GLRunningAverage();
        RunningAverage recall = new GLRunningAverage();
        RunningAverage fallOut = new GLRunningAverage();
        RunningAverage nDCG = new GLRunningAverage();
        RunningAverage rNDCG = new GLRunningAverage();
        RunningAverage macroDOA = new GLRunningAverage();
        RunningAverage microDOA1 = new GLRunningAverage();
        RunningAverage microDOA2 = new GLRunningAverage();
        RunningAverage macroInnerDOA = new GLRunningAverage();
        RunningAverage microInnerDOA1 = new GLRunningAverage();
        RunningAverage microInnerDOA2 = new GLRunningAverage();

        int numUsersRecommendedFor = 0;
        int numUsersWithRecommendations = 0;

        long start = System.currentTimeMillis();

        // Build recommender
        Recommender recommender = recommenderBuilder.buildRecommender(trainDataModel);

        // Evaluate every user present in the test fold
        LongPrimitiveIterator it_user = testDataModel.getUserIDs();
        while (it_user.hasNext()) {
            long userID = it_user.nextLong();
            log.info("user {}", userID);
            // Use all in testDataModel as relevant
            FastIDSet learnedItemIDs;
            FastIDSet relevantItemIDs;

            try {
                learnedItemIDs = trainDataModel.getItemIDsFromUser(userID);
                relevantItemIDs = testDataModel.getItemIDsFromUser(userID);
            } catch (NoSuchUserException e1) {
                continue;
            }

            // Skip the user if there are no relevant items in the test set
            int numRelevantItems = relevantItemIDs.size();
            if (numRelevantItems <= 0) {
                continue;
            }

            // Skip the user if there is no preference record in the training set
            try {
                trainDataModel.getPreferencesFromUser(userID);
            } catch (NoSuchUserException nsee) {
                continue; // Oops we excluded all prefs for the user -- just
                          // move on
            }

            // Recommend items
            List<RecommendedItem> recommendedItems = recommender.recommend(userID, at, rescorer);
            List<RecommendedItem> recommendedItemsAtRelNum = recommender.recommend(userID, numRelevantItems,
                    rescorer);

            PreferenceArray userPreferences = testDataModel.getPreferencesFromUser(userID);
            FastByIDMap<Preference> userPreferenceMap = getPrefereceMap(userPreferences);
            userPreferences.sortByValueReversed();

            // relevantItemIDsAtN only consider top N items as relevant items
            FastIDSet relevantItemIDsAtN = new FastIDSet();
            Iterator<Preference> it_pref = userPreferences.iterator();
            int num_pref = 0;
            while (it_pref.hasNext()) {
                relevantItemIDsAtN.add(it_pref.next().getItemID());
                num_pref++;
                if (num_pref >= at) {
                    break;
                }
            }

            // Compute intersection between recommended items and relevant
            // items
            int intersectionSize = 0;
            int numRecommendedItems = recommendedItems.size();
            for (RecommendedItem recommendedItem : recommendedItems) {
                if (relevantItemIDs.contains(recommendedItem.getItemID())) {
                    intersectionSize++;
                }
            }

            // Precision
            double prec = 0;
            if (numRecommendedItems > 0) {
                prec = (double) intersectionSize / (double) numRecommendedItems;
            }
            precision.addDatum(prec);
            log.info("Precision for user {} is {}", userID, prec);

            // Recall
            double rec = (double) intersectionSize / (double) numRelevantItems;
            recall.addDatum(rec);
            log.info("Recall for user {} is {}", userID, rec);

            // R-precision
            double rprec = 0;
            int intersectionSizeAtRelNum = 0;
            int numRecommendedItemsAtRelNum = recommendedItemsAtRelNum.size();
            for (RecommendedItem recommendedItem : recommendedItemsAtRelNum) {
                if (relevantItemIDs.contains(recommendedItem.getItemID())) {
                    intersectionSizeAtRelNum++;
                }
            }
            if (numRecommendedItemsAtRelNum > 0) {
                rprec = (double) intersectionSizeAtRelNum / (double) numRelevantItems;
            }
            rPrecision.addDatum(rprec);
            log.info("RPrecision for user {} is {}", userID, rprec);

            double F1 = 0;
            if (prec + rec > 0) {
                F1 = 2 * prec * rec / (prec + rec);
            }
            log.info("F1 for user {} is {}", userID, F1);

            // Fall-out
            double fall = 0;
            int size = numRelevantItems + trainDataModel.getItemIDsFromUser(userID).size();
            if (numRelevantItems < size) {
                fall = (double) (numRecommendedItems - intersectionSize)
                        / (double) (numItems - numRelevantItems);
            }
            fallOut.addDatum(fall);
            log.info("Fallout for user {} is {}", userID, fall);

            // nDCG
            // When computing nDCG, assume relevant IDs have relevance equal to their rating and all others 0
            PreferenceArray userPredictions = getPreferenceArray(recommendedItems, userID);
            double userNDCG = computeNDCG(userPreferences, userPredictions, relevantItemIDs, userPreferenceMap,
                    at);
            double userRNDCG = computeRNDCG(userPreferences, userPredictions, relevantItemIDs,
                    userPreferenceMap, at);
            nDCG.addDatum(userNDCG);
            rNDCG.addDatum(userRNDCG);
            log.info("NDCG for user {} is {}", userID, userNDCG);
            log.info("RNDCG for user {} is {}", userID, userRNDCG);

            // Reach
            numUsersRecommendedFor++;
            if (numRecommendedItems > 0) {
                numUsersWithRecommendations++;
            }

            // DOA
            // [Siegel and Castellan, 1988] and [Gori and Pucci, 2007]
            // LongPrimitiveIterator it_movies = MovieIDs.iterator();
            LongPrimitiveIterator it_movies = trainDataModel.getItemIDs();
            long numNW = 0;
            long sumCheckOrder = 0;
            while (it_movies.hasNext()) {
                long itemID = it_movies.nextLong();
                if (!learnedItemIDs.contains(itemID) && !relevantItemIDs.contains(itemID)) {
                    // itemID is in NW_{u_i}
                    numNW++;

                    LongPrimitiveIterator it_test = relevantItemIDs.iterator();
                    while (it_test.hasNext()) {
                        long testItemID = it_test.nextLong();
                        float itemPref = 0;
                        float testItemPref = 0;
                        try {
                            itemPref = recommender.estimatePreference(userID, itemID);
                        } catch (NoSuchItemException e) {
                        }
                        try {
                            testItemPref = recommender.estimatePreference(userID, testItemID);
                        } catch (NoSuchItemException e) {
                        }
                        if (itemPref <= testItemPref) {
                            sumCheckOrder++;
                        }
                    }
                }
            }
            if (numNW > 0 && relevantItemIDs.size() > 0) {
                macroDOA.addDatum((double) sumCheckOrder / (double) (relevantItemIDs.size() * numNW));
                microDOA1.addDatum((double) sumCheckOrder);
                microDOA2.addDatum((double) (relevantItemIDs.size() * numNW));
            }
            //            log.info(
            //                  "sumCheckOrder / (numNW * numRelevant) = {} / ({} * {})",
            //                  sumCheckOrder, numNW, relevantItemIDs.size());

            // InnerDOA: only check the agreement of order in test set
            LongPrimitiveIterator it_test1 = relevantItemIDs.iterator();
            long sumCheckInnerOrder = 0;
            long sumAll = 0;
            while (it_test1.hasNext()) {
                long itemID1 = it_test1.nextLong();
                LongPrimitiveIterator it_test2 = relevantItemIDs.iterator();
                while (it_test2.hasNext()) {
                    long itemID2 = it_test2.nextLong();
                    if (itemID1 != itemID2) {
                        try {
                            float pref_v1 = testDataModel.getPreferenceValue(userID, itemID1);
                            float pref_v2 = testDataModel.getPreferenceValue(userID, itemID2);
                            float predict_v1 = recommender.estimatePreference(userID, itemID1);
                            float predict_v2 = recommender.estimatePreference(userID, itemID2);
                            if ((pref_v1 >= pref_v2 && predict_v1 >= predict_v2)
                                    || (pref_v1 <= pref_v2 && predict_v1 <= predict_v2)) {
                                sumCheckInnerOrder++;
                            }
                            sumAll++;
                        } catch (NoSuchItemException e) {
                            // do nothing, just ignore
                        }
                    }
                }
            }
            if (relevantItemIDs.size() > 1) {
                macroInnerDOA.addDatum((double) sumCheckInnerOrder / (double) sumAll);
                microInnerDOA1.addDatum((double) sumCheckInnerOrder);
                microInnerDOA2.addDatum((double) sumAll);
            }
            //            log.info(
            //                  "sumCheckInnerOrder / (|T| * (|T|-1) ) = {} / ({} * {}) = ",
            //                  sumCheckInnerOrder, relevantItemIDs.size(), relevantItemIDs.size()-1);
        }

        long end = System.currentTimeMillis();

        CrossValidationPrecision.addDatum(precision.getAverage());
        CrossValidationRPrecision.addDatum(rPrecision.getAverage());
        CrossValidationRecall.addDatum(recall.getAverage());
        CrossValidationFallOut.addDatum(fallOut.getAverage());
        CrossValidationNDCG.addDatum(nDCG.getAverage());
        CrossValidationRNDCG.addDatum(rNDCG.getAverage());
        CrossValidationReach.addDatum((double) numUsersWithRecommendations / (double) numUsersRecommendedFor);
        CrossValidationMacroDOA.addDatum(macroDOA.getAverage());
        CrossValidationMicroDOA.addDatum(microDOA1.getAverage() / microDOA2.getAverage());
        CrossValidationMacroInnerDOA.addDatum(macroInnerDOA.getAverage());
        CrossValidationMicroInnerDOA.addDatum(microInnerDOA1.getAverage() / microInnerDOA2.getAverage());

        log.info("Evaluated with training/testing set # {} in {}ms", i_folds, end - start);
        System.out.printf("Evaluated with training/testing set # %d in %d ms \n", i_folds, end - start);

        log.info(
                "Precision/R-Precision/recall/fall-out/nDCG/rNDCG/reach/macroDOA/microDOA/macroInnerDOA/microInnerDOA: {} / {} / {} / {} / {} / {} / {} / {} / {} / {} / {}",
                precision.getAverage(), rPrecision.getAverage(), recall.getAverage(), fallOut.getAverage(),
                nDCG.getAverage(), rNDCG.getAverage(),
                (double) numUsersWithRecommendations / (double) numUsersRecommendedFor, macroDOA.getAverage(),
                microDOA1.getAverage() / microDOA2.getAverage(), macroInnerDOA.getAverage(),
                microInnerDOA1.getAverage() / microInnerDOA2.getAverage());
        System.out.printf(
                "Precision/R-Precision/recall/fall-out/nDCG/rNDCG/reach/macroDOA/microDOA/macroInnerDOA/microInnerDOA: %f / %f / %f / %f / %f / %f / %f / %f / %f / %f / %f \n",
                precision.getAverage(), rPrecision.getAverage(), recall.getAverage(), fallOut.getAverage(),
                nDCG.getAverage(), rNDCG.getAverage(),
                (double) numUsersWithRecommendations / (double) numUsersRecommendedFor, macroDOA.getAverage(),
                microDOA1.getAverage() / microDOA2.getAverage(), macroInnerDOA.getAverage(),
                microInnerDOA1.getAverage() / microInnerDOA2.getAverage());

    }

    log.info(
            "Cross Validation Precision/R-Precision/recall/fall-out/nDCG/rNDCG/reach/macroDOA/microDOA: {} / {} / {} / {} / {} / {} / {} / {} / {} / {} / {}",
            CrossValidationPrecision.getAverage(), CrossValidationRPrecision.getAverage(),
            CrossValidationRecall.getAverage(), CrossValidationFallOut.getAverage(),
            CrossValidationNDCG.getAverage(), CrossValidationRNDCG.getAverage(),
            CrossValidationReach.getAverage(), CrossValidationMacroDOA.getAverage(),
            CrossValidationMicroDOA.getAverage(), CrossValidationMacroInnerDOA.getAverage(),
            CrossValidationMicroInnerDOA.getAverage());
    System.out.printf(
            "Cross Validation: \nPrecision/R-Precision/recall/fall-out/nDCG/rNDCG/reach/macroDOA/microDOA: %f / %f / %f / %f / %f / %f / %f / %f / %f / %f / %f\n",
            CrossValidationPrecision.getAverage(), CrossValidationRPrecision.getAverage(),
            CrossValidationRecall.getAverage(), CrossValidationFallOut.getAverage(),
            CrossValidationNDCG.getAverage(), CrossValidationRNDCG.getAverage(),
            CrossValidationReach.getAverage(), CrossValidationMacroDOA.getAverage(),
            CrossValidationMicroDOA.getAverage(), CrossValidationMacroInnerDOA.getAverage(),
            CrossValidationMicroInnerDOA.getAverage());

    return new GLIRStatisticsImpl(CrossValidationPrecision.getAverage(), CrossValidationRPrecision.getAverage(),
            CrossValidationRecall.getAverage(), CrossValidationFallOut.getAverage(),
            CrossValidationNDCG.getAverage(), CrossValidationRNDCG.getAverage(),
            CrossValidationReach.getAverage(), CrossValidationMacroDOA.getAverage(),
            CrossValidationMicroDOA.getAverage(), CrossValidationMacroInnerDOA.getAverage(),
            CrossValidationMicroInnerDOA.getAverage());
}

From source file:com.msiiplab.recsys.rwr.ParallelGLRecommenderIRStatsEvaluator.java

License:Apache License

@Override
public GLIRStatisticsImpl evaluate(RecommenderBuilder recommenderBuilder, List<DataModel> trainingDataModels,
        List<DataModel> testingDataModels, IDRescorer rescorer, int at, double relevanceThreshold,
        double evaluationPercentage) throws TasteException {

    Preconditions.checkArgument(recommenderBuilder != null, "recommenderBuilder is null");
    Preconditions.checkArgument(trainingDataModels != null, "trainingDataModels is null");
    Preconditions.checkArgument(testingDataModels != null, "testingDataModels is null");
    Preconditions.checkArgument(testingDataModels.size() == trainingDataModels.size(),
            "trainingDataModels.size must equals testingDataModels.size");
    Preconditions.checkArgument(at >= 1, "at must be at least 1");
    Preconditions.checkArgument(evaluationPercentage > 0.0 && evaluationPercentage <= 1.0,
            "Invalid evaluationPercentage: %s", evaluationPercentage);

    // num of train/test pair: num of cross validation folds
    int numFolds = trainingDataModels.size();

    RunningAverage CrossValidationPrecision = new GLRunningAverage();
    RunningAverage CrossValidationRPrecision = new GLRunningAverage();
    RunningAverage CrossValidationRecall = new GLRunningAverage();
    RunningAverage CrossValidationFallOut = new GLRunningAverage();
    RunningAverage CrossValidationNDCG = new GLRunningAverage();
    RunningAverage CrossValidationRNDCG = new GLRunningAverage();//rating-nDCG
    RunningAverage CrossValidationReach = new GLRunningAverage();
    RunningAverage CrossValidationMacroDOA = new GLRunningAverage();
    RunningAverage CrossValidationMicroDOA = new GLRunningAverage();
    RunningAverage CrossValidationMacroInnerDOA = new GLRunningAverage();
    RunningAverage CrossValidationMicroInnerDOA = new GLRunningAverage();

    for (int i_folds = 0; i_folds < numFolds; i_folds++) {

        log.info("fold {}", i_folds);
        DataModel trainDataModel = trainingDataModels.get(i_folds);
        DataModel testDataModel = testingDataModels.get(i_folds);

        FastIDSet MovieIDs = new FastIDSet();
        LongPrimitiveIterator it_train_temp = trainDataModel.getItemIDs();
        LongPrimitiveIterator it_test_temp = testDataModel.getItemIDs();
        while (it_train_temp.hasNext()) {
            MovieIDs.add(it_train_temp.nextLong());
        }
        while (it_test_temp.hasNext()) {
            MovieIDs.add(it_test_temp.nextLong());
        }

        int numTrainItems = trainDataModel.getNumItems();
        int numTestItems = testDataModel.getNumItems();
        int numItems = numTestItems + numTrainItems;

        RunningAverage precision = new GLRunningAverage();
        RunningAverage rPrecision = new GLRunningAverage();
        RunningAverage recall = new GLRunningAverage();
        RunningAverage fallOut = new GLRunningAverage();
        RunningAverage nDCG = new GLRunningAverage();
        RunningAverage rNDCG = new GLRunningAverage();
        RunningAverage macroDOA = new GLRunningAverage();
        RunningAverage microDOA1 = new GLRunningAverage();
        RunningAverage microDOA2 = new GLRunningAverage();
        RunningAverage macroInnerDOA = new GLRunningAverage();
        RunningAverage microInnerDOA1 = new GLRunningAverage();
        RunningAverage microInnerDOA2 = new GLRunningAverage();

        int numUsersRecommendedFor = 0;
        int numUsersWithRecommendations = 0;

        long start = System.currentTimeMillis();

        // Build recommender
        Recommender recommender = recommenderBuilder.buildRecommender(trainDataModel);

        // Futures
        ArrayList<Future<Integer>> futureList = new ArrayList<Future<Integer>>();

        int N_CPUS = Runtime.getRuntime().availableProcessors();
        ExecutorService pool = Executors.newFixedThreadPool(N_CPUS - 1);
        // ExecutorService pool = Executors.newFixedThreadPool(1);

        // Submit one evaluation task per user in the test fold
        LongPrimitiveIterator it_user = testDataModel.getUserIDs();
        while (it_user.hasNext()) {
            long userID = it_user.nextLong();

            Future<Integer> future = pool.submit(new Eval(precision, rPrecision, recall, fallOut, nDCG, rNDCG,
                    macroDOA, microDOA1, microDOA2, macroInnerDOA, microInnerDOA1, microInnerDOA2,
                    trainDataModel, testDataModel, userID, recommender, at, rescorer, numItems));
            futureList.add(future);
        }

        for (Future<Integer> future : futureList) {
            numUsersRecommendedFor++;
            try {
                if (future.get() == 1) {
                    numUsersWithRecommendations++;
                }
            } catch (InterruptedException e) {
                e.printStackTrace();
            } catch (ExecutionException e) {
                e.printStackTrace();
                e.getCause().printStackTrace();
                System.exit(1);
            }
        }

        pool.shutdown();

        long end = System.currentTimeMillis();

        CrossValidationPrecision.addDatum(precision.getAverage());
        CrossValidationRPrecision.addDatum(rPrecision.getAverage());
        CrossValidationRecall.addDatum(recall.getAverage());
        CrossValidationFallOut.addDatum(fallOut.getAverage());
        CrossValidationNDCG.addDatum(nDCG.getAverage());
        CrossValidationRNDCG.addDatum(rNDCG.getAverage());
        CrossValidationReach.addDatum((double) numUsersWithRecommendations / (double) numUsersRecommendedFor);
        CrossValidationMacroDOA.addDatum(macroDOA.getAverage());
        CrossValidationMicroDOA.addDatum(microDOA1.getAverage() / microDOA2.getAverage());
        CrossValidationMacroInnerDOA.addDatum(macroInnerDOA.getAverage());
        CrossValidationMicroInnerDOA.addDatum(microInnerDOA1.getAverage() / microInnerDOA2.getAverage());

        log.info("Evaluated with training/testing set # {} in {}ms", i_folds, end - start);
        System.out.printf("Evaluated with training/testing set # %d in %d ms \n", i_folds, end - start);

        log.info(
                "Precision/R-Precision/recall/fall-out/nDCG/rNDCG/reach/macroDOA/microDOA/macroInnerDOA/microInnerDOA: {} / {} / {} / {} / {} / {} / {} / {} / {} / {} / {}",
                precision.getAverage(), rPrecision.getAverage(), recall.getAverage(), fallOut.getAverage(),
                nDCG.getAverage(), rNDCG.getAverage(),
                (double) numUsersWithRecommendations / (double) numUsersRecommendedFor, macroDOA.getAverage(),
                microDOA1.getAverage() / microDOA2.getAverage(), macroInnerDOA.getAverage(),
                microInnerDOA1.getAverage() / microInnerDOA2.getAverage());
        System.out.printf(
                "Precision/R-Precision/recall/fall-out/nDCG/rNDCG/reach/macroDOA/microDOA/macroInnerDOA/microInnerDOA: %f / %f / %f / %f / %f / %f / %f / %f / %f / %f / %f \n",
                precision.getAverage(), rPrecision.getAverage(), recall.getAverage(), fallOut.getAverage(),
                nDCG.getAverage(), rNDCG.getAverage(),
                (double) numUsersWithRecommendations / (double) numUsersRecommendedFor, macroDOA.getAverage(),
                microDOA1.getAverage() / microDOA2.getAverage(), macroInnerDOA.getAverage(),
                microInnerDOA1.getAverage() / microInnerDOA2.getAverage());

    }

    log.info(
            "Cross Validation Precision/R-Precision/recall/fall-out/nDCG/rNDCG/reach/macroDOA/microDOA: {} / {} / {} / {} / {} / {} / {} / {} / {} / {} / {}",
            CrossValidationPrecision.getAverage(), CrossValidationRPrecision.getAverage(),
            CrossValidationRecall.getAverage(), CrossValidationFallOut.getAverage(),
            CrossValidationNDCG.getAverage(), CrossValidationRNDCG.getAverage(),
            CrossValidationReach.getAverage(), CrossValidationMacroDOA.getAverage(),
            CrossValidationMicroDOA.getAverage(), CrossValidationMacroInnerDOA.getAverage(),
            CrossValidationMicroInnerDOA.getAverage());
    System.out.printf(
            "Cross Validation: \nPrecision/R-Precision/recall/fall-out/nDCG/rNDCG/reach/macroDOA/microDOA: %f / %f / %f / %f / %f / %f / %f / %f / %f / %f / %f\n",
            CrossValidationPrecision.getAverage(), CrossValidationRPrecision.getAverage(),
            CrossValidationRecall.getAverage(), CrossValidationFallOut.getAverage(),
            CrossValidationNDCG.getAverage(), CrossValidationRNDCG.getAverage(),
            CrossValidationReach.getAverage(), CrossValidationMacroDOA.getAverage(),
            CrossValidationMicroDOA.getAverage(), CrossValidationMacroInnerDOA.getAverage(),
            CrossValidationMicroInnerDOA.getAverage());

    return new GLIRStatisticsImpl(CrossValidationPrecision.getAverage(), CrossValidationRPrecision.getAverage(),
            CrossValidationRecall.getAverage(), CrossValidationFallOut.getAverage(),
            CrossValidationNDCG.getAverage(), CrossValidationRNDCG.getAverage(),
            CrossValidationReach.getAverage(), CrossValidationMacroDOA.getAverage(),
            CrossValidationMicroDOA.getAverage(), CrossValidationMacroInnerDOA.getAverage(),
            CrossValidationMicroInnerDOA.getAverage());
}

From source file:com.packtpub.mahout.cookbook.chapter01.App.java

License:Apache License

public static void main(String[] args) throws IOException, TasteException, OptionException {
    CreateCsvRatingsFile();

    // create data source (model) - from the csv file
    File ratingsFile = new File(outputFile);
    DataModel model = new FileDataModel(ratingsFile);

    // create a simple recommender on our data
    CachingRecommender cachingRecommender = new CachingRecommender(new SlopeOneRecommender(model));

    // for all users
    for (LongPrimitiveIterator it = model.getUserIDs(); it.hasNext();) {
        long userId = it.nextLong();

        // get the recommendations for the user
        List<RecommendedItem> recommendations = cachingRecommender.recommend(userId, 10);

        // if empty write something
        if (recommendations.size() == 0) {
            System.out.print("User ");
            System.out.print(userId);
            System.out.println(": no recommendations");
        }

        // print the list of recommendations for each
        for (RecommendedItem recommendedItem : recommendations) {
            System.out.print("User ");
            System.out.print(userId);
            System.out.print(": ");
            System.out.println(recommendedItem);
        }
    }
}

From source file:com.webir.popcornsaver.cluster.TreeClusteringRecommender.java

License:Apache License

private void buildClusters() throws TasteException {
    DataModel model = getDataModel();
    int numUsers = model.getNumUsers();
    if (numUsers > 0) {
        List<FastIDSet> newClusters = new ArrayList<FastIDSet>(numUsers);
        // Begin with a cluster for each user:
        LongPrimitiveIterator it = model.getUserIDs();
        while (it.hasNext()) {
            FastIDSet newCluster = new FastIDSet();
            newCluster.add(it.nextLong());
            newClusters.add(newCluster);
        }
        if (numUsers > 1) {
            findClusters(newClusters);
        }
        topRecsByUserID = computeTopRecsPerUserID(newClusters);
        clustersByUserID = computeClustersPerUserID(newClusters);
        allClusters = newClusters.toArray(new FastIDSet[newClusters.size()]);
    } else {
        topRecsByUserID = new FastByIDMap<List<RecommendedItem>>();
        clustersByUserID = new FastByIDMap<FastIDSet>();
        allClusters = NO_CLUSTERS;
    }
}

From source file:de.unima.dws.webmining.rs.recommender.AvgUserPrefAdaptedUserBasedRecommender.java

License:Apache License

private long[] doMostSimilarUsers(int howMany, TopItems.Estimator<Long> estimator) throws TasteException {
    DataModel model = getDataModel();
    return TopItems.getTopUsers(howMany, model.getUserIDs(), null, estimator);
}

From source file:lib.eval.AbstractRecommenderEvaluator.java

License:Apache License

@Override
public double evaluate(RecommenderBuilder recommenderBuilder, DataModelBuilder dataModelBuilder,
        DataModel dataModel, double trainingPercentage, double evaluationPercentage) throws TasteException {
    Preconditions.checkArgument(recommenderBuilder != null, "recommenderBuilder is null");
    Preconditions.checkArgument(dataModel != null, "dataModel is null");
    Preconditions.checkArgument(trainingPercentage >= 0.0 && trainingPercentage <= 1.0,
            "Invalid trainingPercentage: " + trainingPercentage);
    Preconditions.checkArgument(evaluationPercentage >= 0.0 && evaluationPercentage <= 1.0,
            "Invalid evaluationPercentage: " + evaluationPercentage);

    log.info("Beginning evaluation using {} of {}", trainingPercentage, dataModel);

    int numUsers = dataModel.getNumUsers();
    FastByIDMap<PreferenceArray> trainingUsers = new FastByIDMap<PreferenceArray>(
            1 + (int) (evaluationPercentage * numUsers));
    FastByIDMap<PreferenceArray> testUserPrefs = new FastByIDMap<PreferenceArray>(
            1 + (int) (evaluationPercentage * numUsers));

    LongPrimitiveIterator it = dataModel.getUserIDs();
    while (it.hasNext()) {
        long userID = it.nextLong();
        if (random.nextDouble() < evaluationPercentage) {
            processOneUser(trainingPercentage, trainingUsers, testUserPrefs, userID, dataModel);
        } else { //this user will not be used for evaluation so use all its preferences in training
            PreferenceArray trainingPrefs = dataModel.getPreferencesFromUser(userID);
            trainingUsers.put(userID, trainingPrefs);
        }
    }

    DataModel trainingModel = dataModelBuilder == null ? new GenericDataModel(trainingUsers)
            : dataModelBuilder.buildDataModel(trainingUsers);

    Recommender recommender = recommenderBuilder.buildRecommender(trainingModel);

    double result = getEvaluation(testUserPrefs, recommender);
    log.info("Evaluation result: {}", result);
    return result;
}

From source file:net.recommenders.rival.examples.movielens100k.IterativeCrossValidatedMahoutKNNRecommenderEvaluator.java

License:Apache License

/**
 * Recommends using a UB (user-based) algorithm.
 *
 * @param nFolds number of folds
 * @param inPath path where training and test models have been stored
 * @param outPath path where recommendation files will be stored
 */
public static void recommend(final int nFolds, final String inPath, final String outPath) {
    for (int i = 0; i < nFolds; i++) {
        org.apache.mahout.cf.taste.model.DataModel trainModel;
        org.apache.mahout.cf.taste.model.DataModel testModel;
        try {
            trainModel = new FileDataModel(new File(inPath + "train_" + i + ".csv"));
            testModel = new FileDataModel(new File(inPath + "test_" + i + ".csv"));
        } catch (IOException e) {
            e.printStackTrace();
            return;
        }

        GenericRecommenderBuilder grb = new GenericRecommenderBuilder();
        String recommenderClass = "org.apache.mahout.cf.taste.impl.recommender.GenericUserBasedRecommender";
        String similarityClass = "org.apache.mahout.cf.taste.impl.similarity.PearsonCorrelationSimilarity";
        int neighborhoodSize = NEIGH_SIZE;
        Recommender recommender = null;
        try {
            recommender = grb.buildRecommender(trainModel, recommenderClass, similarityClass, neighborhoodSize);
        } catch (RecommenderException e) {
            e.printStackTrace();
        }

        String fileName = "recs_" + i + ".csv";

        LongPrimitiveIterator users;
        try {
            users = testModel.getUserIDs();
            boolean createFile = true;
            while (users.hasNext()) {
                long u = users.nextLong();
                assert recommender != null;
                List<RecommendedItem> items = recommender.recommend(u, trainModel.getNumItems());
                RecommenderIO.writeData(u, items, outPath, fileName, !createFile, null);
                createFile = false;
            }
        } catch (TasteException e) {
            e.printStackTrace();
        }
    }
}

From source file:net.recommenders.rival.examples.movielens100k.RandomSplitMahoutKNNRecommenderEvaluator.java

License:Apache License

/**
 * Recommends using a UB (user-based) algorithm.
 *
 * @param inPath path where training and test models have been stored
 * @param outPath path where recommendation files will be stored
 */
public static void recommend(final String inPath, final String outPath) {
    int i = 0;
    org.apache.mahout.cf.taste.model.DataModel trainModel = null;
    org.apache.mahout.cf.taste.model.DataModel testModel = null;
    try {
        trainModel = new FileDataModel(new File(inPath + "train_" + i + ".csv"));
        testModel = new FileDataModel(new File(inPath + "test_" + i + ".csv"));
    } catch (IOException e) {
        e.printStackTrace();
        return;
    }

    GenericRecommenderBuilder grb = new GenericRecommenderBuilder();
    String recommenderClass = "org.apache.mahout.cf.taste.impl.recommender.GenericUserBasedRecommender";
    String similarityClass = "org.apache.mahout.cf.taste.impl.similarity.PearsonCorrelationSimilarity";
    int neighborhoodSize = NEIGH_SIZE;
    Recommender recommender = null;
    try {
        recommender = grb.buildRecommender(trainModel, recommenderClass, similarityClass, neighborhoodSize);
    } catch (RecommenderException e) {
        e.printStackTrace();
    }

    String fileName = "recs_" + i + ".csv";

    LongPrimitiveIterator users = null;
    try {
        users = testModel.getUserIDs();
        boolean createFile = true;
        while (users.hasNext()) {
            long u = users.nextLong();
            List<RecommendedItem> items = recommender.recommend(u, trainModel.getNumItems());
            RecommenderIO.writeData(u, items, outPath, fileName, !createFile, null);
            createFile = false;
        }
    } catch (TasteException e) {
        e.printStackTrace();
    }
}