Example usage for org.apache.commons.math3.stat.descriptive.moment Mean increment

List of usage examples for org.apache.commons.math3.stat.descriptive.moment Mean increment

Introduction

On this page you can find example usage for org.apache.commons.math3.stat.descriptive.moment Mean increment.

Prototype

@Override
public void increment(final double d) 

Source Link

Document

Note that when the Mean(FirstMoment) constructor is used to create a Mean, this method does nothing.
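
As a minimal sketch of the contract above (assuming nothing beyond the Commons Math 3 API named in the prototype), increment(double) folds one value into the running mean without storing it, and getResult() returns the current average. The no-op caveat from the note above is shown at the end:

import org.apache.commons.math3.stat.descriptive.moment.FirstMoment;
import org.apache.commons.math3.stat.descriptive.moment.Mean;

public class MeanIncrementSketch {
    public static void main(String[] args) {
        Mean mean = new Mean();
        for (double x : new double[] { 1.0, 2.0, 3.0 }) {
            mean.increment(x); // storeless update: no values are retained
        }
        System.out.println(mean.getResult()); // 2.0
        System.out.println(mean.getN());      // 3

        // When a Mean wraps an externally managed FirstMoment,
        // increment(...) does nothing; the moment itself must be updated.
        FirstMoment m1 = new FirstMoment();
        Mean wrapped = new Mean(m1);
        wrapped.increment(5.0);  // no-op per the note above
        m1.increment(5.0);       // this is what actually updates the state
        System.out.println(wrapped.getResult()); // 5.0
    }
}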

Usage

From source file:com.itemanalysis.psychometrics.irt.model.Irm4PL.java

/**
 * Mean/sigma linking coefficients are computed from the mean and standard deviation of item difficulty.
 * The summary statistics are computed in a storeless manner. This method allows for the incremental
 * update to item difficulty summary statistics by combining them with other summary statistics.
 *
 * @param mean item difficulty mean.
 * @param sd item difficulty standard deviation.
 */
public void incrementMeanSigma(Mean mean, StandardDeviation sd) { // TODO check for correctness
    mean.increment(difficulty);
    sd.increment(difficulty);
}
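
In practice these increments are accumulated over every common item on two test forms, and the resulting summary statistics define the mean/sigma linking coefficients. A hedged sketch of that surrounding loop (the arrays and helper method are hypothetical; the slope/intercept formulas A = sdY/sdX and B = meanY - A*meanX follow the standard mean/sigma method):

import org.apache.commons.math3.stat.descriptive.moment.Mean;
import org.apache.commons.math3.stat.descriptive.moment.StandardDeviation;

class MeanSigmaSketch {
    static double[] meanSigmaCoefficients(double[] difficultyX, double[] difficultyY) {
        Mean meanX = new Mean(), meanY = new Mean();
        StandardDeviation sdX = new StandardDeviation(), sdY = new StandardDeviation();
        for (double b : difficultyX) { meanX.increment(b); sdX.increment(b); }
        for (double b : difficultyY) { meanY.increment(b); sdY.increment(b); }
        double slope = sdY.getResult() / sdX.getResult();                 // A = sdY / sdX
        double intercept = meanY.getResult() - slope * meanX.getResult(); // B = meanY - A*meanX
        return new double[] { slope, intercept };
    }
}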

From source file:com.itemanalysis.psychometrics.irt.model.Irm4PL.java

/**
 * Mean/mean linking coefficients are computed from the mean item difficulty and mean item discrimination.
 * The summary statistics are computed in a storeless manner. This method allows for the incremental
 * update to item difficulty summary statistics by combining them with other summary statistics.
 *
 * @param meanDiscrimination item discrimination mean.
 * @param meanDifficulty item difficulty mean.
 */
public void incrementMeanMean(Mean meanDiscrimination, Mean meanDifficulty) { // TODO check for correctness
    meanDiscrimination.increment(discrimination);
    meanDifficulty.increment(difficulty);
}
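
The mean/mean method needs only the two pairs of means. A companion sketch under the same assumptions (names hypothetical; the formulas A = mean(aX)/mean(aY) and B = mean(bY) - A*mean(bX) follow the standard mean/mean method):

import org.apache.commons.math3.stat.descriptive.moment.Mean;

class MeanMeanSketch {
    static double[] meanMeanCoefficients(double[] aX, double[] bX, double[] aY, double[] bY) {
        Mean meanAX = new Mean(), meanAY = new Mean(), meanBX = new Mean(), meanBY = new Mean();
        for (double a : aX) meanAX.increment(a); // discrimination, source form
        for (double a : aY) meanAY.increment(a); // discrimination, target form
        for (double b : bX) meanBX.increment(b); // difficulty, source form
        for (double b : bY) meanBY.increment(b); // difficulty, target form
        double slope = meanAX.getResult() / meanAY.getResult();
        double intercept = meanBY.getResult() - slope * meanBX.getResult();
        return new double[] { slope, intercept };
    }
}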

From source file:com.cloudera.oryx.app.serving.als.LoadBenchmark.java

@Test
public void testRecommendLoad() throws Exception {
    AtomicLong count = new AtomicLong();
    Mean meanReqTimeMS = new Mean();
    long start = System.currentTimeMillis();

    int workers = LoadTestALSModelFactory.WORKERS;
    ExecUtils.doInParallel(workers, workers, true, i -> {
        RandomGenerator random = RandomManager.getRandom(Integer.toString(i).hashCode() ^ System.nanoTime());
        for (int j = 0; j < LoadTestALSModelFactory.REQS_PER_WORKER; j++) {
            String userID = "U" + random.nextInt(LoadTestALSModelFactory.USERS);
            long callStart = System.currentTimeMillis();
            target("/recommend/" + userID).request().accept(MediaType.APPLICATION_JSON_TYPE)
                    .get(LIST_ID_VALUE_TYPE);
            long timeMS = System.currentTimeMillis() - callStart;
            synchronized (meanReqTimeMS) {
                meanReqTimeMS.increment(timeMS);
            }
            long currentCount = count.incrementAndGet();
            if (currentCount % 100 == 0) {
                log(currentCount, meanReqTimeMS, start);
            }
        }
    });

    int totalRequests = workers * LoadTestALSModelFactory.REQS_PER_WORKER;
    log(totalRequests, meanReqTimeMS, start);
}
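
The synchronized block in this benchmark is needed because Mean is not thread-safe: concurrent increment calls can interleave and corrupt the running moment. If the lock became a bottleneck, one alternative is a Mean per worker, merged at the end with a hand-rolled weighted average (a sketch, not a Commons Math API; only getN() and getResult() are real methods):

int workers = 4; // hypothetical worker count
Mean[] perWorker = new Mean[workers];
for (int w = 0; w < workers; w++) {
    perWorker[w] = new Mean(); // each worker increments only its own instance
}
// ... workers run, calling perWorker[i].increment(timeMS) without locking ...
long totalN = 0;
double weightedSum = 0.0;
for (Mean m : perWorker) {
    if (m.getN() > 0) { // getResult() is NaN for an empty Mean
        totalN += m.getN();
        weightedSum += m.getN() * m.getResult();
    }
}
double overallMeanMS = weightedSum / totalN;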

From source file:com.itemanalysis.psychometrics.irt.model.IrmGPCM.java

public void incrementMeanSigma(Mean mean, StandardDeviation sd) {
    for (int i = 1; i < ncat; i++) { // Start at 1 because the first step is fixed to zero; do not count it here.
        mean.increment(step[i]);
        sd.increment(step[i]);
    }
}

From source file:com.cloudera.oryx.app.serving.als.model.ALSServingModelTest.java

@Test
public void testLSHEffect() {
    RandomGenerator random = RandomManager.getRandom();
    PoissonDistribution itemPerUserDist = new PoissonDistribution(random, 20,
            PoissonDistribution.DEFAULT_EPSILON, PoissonDistribution.DEFAULT_MAX_ITERATIONS);
    int features = 20;
    ALSServingModel mainModel = new ALSServingModel(features, true, 1.0, null);
    ALSServingModel lshModel = new ALSServingModel(features, true, 0.5, null);

    int userItemCount = 20000;
    for (int user = 0; user < userItemCount; user++) {
        String userID = "U" + user;
        float[] vec = VectorMath.randomVectorF(features, random);
        mainModel.setUserVector(userID, vec);
        lshModel.setUserVector(userID, vec);
        int itemsPerUser = itemPerUserDist.sample();
        Collection<String> knownIDs = new ArrayList<>(itemsPerUser);
        for (int i = 0; i < itemsPerUser; i++) {
            knownIDs.add("I" + random.nextInt(userItemCount));
        }
        mainModel.addKnownItems(userID, knownIDs);
        lshModel.addKnownItems(userID, knownIDs);
    }

    for (int item = 0; item < userItemCount; item++) {
        String itemID = "I" + item;
        float[] vec = VectorMath.randomVectorF(features, random);
        mainModel.setItemVector(itemID, vec);
        lshModel.setItemVector(itemID, vec);
    }

    int numRecs = 10;
    Mean meanMatchLength = new Mean();
    for (int user = 0; user < userItemCount; user++) {
        String userID = "U" + user;
        List<Pair<String, Double>> mainRecs = mainModel
                .topN(new DotsFunction(mainModel.getUserVector(userID)), null, numRecs, null)
                .collect(Collectors.toList());
        List<Pair<String, Double>> lshRecs = lshModel
                .topN(new DotsFunction(lshModel.getUserVector(userID)), null, numRecs, null)
                .collect(Collectors.toList());
        int i = 0;
        while (i < lshRecs.size() && i < mainRecs.size() && lshRecs.get(i).equals(mainRecs.get(i))) {
            i++;
        }
        meanMatchLength.increment(i);
    }
    log.info("Mean matching prefix: {}", meanMatchLength.getResult());
    assertGreaterOrEqual(meanMatchLength.getResult(), 4.0);

    meanMatchLength.clear();
    for (int item = 0; item < userItemCount; item++) {
        String itemID = "I" + item;
        List<Pair<String, Double>> mainRecs = mainModel
                .topN(new CosineAverageFunction(mainModel.getItemVector(itemID)), null, numRecs, null)
                .collect(Collectors.toList());
        List<Pair<String, Double>> lshRecs = lshModel
                .topN(new CosineAverageFunction(lshModel.getItemVector(itemID)), null, numRecs, null)
                .collect(Collectors.toList());
        int i = 0;
        while (i < lshRecs.size() && i < mainRecs.size() && lshRecs.get(i).equals(mainRecs.get(i))) {
            i++;
        }
        meanMatchLength.increment(i);
    }
    log.info("Mean matching prefix: {}", meanMatchLength.getResult());
    assertGreaterOrEqual(meanMatchLength.getResult(), 5.0);
}

From source file:com.cloudera.oryx.als.common.candidate.LocationSensitiveHashIT.java

@Test
public void testLSH() {
    RandomGenerator random = RandomManager.getRandom();

    Mean avgPercentTopRecsConsidered = new Mean();
    Mean avgNDCG = new Mean();
    Mean avgPercentAllItemsConsidered = new Mean();

    for (int iteration = 0; iteration < ITERATIONS; iteration++) {

        LongObjectMap<float[]> Y = new LongObjectMap<>();
        for (int i = 0; i < NUM_ITEMS; i++) {
            Y.put(i, RandomUtils.randomUnitVector(NUM_FEATURES, random));
        }
        float[] userVec = RandomUtils.randomUnitVector(NUM_FEATURES, random);

        double[] results = doTestRandomVecs(Y, userVec);
        double percentTopRecsConsidered = results[0];
        double ndcg = results[1];
        double percentAllItemsConsidered = results[2];

        log.info("Considered {}% of all candidates, {} nDCG, got {}% recommendations correct",
                100 * percentAllItemsConsidered, ndcg, 100 * percentTopRecsConsidered);

        avgPercentTopRecsConsidered.increment(percentTopRecsConsidered);
        avgNDCG.increment(ndcg);
        avgPercentAllItemsConsidered.increment(percentAllItemsConsidered);
    }

    log.info("{}", avgPercentTopRecsConsidered.getResult());
    log.info("{}", avgNDCG.getResult());
    log.info("{}", avgPercentAllItemsConsidered.getResult());

    assertTrue(avgPercentTopRecsConsidered.getResult() > 0.85);
    assertTrue(avgNDCG.getResult() > 0.85);
    assertTrue(avgPercentAllItemsConsidered.getResult() < 0.1);
}

From source file:net.myrrix.online.candidate.LocationSensitiveHashTest.java

@Test
public void testLSH() {
    System.setProperty("model.lsh.sampleRatio", "0.1");
    System.setProperty("model.lsh.numHashes", "20");
    RandomGenerator random = RandomManager.getRandom();

    Mean avgPercentTopRecsConsidered = new Mean();
    Mean avgNDCG = new Mean();
    Mean avgPercentAllItemsConsidered = new Mean();

    for (int iteration = 0; iteration < ITERATIONS; iteration++) {

        FastByIDMap<float[]> Y = new FastByIDMap<float[]>();
        for (int i = 0; i < NUM_ITEMS; i++) {
            Y.put(i, RandomUtils.randomUnitVector(NUM_FEATURES, random));
        }
        float[] userVec = RandomUtils.randomUnitVector(NUM_FEATURES, random);

        double[] results = doTestRandomVecs(Y, userVec);
        double percentTopRecsConsidered = results[0];
        double ndcg = results[1];
        double percentAllItemsConsidered = results[2];

        log.info("Considered {}% of all candidates, {} nDCG, got {}% recommendations correct",
                100 * percentAllItemsConsidered, ndcg, 100 * percentTopRecsConsidered);

        avgPercentTopRecsConsidered.increment(percentTopRecsConsidered);
        avgNDCG.increment(ndcg);
        avgPercentAllItemsConsidered.increment(percentAllItemsConsidered);
    }

    log.info("{}", avgPercentTopRecsConsidered.getResult());
    log.info("{}", avgNDCG.getResult());
    log.info("{}", avgPercentAllItemsConsidered.getResult());

    assertTrue(avgPercentTopRecsConsidered.getResult() > 0.55);
    assertTrue(avgNDCG.getResult() > 0.55);
    assertTrue(avgPercentAllItemsConsidered.getResult() < 0.075);
}

From source file:com.cloudera.oryx.als.common.lsh.LocationSensitiveHashIT.java

@Test
public void testLSH() {
    RandomGenerator random = RandomManager.getRandom();

    Mean avgPercentTopRecsConsidered = new Mean();
    Mean avgNDCG = new Mean();
    Mean avgPercentAllItemsConsidered = new Mean();

    for (int iteration = 0; iteration < ITERATIONS; iteration++) {

        LongObjectMap<float[]> Y = new LongObjectMap<float[]>();
        for (int i = 0; i < NUM_ITEMS; i++) {
            Y.put(i, RandomUtils.randomUnitVector(NUM_FEATURES, random));
        }
        float[] userVec = RandomUtils.randomUnitVector(NUM_FEATURES, random);

        double[] results = doTestRandomVecs(Y, userVec);
        double percentTopRecsConsidered = results[0];
        double ndcg = results[1];
        double percentAllItemsConsidered = results[2];

        log.info("Considered {}% of all candidates, {} nDCG, got {}% recommendations correct",
                100 * percentAllItemsConsidered, ndcg, 100 * percentTopRecsConsidered);

        avgPercentTopRecsConsidered.increment(percentTopRecsConsidered);
        avgNDCG.increment(ndcg);
        avgPercentAllItemsConsidered.increment(percentAllItemsConsidered);
    }

    log.info("{}", avgPercentTopRecsConsidered.getResult());
    log.info("{}", avgNDCG.getResult());
    log.info("{}", avgPercentAllItemsConsidered.getResult());

    assertTrue(avgPercentTopRecsConsidered.getResult() > 0.8);
    assertTrue(avgNDCG.getResult() > 0.8);
    assertTrue(avgPercentAllItemsConsidered.getResult() < 0.09);
}

From source file:com.itemanalysis.jmetrik.graph.nicc.NonparametricCurveAnalysis.java

private void initializeGridPoints() throws SQLException {
    Statement stmt = null;
    ResultSet rs = null;

    //connect to db
    try {
        Table sqlTable = new Table(tableName.getNameForDatabase());
        SelectQuery select = new SelectQuery();
        select.addColumn(sqlTable, regressorVariable.getName().nameForDatabase());
        stmt = conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
        rs = stmt.executeQuery(select.toString());

        Min min = new Min();
        Max max = new Max();
        Mean mean = new Mean();
        StandardDeviation sd = new StandardDeviation();

        double value = 0.0;
        while (rs.next()) {
            value = rs.getDouble(regressorVariable.getName().nameForDatabase());
            if (!rs.wasNull()) {
                min.increment(value);
                max.increment(value);
                mean.increment(value);
                sd.increment(value);
            }
            updateProgress();
        }
        rs.close();
        stmt.close();

        //evaluation points
        double sdv = sd.getResult();
        double mn = mean.getResult();
        double lower = mn - 2.5 * sdv;
        double upper = mn + 2.5 * sdv;
        bwAdjustment *= sdv;
        bandwidth = new NonparametricIccBandwidth(sampleSize, bwAdjustment);
        gridPoints = command.getFreeOption("gridpoints").getInteger();
        //            uniformDistributionApproximation = new UniformDistributionApproximation(
        //                    min.getResult(), max.getResult(), gridPoints);
        uniformDistributionApproximation = new UniformDistributionApproximation(lower, upper, gridPoints);

    } catch (SQLException ex) {
        throw ex;
    } finally {
        if (rs != null)
            rs.close();
        if (stmt != null)
            stmt.close();
    }

}
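
Stripped of the JDBC plumbing, the pattern above is a single pass that feeds each non-missing value into four storeless statistics at once, then derives the evaluation range as the mean plus or minus 2.5 standard deviations. A self-contained sketch of just that pattern (the sample values are invented for illustration):

import org.apache.commons.math3.stat.descriptive.moment.Mean;
import org.apache.commons.math3.stat.descriptive.moment.StandardDeviation;
import org.apache.commons.math3.stat.descriptive.rank.Max;
import org.apache.commons.math3.stat.descriptive.rank.Min;

public class GridBoundsSketch {
    public static void main(String[] args) {
        double[] scores = { 12, 15, 9, 21, 18, 14 }; // invented sample data
        Min min = new Min();
        Max max = new Max();
        Mean mean = new Mean();
        StandardDeviation sd = new StandardDeviation();
        for (double v : scores) { // one pass; nothing is stored
            min.increment(v);
            max.increment(v);
            mean.increment(v);
            sd.increment(v);
        }
        double lower = mean.getResult() - 2.5 * sd.getResult();
        double upper = mean.getResult() + 2.5 * sd.getResult();
        System.out.printf("observed [%.1f, %.1f], grid range [%.2f, %.2f]%n",
                min.getResult(), max.getResult(), lower, upper);
    }
}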

From source file:com.itemanalysis.psychometrics.irt.model.Irm3PL.java

/**
 * Mean/sigma linking coefficients are computed from the mean and standard deviation of item difficulty.
 * The summary statistics are computed in a storeless manner. This method allows for the incremental
 * update to item difficulty summary statistics by combining them with other summary statistics.
 *
 * @param mean item difficulty mean.
 * @param sd item difficulty standard deviation.
 */
public void incrementMeanSigma(Mean mean, StandardDeviation sd) {
    mean.increment(difficulty);
    sd.increment(difficulty);
}