Example usage for org.apache.commons.math3.linear RealVector getDimension

List of usage examples for org.apache.commons.math3.linear RealVector getDimension

Introduction

On this page you can find example usages for org.apache.commons.math3.linear RealVector getDimension.

Prototype

public abstract int getDimension();

Document

Returns the size of the vector.
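
A minimal, self-contained sketch showing what getDimension() reports, using ArrayRealVector (the standard dense implementation):

import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.RealVector;

public class GetDimensionExample {
    public static void main(String[] args) {
        // a dense vector backed by a double[] of length 3
        RealVector v = new ArrayRealVector(new double[] { 1.0, 2.0, 3.0 });

        // getDimension() returns the number of entries, here 3
        System.out.println(v.getDimension());

        // append returns a new vector that is one entry longer, so dimension 4
        System.out.println(v.append(4.0).getDimension());
    }
}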

Usage

From source file:com.github.thorbenlindhauer.factor.GaussianFactorTest.java

@Test
public void testFactorDivision() {
    GaussianFactor quotient = abcFactor.division(abFactor);

    // then
    Collection<Variable> newVariables = quotient.getVariables().getVariables();
    assertThat(newVariables).hasSize(3);
    assertThat(newVariables).containsAll(abcFactor.getVariables().getVariables());
    assertThat(newVariables).containsAll(abFactor.getVariables().getVariables());

    // precision matrix
    RealMatrix precisionMatrix = quotient.getPrecisionMatrix();
    assertThat(precisionMatrix.isSquare()).isTrue();
    assertThat(precisionMatrix.getColumnDimension()).isEqualTo(3);

    double[] row = precisionMatrix.getRowVector(0).toArray();
    assertThat(row[0]).isEqualTo(-2.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(row[1]).isEqualTo(3.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(row[2]).isEqualTo(6.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    row = precisionMatrix.getRowVector(1).toArray();
    assertThat(row[0]).isEqualTo(2.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(row[1]).isEqualTo(4.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(row[2]).isEqualTo(7.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    row = precisionMatrix.getRowVector(2).toArray();
    assertThat(row[0]).isEqualTo(10.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(row[1]).isEqualTo(3.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(row[2]).isEqualTo(5.5d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    // scaled mean vector
    RealVector scaledMeanVector = quotient.getScaledMeanVector();
    assertThat(scaledMeanVector.getDimension()).isEqualTo(3);

    double[] meanVectorValues = scaledMeanVector.toArray();
    assertThat(meanVectorValues[0]).isEqualTo(0.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(meanVectorValues[1]).isEqualTo(0.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(meanVectorValues[2]).isEqualTo(1.5d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    assertThat(quotient.getNormalizationConstant()).isEqualTo(6.3d, TestConstants.DOUBLE_VALUE_TOLERANCE);
}

From source file:com.github.thorbenlindhauer.factor.GaussianFactorTest.java

@Test
public void testFactorProduct() {
    // when
    GaussianFactor product = acFactor.product(abFactor);

    // then
    Collection<Variable> newVariables = product.getVariables().getVariables();
    assertThat(newVariables).hasSize(3);
    assertThat(newVariables).containsAll(acFactor.getVariables().getVariables());
    assertThat(newVariables).containsAll(abFactor.getVariables().getVariables());

    // precision matrix
    RealMatrix precisionMatrix = product.getPrecisionMatrix();
    assertThat(precisionMatrix.isSquare()).isTrue();
    assertThat(precisionMatrix.getColumnDimension()).isEqualTo(3);

    double[] row = precisionMatrix.getRowVector(0).toArray();
    assertThat(row[0]).isEqualTo(6.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(row[1]).isEqualTo(1.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(row[2]).isEqualTo(2.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    row = precisionMatrix.getRowVector(1).toArray();
    assertThat(row[0]).isEqualTo(1.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(row[1]).isEqualTo(2.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(row[2]).isEqualTo(0.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    row = precisionMatrix.getRowVector(2).toArray();
    assertThat(row[0]).isEqualTo(3.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(row[1]).isEqualTo(0.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(row[2]).isEqualTo(1.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    // scaled mean vector
    RealVector scaledMeanVector = product.getScaledMeanVector();
    assertThat(scaledMeanVector.getDimension()).isEqualTo(3);

    double[] meanVectorValues = scaledMeanVector.toArray();
    assertThat(meanVectorValues[0]).isEqualTo(8.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(meanVectorValues[1]).isEqualTo(2.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);
    assertThat(meanVectorValues[2]).isEqualTo(6.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    assertThat(product.getNormalizationConstant()).isEqualTo(7.7d, TestConstants.DOUBLE_VALUE_TOLERANCE);

}

From source file:edu.utexas.cs.tactex.BrokerUtilsTest.java

@Test
public void test_rotateWeeklyRecordAndAppendTillEndOfDay() {

    // this test is very similar to, and based on,
    // EnergyPredictionTest.testEnergyPredictionOfAboutOneWeek();
    // we moved it here after refactoring a method to BrokerUtils
    //
    // initialize a record vector where the value of an
    // entry is its index
    ArrayRealVector record = new ArrayRealVector(7 * 24);
    for (int i = 0; i < record.getDimension(); ++i) {
        record.setEntry(i, i);
    }

    int currentTimeslot;
    ArrayRealVector expected;
    ArrayRealVector actual;

    currentTimeslot = 7 * 24 - 1;
    expected = new ArrayRealVector(record);
    actual = BrokerUtils.rotateWeeklyRecordAndAppendTillEndOfDay(record, currentTimeslot);
    assertArrayEquals("no rotation at the beginning of week", expected.toArray(), actual.toArray(), 1e-6);

    currentTimeslot = 1 * 24 - 1;
    // actual
    actual = BrokerUtils.rotateWeeklyRecordAndAppendTillEndOfDay(record, currentTimeslot);
    // prepare expected
    RealVector slice1 = record.getSubVector(1 * 24, 7 * 24 - 24);
    expected.setSubVector(0, slice1);
    expected.setSubVector(slice1.getDimension(), record.getSubVector(0, 24));
    assertArrayEquals("end of first day results in days 2,...,7,1", expected.toArray(), actual.toArray(), 1e-6);

    currentTimeslot = 6 * 24 - 1;
    // actual
    actual = BrokerUtils.rotateWeeklyRecordAndAppendTillEndOfDay(record, currentTimeslot);
    // prepare expected
    slice1 = record.getSubVector(6 * 24, 7 * 24 - 6 * 24);
    expected.setSubVector(0, slice1);
    expected.setSubVector(slice1.getDimension(), record.getSubVector(0, 6 * 24));
    assertArrayEquals("end of 6th day results in days 7,1,...,6", expected.toArray(), actual.toArray(), 1e-6);

    currentTimeslot = 0;
    // predict
    actual = BrokerUtils.rotateWeeklyRecordAndAppendTillEndOfDay(record, currentTimeslot);
    // prepare expected
    slice1 = record.getSubVector(1, 7 * 24 - 1);
    expected.setSubVector(0, slice1);
    expected.setSubVector(slice1.getDimension(), record.getSubVector(0, 1));
    // append returns a new vector (RealVector instances are not resized in place),
    // so the result must be assigned back to expected
    expected = (ArrayRealVector) expected.append(record.getSubVector(1, 24 - 1));
    assertArrayEquals("if not at day start, appending until the end of day", expected.toArray(),
            actual.toArray(), 1e-6);
}
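
The rotation being tested is a plain subvector shuffle; a minimal standalone sketch of the same pattern with Commons Math primitives (the helper name rotateWeekly is hypothetical and is not the BrokerUtils implementation):

// hypothetical helper illustrating the rotation pattern exercised above;
// startTimeslot is the index of the entry that becomes the first entry of the result
static RealVector rotateWeekly(RealVector record, int startTimeslot) {
    int n = record.getDimension();                 // 7 * 24 entries in the tests above
    if (startTimeslot == 0) {
        return record.copy();                      // nothing to rotate
    }
    RealVector rotated = new ArrayRealVector(n);
    RealVector tail = record.getSubVector(startTimeslot, n - startTimeslot);
    rotated.setSubVector(0, tail);
    rotated.setSubVector(tail.getDimension(), record.getSubVector(0, startTimeslot));
    return rotated;
}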

From source file:com.github.thorbenlindhauer.factor.GaussianFactorTest.java

@Test
public void testValueObservation() {
    GaussianFactor reducedVector = abcFactor.observation(newScope(new ContinuousVariable("A")),
            new double[] { 2.5d });

    // then
    Collection<Variable> newVariables = reducedVector.getVariables().getVariables();
    assertThat(newVariables).hasSize(2);
    assertThat(newVariables).contains(new ContinuousVariable("B"), new ContinuousVariable("C"));

    //  B     C    A
    //6.0d, 7.0d, 3.0d
    //3.0d, 5.5d, 10.0d
    //4.0d, 6.0d, 3.0d
    //
    // X = {B, C}, Y = {A}

    // precision matrix: K_xx
    RealMatrix precisionMatrix = reducedVector.getPrecisionMatrix();
    assertThat(precisionMatrix.isSquare()).isTrue();
    assertThat(precisionMatrix.getColumnDimension()).isEqualTo(2);

    double precision = precisionMatrix.getEntry(0, 0);
    assertThat(precision).isEqualTo(6.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    precision = precisionMatrix.getEntry(0, 1);
    assertThat(precision).isEqualTo(7.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    precision = precisionMatrix.getEntry(1, 0);
    assertThat(precision).isEqualTo(3.0d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    precision = precisionMatrix.getEntry(1, 1);
    assertThat(precision).isEqualTo(5.5d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    // scaled mean vector: h_x - K_xy * y
    RealVector scaledMeanVector = reducedVector.getScaledMeanVector();
    assertThat(scaledMeanVector.getDimension()).isEqualTo(2);

    double meanValue = scaledMeanVector.getEntry(0);
    assertThat(meanValue).isEqualTo(-5.5d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    meanValue = scaledMeanVector.getEntry(1);
    assertThat(meanValue).isEqualTo(-23.5d, TestConstants.DOUBLE_VALUE_TOLERANCE);

    // normalization constant: g + h_y * y - 0.5 * (y * K_yy * y)
    //                         8.5 + 7.5 - 9.375 = 6.625
    assertThat(reducedVector.getNormalizationConstant()).isEqualTo(6.625d,
            TestConstants.DOUBLE_VALUE_TOLERANCE);
}
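
The "h_x - K_xy * y" step that produces the scaled mean can be reproduced directly with Commons Math. A minimal sketch; the precision matrix is the one quoted in the comment above, while h is a hypothetical scaled mean chosen only so that the arithmetic lands on the expected (-5.5, -23.5):

// variables ordered B, C, A as in the comment block above
RealMatrix K = new Array2DRowRealMatrix(new double[][] {
        { 6.0, 7.0, 3.0 },
        { 3.0, 5.5, 10.0 },
        { 4.0, 6.0, 3.0 } });
RealVector h = new ArrayRealVector(new double[] { 2.0, 1.5, 0.0 }); // hypothetical scaled mean
RealVector y = new ArrayRealVector(new double[] { 2.5 });           // observed value of A

RealMatrix Kxy = K.getSubMatrix(0, 1, 2, 2);   // rows B,C against column A
RealVector hx = h.getSubVector(0, 2);

RealVector scaledMean = hx.subtract(Kxy.operate(y));
// scaledMean.getDimension() == 2; with the hypothetical h it equals (-5.5, -23.5)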

From source file:edu.stanford.cfuller.imageanalysistools.fitting.BisquareLinearFit.java

/**
* Calculates the leverages of data points for least squares fitting (assuming equal variances).
* 
* @param indVarValues The values of the independent variable used for the fitting.
* @return a RealVector containing a leverage value for each independent variable value.
*/
protected RealVector calculateLeverages(RealVector indVarValues) {

    RealMatrix indVarMatrix = null;

    if (this.noIntercept) {
        indVarMatrix = new Array2DRowRealMatrix(indVarValues.getDimension(), 1);
    } else {
        indVarMatrix = new Array2DRowRealMatrix(indVarValues.getDimension(), 2);
    }

    indVarMatrix.setColumnVector(0, indVarValues);

    if (!this.noIntercept) {
        // second column is all ones for the intercept term
        indVarMatrix.setColumnVector(1, indVarValues.mapMultiply(0).mapAdd(1));
    }

    RealVector leverages = new ArrayRealVector(indVarValues.getDimension());

    QRDecomposition xQR = new QRDecomposition(indVarMatrix);

    RealMatrix xR = xQR.getR();

    int smallerDim = xR.getRowDimension() < xR.getColumnDimension() ? xR.getRowDimension()
            : xR.getColumnDimension();

    RealMatrix xRSq = xR.getSubMatrix(0, smallerDim - 1, 0, smallerDim - 1);

    QRDecomposition xRQR = new QRDecomposition(xRSq);

    RealMatrix xRInv = xRQR.getSolver().getInverse();

    RealMatrix xxRInv = indVarMatrix.multiply(xRInv);

    for (int i = 0; i < indVarValues.getDimension(); i++) {
        double sum = 0;
        for (int j = 0; j < xxRInv.getColumnDimension(); j++) {
            sum += Math.pow(xxRInv.getEntry(i, j), 2);
        }
        leverages.setEntry(i, sum);
    }

    return leverages;

}
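
For comparison, the same leverages can be written through the hat-matrix diagonal, h_i = [X (XᵀX)⁻¹ Xᵀ]_ii. A rough sketch; it is numerically less stable than the QR route above, but it makes the definition explicit:

// minimal sketch, assuming X is the design matrix assembled as above
static RealVector leveragesViaHatMatrix(RealMatrix X) {
    RealMatrix xtxInverse = new LUDecomposition(X.transpose().multiply(X)).getSolver().getInverse();
    RealMatrix hat = X.multiply(xtxInverse).multiply(X.transpose());

    RealVector leverages = new ArrayRealVector(X.getRowDimension());
    for (int i = 0; i < leverages.getDimension(); i++) {
        leverages.setEntry(i, hat.getEntry(i, i)); // leverage = diagonal entry of the hat matrix
    }
    return leverages;
}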

From source file:edu.stanford.cfuller.imageanalysistools.fitting.NelderMeadMinimizer.java

/**
 * Constructs the initial simplex that is the starting point of the optimization given an initial guess at the minimum.
 *
 * This method will attempt to guess the scale of each parameter, but it is preferable to specify this manually using the other form of
 * generateInitialSimplex if any information about these scales is known.
 *
 * @param initialPoint      The initial guess at the minimum, one component per parameter.
 * @return                  A matrix containing p + 1 rows, each of which is one set of p parameters, which specify the simplex.
 */
public RealMatrix generateInitialSimplex(RealVector initialPoint) {

    final double constantScale = 0.1;

    RealVector componentScales = initialPoint.mapMultiply(constantScale);

    //if the initial point has zeros in it, those entries will not be optimized
    //perturb slightly to allow optimization
    for (int i = 0; i < componentScales.getDimension(); i++) {
        if (componentScales.getEntry(i) == 0.0) {
            componentScales.setEntry(i, constantScale);
        }
    }

    return generateInitialSimplex(initialPoint, componentScales);

}
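
For context, a generic version of the simplex construction that the two-argument overload then performs: p + 1 vertices, one at the initial point and one offset along each coordinate by that component's scale. This is a sketch only and not necessarily identical to the NelderMeadMinimizer implementation:

// generic sketch, not necessarily identical to generateInitialSimplex(initialPoint, componentScales)
static RealMatrix buildSimplex(RealVector initialPoint, RealVector componentScales) {
    int p = initialPoint.getDimension();
    RealMatrix simplex = new Array2DRowRealMatrix(p + 1, p);
    simplex.setRowVector(0, initialPoint);
    for (int i = 0; i < p; i++) {
        RealVector vertex = initialPoint.copy();
        vertex.setEntry(i, vertex.getEntry(i) + componentScales.getEntry(i));
        simplex.setRowVector(i + 1, vertex);
    }
    return simplex;
}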

From source file:com.joptimizer.functions.SOCPLogarithmicBarrier.java

/**
 * Create the barrier function for the Phase I.
 * It is an instance of this class for the constraints: 
 * <br>||Ai.x+bi|| < ci.x+di+t, i=1,...,m
 * @see "S.Boyd and L.Vandenberghe, Convex Optimization, 11.6.2"
 */
public BarrierFunction createPhase1BarrierFunction() {

    final int dimPh1 = dim + 1;
    List<SOCPConstraintParameters> socpConstraintParametersPh1List = new ArrayList<SOCPConstraintParameters>();
    SOCPLogarithmicBarrier bfPh1 = new SOCPLogarithmicBarrier(socpConstraintParametersPh1List, dimPh1);

    for (int i = 0; i < socpConstraintParametersList.size(); i++) {
        SOCPConstraintParameters param = socpConstraintParametersList.get(i);
        RealMatrix A = param.getA();
        RealVector b = param.getB();
        RealVector c = param.getC();
        double d = param.getD();

        RealMatrix APh1 = MatrixUtils.createRealMatrix(A.getRowDimension(), dimPh1);
        APh1.setSubMatrix(A.getData(), 0, 0);
        RealVector bPh1 = b;
        RealVector cPh1 = new ArrayRealVector(c.getDimension() + 1);
        cPh1.setSubVector(0, c);
        cPh1.setEntry(c.getDimension(), 1);
        double dPh1 = d;

        SOCPConstraintParameters paramsPh1 = new SOCPConstraintParameters(APh1.getData(), bPh1.toArray(),
                cPh1.toArray(), dPh1);
        socpConstraintParametersPh1List.add(socpConstraintParametersPh1List.size(), paramsPh1);
    }

    return bfPh1;
}
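
Side note: the cPh1 construction above (copy c, then set a trailing 1) can also be written with RealVector#append, which returns a new vector one entry longer; a tiny sketch with made-up values:

RealVector c = new ArrayRealVector(new double[] { 1.0, 2.0, 3.0 });
RealVector cPh1 = c.append(1.0);   // new vector; cPh1.getDimension() == c.getDimension() + 1
// cPh1 is (1.0, 2.0, 3.0, 1.0)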

From source file:edu.stanford.cfuller.imageanalysistools.fitting.DifferentialEvolutionMinimizer.java

/**
 * Performs a minimization of a function starting with only parameter bounds; a full population is generated based on these bounds.
 *
 * @param f                     The function to be minimized.
 * @param parameterLowerBounds  The lower bounds of each parameter.  Generated parameter values less than these values will be discarded.
 * @param parameterUpperBounds  The upper bounds of each parameter.  Generated parameter values greater than these values will be discarded.
 * @param populationSize        The size of the population of parameter sets to use for optimization.
 * @param scaleFactor           Factor controlling the scale of crossed over entries during crossover events.
 * @param maxIterations         The maximum number of iterations to allow before returning a result.
 * @param crossoverFrequency    The frequency of crossover from 0 to 1.  At any given parameter, this is the probability of initiating a crossover as well as the probability of ending one after it has started.
 * @param tol                   Relative function value tolerance controlling termination; if the maximal and minimal population values differ by less than this factor times the maximal value, optimization will terminate.
 * @return                      The parameter values at the minimal function value found.
 */
public RealVector minimize(ObjectiveFunction f, RealVector parameterLowerBounds,
        RealVector parameterUpperBounds, int populationSize, double scaleFactor, int maxIterations,
        double crossoverFrequency, double tol) {

    int numberOfParameters = parameterLowerBounds.getDimension();

    //generate the initial population

    RealMatrix population = new Array2DRowRealMatrix(populationSize, numberOfParameters);

    for (int i = 0; i < populationSize; i++) {

        for (int j = 0; j < numberOfParameters; j++) {

            population.setEntry(i, j,
                    RandomGenerator.rand()
                            * (parameterUpperBounds.getEntry(j) - parameterLowerBounds.getEntry(j))
                            + parameterLowerBounds.getEntry(j));

        }
    }

    return minimizeWithPopulation(population, f, parameterLowerBounds, parameterUpperBounds, populationSize,
            scaleFactor, maxIterations, crossoverFrequency, tol);

}
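
The inner sampling loop can be factored into a small helper; a sketch (the helper name randomBetween and the use of java.util.Random instead of the project's RandomGenerator are assumptions made for illustration):

// hypothetical helper: sample one parameter vector uniformly between per-component bounds
static RealVector randomBetween(RealVector lower, RealVector upper, java.util.Random rng) {
    RealVector sample = new ArrayRealVector(lower.getDimension());
    for (int j = 0; j < lower.getDimension(); j++) {
        double range = upper.getEntry(j) - lower.getEntry(j);
        sample.setEntry(j, lower.getEntry(j) + rng.nextDouble() * range);
    }
    return sample;
}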

From source file:edu.stanford.cfuller.imageanalysistools.fitting.DifferentialEvolutionMinimizer.java

/**
 * Performs a minimization of a function starting with a single initial guess; a full population is generated based on this guess and the parameter bounds.
 *
 * @param f                     The function to be minimized.
 * @param parameterLowerBounds  The lower bounds of each parameter.  Generated parameter values less than these values will be discarded.
 * @param parameterUpperBounds  The upper bounds of each parameter.  Generated parameter values greater than these values will be discarded.
 * @param populationSize        The size of the population of parameter sets to use for optimization.
 * @param scaleFactor           Factor controlling the scale of crossed over entries during crossover events.
 * @param maxIterations         The maximum number of iterations to allow before returning a result.
 * @param crossoverFrequency    The frequency of crossover from 0 to 1.  At any given parameter, this is the probability of initiating a crossover as well as the probability of ending one after it has started.
 * @param tol                   Relative function value tolerance controlling termination; if the maximal and minimal population values differ by less than this factor times the maximal value, optimization will terminate.
 * @param initialGuess          A guess at the value of each parameter at the optimum.
 * @return                      The parameter values at the minimal function value found.
 */
public RealVector minimizeWithInitial(ObjectiveFunction f, RealVector parameterLowerBounds,
        RealVector parameterUpperBounds, int populationSize, double scaleFactor, int maxIterations,
        double crossoverFrequency, double tol, RealVector initialGuess) {

    int numberOfParameters = parameterLowerBounds.getDimension();

    //generate the initial population

    RealMatrix population = new Array2DRowRealMatrix(populationSize, numberOfParameters);

    for (int i = 1; i < populationSize; i++) {

        for (int j = 0; j < numberOfParameters; j++) {

            population.setEntry(i, j,
                    RandomGenerator.rand()
                            * (parameterUpperBounds.getEntry(j) - parameterLowerBounds.getEntry(j))
                            + parameterLowerBounds.getEntry(j));

        }
    }

    population.setRowVector(0, initialGuess);

    return minimizeWithPopulation(population, f, parameterLowerBounds, parameterUpperBounds, populationSize,
            scaleFactor, maxIterations, crossoverFrequency, tol);

}

From source file:com.simiacryptus.mindseye.applications.ObjectLocationBase.java

/**
 * Run.
 *
 * @param log the log
 */
public void run(@Nonnull final NotebookOutput log) {
    //    @Nonnull String logName = "cuda_" + log.getName() + ".log";
    //    log.p(log.file((String) null, logName, "GPU Log"));
    //    CudaSystem.addLog(new PrintStream(log.file(logName)));

    ImageClassifierBase classifier = getClassifierNetwork();
    Layer classifyNetwork = classifier.getNetwork();

    ImageClassifierBase locator = getLocatorNetwork();
    Layer locatorNetwork = locator.getNetwork();
    ArtistryUtil.setPrecision((DAGNetwork) classifyNetwork, Precision.Float);
    ArtistryUtil.setPrecision((DAGNetwork) locatorNetwork, Precision.Float);

    Tensor[][] inputData = loadImages_library();
    //    Tensor[][] inputData = loadImage_Caltech101(log);
    double alphaPower = 0.8;

    final AtomicInteger index = new AtomicInteger(0);
    Arrays.stream(inputData).limit(10).forEach(row -> {
        log.h3("Image " + index.getAndIncrement());
        final Tensor img = row[0];
        log.p(log.image(img.toImage(), ""));
        Result classifyResult = classifyNetwork.eval(new MutableResult(row));
        Result locationResult = locatorNetwork.eval(new MutableResult(row));
        Tensor classification = classifyResult.getData().get(0);
        List<CharSequence> categories = classifier.getCategories();
        int[] sortedIndices = IntStream.range(0, categories.size()).mapToObj(x -> x)
                .sorted(Comparator.comparing(i -> -classification.get(i))).mapToInt(x -> x).limit(10).toArray();
        logger.info(Arrays.stream(sortedIndices)
                .mapToObj(
                        i -> String.format("%s: %s = %s%%", i, categories.get(i), classification.get(i) * 100))
                .reduce((a, b) -> a + "\n" + b).orElse(""));
        LinkedHashMap<CharSequence, Tensor> vectors = new LinkedHashMap<>();
        List<CharSequence> predictionList = Arrays.stream(sortedIndices).mapToObj(categories::get)
                .collect(Collectors.toList());
        Arrays.stream(sortedIndices).limit(6).forEach(category -> {
            CharSequence name = categories.get(category);
            log.h3(name);
            Tensor alphaTensor = renderAlpha(alphaPower, img, locationResult, classification, category);
            log.p(log.image(img.toRgbImageAlphaMask(0, 1, 2, alphaTensor), ""));
            vectors.put(name, alphaTensor.unit());
        });

        Tensor avgDetection = vectors.values().stream().reduce((a, b) -> a.add(b)).get()
                .scale(1.0 / vectors.size());
        Array2DRowRealMatrix covarianceMatrix = new Array2DRowRealMatrix(predictionList.size(),
                predictionList.size());
        for (int x = 0; x < predictionList.size(); x++) {
            for (int y = 0; y < predictionList.size(); y++) {
                Tensor l = vectors.get(predictionList.get(x));
                Tensor r = vectors.get(predictionList.get(y));

                covarianceMatrix.setEntry(x, y,
                        null == l || null == r ? 0 : (l.minus(avgDetection)).dot(r.minus(avgDetection)));
            }
        }
        @Nonnull
        final EigenDecomposition decomposition = new EigenDecomposition(covarianceMatrix);

        for (int objectVector = 0; objectVector < 10; objectVector++) {
            log.h3("Eigenobject " + objectVector);
            double eigenvalue = decomposition.getRealEigenvalue(objectVector);
            RealVector eigenvector = decomposition.getEigenvector(objectVector);
            Tensor detectionRegion = IntStream.range(0, eigenvector.getDimension()).mapToObj(i -> {
                Tensor tensor = vectors.get(predictionList.get(i));
                return null == tensor ? null : tensor.scale(eigenvector.getEntry(i));
            }).filter(x -> null != x).reduce((a, b) -> a.add(b)).get();
            detectionRegion = detectionRegion.scale(255.0 / detectionRegion.rms());
            CharSequence categorization = IntStream.range(0, eigenvector.getDimension()).mapToObj(i -> {
                CharSequence category = predictionList.get(i);
                double component = eigenvector.getEntry(i);
                return String.format("<li>%s = %.4f</li>", category, component);
            }).reduce((a, b) -> a + "" + b).get();
            log.p(String.format("Object Detected: <ol>%s</ol>", categorization));
            log.p("Object Eigenvalue: " + eigenvalue);
            log.p("Object Region: " + log.image(img.toRgbImageAlphaMask(0, 1, 2, detectionRegion), ""));
            log.p("Object Region Compliment: "
                    + log.image(img.toRgbImageAlphaMask(0, 1, 2, detectionRegion.scale(-1)), ""));
        }

        //      final int[] orderedVectors = IntStream.range(0, 10).mapToObj(x -> x)
        //        .sorted(Comparator.comparing(x -> -decomposition.getRealEigenvalue(x))).mapToInt(x -> x).toArray();
        //      IntStream.range(0, orderedVectors.length)
        //        .mapToObj(i -> {
        //            //double realEigenvalue = decomposition.getRealEigenvalue(orderedVectors[i]);
        //            return decomposition.getEigenvector(orderedVectors[i]).toArray();
        //          }
        //        ).toArray(i -> new double[i][]);

        log.p(String.format(
                "<table><tr><th>Cosine Distance</th>%s</tr>%s</table>", Arrays.stream(sortedIndices).limit(10)
                        .mapToObj(col -> "<th>" + categories.get(col) + "</th>").reduce((a, b) -> a + b).get(),
                Arrays.stream(sortedIndices).limit(10).mapToObj(r -> {
                    return String.format("<tr><td>%s</td>%s</tr>", categories.get(r),
                            Arrays.stream(sortedIndices).limit(10).mapToObj(col -> {
                                Tensor l = vectors.get(categories.get(r));
                                Tensor r2 = vectors.get(categories.get(col));
                                return String.format("<td>%.4f</td>",
                                        (null == l || null == r2) ? 0 : Math.acos(l.dot(r2)));
                            }).reduce((a, b) -> a + b).get());
                }).reduce((a, b) -> a + b).orElse("")));
    });

    log.setFrontMatterProperty("status", "OK");
}
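
The eigenvector loop above follows a standard Commons Math pattern: decompose a symmetric matrix and walk each eigenvector entry by entry with getDimension(). A minimal standalone sketch with a made-up 2x2 covariance matrix:

RealMatrix covariance = new Array2DRowRealMatrix(new double[][] {
        { 2.0, 0.5 },
        { 0.5, 1.0 } });
EigenDecomposition decomposition = new EigenDecomposition(covariance);

for (int k = 0; k < covariance.getColumnDimension(); k++) {
    double eigenvalue = decomposition.getRealEigenvalue(k);
    RealVector eigenvector = decomposition.getEigenvector(k);
    System.out.println("eigenvalue " + k + " = " + eigenvalue);
    for (int i = 0; i < eigenvector.getDimension(); i++) {
        System.out.println("  component " + i + " = " + eigenvector.getEntry(i));
    }
}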