Example usage for org.deeplearning4j.eval Evaluation accuracy

List of usage examples for org.deeplearning4j.eval Evaluation accuracy

Introduction

On this page you can find example usages of org.deeplearning4j.eval Evaluation accuracy.

Prototype

public double accuracy() 

Source Link

Document

Accuracy: (TP + TN) / (P + N)

Usage

From source file:org.wso2.carbon.ml.rest.api.neuralNetworks.FeedForwardNetwork.java

License:Open Source License

/**
 * method to createFeedForwardNetwork./*from ww w  . j ava 2s. co  m*/
 * @param seed
 * @param learningRate
 * @param analysisID
 * @param bachSize
 * @param backprop
 * @param hiddenList
 * @param inputLayerNodes
 * @param iterations
 * @param versionID
 * @param momentum
 * @param nepoches
 * @param datasetId
 * @param noHiddenLayers
 * @param optimizationAlgorithms
 * @param outputList
 * @param pretrain
 * @param updater
 * @return an String object with evaluation result.
 */
public String createFeedForwardNetwork(long seed, double learningRate, int bachSize, double nepoches,
        int iterations, String optimizationAlgorithms, String updater, double momentum, boolean pretrain,
        boolean backprop, int noHiddenLayers, int inputLayerNodes, int datasetId, int versionID, int analysisID,
        List<HiddenLayerDetails> hiddenList, List<OutputLayerDetails> outputList)
        throws IOException, InterruptedException {

    String evaluationDetails = null;
    int numLinesToSkip = 0;
    String delimiter = ",";
    mlDataSet = getDatasetPath(datasetId, versionID);
    analysisFraction = getAnalysisFraction(analysisID);
    analysisResponceVariable = getAnalysisResponseVariable(analysisID);
    responseIndex = getAnalysisResponseVariableIndex(analysisID);
    SplitTestAndTrain splitTestAndTrain;
    DataSet currentDataset;
    DataSet trainingSet = null;
    DataSet testingSet = null;
    INDArray features = null;
    INDArray labels = null;
    INDArray predicted = null;
    Random rnd = new Random();
    int labelIndex = 0;
    int numClasses = 0;
    int fraction = 0;

    //Initialize RecordReader
    RecordReader rr = new CSVRecordReader(numLinesToSkip, delimiter);
    //read the dataset
    rr.initialize(new FileSplit(new File(mlDataSet)));
    labelIndex = responseIndex;
    numClasses = outputList.get(0).outputNodes;

    //Get the fraction to do the spliting data to training and testing
    FileReader fr = new FileReader(mlDataSet);
    LineNumberReader lineNumberReader = new LineNumberReader(fr);
    //Get the total number of lines
    lineNumberReader.skip(Long.MAX_VALUE);
    int lines = lineNumberReader.getLineNumber();

    //handling multiplication of 0 error
    if (analysisFraction == 0) {
        return null;
    }

    //Take floor value to set the numHold of training data
    fraction = ((int) Math.floor(lines * analysisFraction));

    org.nd4j.linalg.dataset.api.iterator.DataSetIterator trainIter = new RecordReaderDataSetIterator(rr, lines,
            labelIndex, numClasses);

    //Create NeuralNetConfiguration object having basic settings.
    NeuralNetConfiguration.ListBuilder neuralNetConfiguration = new NeuralNetConfiguration.Builder().seed(seed)
            .iterations(iterations).optimizationAlgo(mapOptimizationAlgorithm(optimizationAlgorithms))
            .learningRate(learningRate).updater(mapUpdater(updater)).momentum(momentum)
            .list(noHiddenLayers + 1);

    //Add Hidden Layers to the network with unique settings
    for (int i = 0; i < noHiddenLayers; i++) {
        int nInput = 0;
        if (i == 0)
            nInput = inputLayerNodes;
        else
            nInput = hiddenList.get(i - 1).hiddenNodes;

        neuralNetConfiguration.layer(i,
                new DenseLayer.Builder().nIn(nInput).nOut(hiddenList.get(i).hiddenNodes)
                        .weightInit(mapWeightInit(hiddenList.get(i).weightInit))
                        .activation(hiddenList.get(i).activationAlgo).build());
    }

    //Add Output Layers to the network with unique settings
    neuralNetConfiguration.layer(noHiddenLayers,
            new OutputLayer.Builder(mapLossFunction(outputList.get(0).lossFunction))
                    .nIn(hiddenList.get(noHiddenLayers - 1).hiddenNodes).nOut(outputList.get(0).outputNodes)
                    .weightInit(mapWeightInit(outputList.get(0).weightInit))
                    .activation(outputList.get(0).activationAlgo).build());

    //Create MultiLayerConfiguration network
    MultiLayerConfiguration conf = neuralNetConfiguration.pretrain(pretrain).backprop(backprop).build();

    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();
    model.setListeners(Collections.singletonList((IterationListener) new ScoreIterationListener(1)));

    while (trainIter.hasNext()) {
        currentDataset = trainIter.next();
        splitTestAndTrain = currentDataset.splitTestAndTrain(fraction, rnd);
        trainingSet = splitTestAndTrain.getTrain();
        testingSet = splitTestAndTrain.getTest();
        features = testingSet.getFeatureMatrix();
        labels = testingSet.getLabels();
    }

    //Train the model with the training data
    for (int n = 0; n < nepoches; n++) {
        model.fit(trainingSet);
    }

    //Do the evaluations of the model including the Accuracy, F1 score etc.
    log.info("Evaluate model....");
    Evaluation eval = new Evaluation(outputList.get(0).outputNodes);
    predicted = model.output(features, false);

    eval.eval(labels, predicted);

    evaluationDetails = "{\"Accuracy\":\"" + eval.accuracy() + "\", \"Pecision\":\"" + eval.precision()
            + "\",\"Recall\":\"" + eval.recall() + "\",\"F1Score\":\"" + eval.f1() + "\"}";
    return evaluationDetails;

}

From source file:seqtest.Pair.java

public static void main(String[] args) throws Exception {
    downloadUCIData();

    // ----- Training data -----
    // 450 feature files (train/features/0.csv .. 449.csv) with matching label files.
    SequenceRecordReader featureReader = new CSVSequenceRecordReader();
    featureReader.initialize(new NumberedFileInputSplit(featuresDirTrain.getAbsolutePath() + "/%d.csv", 0, 449));
    SequenceRecordReader labelReader = new CSVSequenceRecordReader();
    labelReader.initialize(new NumberedFileInputSplit(labelsDirTrain.getAbsolutePath() + "/%d.csv", 0, 449));

    int miniBatchSize = 10;
    int numLabelClasses = 6;
    DataSetIterator trainData = new SequenceRecordReaderDataSetIterator(featureReader, labelReader,
            miniBatchSize, numLabelClasses, false, SequenceRecordReaderDataSetIterator.AlignmentMode.ALIGN_END);

    // Normalization is intentionally disabled here; a NormalizerStandardize
    // could be fitted on trainData and set as a pre-processor on both
    // iterators if desired.

    // ----- Test data (150 files, same layout as the training set) -----
    SequenceRecordReader testFeatureReader = new CSVSequenceRecordReader();
    testFeatureReader.initialize(new NumberedFileInputSplit(featuresDirTest.getAbsolutePath() + "/%d.csv", 0, 149));
    SequenceRecordReader testLabelReader = new CSVSequenceRecordReader();
    testLabelReader.initialize(new NumberedFileInputSplit(labelsDirTest.getAbsolutePath() + "/%d.csv", 0, 149));

    DataSetIterator testData = new SequenceRecordReaderDataSetIterator(testFeatureReader, testLabelReader,
            miniBatchSize, numLabelClasses, false, SequenceRecordReaderDataSetIterator.AlignmentMode.ALIGN_END);

    // ----- Network: one GravesLSTM layer feeding a softmax RNN output layer -----
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(123) // fixed seed for repeatability
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).iterations(1)
            .weightInit(WeightInit.XAVIER).updater(Updater.NESTEROVS).momentum(0.9).learningRate(0.005)
            .gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue) // helps with this data set
            .gradientNormalizationThreshold(0.5).list(2)
            .layer(0, new GravesLSTM.Builder().activation("tanh").nIn(1).nOut(10).build())
            .layer(1, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT).activation("softmax")
                    .nIn(10).nOut(numLabelClasses).build())
            .pretrain(false).backprop(true).build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    net.setListeners(new ScoreIterationListener(20)); // log the score every 20 iterations

    // ----- Train, evaluating test-set performance after every epoch -----
    int nEpochs = 40;
    String str = "Test set evaluation at epoch %d: Accuracy = %.2f, F1 = %.2f";
    for (int epoch = 0; epoch < nEpochs; epoch++) {
        net.fit(trainData);

        // Fresh evaluation per epoch over the full test iterator.
        Evaluation evaluation = new Evaluation();
        while (testData.hasNext()) {
            DataSet batch = testData.next();
            INDArray predictions = net.output(batch.getFeatureMatrix(), false,
                    batch.getFeaturesMaskArray(), batch.getLabelsMaskArray());
            evaluation.evalTimeSeries(batch.getLabels(), predictions, batch.getLabelsMaskArray());
        }

        System.out.println(String.format(str, epoch, evaluation.accuracy(), evaluation.f1()));

        testData.reset();
        trainData.reset();
    }

    System.out.println("----- Example Complete -----");
}

From source file:stratego.neural.net.NeuralNetTest.java

/**
 * Trains a two-layer feed-forward classifier on the CSV data set, recording
 * per-epoch train/test accuracy for an overfitting plot, then prints final
 * evaluation statistics for both splits.
 */
public static void main(String[] args) throws Exception {
    int numInput = 12;          // number of input neurons
    int numHidden = 50;         // number of hidden-layer neurons
    int numOutput = 9;          // number of output neurons / classes
    int rngSeed = 123;          // RNG seed for reproducibility
    int batchSize = 150;        // mini-batch size
    int numEpochs = 150;        // training epochs
    int iterations = 10;        // optimizer iterations per fit call
    double learningRate = 0.05; // learning rate of the network

    int labelIndex = 12;        // column index of the class label in the CSV
    String data = "src/Data/test_data_1.csv"; // location of the data file

    DataSet allData = readCSVDataset(data, batchSize, labelIndex, numOutput);

    allData.shuffle();
    double ratio = 0.9; // fraction of the data used for training (90%)

    SplitTestAndTrain testAndTrain = allData.splitTestAndTrain(ratio);

    DataSet trainingData = testAndTrain.getTrain();
    DataSet testData = testAndTrain.getTest();

    // Standardize to mean 0 / unit variance using training-set statistics,
    // then apply those same statistics to the test set.
    DataNormalization normalizer = new NormalizerStandardize();
    normalizer.fit(trainingData);
    normalizer.transform(trainingData);
    normalizer.transform(testData);

    // Build the network: one dense relu hidden layer into a softmax output
    // layer, with L2 regularization against overfitting.
    System.out.println("Build Model....");
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(rngSeed).iterations(iterations)
            .learningRate(learningRate).updater(Updater.NESTEROVS).momentum(0.9)
            .regularization(true).l2(1e-4)
            .list().layer(0, new DenseLayer.Builder().nIn(numInput).nOut(numHidden).activation("relu")
                    .weightInit(WeightInit.XAVIER).build())
            .layer(1,
                    new OutputLayer.Builder(LossFunction.NEGATIVELOGLIKELIHOOD).nIn(numHidden).nOut(numOutput)
                            .activation("softmax")
                            .weightInit(WeightInit.XAVIER).build())
            .pretrain(false).backprop(true)
            .build();

    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();
    model.setListeners(new ScoreIterationListener(1)); // score every iteration

    double[] trainAccuracies = new double[numEpochs];
    double[] testAccuracies = new double[numEpochs];

    System.out.println("Train model....");

    // Note: the original also called model.fit(trainingData) once before the
    // loop, training numEpochs + 1 times while recording only numEpochs
    // accuracies; the duplicate pre-loop fit has been removed.
    for (int i = 0; i < numEpochs; i++) {
        System.out.println("=====================");
        System.out.println("     Epoch " + i);
        System.out.println("=====================");
        model.fit(trainingData);

        INDArray outputTraining = model.output(trainingData.getFeatureMatrix());
        INDArray outputTest = model.output(testData.getFeatureMatrix());

        // Use a fresh Evaluation per measurement: Evaluation.eval()
        // accumulates statistics across calls, so the original shared
        // instance blended training and test results from every prior epoch
        // into each recorded accuracy.
        Evaluation trainEval = new Evaluation(numOutput);
        trainEval.eval(trainingData.getLabels(), outputTraining);
        trainAccuracies[i] = trainEval.accuracy();

        Evaluation testEval = new Evaluation(numOutput);
        testEval.eval(testData.getLabels(), outputTest);
        testAccuracies[i] = testEval.accuracy();
    }

    // Package the two accuracy curves for the overfitting plot.
    List<NamedDataSet> AccuracyData = new ArrayList<>();
    NamedDataSet trainAccurSet = new NamedDataSet("Training", trainAccuracies);
    NamedDataSet testAccurSet = new NamedDataSet("Test", testAccuracies);

    AccuracyData.add(trainAccurSet);
    AccuracyData.add(testAccurSet);

    plotDataSet(AccuracyData);

    // Final evaluation of the trained model on both splits.
    System.out.println("Evaluate model....");

    INDArray outputTraining = model.output(trainingData.getFeatureMatrix());
    INDArray outputTest = model.output(testData.getFeatureMatrix());

    System.out.println("Scores on training data");
    Evaluation finalTrainEval = new Evaluation(numOutput);
    finalTrainEval.eval(trainingData.getLabels(), outputTraining);
    System.out.println(finalTrainEval.stats());

    System.out.println("Scores on test data");
    Evaluation finalTestEval = new Evaluation(numOutput);
    finalTestEval.eval(testData.getLabels(), outputTest);
    System.out.println(finalTestEval.stats());
}

From source file:stratego.neural.net.OneLayerNetwork.java

/**
 * Trains the network for {@code numEpochs} epochs on a train/test split of
 * {@code data}, recording per-epoch accuracy for both splits.
 *
 * <p>Side effects: sets {@code trainingEvaluation} and {@code testEvaluation}
 * to the detailed stats of the final epoch.
 *
 * @param data      the full data set to split and train on
 * @param ratio     fraction of the data used for training
 * @param numEpochs number of training epochs
 * @return a list of two NamedDataSets ("Training", "Test") holding the
 *         per-epoch accuracy curves, for the overfitting plot
 */
public List<NamedDataSet> train(DataSet data, double ratio, int numEpochs) {

    data.shuffle(); // shuffle before splitting to reduce ordering bias
    SplitTestAndTrain testAndTrain = data.splitTestAndTrain(ratio);

    DataSet trainingData = testAndTrain.getTrain();
    DataSet testData = testAndTrain.getTest();

    // Standardize using statistics collected from the (larger) training
    // split, applied to both splits.
    DataNormalization normalizer = new NormalizerStandardize();
    normalizer.fit(trainingData);
    normalizer.transform(trainingData);
    normalizer.transform(testData);

    // Per-epoch accuracy curves.
    double[] trainAccuracy = new double[numEpochs];
    double[] testAccuracy = new double[numEpochs];

    System.out.println("Training network " + name + ".....");

    for (int i = 0; i < numEpochs; i++) {
        System.out.println("======================");
        System.out.println("      Epoch " + i);
        System.out.println("======================");

        model.fit(trainingData);

        // getFeatureMatrix returns only the input variables of a DataSet.
        INDArray outputTraining = model.output(trainingData.getFeatureMatrix());
        INDArray outputTest = model.output(testData.getFeatureMatrix());

        // Use a fresh Evaluation per measurement: Evaluation.eval()
        // accumulates statistics across calls, so the original single shared
        // instance blended training and test results from all previous
        // epochs into each recorded accuracy.
        Evaluation trainEval = new Evaluation();
        trainEval.eval(trainingData.getLabels(), outputTraining);
        trainAccuracy[i] = trainEval.accuracy();
        if (i == numEpochs - 1) {
            // Keep the detailed training stats of the final epoch.
            trainingEvaluation = trainEval.stats();
        }

        Evaluation testEval = new Evaluation();
        testEval.eval(testData.getLabels(), outputTest);
        testAccuracy[i] = testEval.accuracy();
        if (i == numEpochs - 1) {
            // Keep the detailed test stats of the final epoch.
            testEvaluation = testEval.stats();
        }
    }

    // Package the accuracy curves for the overfitting plot.
    List<NamedDataSet> accuracyDataList = new ArrayList<>();
    accuracyDataList.add(new NamedDataSet("Training", trainAccuracy));
    accuracyDataList.add(new NamedDataSet("Test", testAccuracy));

    return accuracyDataList;
}

From source file:stratego.neural.net.ThreeLayerNetwork.java

/**
 * Trains the network for {@code numEpochs} epochs on a train/test split of
 * {@code data}, recording per-epoch accuracy for both splits.
 *
 * <p>Side effects: sets {@code trainingEvaluation} and {@code testEvaluation}
 * to the detailed stats of the final epoch.
 *
 * @param data      the full data set to split and train on
 * @param ratio     fraction of the data used for training
 * @param numEpochs number of training epochs
 * @return a list of two NamedDataSets ("Training", "Test") holding the
 *         per-epoch accuracy curves, for the overfitting plot
 */
public List<NamedDataSet> train(DataSet data, double ratio, int numEpochs) {

    data.shuffle(); // shuffle before splitting to reduce ordering bias
    SplitTestAndTrain testAndTrain = data.splitTestAndTrain(ratio);

    DataSet trainingData = testAndTrain.getTrain();
    DataSet testData = testAndTrain.getTest();

    // Standardize using statistics collected from the (larger) training
    // split, applied to both splits.
    DataNormalization normalizer = new NormalizerStandardize();
    normalizer.fit(trainingData);
    normalizer.transform(trainingData);
    normalizer.transform(testData);

    // Per-epoch accuracy curves.
    double[] trainAccuracy = new double[numEpochs];
    double[] testAccuracy = new double[numEpochs];

    System.out.println("Training network " + name + ".....");

    for (int i = 0; i < numEpochs; i++) {
        System.out.println("======================");
        System.out.println("      Epoch " + i);
        System.out.println("======================");

        model.fit(trainingData);

        // getFeatureMatrix returns only the input variables of a DataSet.
        INDArray outputTraining = model.output(trainingData.getFeatureMatrix());
        INDArray outputTest = model.output(testData.getFeatureMatrix());

        // Use a fresh Evaluation per measurement: Evaluation.eval()
        // accumulates statistics across calls, so the original single shared
        // instance blended training and test results from all previous
        // epochs into each recorded accuracy.
        Evaluation trainEval = new Evaluation();
        trainEval.eval(trainingData.getLabels(), outputTraining);
        trainAccuracy[i] = trainEval.accuracy();
        if (i == numEpochs - 1) {
            // Keep the detailed training stats of the final epoch.
            trainingEvaluation = trainEval.stats();
        }

        Evaluation testEval = new Evaluation();
        testEval.eval(testData.getLabels(), outputTest);
        testAccuracy[i] = testEval.accuracy();
        if (i == numEpochs - 1) {
            // Keep the detailed test stats of the final epoch.
            testEvaluation = testEval.stats();
        }
    }

    // Package the accuracy curves for the overfitting plot.
    List<NamedDataSet> accuracyDataList = new ArrayList<>();
    accuracyDataList.add(new NamedDataSet("Training", trainAccuracy));
    accuracyDataList.add(new NamedDataSet("Test", testAccuracy));

    return accuracyDataList;
}