List of usage examples for `org.deeplearning4j.eval.Evaluation` — constructor:
public Evaluation()
From source file: seqmodel.RNNModel.java
public void evaluate() throws Exception { String dataSetBaseDir = prop.getProperty("docvec.dir"); train = getAMISentenceIterator(dataSetBaseDir + "/train/"); test = getAMISentenceIterator(dataSetBaseDir + "/test/"); System.out.println("Traning num_instances: " + train.numExamples()); System.out.println("Test num_instances: " + test.numExamples()); //+++ DEBUG:/*from w w w .j a va2s . c o m*/ //System.out.println("train:"); //train.reset(); //while (train.hasNext()) { // System.out.println(train.next()); //} //System.out.println("test:"); //test.reset(); //while (test.hasNext()) { // System.out.println(test.next()); //} //--- DEBUG MultiLayerNetwork rnn = buildRNN(train); for (int i = 0; i < NUM_EPOCHS; i++) { System.out.println("Epoch: " + i); rnn.fit(train); Evaluation evaluation = new Evaluation(); while (test.hasNext()) { DataSet t = test.next(); INDArray features = t.getFeatureMatrix(); INDArray lables = t.getLabels(); //INDArray inMask = t.getFeaturesMaskArray(); //INDArray outMask = t.getLabelsMaskArray(); INDArray predicted = null; predicted = rnn.output(features, false/*, inMask, outMask*/); evaluation.evalTimeSeries(lables, predicted/*, outMask*/); } train.reset(); test.reset(); System.out.println(evaluation.stats()); } }
From source file: seqtest.Pair.java
public static void main(String[] args) throws Exception { downloadUCIData();/*w w w . j av a2 s .co m*/ // ----- Load the training data ----- //Note that we have 450 training files for features: train/features/0.csv through train/features/449.csv SequenceRecordReader trainFeatures = new CSVSequenceRecordReader(); trainFeatures .initialize(new NumberedFileInputSplit(featuresDirTrain.getAbsolutePath() + "/%d.csv", 0, 449)); SequenceRecordReader trainLabels = new CSVSequenceRecordReader(); trainLabels.initialize(new NumberedFileInputSplit(labelsDirTrain.getAbsolutePath() + "/%d.csv", 0, 449)); int miniBatchSize = 10; int numLabelClasses = 6; DataSetIterator trainData = new SequenceRecordReaderDataSetIterator(trainFeatures, trainLabels, miniBatchSize, numLabelClasses, false, SequenceRecordReaderDataSetIterator.AlignmentMode.ALIGN_END); //Normalize the training data //DataNormalization normalizer = new NormalizerStandardize(); //normalizer.fit(trainData); //Collect training data statistics //trainData.reset(); //Use previously collected statistics to normalize on-the-fly. Each DataSet returned by 'trainData' iterator will be normalized //trainData.setPreProcessor(normalizer); // ----- Load the test data ----- //Same process as for the training data. 
SequenceRecordReader testFeatures = new CSVSequenceRecordReader(); testFeatures.initialize(new NumberedFileInputSplit(featuresDirTest.getAbsolutePath() + "/%d.csv", 0, 149)); SequenceRecordReader testLabels = new CSVSequenceRecordReader(); testLabels.initialize(new NumberedFileInputSplit(labelsDirTest.getAbsolutePath() + "/%d.csv", 0, 149)); DataSetIterator testData = new SequenceRecordReaderDataSetIterator(testFeatures, testLabels, miniBatchSize, numLabelClasses, false, SequenceRecordReaderDataSetIterator.AlignmentMode.ALIGN_END); //testData.setPreProcessor(normalizer); //Note that we are using the exact same normalization process as the training data // ----- Configure the network ----- MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(123) //Random number generator seed for improved repeatability. Optional. .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).iterations(1) .weightInit(WeightInit.XAVIER).updater(Updater.NESTEROVS).momentum(0.9).learningRate(0.005) .gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue) //Not always required, but helps with this data set .gradientNormalizationThreshold(0.5).list(2) .layer(0, new GravesLSTM.Builder().activation("tanh").nIn(1).nOut(10).build()) .layer(1, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT).activation("softmax") .nIn(10).nOut(numLabelClasses).build()) .pretrain(false).backprop(true).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); net.setListeners(new ScoreIterationListener(20)); //Print the score (loss function value) every 20 iterations // ----- Train the network, evaluating the test set performance at each epoch ----- int nEpochs = 40; String str = "Test set evaluation at epoch %d: Accuracy = %.2f, F1 = %.2f"; for (int i = 0; i < nEpochs; i++) { net.fit(trainData); //Evaluate on the test set: Evaluation evaluation = new Evaluation(); while (testData.hasNext()) { DataSet t = testData.next(); INDArray 
features = t.getFeatureMatrix(); INDArray lables = t.getLabels(); INDArray inMask = t.getFeaturesMaskArray(); INDArray outMask = t.getLabelsMaskArray(); INDArray predicted = net.output(features, false, inMask, outMask); evaluation.evalTimeSeries(lables, predicted, outMask); } System.out.println(String.format(str, i, evaluation.accuracy(), evaluation.f1())); testData.reset(); trainData.reset(); } System.out.println("----- Example Complete -----"); }
From source file: stratego.neural.net.OneLayerNetwork.java
public List<NamedDataSet> train(DataSet data, double ratio, int numEpochs) { data.shuffle(); // shuffles the data, reducing the chance of bias when splitting the dataset SplitTestAndTrain testAndTrain = data.splitTestAndTrain(ratio); // generates an object for splitting the data DataSet trainingData = testAndTrain.getTrain(); // stores the training data DataSet testData = testAndTrain.getTest(); // stores the test data //For the network to perform optimally, the data needs to be normalized DataNormalization normalizer = new NormalizerStandardize(); normalizer.fit(trainingData); // collecting statistics from the input data, not modifying the input data normalizer.transform(trainingData); // applying normalization to the training data normalizer.transform(testData); // applying the training data normalization to the test data (since the training data is bigger, better statistics, better normalization //Creating arrays to store the accuracy during training in double[] trainAccuracy = new double[numEpochs]; double[] testAccuracy = new double[numEpochs]; Evaluation eval = new Evaluation(); // creating the Evaluation object, allowing easy access to evaluation statistics System.out.println("Training network " + name + "....."); //running the training for the given amount of epochs for (int i = 0; i < numEpochs; i++) { System.out.println("======================"); System.out.println(" Epoch " + i); System.out.println("======================"); model.fit(trainingData); // training with the training set //Note, a DataSet contains the input variables as well as the classification values. 
getFeatureMatrix only the input variables in an INDArray object INDArray outputTraining = model.output(trainingData.getFeatureMatrix()); //gets the output (classification) of the network based on the training data input INDArray outputTest = model.output(testData.getFeatureMatrix()); // gets the output (classification) of the network based on the test data input // getLabels retrieves the classification values from a DataSet eval.eval(trainingData.getLabels(), outputTraining); // evaluates the results classified by the network compared to the actual results, on the training data trainAccuracy[i] = eval.accuracy(); // stores the accuracy statistic in the training array //If this was the final epoch, write some extra statistics if (i == numEpochs - 1) { /*// w ww .j a va 2 s. co m System.out.println("Evaluate network "+name+"....."); System.out.println("Scores on training data: "); System.out.println(eval.stats()); // shows more elaborately the performance of the network ` */ trainingEvaluation = eval.stats(); } eval.eval(testData.getLabels(), outputTest); // evaluates the results classified by the network compared to the actual results, on the test data testAccuracy[i] = eval.accuracy(); // stores the accuracy statistic in the test array //if this was the final epoc, finish writing the extra statistics if (i == numEpochs - 1) { /* System.out.println("Socres on test data: "); System.out.println(eval.stats()); // shows more elaborately the performance of the network */ testEvaluation = eval.stats(); } } List<NamedDataSet> accuracyDataList = new ArrayList<>(); // creaing a list of NameDataSet objects, required for the overfitting plot NamedDataSet trainAccur = new NamedDataSet("Training", trainAccuracy); // creating a NamedDataSet for the training accuracies NamedDataSet testAccur = new NamedDataSet("Test", testAccuracy); // creaing a NamedDataSet for the test accuracies //adding the NamedDataSets to the list accuracyDataList.add(trainAccur); 
accuracyDataList.add(testAccur); return accuracyDataList; }
From source file: stratego.neural.net.ThreeLayerNetwork.java
public List<NamedDataSet> train(DataSet data, double ratio, int numEpochs) { data.shuffle(); // shuffles the data, reducing the chance of bias when splitting the dataset SplitTestAndTrain testAndTrain = data.splitTestAndTrain(ratio); // generates an object for splitting the data DataSet trainingData = testAndTrain.getTrain(); // stores the training data DataSet testData = testAndTrain.getTest(); // stores the test data //For the network to perform optimally, the data needs to be normalized DataNormalization normalizer = new NormalizerStandardize(); normalizer.fit(trainingData); // collecting statistics from the input data, not modifying the input data normalizer.transform(trainingData); // applying normalization to the training data normalizer.transform(testData); // applying the training data normalization to the test data (since the training data is bigger, better statistics, better normalization //Creating arrays to store the accuracy during training in double[] trainAccuracy = new double[numEpochs]; double[] testAccuracy = new double[numEpochs]; Evaluation eval = new Evaluation(); // creating the Evaluation object, allowing easy access to evaluation statistics System.out.println("Training network " + name + "....."); //running the training for the given amount of epochs for (int i = 0; i < numEpochs; i++) { System.out.println("======================"); System.out.println(" Epoch " + i); System.out.println("======================"); model.fit(trainingData); // training with the training set //Note, a DataSet contains the input variables as well as the classification values. 
getFeatureMatrix only the input variables in an INDArray object INDArray outputTraining = model.output(trainingData.getFeatureMatrix()); //gets the output (classification) of the network based on the training data input INDArray outputTest = model.output(testData.getFeatureMatrix()); // gets the output (classification) of the network based on the test data input // getLabels retrieves the classification values from a DataSet eval.eval(trainingData.getLabels(), outputTraining); // evaluates the results classified by the network compared to the actual results, on the training data trainAccuracy[i] = eval.accuracy(); // stores the accuracy statistic in the training array //If this was the final epoch, write some extra statistics if (i == numEpochs - 1) { /*//w w w . ja v a2 s .c o m System.out.println("Evaluate network "+name+"....."); System.out.println("Scores on training data: "); System.out.println(eval.stats()); // shows more elaborately the performance of the network */ trainingEvaluation = eval.stats(); } eval.eval(testData.getLabels(), outputTest); // evaluates the results classified by the network compared to the actual results, on the test data testAccuracy[i] = eval.accuracy(); // stores the accuracy statistic in the test array //if this was the final epoc, finish writing the extra statistics if (i == numEpochs - 1) { /* System.out.println("Socres on test data: "); System.out.println(eval.stats()); // shows more elaborately the performance of the network */ testEvaluation = eval.stats(); } } List<NamedDataSet> accuracyDataList = new ArrayList<>(); // creaing a list of NameDataSet objects, required for the overfitting plot NamedDataSet trainAccur = new NamedDataSet("Training", trainAccuracy); // creating a NamedDataSet for the training accuracies NamedDataSet testAccur = new NamedDataSet("Test", testAccuracy); // creaing a NamedDataSet for the test accuracies //adding the NamedDataSets to the list accuracyDataList.add(trainAccur); 
accuracyDataList.add(testAccur); return accuracyDataList; }