Example usage for org.deeplearning4j.eval Evaluation recall

List of usage examples for org.deeplearning4j.eval Evaluation recall

Introduction

On this page you can find example usage for org.deeplearning4j.eval Evaluation recall.

Prototype

public double recall() 

Document

Recall based on guesses so far
Note: value returned will differ depending on number of classes and settings.
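
For reference, recall is the fraction of actual positive examples that the classifier correctly identified, i.e. TP / (TP + FN). The following minimal sketch shows the typical call pattern for Evaluation recall(); the model, testIter, and numClasses names are placeholders assumed for illustration and are not part of the usage example below.

//Illustrative sketch (assumed setup): model is a trained MultiLayerNetwork,
//testIter iterates over the test data, numClasses is the number of label classes.
Evaluation eval = new Evaluation(numClasses);
while (testIter.hasNext()) {
    DataSet batch = testIter.next();
    INDArray guesses = model.output(batch.getFeatureMatrix(), false); //inference only
    eval.eval(batch.getLabels(), guesses);                            //accumulate guesses
}
double recall = eval.recall(); //recall based on the guesses seen so far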

Usage

From source file: org.wso2.carbon.ml.rest.api.neuralNetworks.FeedForwardNetwork.java

License: Open Source License

/**
 * Method to create a feed-forward network, train it, and evaluate it.
 * @param seed
 * @param learningRate
 * @param bachSize
 * @param nepoches
 * @param iterations
 * @param optimizationAlgorithms
 * @param updater
 * @param momentum
 * @param pretrain
 * @param backprop
 * @param noHiddenLayers
 * @param inputLayerNodes
 * @param datasetId
 * @param versionID
 * @param analysisID
 * @param hiddenList
 * @param outputList
 * @return a String object with the evaluation result.
 */
public String createFeedForwardNetwork(long seed, double learningRate, int bachSize, double nepoches,
        int iterations, String optimizationAlgorithms, String updater, double momentum, boolean pretrain,
        boolean backprop, int noHiddenLayers, int inputLayerNodes, int datasetId, int versionID, int analysisID,
        List<HiddenLayerDetails> hiddenList, List<OutputLayerDetails> outputList)
        throws IOException, InterruptedException {

    String evaluationDetails = null;
    int numLinesToSkip = 0;
    String delimiter = ",";
    mlDataSet = getDatasetPath(datasetId, versionID);
    analysisFraction = getAnalysisFraction(analysisID);
    analysisResponceVariable = getAnalysisResponseVariable(analysisID);
    responseIndex = getAnalysisResponseVariableIndex(analysisID);
    SplitTestAndTrain splitTestAndTrain;
    DataSet currentDataset;
    DataSet trainingSet = null;
    DataSet testingSet = null;
    INDArray features = null;
    INDArray labels = null;
    INDArray predicted = null;
    Random rnd = new Random();
    int labelIndex = 0;
    int numClasses = 0;
    int fraction = 0;

    //Initialize RecordReader
    RecordReader rr = new CSVRecordReader(numLinesToSkip, delimiter);
    //read the dataset
    rr.initialize(new FileSplit(new File(mlDataSet)));
    labelIndex = responseIndex;
    numClasses = outputList.get(0).outputNodes;

    //Count the dataset lines so the training/testing split size can be computed
    FileReader fr = new FileReader(mlDataSet);
    LineNumberReader lineNumberReader = new LineNumberReader(fr);
    //Get the total number of lines
    lineNumberReader.skip(Long.MAX_VALUE);
    int lines = lineNumberReader.getLineNumber();
    lineNumberReader.close();

    //Avoid an empty training split when the analysis fraction is 0
    if (analysisFraction == 0) {
        return null;
    }

    //Take the floor value as the number of examples to hold for the training set
    fraction = ((int) Math.floor(lines * analysisFraction));

    org.nd4j.linalg.dataset.api.iterator.DataSetIterator trainIter = new RecordReaderDataSetIterator(rr, lines,
            labelIndex, numClasses);

    //Create NeuralNetConfiguration object having basic settings.
    NeuralNetConfiguration.ListBuilder neuralNetConfiguration = new NeuralNetConfiguration.Builder().seed(seed)
            .iterations(iterations).optimizationAlgo(mapOptimizationAlgorithm(optimizationAlgorithms))
            .learningRate(learningRate).updater(mapUpdater(updater)).momentum(momentum)
            .list(noHiddenLayers + 1);

    //Add the hidden layers to the network, each with its own settings
    for (int i = 0; i < noHiddenLayers; i++) {
        int nInput = 0;
        if (i == 0)
            nInput = inputLayerNodes;
        else
            nInput = hiddenList.get(i - 1).hiddenNodes;

        neuralNetConfiguration.layer(i,
                new DenseLayer.Builder().nIn(nInput).nOut(hiddenList.get(i).hiddenNodes)
                        .weightInit(mapWeightInit(hiddenList.get(i).weightInit))
                        .activation(hiddenList.get(i).activationAlgo).build());
    }

    //Add the output layer to the network with its own settings
    neuralNetConfiguration.layer(noHiddenLayers,
            new OutputLayer.Builder(mapLossFunction(outputList.get(0).lossFunction))
                    .nIn(hiddenList.get(noHiddenLayers - 1).hiddenNodes).nOut(outputList.get(0).outputNodes)
                    .weightInit(mapWeightInit(outputList.get(0).weightInit))
                    .activation(outputList.get(0).activationAlgo).build());

    //Create MultiLayerConfiguration network
    MultiLayerConfiguration conf = neuralNetConfiguration.pretrain(pretrain).backprop(backprop).build();

    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();
    model.setListeners(Collections.singletonList((IterationListener) new ScoreIterationListener(1)));

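    //Read the data as a single batch and split it into training and testing sets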
    while (trainIter.hasNext()) {
        currentDataset = trainIter.next();
        splitTestAndTrain = currentDataset.splitTestAndTrain(fraction, rnd);
        trainingSet = splitTestAndTrain.getTrain();
        testingSet = splitTestAndTrain.getTest();
        features = testingSet.getFeatureMatrix();
        labels = testingSet.getLabels();
    }

    //Train the model with the training data
    for (int n = 0; n < nepoches; n++) {
        model.fit(trainingSet);
    }

    //Evaluate the model: accuracy, precision, recall, F1 score, etc.
    log.info("Evaluate model....");
    Evaluation eval = new Evaluation(outputList.get(0).outputNodes);
    predicted = model.output(features, false);

    eval.eval(labels, predicted);

    evaluationDetails = "{\"Accuracy\":\"" + eval.accuracy() + "\", \"Pecision\":\"" + eval.precision()
            + "\",\"Recall\":\"" + eval.recall() + "\",\"F1Score\":\"" + eval.f1() + "\"}";
    return evaluationDetails;

}
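
In this example, eval.recall() summarizes recall over the held-out test split; as the documentation above notes, the exact value depends on the number of output classes and the evaluation settings. The method then returns the accuracy, precision, recall, and F1 score packed into a JSON-style string.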