List of usage examples for weka.core Instance attribute
public Attribute attribute(int index);
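Before the project examples below, here is a minimal, self-contained sketch of the call itself (the file name data.arff is only a placeholder). attribute(int) returns the Attribute object at the given index, which can then be queried for its name, type, and the instance's value for it:

import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class AttributeExample {
    public static void main(String[] args) throws Exception {
        // "data.arff" is a placeholder for any ARFF data set
        Instances data = DataSource.read("data.arff");
        Instance inst = data.firstInstance();
        for (int i = 0; i < inst.numAttributes(); i++) {
            // attribute(int) returns the Attribute at index i
            Attribute att = inst.attribute(i);
            System.out.println(att.name() + " (nominal: " + att.isNominal()
                    + ", value: " + inst.value(att) + ")");
        }
    }
}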
From source file:controller.BothClassificationsServlet.java
@Override
protected void doPost(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {
    request.setCharacterEncoding("UTF-8");
    String dir = "/data/";
    String path = getServletContext().getRealPath(dir);
    String action = request.getParameter("action");
    switch (action) {
    case "create": {
        String fileName = request.getParameter("file");
        String aux = fileName.substring(0, fileName.indexOf("."));
        String pathInput = path + "/" + request.getParameter("file");
        String pathTrainingOutput = path + "/" + aux + "-training-arff.txt";
        String pathTestOutput = path + "/" + aux + "-test-arff.txt";
        String pathBothClassifications = path + "/" + aux + "-bothClassifications.txt";
        String name = request.getParameter("name");
        int range = Integer.parseInt(request.getParameter("range"));
        int size = Integer.parseInt(request.getParameter("counter"));
        String[] columns = new String[size];
        String[] types = new String[size];
        int[] positions = new int[size];
        int counter = 0;
        for (int i = 0; i < size; i++) {
            if (request.getParameter("column-" + (i + 1)) != null) {
                columns[counter] = request.getParameter("column-" + (i + 1));
                types[counter] = request.getParameter("type-" + (i + 1));
                positions[counter] = Integer.parseInt(request.getParameter("position-" + (i + 1)));
                counter++;
            }
        }
        FormatFiles.convertTxtToArff(pathInput, pathTrainingOutput, pathTestOutput, name,
                columns, types, positions, counter, range);
        try {
            J48 j48 = new J48();
            BufferedReader readerTraining = new BufferedReader(new FileReader(pathTrainingOutput));
            Instances instancesTraining = new Instances(readerTraining);
            instancesTraining.setClassIndex(instancesTraining.numAttributes() - 1);
            j48.buildClassifier(instancesTraining);
            BufferedReader readerTest = new BufferedReader(new FileReader(pathTestOutput));
            //BufferedReader readerTest = new BufferedReader(new FileReader(pathTrainingOutput));
            Instances instancesTest = new Instances(readerTest);
            instancesTest.setClassIndex(instancesTest.numAttributes() - 1);
            int correctsDecisionTree = 0;
            for (int i = 0; i < instancesTest.size(); i++) {
                Instance instance = instancesTest.get(i);
                // attribute(int) fetches the class attribute so its actual value can be read
                double correctValue = instance.value(instance.attribute(instancesTest.numAttributes() - 1));
                double classification = j48.classifyInstance(instance);
                if (correctValue == classification) {
                    correctsDecisionTree++;
                }
            }
            Evaluation eval = new Evaluation(instancesTraining);
            eval.evaluateModel(j48, instancesTest);
            PrintWriter writer = new PrintWriter(
                    new BufferedWriter(new FileWriter(pathBothClassifications, false)));
            writer.println("Decision Tree\n\n");
            writer.println(j48.toString());
            writer.println("");
            writer.println("");
            writer.println("Results");
            writer.println(eval.toSummaryString());
            NaiveBayes naiveBayes = new NaiveBayes();
            naiveBayes.buildClassifier(instancesTraining);
            eval = new Evaluation(instancesTraining);
            eval.evaluateModel(naiveBayes, instancesTest);
            int correctsNaiveBayes = 0;
            for (int i = 0; i < instancesTest.size(); i++) {
                Instance instance = instancesTest.get(i);
                double correctValue = instance.value(instance.attribute(instancesTest.numAttributes() - 1));
                double classification = naiveBayes.classifyInstance(instance);
                if (correctValue == classification) {
                    correctsNaiveBayes++;
                }
            }
            writer.println("Naive Bayes\n\n");
            writer.println(naiveBayes.toString());
            writer.println("");
            writer.println("");
            writer.println("Results");
            writer.println(eval.toSummaryString());
            writer.close();
            response.sendRedirect("BothClassifications?action=view&correctsDecisionTree="
                    + correctsDecisionTree + "&correctsNaiveBayes=" + correctsNaiveBayes
                    + "&totalTest=" + instancesTest.size() + "&totalTrainig=" + instancesTraining.size()
                    + "&range=" + range + "&fileName=" + aux + "-bothClassifications.txt");
        } catch (Exception e) {
            System.out.println(e.getMessage());
            response.sendRedirect("Navigation?action=decisionTree");
        }
        break;
    }
    default:
        response.sendError(404);
    }
}
From source file:controller.DecisionTreeServlet.java
@Override
protected void doPost(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {
    request.setCharacterEncoding("UTF-8");
    String dir = "/data/";
    String path = getServletContext().getRealPath(dir);
    String action = request.getParameter("action");
    switch (action) {
    case "create": {
        String fileName = request.getParameter("file");
        String aux = fileName.substring(0, fileName.indexOf("."));
        String pathInput = path + "/" + request.getParameter("file");
        String pathTrainingOutput = path + "/" + aux + "-training-arff.txt";
        String pathTestOutput = path + "/" + aux + "-test-arff.txt";
        String pathDecisionTree = path + "/" + aux + "-decisionTree.txt";
        String name = request.getParameter("name");
        int range = Integer.parseInt(request.getParameter("range"));
        int size = Integer.parseInt(request.getParameter("counter"));
        String[] columns = new String[size];
        String[] types = new String[size];
        int[] positions = new int[size];
        int counter = 0;
        for (int i = 0; i < size; i++) {
            if (request.getParameter("column-" + (i + 1)) != null) {
                columns[counter] = request.getParameter("column-" + (i + 1));
                types[counter] = request.getParameter("type-" + (i + 1));
                positions[counter] = Integer.parseInt(request.getParameter("position-" + (i + 1)));
                counter++;
            }
        }
        FormatFiles.convertTxtToArff(pathInput, pathTrainingOutput, pathTestOutput, name,
                columns, types, positions, counter, range);
        try {
            J48 j48 = new J48();
            BufferedReader readerTraining = new BufferedReader(new FileReader(pathTrainingOutput));
            Instances instancesTraining = new Instances(readerTraining);
            instancesTraining.setClassIndex(instancesTraining.numAttributes() - 1);
            j48.buildClassifier(instancesTraining);
            BufferedReader readerTest = new BufferedReader(new FileReader(pathTestOutput));
            //BufferedReader readerTest = new BufferedReader(new FileReader(pathTrainingOutput));
            Instances instancesTest = new Instances(readerTest);
            instancesTest.setClassIndex(instancesTest.numAttributes() - 1);
            int corrects = 0;
            int truePositive = 0;
            int trueNegative = 0;
            int falsePositive = 0;
            int falseNegative = 0;
            for (int i = 0; i < instancesTest.size(); i++) {
                Instance instance = instancesTest.get(i);
                double correctValue = instance.value(instance.attribute(instancesTest.numAttributes() - 1));
                double classification = j48.classifyInstance(instance);
                if (correctValue == classification) {
                    corrects++;
                }
                if (correctValue == 1 && classification == 1) {
                    truePositive++;
                }
                if (correctValue == 1 && classification == 0) {
                    falseNegative++;
                }
                if (correctValue == 0 && classification == 1) {
                    falsePositive++;
                }
                if (correctValue == 0 && classification == 0) {
                    trueNegative++;
                }
            }
            Evaluation eval = new Evaluation(instancesTraining);
            eval.evaluateModel(j48, instancesTest);
            PrintWriter writer = new PrintWriter(new BufferedWriter(new FileWriter(pathDecisionTree, false)));
            writer.println(j48.toString());
            writer.println("");
            writer.println("");
            writer.println("Results");
            writer.println(eval.toSummaryString());
            writer.close();
            response.sendRedirect("DecisionTree?action=view&corrects=" + corrects
                    + "&totalTest=" + instancesTest.size() + "&totalTrainig=" + instancesTraining.size()
                    + "&truePositive=" + truePositive + "&trueNegative=" + trueNegative
                    + "&falsePositive=" + falsePositive + "&falseNegative=" + falseNegative
                    + "&fileName=" + aux + "-decisionTree.txt");
        } catch (Exception e) {
            System.out.println(e.getMessage());
            response.sendRedirect("Navigation?action=decisionTree");
        }
        break;
    }
    default:
        response.sendError(404);
    }
}
From source file:controller.NaiveBayesServlet.java
@Override
protected void doPost(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {
    request.setCharacterEncoding("UTF-8");
    String dir = "/data/";
    String path = getServletContext().getRealPath(dir);
    String action = request.getParameter("action");
    switch (action) {
    case "create": {
        String fileName = request.getParameter("file");
        String aux = fileName.substring(0, fileName.indexOf("."));
        String pathInput = path + "/" + request.getParameter("file");
        String pathTrainingOutput = path + "/" + aux + "-training-arff.txt";
        String pathTestOutput = path + "/" + aux + "-test-arff.txt";
        String pathNaivebayes = path + "/" + aux + "-naiveBayes.txt";
        String name = request.getParameter("name");
        int range = Integer.parseInt(request.getParameter("range"));
        int size = Integer.parseInt(request.getParameter("counter"));
        String[] columns = new String[size];
        String[] types = new String[size];
        int[] positions = new int[size];
        int counter = 0;
        for (int i = 0; i < size; i++) {
            if (request.getParameter("column-" + (i + 1)) != null) {
                columns[counter] = request.getParameter("column-" + (i + 1));
                types[counter] = request.getParameter("type-" + (i + 1));
                positions[counter] = Integer.parseInt(request.getParameter("position-" + (i + 1)));
                counter++;
            }
        }
        FormatFiles.convertTxtToArff(pathInput, pathTrainingOutput, pathTestOutput, name,
                columns, types, positions, counter, range);
        try {
            NaiveBayes naiveBayes = new NaiveBayes();
            BufferedReader readerTraining = new BufferedReader(new FileReader(pathTrainingOutput));
            Instances instancesTraining = new Instances(readerTraining);
            instancesTraining.setClassIndex(instancesTraining.numAttributes() - 1);
            naiveBayes.buildClassifier(instancesTraining);
            BufferedReader readerTest = new BufferedReader(new FileReader(pathTestOutput));
            //BufferedReader readerTest = new BufferedReader(new FileReader(pathTrainingOutput));
            Instances instancesTest = new Instances(readerTest);
            instancesTest.setClassIndex(instancesTest.numAttributes() - 1);
            Evaluation eval = new Evaluation(instancesTraining);
            eval.evaluateModel(naiveBayes, instancesTest);
            int corrects = 0;
            int truePositive = 0;
            int trueNegative = 0;
            int falsePositive = 0;
            int falseNegative = 0;
            for (int i = 0; i < instancesTest.size(); i++) {
                Instance instance = instancesTest.get(i);
                double correctValue = instance.value(instance.attribute(instancesTest.numAttributes() - 1));
                double classification = naiveBayes.classifyInstance(instance);
                if (correctValue == classification) {
                    corrects++;
                }
                if (correctValue == 1 && classification == 1) {
                    truePositive++;
                }
                if (correctValue == 1 && classification == 0) {
                    falseNegative++;
                }
                if (correctValue == 0 && classification == 1) {
                    falsePositive++;
                }
                if (correctValue == 0 && classification == 0) {
                    trueNegative++;
                }
            }
            PrintWriter writer = new PrintWriter(new BufferedWriter(new FileWriter(pathNaivebayes, false)));
            writer.println(naiveBayes.toString());
            writer.println("");
            writer.println("");
            writer.println("Results");
            writer.println(eval.toSummaryString());
            writer.close();
            response.sendRedirect("NaiveBayes?action=view&corrects=" + corrects
                    + "&totalTest=" + instancesTest.size() + "&totalTrainig=" + instancesTraining.size()
                    + "&range=" + range + "&truePositive=" + truePositive + "&trueNegative=" + trueNegative
                    + "&falsePositive=" + falsePositive + "&falseNegative=" + falseNegative
                    + "&fileName=" + aux + "-naiveBayes.txt");
        } catch (Exception e) {
            System.out.println(e.getMessage());
            response.sendRedirect("Navigation?action=naiveBayes");
        }
        break;
    }
    default:
        response.sendError(404);
    }
}
From source file:core.DatabaseSaverEx.java
License:Open Source License
/**
 * Inserts the given instance into the table.
 *
 * @param inst the instance to insert
 * @throws Exception if something goes wrong
 */
public void writeInstance(Instance inst) throws Exception {
    StringBuffer insert = new StringBuffer();
    insert.append("INSERT INTO ");
    insert.append(m_tableName);
    insert.append(" VALUES ( ");
    if (m_id) {
        insert.append(m_count);
        insert.append(", ");
        m_count++;
    }
    for (int j = 0; j < inst.numAttributes(); j++) {
        if (inst.isMissing(j))
            insert.append("NULL");
        else {
            if ((inst.attribute(j)).isDate())
                insert.append("'" + m_DateFormat.format((long) inst.value(j)) + "'");
            else if ((inst.attribute(j)).isNumeric())
                insert.append(inst.value(j));
            else {
                String stringInsert = "'" + inst.stringValue(j) + "'";
                if (stringInsert.length() > 2)
                    stringInsert = stringInsert.replaceAll("''", "'");
                insert.append(stringInsert);
            }
        }
        if (j != inst.numAttributes() - 1)
            insert.append(", ");
    }
    insert.append(" )");
    //System.out.println(insert.toString());
    if (m_DataBaseConnection.update(insert.toString()) < 1) {
        throw new IOException("Tuple cannot be inserted.");
    } else {
        m_DataBaseConnection.close();
    }
}
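The hand-built INSERT string above is fragile for values containing quotes. As a sketch only — the helper name and one-column-per-attribute table layout are assumptions, not part of DatabaseSaverEx — the same per-attribute branching on attribute(int) can be expressed with a JDBC PreparedStatement, which delegates quoting to the driver:

import java.sql.Connection;
import java.sql.PreparedStatement;
import weka.core.Attribute;
import weka.core.Instance;

// Hypothetical helper: binds one Instance to a parameterized INSERT.
// Assumes the table has one column per attribute, in attribute order.
public static void insertInstance(Connection conn, String table, Instance inst) throws Exception {
    StringBuilder sql = new StringBuilder("INSERT INTO " + table + " VALUES (");
    for (int j = 0; j < inst.numAttributes(); j++) {
        sql.append(j == 0 ? "?" : ", ?");
    }
    sql.append(")");
    try (PreparedStatement ps = conn.prepareStatement(sql.toString())) {
        for (int j = 0; j < inst.numAttributes(); j++) {
            Attribute att = inst.attribute(j); // same attribute(int) lookup as above
            if (inst.isMissing(j)) {
                ps.setNull(j + 1, java.sql.Types.NULL);
            } else if (att.isDate()) {
                ps.setTimestamp(j + 1, new java.sql.Timestamp((long) inst.value(j)));
            } else if (att.isNumeric()) {
                ps.setDouble(j + 1, inst.value(j));
            } else {
                ps.setString(j + 1, inst.stringValue(j)); // quoting handled by the driver
            }
        }
        ps.executeUpdate();
    }
}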
From source file:couchdb.PrepareData.java
/**
 * Converts the data stored in the JSON documents into data held in an
 * Instances object.
 *
 * @return The data set as an Instances object.
 */
public Instances getDataForWeka() {
    CouchDBService cdbs = new CouchDBService();
    ArrayList<String> listOfValues = cdbs.getValues(simpleDocumentList);
    ParseJSON p = new ParseJSON();
    ArrayList<String> listOfSimpleAttributes = p.getAttributes(simpleDocumentList.get(0));
    ArrayList<String> listOfComplexAttributes = new ArrayList<>();
    FastVector listOfAttributes = getAtributes(simpleDocumentList, listOfValues);
    Instances instances = new Instances(nameData, listOfAttributes, 0);
    for (int j = 0; j < simpleDocumentList.size(); j++) {
        Instance instance = new Instance(listOfAttributes.size());
        instances.add(instance);
    }
    int k = 0;
    for (int i = 0; i < instances.numInstances(); i++) {
        Instance ins = instances.instance(i);
        for (int j = 0; j < ins.numAttributes(); j++) {
            String s = listOfValues.get(k);
            if (s.equals("nn")) {
                return null;
            }
            if (s.equals("")) {
                k++;
                j--;
                continue;
            }
            if (ins.attribute(j).type() == 0) { // type 0 is Attribute.NUMERIC
                double d = 0;
                try {
                    d = Double.parseDouble(s);
                } catch (NumberFormatException ex) {
                    d = 0;
                }
                ins.setValue(j, d);
            } else {
                ins.setValue(j, s);
            }
            k++;
        }
    }
    return instances;
}
From source file:de.tudarmstadt.ukp.similarity.experiments.coling2012.util.Evaluator.java
License:Open Source License
public static void runClassifierCV(WekaClassifier wekaClassifier, Dataset dataset) throws Exception {
    // Set parameters
    int folds = 10;
    Classifier baseClassifier = getClassifier(wekaClassifier);

    // Set up the random number generator
    long seed = new Date().getTime();
    Random random = new Random(seed);

    // Add IDs to the instances
    AddID.main(new String[] { "-i", MODELS_DIR + "/" + dataset.toString() + ".arff", "-o",
            MODELS_DIR + "/" + dataset.toString() + "-plusIDs.arff" });
    Instances data = DataSource.read(MODELS_DIR + "/" + dataset.toString() + "-plusIDs.arff");
    data.setClassIndex(data.numAttributes() - 1);

    // Instantiate the Remove filter
    Remove removeIDFilter = new Remove();
    removeIDFilter.setAttributeIndices("first");

    // Randomize the data
    data.randomize(random);

    // Perform cross-validation
    Instances predictedData = null;
    Evaluation eval = new Evaluation(data);

    for (int n = 0; n < folds; n++) {
        Instances train = data.trainCV(folds, n, random);
        Instances test = data.testCV(folds, n);

        // Apply log filter
        // Filter logFilter = new LogFilter();
        // logFilter.setInputFormat(train);
        // train = Filter.useFilter(train, logFilter);
        // logFilter.setInputFormat(test);
        // test = Filter.useFilter(test, logFilter);

        // Copy the classifier
        Classifier classifier = AbstractClassifier.makeCopy(baseClassifier);

        // Instantiate the FilteredClassifier
        FilteredClassifier filteredClassifier = new FilteredClassifier();
        filteredClassifier.setFilter(removeIDFilter);
        filteredClassifier.setClassifier(classifier);

        // Build the classifier
        filteredClassifier.buildClassifier(train);

        // Evaluate
        eval.evaluateModel(filteredClassifier, test);

        // Add predictions
        AddClassification filter = new AddClassification();
        filter.setClassifier(filteredClassifier);
        filter.setOutputClassification(true);
        filter.setOutputDistribution(false);
        filter.setOutputErrorFlag(true);
        filter.setInputFormat(train);
        Filter.useFilter(train, filter); // trains the classifier
        Instances pred = Filter.useFilter(test, filter); // performs predictions on test set
        if (predictedData == null)
            predictedData = new Instances(pred, 0);
        for (int j = 0; j < pred.numInstances(); j++)
            predictedData.add(pred.instance(j));
    }

    // Prepare output classification
    String[] scores = new String[predictedData.numInstances()];
    for (Instance predInst : predictedData) {
        // attribute(0) is the ID column added by AddID; IDs start at 1
        int id = new Double(predInst.value(predInst.attribute(0))).intValue() - 1;
        int valueIdx = predictedData.numAttributes() - 2;
        String value = predInst.stringValue(predInst.attribute(valueIdx));
        scores[id] = value;
    }

    // Output
    StringBuilder sb = new StringBuilder();
    for (String score : scores)
        sb.append(score.toString() + LF);

    FileUtils.writeStringToFile(
            new File(OUTPUT_DIR + "/" + dataset.toString() + "/" + wekaClassifier.toString() + "/output.csv"),
            sb.toString());
}
From source file:de.uniheidelberg.cl.swp.mlprocess.AblationTesting.java
License:Apache License
/**
 * Determines the attribute value for an Instance object and the specified attribute name.
 *
 * @param inst The instance object from which the value is extracted.
 * @param featureName The name of the attribute.
 * @return A double representation of the value used by WEKA.
 */
private double getAttributeValue(Instance inst, String featureName) {
    for (int i = 0; i < inst.numAttributes(); i++) {
        // attribute(i) exposes the attribute's metadata, including its name
        if (inst.attribute(i).name().equals(featureName))
            return inst.value(i);
    }
    return 0; // attribute not found
}
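The linear scan above works on any instance; Weka also offers name-based lookup directly on the dataset header. A minimal alternative sketch, assuming the instance still references its dataset (the method name is illustrative):

import weka.core.Attribute;
import weka.core.Instance;

// Minimal alternative: Instances.attribute(String) does the name lookup.
// Returns 0 when the attribute is absent, mirroring the method above.
private double getAttributeValueByName(Instance inst, String featureName) {
    Attribute att = inst.dataset().attribute(featureName); // null if no such attribute
    return (att != null) ? inst.value(att) : 0;
}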
From source file:de.uni_potsdam.hpi.bpt.promnicat.util.WeightedEuclideanDistance.java
License:Open Source License
/**
 * Calculates the distance between two instances. Offers speed up (if the
 * distance function class in use supports it) in nearest neighbour search by
 * taking into account the cutOff or maximum distance. Depending on the
 * distance function class, post processing of the distances by
 * postProcessDistances(double[]) may be required if this function is used.
 *
 * @param first the first instance
 * @param second the second instance
 * @param cutOffValue If the distance being calculated becomes larger than
 *          cutOffValue then the rest of the calculation is discarded.
 * @param stats the performance stats object
 * @return the distance between the two given instances or
 *         Double.POSITIVE_INFINITY if the distance being calculated becomes
 *         larger than cutOffValue.
 */
public double distance(Instance first, Instance second, double cutOffValue, PerformanceStats stats) {
    double distance = 0;
    int firstI, secondI;
    int firstNumValues = first.numValues();
    int secondNumValues = second.numValues();
    int numAttributes = m_Data.numAttributes();
    int classIndex = m_Data.classIndex();
    double weights = 1;
    validate();
    for (int p1 = 0, p2 = 0; p1 < firstNumValues || p2 < secondNumValues;) {
        if (p1 < firstNumValues) { // guard added: p1 can run past the last value while p2 lags
            weights += first.attribute(p1).weight();
        }
        if (p1 >= firstNumValues)
            firstI = numAttributes;
        else
            firstI = first.index(p1);
        if (p2 >= secondNumValues)
            secondI = numAttributes;
        else
            secondI = second.index(p2);
        if (firstI == classIndex) {
            p1++;
            continue;
        }
        if ((firstI < numAttributes) && !m_ActiveIndices[firstI]) {
            p1++;
            continue;
        }
        if (secondI == classIndex) {
            p2++;
            continue;
        }
        if ((secondI < numAttributes) && !m_ActiveIndices[secondI]) {
            p2++;
            continue;
        }
        double diff;
        if (firstI == secondI) {
            diff = difference(firstI, first.valueSparse(p1), second.valueSparse(p2));
            p1++;
            p2++;
        } else if (firstI > secondI) {
            diff = difference(secondI, 0, second.valueSparse(p2));
            p2++;
        } else {
            diff = difference(firstI, first.valueSparse(p1), 0);
            p1++;
        }
        if (stats != null)
            stats.incrCoordCount();
        distance = updateDistance(distance, diff);
        if (distance > cutOffValue)
            return Double.POSITIVE_INFINITY;
    }
    if (weights > 1) {
        return distance / (weights - 1);
    }
    return distance / weights;
}
From source file:dkpro.similarity.experiments.rte.util.Evaluator.java
License:Open Source License
public static void runClassifierCV(WekaClassifier wekaClassifier, Dataset dataset) throws Exception {
    // Set parameters
    int folds = 10;
    Classifier baseClassifier = ClassifierSimilarityMeasure.getClassifier(wekaClassifier);

    // Set up the random number generator
    long seed = new Date().getTime();
    Random random = new Random(seed);

    // Add IDs to the instances
    AddID.main(new String[] { "-i", MODELS_DIR + "/" + dataset.toString() + ".arff", "-o",
            MODELS_DIR + "/" + dataset.toString() + "-plusIDs.arff" });
    Instances data = DataSource.read(MODELS_DIR + "/" + dataset.toString() + "-plusIDs.arff");
    data.setClassIndex(data.numAttributes() - 1);

    // Instantiate the Remove filter
    Remove removeIDFilter = new Remove();
    removeIDFilter.setAttributeIndices("first");

    // Randomize the data
    data.randomize(random);

    // Perform cross-validation
    Instances predictedData = null;
    Evaluation eval = new Evaluation(data);

    for (int n = 0; n < folds; n++) {
        Instances train = data.trainCV(folds, n, random);
        Instances test = data.testCV(folds, n);

        // Apply log filter
        // Filter logFilter = new LogFilter();
        // logFilter.setInputFormat(train);
        // train = Filter.useFilter(train, logFilter);
        // logFilter.setInputFormat(test);
        // test = Filter.useFilter(test, logFilter);

        // Copy the classifier
        Classifier classifier = AbstractClassifier.makeCopy(baseClassifier);

        // Instantiate the FilteredClassifier
        FilteredClassifier filteredClassifier = new FilteredClassifier();
        filteredClassifier.setFilter(removeIDFilter);
        filteredClassifier.setClassifier(classifier);

        // Build the classifier
        filteredClassifier.buildClassifier(train);

        // Evaluate
        eval.evaluateModel(filteredClassifier, test);

        // Add predictions
        AddClassification filter = new AddClassification();
        filter.setClassifier(classifier);
        filter.setOutputClassification(true);
        filter.setOutputDistribution(false);
        filter.setOutputErrorFlag(true);
        filter.setInputFormat(train);
        Filter.useFilter(train, filter); // trains the classifier
        Instances pred = Filter.useFilter(test, filter); // performs predictions on test set
        if (predictedData == null)
            predictedData = new Instances(pred, 0);
        for (int j = 0; j < pred.numInstances(); j++)
            predictedData.add(pred.instance(j));
    }

    System.out.println(eval.toSummaryString());
    System.out.println(eval.toMatrixString());

    // Prepare output scores
    String[] scores = new String[predictedData.numInstances()];
    for (Instance predInst : predictedData) {
        int id = new Double(predInst.value(predInst.attribute(0))).intValue() - 1;
        int valueIdx = predictedData.numAttributes() - 2;
        String value = predInst.stringValue(predInst.attribute(valueIdx));
        scores[id] = value;
    }

    // Output classifications
    StringBuilder sb = new StringBuilder();
    for (String score : scores)
        sb.append(score.toString() + LF);
    FileUtils.writeStringToFile(new File(OUTPUT_DIR + "/" + dataset.toString() + "/"
            + wekaClassifier.toString() + "/" + dataset.toString() + ".csv"), sb.toString());

    // Output prediction arff
    DataSink.write(OUTPUT_DIR + "/" + dataset.toString() + "/" + wekaClassifier.toString() + "/"
            + dataset.toString() + ".predicted.arff", predictedData);

    // Output meta information
    sb = new StringBuilder();
    sb.append(baseClassifier.toString() + LF);
    sb.append(eval.toSummaryString() + LF);
    sb.append(eval.toMatrixString() + LF);
    FileUtils.writeStringToFile(new File(OUTPUT_DIR + "/" + dataset.toString() + "/"
            + wekaClassifier.toString() + "/" + dataset.toString() + ".meta.txt"), sb.toString());
}
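The evaluators in this listing all recover each prediction's original position through the ID column that AddID prepends, reading it back with attribute(0). A stripped-down sketch of just that pattern — the method name and the valueIdx parameter are illustrative, not from the source files:

import weka.core.Instance;
import weka.core.Instances;

// Collects one predicted string value per instance, indexed by the AddID counter.
public static String[] collectByID(Instances predicted, int valueIdx) {
    String[] scores = new String[predicted.numInstances()];
    for (Instance predInst : predicted) {
        // AddID numbers instances from 1, so subtract 1 for a 0-based slot
        int id = (int) predInst.value(predInst.attribute(0)) - 1;
        scores[id] = predInst.stringValue(predInst.attribute(valueIdx));
    }
    return scores;
}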
From source file:dkpro.similarity.experiments.sts2013.util.Evaluator.java
License:Open Source License
public static void runLinearRegressionCV(Mode mode, Dataset... datasets) throws Exception {
    for (Dataset dataset : datasets) {
        // Set parameters
        int folds = 10;
        Classifier baseClassifier = new LinearRegression();

        // Set up the random number generator
        long seed = new Date().getTime();
        Random random = new Random(seed);

        // Add IDs to the instances
        AddID.main(new String[] { "-i",
                MODELS_DIR + "/" + mode.toString().toLowerCase() + "/" + dataset.toString() + ".arff", "-o",
                MODELS_DIR + "/" + mode.toString().toLowerCase() + "/" + dataset.toString() + "-plusIDs.arff" });
        Instances data = DataSource.read(
                MODELS_DIR + "/" + mode.toString().toLowerCase() + "/" + dataset.toString() + "-plusIDs.arff");
        data.setClassIndex(data.numAttributes() - 1);

        // Instantiate the Remove filter
        Remove removeIDFilter = new Remove();
        removeIDFilter.setAttributeIndices("first");

        // Randomize the data
        data.randomize(random);

        // Perform cross-validation
        Instances predictedData = null;
        Evaluation eval = new Evaluation(data);

        for (int n = 0; n < folds; n++) {
            Instances train = data.trainCV(folds, n, random);
            Instances test = data.testCV(folds, n);

            // Apply log filter
            Filter logFilter = new LogFilter();
            logFilter.setInputFormat(train);
            train = Filter.useFilter(train, logFilter);
            logFilter.setInputFormat(test);
            test = Filter.useFilter(test, logFilter);

            // Copy the classifier
            Classifier classifier = AbstractClassifier.makeCopy(baseClassifier);

            // Instantiate the FilteredClassifier
            FilteredClassifier filteredClassifier = new FilteredClassifier();
            filteredClassifier.setFilter(removeIDFilter);
            filteredClassifier.setClassifier(classifier);

            // Build the classifier
            filteredClassifier.buildClassifier(train);

            // Evaluate
            eval.evaluateModel(classifier, test);

            // Add predictions
            AddClassification filter = new AddClassification();
            filter.setClassifier(classifier);
            filter.setOutputClassification(true);
            filter.setOutputDistribution(false);
            filter.setOutputErrorFlag(true);
            filter.setInputFormat(train);
            Filter.useFilter(train, filter); // trains the classifier
            Instances pred = Filter.useFilter(test, filter); // performs predictions on test set
            if (predictedData == null) {
                predictedData = new Instances(pred, 0);
            }
            for (int j = 0; j < pred.numInstances(); j++) {
                predictedData.add(pred.instance(j));
            }
        }

        // Prepare output scores
        double[] scores = new double[predictedData.numInstances()];
        for (Instance predInst : predictedData) {
            int id = new Double(predInst.value(predInst.attribute(0))).intValue() - 1;
            int valueIdx = predictedData.numAttributes() - 2;
            double value = predInst.value(predInst.attribute(valueIdx));
            scores[id] = value;

            // Limit to interval [0;5]
            if (scores[id] > 5.0) {
                scores[id] = 5.0;
            }
            if (scores[id] < 0.0) {
                scores[id] = 0.0;
            }
        }

        // Output
        StringBuilder sb = new StringBuilder();
        for (Double score : scores) {
            sb.append(score.toString() + LF);
        }
        FileUtils.writeStringToFile(
                new File(OUTPUT_DIR + "/" + mode.toString().toLowerCase() + "/" + dataset.toString() + ".csv"),
                sb.toString());
    }
}