List of usage examples for weka.core Instance setValue
The examples below exercise several overloads of setValue:

public void setValue(int attIndex, double value);
public void setValue(Attribute att, double value);
public void setValue(Attribute att, String value);
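Before the per-project snippets, here is a minimal self-contained sketch of typical setValue usage. It is a sketch assuming Weka 3.7+, where DenseInstance implements the Instance interface; the class name SetValueDemo and the attribute names are illustrative only and do not come from the projects below.

import java.util.ArrayList;

import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;

public class SetValueDemo {
    public static void main(String[] args) {
        // One numeric attribute and one nominal attribute with two labels.
        ArrayList<String> labels = new ArrayList<>();
        labels.add("yes");
        labels.add("no");

        ArrayList<Attribute> attributes = new ArrayList<>();
        attributes.add(new Attribute("length"));        // numeric
        attributes.add(new Attribute("label", labels)); // nominal

        Instances data = new Instances("demo", attributes, 0);
        data.setClassIndex(1);

        Instance inst = new DenseInstance(data.numAttributes());
        inst.setDataset(data); // attach the header so the instance knows the attribute definitions

        inst.setValue(0, 4.2);                          // by index, numeric value
        inst.setValue(data.attribute("label"), "yes");  // by Attribute, nominal label

        data.add(inst);
        System.out.println(data);
    }
}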
From source file:dataHandlers.DataClusterHandler.java
private Instance toInstance(User user, Instances dataSet) {
    // Note: the new instance is attached to the userPoints field, not to the dataSet parameter.
    Instance tempInstance = new Instance(userPoints.numAttributes());
    tempInstance.setDataset(userPoints);
    // Build a comma-separated string: the user ID followed by the user's taste values.
    String userDataString = user.getUserID() + user.getTasteString(LastFMDataHandler.getTagCount());
    String[] dataArray = userDataString.split(",");
    for (int index = 0; index < dataArray.length; index++) {
        tempInstance.setValue(index, Integer.parseInt(dataArray[index]));
    }
    return tempInstance;
}
From source file:dataMining.KMeans.java
/**
 * Computes the new centroid of each group.
 *
 * @return list containing the new centroids
 */
private ArrayList<Instance> makeNewMeans() {
    ArrayList<Instance> listOfMeans = new ArrayList<>();
    for (Instance i : groups.keySet()) {
        ArrayList<Instance> list = groups.get(i);
        double[] tab = new double[i.numAttributes()];
        // Sum every attribute over all instances assigned to this group.
        for (Instance in : list) {
            for (int j = 0; j < tab.length; j++) {
                double d = 0;
                try {
                    d = Double.parseDouble(in.toString(j));
                } catch (NumberFormatException ex) {
                    d = 0;
                }
                tab[j] = tab[j] + d;
            }
        }
        // Divide by the group size to obtain the mean of each attribute.
        for (int j = 0; j < tab.length; j++) {
            tab[j] = tab[j] / list.size();
        }
        Instance ins = new Instance(tab.length);
        for (int j = 0; j < tab.length; j++) {
            ins.setValue(j, tab[j]);
        }
        listOfMeans.add(ins);
    }
    return listOfMeans;
}
From source file:dataMining.kNN.java
/**
 * Finds the k nearest neighbors in the training data for each test instance,
 * then proposes the decision that occurs most often among those k neighbors.
 *
 * @return the modified data set as a String
 */
public String reviewData() {
    String st = "";
    int type = 0;
    numAtt = 0;
    // Locate the index and type of the attribute whose value is to be predicted.
    for (int i = 0; i < testData.numAttributes(); i++) {
        String s = testData.attribute(i).name();
        if (s.equals(attributName)) {
            numAtt = i;
            type = testData.attribute(i).type();
            break;
        }
    }
    for (int i = 0; i < testData.numInstances(); i++) {
        try {
            Instance ins = testData.instance(i);
            // Both branches currently apply the same update, regardless of the attribute type.
            if (type == 0) {
                ins.setValue(numAtt, selectValue(findNeighbors(ins)));
            } else {
                ins.setValue(numAtt, selectValue(findNeighbors(ins)));
            }
            st = st + ins.toString() + "\n";
        } catch (IndexOutOfBoundsException e) {
            // Instances that cannot be processed are silently skipped.
        }
    }
    return st;
}
From source file:DataMining_FP.interfaz.java
private void Evaluacion_Actividad(String SimonDijo) {
    String resultado_prediccion = null;
    try {
        int num = 1150; // an arbitrary row index
        Instance nueva_entrada = data.instance(num); // take an existing row as a template and overwrite its values
        // Bin values for each accelerometer axis.
        float[] bins_result_x = bins(maxmin(accelerometer_x_array), accelerometer_x_array);
        float[] bins_result_y = bins(maxmin(accelerometer_y_array), accelerometer_y_array);
        float[] bins_result_z = bins(maxmin(accelerometer_z_array), accelerometer_z_array);
        for (int i = 0; i < 10; i++) {
            nueva_entrada.setValue(i, bins_result_x[i]);      // bins_x
            nueva_entrada.setValue(i + 10, bins_result_y[i]); // bins_y
            nueva_entrada.setValue(i + 20, bins_result_z[i]); // bins_z
        }
        // AVG
        nueva_entrada.setValue(30, get_avg(accelerometer_x_array));
        nueva_entrada.setValue(31, get_avg(accelerometer_y_array));
        nueva_entrada.setValue(32, get_avg(accelerometer_z_array));
        // PEAK
        nueva_entrada.setValue(33, tiempo_entre_picos(accelerometer_x_array, tiempo_de_lecturas));
        nueva_entrada.setValue(34, tiempo_entre_picos(accelerometer_y_array, tiempo_de_lecturas));
        nueva_entrada.setValue(35, tiempo_entre_picos(accelerometer_z_array, tiempo_de_lecturas));
        // ABSOLDEV
        nueva_entrada.setValue(36, get_avg_absolute_difference(accelerometer_x_array));
        nueva_entrada.setValue(37, get_avg_absolute_difference(accelerometer_y_array));
        nueva_entrada.setValue(38, get_avg_absolute_difference(accelerometer_z_array));
        // STANDDEV
        nueva_entrada.setValue(39, get_std_deviation(accelerometer_x_array));
        nueva_entrada.setValue(40, get_std_deviation(accelerometer_y_array));
        nueva_entrada.setValue(41, get_std_deviation(accelerometer_z_array));
        // RESULTANT
        nueva_entrada.setValue(42, get_avg_resultant_acceleration(accelerometer_x_array,
                accelerometer_y_array, accelerometer_z_array));
        // Classify the new instance and show the predicted activity in the text field.
        double clsLabel = tree.classifyInstance(nueva_entrada);
        resultado_prediccion = data.classAttribute().value((int) clsLabel);
        prediccion.setText(resultado_prediccion);
        //System.out.println("\n\n\n[resultado = "+resultado_prediccion+"]");
    } catch (Exception e) {
        e.printStackTrace();
        System.out.println("Error al predecir la instancia capturada");
    }
    // Check whether the algorithm predicted the activity correctly and update the score.
    if (SimonDijo.equals(resultado_prediccion)) { // predicted correctly
        if (!ult_prediccion) // the previous round was not predicted correctly: smaller bonus
            puntos_acumulados += 15;
        else // predicted correctly at least twice in a row
            puntos_acumulados += 30;
    } else { // wrong prediction
        // If there are no accumulated points nothing is deducted, otherwise points are subtracted.
        if ((puntos_acumulados - 4) > 0)
            puntos_acumulados -= 4;
        else
            puntos_acumulados = 0;
    }
    tf_puntaje.setText(String.valueOf(puntos_acumulados));
}
From source file:de.fub.maps.project.detector.model.inference.processhandler.EvaluationProcessHandler.java
License:Open Source License
protected Instance getInstance(String className, TrackSegment dataset) {
    Instance instance = new DenseInstance(getInferenceModel().getAttributes().size());
    for (FeatureProcess feature : getInferenceModel().getFeatureList()) {
        feature.setInput(dataset);
        feature.run();
        String featureName = feature.getName();
        Attribute attribute = getInferenceModel().getAttributeMap().get(featureName);
        Double result = feature.getResult();
        instance.setValue(attribute, result);
    }
    instance.setValue(getInferenceModel().getAttributeMap().get(AbstractInferenceModel.CLASSES_ATTRIBUTE_NAME),
            className);
    return instance;
}
From source file:de.fub.maps.project.detector.model.inference.processhandler.InferenceDataProcessHandler.java
License:Open Source License
private Instance getInstance(TrackSegment segment) {
    Instance instance = new DenseInstance(getInferenceModel().getAttributes().size());
    for (FeatureProcess feature : getInferenceModel().getFeatureList()) {
        feature.setInput(segment);
        feature.run();
        String featureName = feature.getName();
        Attribute attribute = getInferenceModel().getAttributeMap().get(featureName);
        Double result = feature.getResult();
        instance.setValue(attribute, result);
    }
    return instance;
}
From source file:de.ugoe.cs.cpdp.dataprocessing.MORPH.java
License:Apache License
/**
 * <p>
 * Applies MORPH to a single instance.
 * </p>
 *
 * @param instance
 *            instance that is morphed
 * @param data
 *            data based on which the instance is morphed
 */
public void morphInstance(Instance instance, Instances data) {
    Instance nearestUnlikeNeighbor = getNearestUnlikeNeighbor(instance, data);
    if (nearestUnlikeNeighbor == null) {
        throw new RuntimeException(
                "could not find nearest unlike neighbor within the data: " + data.relationName());
    }
    for (int j = 0; j < data.numAttributes(); j++) {
        if (data.attribute(j) != data.classAttribute() && data.attribute(j).isNumeric()) {
            double randVal = rand.nextDouble() * (beta - alpha) + alpha;
            instance.setValue(j, instance.value(j)
                    + randVal * (instance.value(j) - nearestUnlikeNeighbor.value(j)));
        }
    }
}
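In the loop above, each numeric value x is replaced by x + r * (x - y), where y is the corresponding value of the nearest unlike neighbor and r is drawn uniformly from [alpha, beta]. A tiny standalone illustration of that single update; the concrete bounds and values are assumptions, not taken from the source:

// Illustrative only: morph one numeric value relative to its nearest unlike neighbor
double alpha = 0.15, beta = 0.35;  // assumed perturbation bounds
double x = 5.0;                    // original attribute value (assumed)
double y = 7.5;                    // same attribute in the nearest unlike neighbor (assumed)
double r = new java.util.Random().nextDouble() * (beta - alpha) + alpha;
double morphed = x + r * (x - y);  // mirrors the setValue update in morphInstance above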
From source file:de.ugoe.cs.cpdp.dataprocessing.NormalizationUtil.java
License:Apache License
/**
 * <p>
 * Min-Max normalization to scale all data to the interval [0,1] (N1 in Transfer Defect Learning
 * by Nam et al.).
 * </p>
 *
 * @param data
 *            data that is normalized
 */
public static void minMax(Instances data) {
    for (int j = 0; j < data.numAttributes(); j++) {
        if (data.classIndex() != j) {
            double min = data.attributeStats(j).numericStats.min;
            double max = data.attributeStats(j).numericStats.max;
            for (int i = 0; i < data.numInstances(); i++) {
                Instance inst = data.instance(i);
                double newValue = (inst.value(j) - min) / (max - min);
                inst.setValue(j, newValue);
            }
        }
    }
}
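Since minMax is public and static, it can presumably be called directly on a loaded data set; a minimal usage sketch, where the ARFF file name and the class-index choice are assumptions:

// Hypothetical usage (inside a method that declares "throws Exception"):
Instances data = new Instances(new java.io.BufferedReader(new java.io.FileReader("train.arff")));
data.setClassIndex(data.numAttributes() - 1); // assumed: class is the last attribute
NormalizationUtil.minMax(data);               // every non-class attribute is now scaled to [0, 1]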
From source file:de.ugoe.cs.cpdp.dataprocessing.NormalizationUtil.java
License:Apache License
/**
 * <p>
 * Internal helper function
 * </p>
 */
private static void applyZScore(Instances data, double[] mean, double[] std) {
    for (int i = 0; i < data.numInstances(); i++) {
        Instance instance = data.instance(i);
        for (int j = 0; j < data.numAttributes(); j++) {
            if (data.classIndex() != j) {
                // z-score: subtract the mean first, then divide by the standard deviation
                instance.setValue(j, (instance.value(j) - mean[j]) / std[j]);
            }
        }
    }
}
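The mean and std arrays passed to this helper are not built in the snippet; a hedged sketch of how such arrays could be filled from the same data using Weka's attribute statistics (this helper is not part of the shown source):

// Hypothetical helper: per-attribute mean and standard deviation for the z-score above
private static double[][] meanAndStd(Instances data) {
    double[] mean = new double[data.numAttributes()];
    double[] std = new double[data.numAttributes()];
    for (int j = 0; j < data.numAttributes(); j++) {
        if (data.classIndex() != j && data.attribute(j).isNumeric()) {
            mean[j] = data.attributeStats(j).numericStats.mean;
            std[j] = data.attributeStats(j).numericStats.stdDev;
        }
    }
    return new double[][] { mean, std };
}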
From source file:decisiontree.MyC45.java
/**
 * Splits a dataset according to the values of a numeric attribute.
 *
 * @param data the data which is to be split
 * @param att the attribute to be used for splitting
 * @return the sets of instances produced by the split
 */
private Instances[] splitData(Instances data, Attribute att, double threshold) {
    Instances[] splitData = new Instances[2];
    for (int i = 0; i < 2; i++) {
        splitData[i] = new Instances(data, data.numInstances());
    }
    Enumeration instEnum = data.enumerateInstances();
    while (instEnum.hasMoreElements()) {
        Instance inst = (Instance) instEnum.nextElement();
        if (inst.value(att) >= threshold) {
            inst.setValue(att, threshold);
            splitData[1].add(inst);
        } else {
            inst.setValue(att, 0);
            splitData[0].add(inst);
        }
    }
    for (int i = 0; i < splitData.length; i++) {
        splitData[i].compactify();
    }
    return splitData;
}