Example usage for weka.core Instances relationName

List of usage examples for weka.core Instances relationName

Introduction

In this page you can find the example usage for weka.core Instances relationName.

Prototype


public String relationName() 

Source Link

Document

Returns the relation's name.

Usage

From source file:meka.core.MLUtils.java

License:Open Source License

/**
 * Fixes the relation name by adding the "-C" attribute to it if necessary.
 *
 * @param data the dataset to fix/* www .ja  v  a  2 s.  c om*/
 * @param numClassAtts the number of class attributes (0 for none, >0 for attributes at start, <0 for attributes at end)
 */
public static void fixRelationName(Instances data, int numClassAtts) {
    if (data.relationName().indexOf(":") == -1)
        data.setRelationName(data.relationName() + ": -C " + numClassAtts);
}

From source file:meka.core.MLUtils.java

License:Open Source License

/**
 * Prepares the class index of the data by parsing the options that MEKA
 * encodes in the @relation name (e.g. "Example_Dataset: -C 3").
 *
 * @param data the data to prepare; its class index is set as a side effect
 * @throws Exception if the options cannot be extracted from the relation
 *                   name or the -C/-c option cannot be parsed
 */
public static void prepareData(Instances data) throws Exception {
    String doptions[] = null;
    try {
        // options are stored after the colon in the relation name
        doptions = MLUtils.getDatasetOptions(data);
    } catch (Exception e) {
        throw new Exception("[Error] Failed to Get Options from @Relation Name", e);
    }

    try {
        // prefer upper-case -C, fall back to lower-case -c
        int c = (Utils.getOptionPos('C', doptions) >= 0) ? Integer.parseInt(Utils.getOption('C', doptions))
                : Integer.parseInt(Utils.getOption('c', doptions));
        // if negative, then invert
        if (c < 0) {
            c = -c;
            // NOTE(review): the parameter is reassigned here, so the dataset
            // returned by F.mulan2meka is only visible inside this method and
            // the caller keeps its original reference — confirm this is intended
            data = F.mulan2meka(data, c);
        }
        // set c
        data.setClassIndex(c);
    } catch (Exception e) {
        throw new Exception(
                "Failed to parse options stored in relation name; expected format for relation name:\n"
                        + "  'name: options'\n" + "But found:\n" + "  '" + data.relationName() + "'\n"
                        + "Format example:\n" + "  'Example_Dataset: -C 3 -split-percentage 50'\n"
                        + "'-C 3' specifies the number of target attributes to be 3. See tutorial for more information.",
                e);
    }
}

From source file:meka.experiment.DefaultExperiment.java

License:Open Source License

/**
 * Runs the experiment: iterates over all datasets supplied by the dataset
 * provider, evaluates each configured classifier on each dataset, and
 * collects the resulting statistics. In incremental mode, previously
 * computed statistics are reused and new ones are appended as produced.
 *
 * @return          null if successfully run, otherwise error message
 */
public String run() {
    String result;
    Instances dataset;
    List<EvaluationStatistics> stats;
    boolean incremental;

    debug("pre: run");

    result = null;
    m_Running = true;
    // incremental mode: the statistics handler can persist results per
    // classifier/dataset combination as they are produced
    incremental = (m_StatisticsHandler instanceof IncrementalEvaluationStatisticsHandler)
            && (((IncrementalEvaluationStatisticsHandler) m_StatisticsHandler).supportsIncrementalUpdate());
    debug("Incremental statistics? " + incremental);

    notifyExecutionStageListeners(ExecutionStageEvent.Stage.RUNNING);

    while (m_DatasetProvider.hasNext()) {
        // next dataset
        debug("pre: next-dataset");
        dataset = m_DatasetProvider.next();
        debug("post: next-dataset");
        if (dataset == null) {
            result = "Failed to obtain next dataset!";
            log(result);
            m_Running = false;
            break;
        }
        log("Using dataset: " + dataset.relationName());

        // iterate classifiers
        for (MultiLabelClassifier classifier : m_Classifiers) {
            // evaluation required? in incremental mode, reuse statistics
            // already present from a previous run
            if (incremental) {
                if (!((IncrementalEvaluationStatisticsHandler) m_StatisticsHandler).requires(classifier,
                        dataset)) {
                    // NOTE(review): this log uses Utils.toCommandLine while the
                    // one further below uses OptionUtils.toCommandLine —
                    // confirm both produce consistent output
                    log("Already present, skipping: " + Utils.toCommandLine(classifier) + " --> "
                            + dataset.relationName());
                    List<EvaluationStatistics> priorStats = ((IncrementalEvaluationStatisticsHandler) m_StatisticsHandler)
                            .retrieve(classifier, dataset);
                    m_Statistics.addAll(priorStats);
                    notifyStatisticsNotificationListeners(priorStats);
                    continue;
                }
            }

            // evaluate a fresh copy so per-run state does not leak between iterations
            try {
                classifier = (MultiLabelClassifier) AbstractClassifier.makeCopy(classifier);
            } catch (Exception e) {
                result = handleException(
                        "Failed to create copy of classifier: " + classifier.getClass().getName(), e);
                log(result);
                m_Running = false;
                break;
            }

            if (m_Running && !m_Stopping) {
                // notify listeners
                notifyIterationNotificationListeners(classifier, dataset);
                log("Using classifier: " + OptionUtils.toCommandLine(classifier));

                // perform evaluation
                debug("pre: evaluator init");
                result = m_Evaluator.initialize();
                debug("post: evaluator init");
                if (result != null) {
                    m_Running = false;
                    break;
                }
                try {
                    debug("pre: evaluator evaluate");
                    stats = m_Evaluator.evaluate(classifier, dataset);
                    debug("post: evaluator evaluate");
                } catch (Exception e) {
                    result = handleException("Failed to evaluate dataset '" + dataset.relationName()
                            + "' with classifier: " + Utils.toCommandLine(classifier), e);
                    log(result);
                    m_Running = false;
                    break;
                }
                // collect and (in incremental mode) immediately persist the statistics
                if (stats != null) {
                    m_Statistics.addAll(stats);
                    if (incremental)
                        ((IncrementalEvaluationStatisticsHandler) m_StatisticsHandler).append(stats);
                    notifyStatisticsNotificationListeners(stats);
                }
            }

            if (!m_Running || m_Stopping)
                break;
        }
        if (!m_Running || m_Stopping)
            break;
    }

    // non-incremental handlers only see the statistics once, at the end
    if (m_Running && !m_Stopping) {
        if (!incremental)
            m_StatisticsHandler.write(m_Statistics);
    }
    if (!m_Running) {
        if (result == null)
            result = "Experiment interrupted!";
        else
            result = "Experiment interrupted: " + result;
    }

    if (result != null)
        log(result);

    m_Running = false;
    m_Stopping = false;

    debug("post: run");

    return result;
}

From source file:meka.experiment.evaluationstatistics.EvaluationStatistics.java

License:Open Source License

/**
 * Extracts the statistics from the Result object, using the dataset's
 * relation name as the relation identifier.
 *
 * @param classifier    the classifier the statistics belong to
 * @param dataset       the dataset the evaluation ran on, may be null
 * @param result        the evaluation result
 */
public EvaluationStatistics(MultiLabelClassifier classifier, Instances dataset, Result result) {
    this(classifier, (dataset == null) ? null : dataset.relationName(), result);
}

From source file:meka.experiment.evaluationstatistics.EvaluationStatisticsUtils.java

License:Open Source License

/**
 * Returns all the values of a specific measurement for the specified
 * classifier/dataset combination.
 *
 * @param stats         the stats to inspect
 * @param classifier    the classifier to look for
 * @param dataset       the dataset to look for
 * @param measurement   the measurement to retrieve
 * @return              the values
 */
public static List<Number> measurements(List<EvaluationStatistics> stats, MultiLabelClassifier classifier,
        Instances dataset, String measurement) {
    // delegate to the String-based overload using the canonical identifiers
    String cmdline = OptionUtils.toCommandLine(classifier);
    String relation = dataset.relationName();
    return measurements(stats, cmdline, relation, measurement);
}

From source file:meka.experiment.evaluationstatistics.KeyValuePairs.java

License:Open Source License

/**
 * Checks whether the specified combination of classifier and dataset is
 * required for evaluation or already present from a previous evaluation.
 *
 * @param classifier    the classifier to check
 * @param dataset       the dataset to check
 * @return              true if it needs evaluating
 */
public boolean requires(MultiLabelClassifier classifier, Instances dataset) {
    String cmdline = Utils.toCommandLine(classifier);
    String relation = dataset.relationName();

    // a matching entry means the combination was already evaluated
    for (EvaluationStatistics stat : m_Statistics) {
        if (stat.getCommandLine().equals(cmdline) && stat.getRelation().equals(relation))
            return false;
    }

    return true;
}

From source file:meka.experiment.evaluationstatistics.KeyValuePairs.java

License:Open Source License

/**
 * Retrieves the statistics for the specified combination of classifier and
 * dataset.
 *
 * @param classifier    the classifier to check
 * @param dataset       the dataset to check
 * @return              the stats, empty list if none available
 */
public List<EvaluationStatistics> retrieve(MultiLabelClassifier classifier, Instances dataset) {
    List<EvaluationStatistics> matches = new ArrayList<>();
    String cmdline = Utils.toCommandLine(classifier);
    String relation = dataset.relationName();

    // collect every stored entry matching both identifiers
    for (EvaluationStatistics stat : m_Statistics) {
        boolean sameClassifier = stat.getCommandLine().equals(cmdline);
        boolean sameRelation = stat.getRelation().equals(relation);
        if (sameClassifier && sameRelation)
            matches.add(stat);
    }

    return matches;
}

From source file:meka.experiment.evaluators.CrossValidation.java

License:Open Source License

/**
 * Returns the evaluation statistics generated for the dataset (sequential execution).
 *
 * @param classifier    the classifier to evaluate
 * @param dataset       the dataset to evaluate on
 * @return              the statistics, one entry per completed fold; empty
 *                      if execution was stopped
 */
protected List<EvaluationStatistics> evaluateSequential(MultiLabelClassifier classifier, Instances dataset) {
    List<EvaluationStatistics> result;
    EvaluationStatistics stats;
    Instances train;
    Instances test;
    Result res;
    int i;
    Random rand;
    MultiLabelClassifier current;

    result = new ArrayList<>();
    rand = new Random(m_Seed);
    // folds are logged 1-based; trainCV/testCV expect 0-based indices, hence i - 1
    for (i = 1; i <= m_NumFolds; i++) {
        log("Fold: " + i);
        if (m_PreserveOrder)
            train = dataset.trainCV(m_NumFolds, i - 1);
        else
            train = dataset.trainCV(m_NumFolds, i - 1, rand);
        test = dataset.testCV(m_NumFolds, i - 1);
        try {
            // evaluate a fresh copy so state does not leak between folds
            current = (MultiLabelClassifier) OptionUtils.shallowCopy(classifier);
            res = Evaluation.evaluateModel(current, train, test, m_Threshold, m_Verbosity);
            // NOTE(review): the stats reference the template 'classifier', not
            // the evaluated copy 'current' — presumably for command-line
            // identity; confirm this is intended
            stats = new EvaluationStatistics(classifier, dataset, res);
            stats.put(KEY_FOLD, i);
            result.add(stats);
        } catch (Exception e) {
            handleException("Failed to evaluate dataset '" + dataset.relationName() + "' with classifier: "
                    + Utils.toCommandLine(classifier), e);
            break;
        }

        if (m_Stopped)
            break;
    }

    // a stop request discards all partial results
    if (m_Stopped)
        result.clear();

    return result;
}

From source file:meka.experiment.evaluators.CrossValidation.java

License:Open Source License

/**
 * Returns the evaluation statistics generated for the dataset (parallel execution).
 * One job per fold is created, all jobs are submitted to a fixed-size thread
 * pool, and the per-fold results are collected after the pool terminates.
 *
 * @param classifier    the classifier to evaluate
 * @param dataset       the dataset to evaluate on
 * @return              the statistics
 */
protected List<EvaluationStatistics> evaluateParallel(final MultiLabelClassifier classifier,
        final Instances dataset) {
    List<EvaluationStatistics> result;
    ArrayList<EvaluatorJob> jobs;
    EvaluatorJob job;
    int i;
    Random rand;

    result = new ArrayList<>();

    debug("pre: create jobs");
    jobs = new ArrayList<>();
    rand = new Random(m_Seed);
    // folds are logged 1-based; trainCV/testCV expect 0-based indices
    for (i = 1; i <= m_NumFolds; i++) {
        // effectively-final locals so the anonymous job can capture them
        final int index = i;
        final Instances train;
        final Instances test;
        final MultiLabelClassifier current;
        if (m_PreserveOrder)
            train = dataset.trainCV(m_NumFolds, index - 1);
        else
            train = dataset.trainCV(m_NumFolds, index - 1, rand);
        test = dataset.testCV(m_NumFolds, index - 1);
        // each job evaluates its own copy so folds do not share classifier state
        current = (MultiLabelClassifier) OptionUtils.shallowCopy(classifier);
        job = new EvaluatorJob() {
            protected List<EvaluationStatistics> doCall() throws Exception {
                List<EvaluationStatistics> result = new ArrayList<>();
                log("Executing fold #" + index + "...");
                try {
                    Result res = Evaluation.evaluateModel(current, train, test, m_Threshold, m_Verbosity);
                    EvaluationStatistics stats = new EvaluationStatistics(classifier, dataset, res);
                    stats.put(KEY_FOLD, index);
                    result.add(stats);
                } catch (Exception e) {
                    handleException("Failed to evaluate dataset '" + dataset.relationName()
                            + "' with classifier: " + Utils.toCommandLine(classifier), e);
                }
                log("...finished fold #" + index);
                return result;
            }
        };
        jobs.add(job);
    }
    debug("post: create jobs");

    // execute jobs
    m_Executor = Executors.newFixedThreadPool(m_ActualNumThreads);
    debug("pre: submit");
    try {
        for (i = 0; i < jobs.size(); i++)
            m_Executor.submit(jobs.get(i));
    } catch (RejectedExecutionException e) {
        // ignored
    } catch (Exception e) {
        handleException("Failed to start up jobs", e);
    }
    debug("post: submit");

    // no new jobs accepted; already-submitted jobs run to completion
    debug("pre: shutdown");
    m_Executor.shutdown();
    debug("post: shutdown");

    // wait for threads to finish
    debug("pre: wait");
    while (!m_Executor.isTerminated()) {
        try {
            m_Executor.awaitTermination(100, TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
            // ignored
            // NOTE(review): the interrupt flag is not restored via
            // Thread.currentThread().interrupt() — confirm callers do not
            // rely on interruption propagating
        } catch (Exception e) {
            handleException("Failed to await termination", e);
        }
    }
    debug("post: wait");

    // collect results
    debug("pre: collect");
    for (i = 0; i < jobs.size(); i++)
        result.addAll(jobs.get(i).getResult());
    debug("post: collect");

    return result;
}

From source file:meka.experiment.evaluators.PercentageSplit.java

License:Open Source License

/**
 * Returns the evaluation statistics generated for the dataset using a single
 * train/test percentage split.
 *
 * @param classifier    the classifier to evaluate
 * @param dataset       the dataset to evaluate on
 * @return              the statistics; empty if evaluation failed or was stopped
 */
@Override
public List<EvaluationStatistics> evaluate(MultiLabelClassifier classifier, Instances dataset) {
    List<EvaluationStatistics> stats = new ArrayList<>();

    // work on a shuffled copy unless the original instance order must be kept
    if (!m_PreserveOrder) {
        dataset = new Instances(dataset);
        dataset.randomize(new Random(m_Seed));
    }

    int total = dataset.numInstances();
    int numTrain = (int) (total * m_TrainPercentage / 100.0);
    Instances trainSet = new Instances(dataset, 0, numTrain);
    Instances testSet = new Instances(dataset, numTrain, total - numTrain);

    try {
        Result evaluation = Evaluation.evaluateModel(classifier, trainSet, testSet, m_Threshold, m_Verbosity);
        stats.add(new EvaluationStatistics(classifier, dataset, evaluation));
    } catch (Exception e) {
        handleException("Failed to evaluate dataset '" + dataset.relationName() + "' with classifier: "
                + Utils.toCommandLine(classifier), e);
    }

    // a stop request discards any partial result
    if (m_Stopped)
        stats.clear();

    return stats;
}