Example usage for org.apache.lucene.benchmark.quality.utils SubmissionReport flush

List of usage examples for org.apache.lucene.benchmark.quality.utils SubmissionReport flush

Introduction

In this page you can find the example usage for org.apache.lucene.benchmark.quality.utils SubmissionReport flush.

Prototype

public void flush() 

Source Link

Usage

From source file:de.mpii.microblogtrack.task.offline.QualityBenchmark.java

License: Apache License

/**
 * Run the quality benchmark.
 *
 * @param judge the judge that can tell if a certain result doc is relevant
 * for a certain quality query. If null, no judgements would be made.
 * Usually null for a submission run.
 * @param submitRep submission report is created if non null.
 * @param qualityLog If not null, quality run data would be printed for each
 * query.
 * @return QualityStats of each quality query that was executed.
 * @throws Exception if quality benchmark failed to run.
 */
public QualityStats[] execute(Judge judge, SubmissionReport submitRep, PrintWriter qualityLog)
        throws Exception {
    final int queryCount = Math.min(maxQueries, qualityQueries.length);
    QualityStats[] stats = new QualityStats[queryCount];
    for (int qi = 0; qi < queryCount; qi++) {
        QualityQuery qualityQuery = qualityQueries[qi];
        // Turn the quality query into a runnable Lucene query.
        Query luceneQuery = qqParser.parse(qualityQuery);
        // Time the search so per-query latency can be reported downstream.
        long startMillis = System.currentTimeMillis();
        TopDocs hits = searcher.search(luceneQuery, maxResults);
        long elapsedMillis = System.currentTimeMillis() - startMillis;
        // Usually only one of judging/submitting is active, but both are supported.
        if (judge != null) {
            stats[qi] = analyzeQueryResults(qualityQuery, luceneQuery, hits, judge, qualityLog, elapsedMillis);
        }
        if (submitRep != null) {
            submitRep.report(qualityQuery, hits, docNameField, searcher);
        }
    }
    if (submitRep != null) {
        submitRep.flush(); // push any buffered report lines to the underlying writer
    }
    return stats;
}

From source file:de.mpii.microblogtrack.task.offline.QualityBenchmark.java

License: Apache License

/**
 * Run the quality benchmark using pre-built queries instead of parsing them
 * from the quality queries.
 *
 * @param judge the judge that can tell if a certain result doc is relevant
 * for a certain quality query. If null, no judgements would be made.
 * Usually null for a submission run.
 * @param qs pre-built queries; qs[i] is paired with qualityQueries[i].
 * @param submitRep submission report is created if non null.
 * @param qualityLog If not null, quality run data would be printed for each
 * query.
 * @return QualityStats of each quality query that was executed.
 * @throws Exception if quality benchmark failed to run.
 */
public QualityStats[] execute(Judge judge, Query[] qs, SubmissionReport submitRep, PrintWriter qualityLog)
        throws Exception {
    // FIX: also bound by qualityQueries.length. The loop indexes
    // qualityQueries[i] with i < nQueries, so the original bound (qs.length
    // only) threw ArrayIndexOutOfBoundsException when qs was longer than
    // qualityQueries.
    int nQueries = Math.min(maxQueries, Math.min(qs.length, qualityQueries.length));
    QualityStats stats[] = new QualityStats[nQueries];
    for (int i = 0; i < nQueries; i++) {
        QualityQuery qq = qualityQueries[i];
        // use the pre-built query for this slot
        Query q = qs[i];
        // search with this query, timing it for the stats
        long t1 = System.currentTimeMillis();
        TopDocs td = searcher.search(q, maxResults);
        long searchTime = System.currentTimeMillis() - t1;
        // most likely we either submit or judge, but check both
        if (judge != null) {
            stats[i] = analyzeQueryResults(qq, q, td, judge, qualityLog, searchTime);
        }
        if (submitRep != null) {
            submitRep.report(qq, td, docNameField, searcher);
        }
    }
    if (submitRep != null) {
        submitRep.flush(); // flush buffered report lines
    }
    return stats;
}

From source file:lucene.ri.QualityBenchmark.java

License: Apache License

/**
 * Run the quality benchmark.
 *
 * @param judge the judge that can tell if a certain result doc is relevant for a certain quality query.
 *        If null, no judgements would be made. Usually null for a submission run.
 * @param submitRep submission report is created if non null.
 * @param qualityLog If not null, quality run data would be printed for each query.
 * @return QualityStats of each quality query that was executed.
 * @throws Exception if quality benchmark failed to run.
 */
public QualityStats[] execute(Judge judge, SubmissionReport submitRep, PrintWriter qualityLog)
        throws Exception {

    int nQueries = Math.min(maxQueries, qualityQueries.length);
    QualityStats stats[] = new QualityStats[nQueries];

    for (int i = 0; i < nQueries; i++) {

        QualityQuery qq = qualityQueries[i];
        // generate query
        Query q = qqParser.parse(qq);
        // search with this query, timing it for the stats
        long t1 = System.currentTimeMillis();
        // FIX: the original searched for the hard-coded literal "Fred",
        // ignoring the parsed query q entirely, so every quality query was
        // evaluated against the same search. Search with the actual query
        // text instead. NOTE(review): this searcher wrapper takes a query
        // string; assumes q.toString() yields syntax the wrapper accepts —
        // TODO confirm against the wrapper's search() implementation.
        TopDocs td = searcher.search(q.toString());
        long searchTime = System.currentTimeMillis() - t1;
        // most likely we either submit or judge, but check both

        if (judge != null) {
            stats[i] = analyzeQueryResults(qq, q, td, judge, qualityLog, searchTime);
        }
        if (submitRep != null) {
            // report() expects a Lucene IndexSearcher, so unwrap the custom searcher
            submitRep.report(qq, td, docNameField, searcher.indexSearcher);
        }
    }
    if (submitRep != null) {
        submitRep.flush();
    }
    return stats;
}

From source file:uib.percisionRecall.QualityBenchmark.java

License: Apache License

/**
 * Run the quality benchmark.
 *
 * @param judge the judge that can tell if a certain result doc is relevant for a certain quality query.
 *        If null, no judgements would be made. Usually null for a submission run.
 * @param submitRep submission report is created if non null.
 * @param qualityLog If not null, quality run data would be printed for each query.
 * @return QualityStats of each quality query that was executed.
 * @throws Exception if quality benchmark failed to run.
 */
public QualityStats[] execute(Judge judge, SubmissionReport submitRep, PrintWriter qualityLog)
        throws Exception {
    final int total = Math.min(maxQueries, qualityQueries.length);
    QualityStats[] perQueryStats = new QualityStats[total];
    for (int idx = 0; idx < total; idx++) {
        QualityQuery qualityQuery = qualityQueries[idx];
        // parse this quality query into a Lucene query
        Query parsed = qqParser.parse(qualityQuery);
        // run the (filter-less) search, measuring elapsed wall-clock time
        long before = System.currentTimeMillis();
        TopDocs results = searcher.search(parsed, null, maxResults);
        long elapsed = System.currentTimeMillis() - before;
        // usually only one of judge/submit applies, but handle either or both
        if (judge != null) {
            perQueryStats[idx] = analyzeQueryResults(qualityQuery, parsed, results, judge, qualityLog, elapsed);
        }
        if (submitRep != null) {
            submitRep.report(qualityQuery, results, docNameField, searcher);
        }
    }
    if (submitRep != null) {
        submitRep.flush(); // drain any buffered report output
    }
    return perQueryStats;
}