Example usage for org.apache.lucene.document IntPoint newExactQuery

List of usage examples for org.apache.lucene.document IntPoint newExactQuery

Introduction

On this page you can find example usages of IntPoint.newExactQuery from org.apache.lucene.document.

Prototype

public static Query newExactQuery(String field, int value) 

Document

Create a query for matching an exact integer value.
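
For orientation, the following is a minimal, self-contained sketch (not taken from any of the source files below) that indexes one document with an IntPoint field and retrieves it with newExactQuery. The directory, field name, and value are illustrative only; the sketch assumes a Lucene 6.x/7.x setup, matching the int totalHits usage in the examples that follow.

import java.io.IOException;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.IntPoint;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class IntPointExactQueryExample {
    public static void main(String[] args) throws IOException {
        Directory dir = new RAMDirectory(); // in-memory index, for the sketch only
        try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            Document doc = new Document();
            // IntPoint indexes the value for exact/range queries but is not stored;
            // add a StoredField if the value must be readable from search hits.
            doc.add(new IntPoint("year", 2017));
            doc.add(new StoredField("year", 2017));
            writer.addDocument(doc);
        }
        try (DirectoryReader reader = DirectoryReader.open(dir)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            Query query = IntPoint.newExactQuery("year", 2017); // matches only year == 2017
            TopDocs hits = searcher.search(query, 10);
            System.out.println(hits.totalHits + " total matching documents");
        }
    }
}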

Usage

From source file: com.b2international.index.lucene.IntIndexField.java

License: Apache License

@Override
public Query toQuery(Integer value) {
    return IntPoint.newExactQuery(fieldName(), value);
}

From source file: com.epam.catgenome.entity.vcf.VcfFilterForm.java

License: Open Source License

private void addAdditionalFilter(BooleanQuery.Builder builder, Map.Entry<String, Object> entry) {
    String key = entry.getKey().toLowerCase();
    if (entry.getValue() instanceof List) {
        addFiltersFromList(builder, entry, key);
    } else if (entry.getValue() instanceof Integer || entry.getValue() instanceof Long) {
        builder.add(IntPoint.newExactQuery(key, (Integer) entry.getValue()), BooleanClause.Occur.MUST);
    } else if (entry.getValue() instanceof Float || entry.getValue() instanceof Double) {
        builder.add(FloatPoint.newExactQuery(key, (Float) entry.getValue()), BooleanClause.Occur.MUST);
    } else {
        builder.add(new TermQuery(new Term(key, entry.getValue().toString().toLowerCase())),
                BooleanClause.Occur.MUST);
    }
}
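
Note that the (Integer) and (Float) casts above throw a ClassCastException when the value is actually a Long or a Double, even though those types pass the instanceof checks. A more defensive variant (a sketch only, assuming the field named by key was indexed as an IntPoint or FloatPoint) could go through Number instead:

// Sketch: dispatch on Number rather than casting to a specific boxed type.
// Assumes the field named by `key` was indexed as IntPoint / FloatPoint.
Object value = entry.getValue();
if (value instanceof Integer || value instanceof Long) {
    builder.add(IntPoint.newExactQuery(key, ((Number) value).intValue()),
            BooleanClause.Occur.MUST);
} else if (value instanceof Float || value instanceof Double) {
    builder.add(FloatPoint.newExactQuery(key, ((Number) value).floatValue()),
            BooleanClause.Occur.MUST);
}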

From source file: com.epam.catgenome.entity.vcf.VcfFilterForm.java

License: Open Source License

private void tryAddIntegeralFilter(BooleanQuery.Builder builder, Map.Entry<String, Object> entry, String key,
        Object val) {
    if (val instanceof Integer || entry.getValue() instanceof Long) {
        builder.add(IntPoint.newExactQuery(key, (Integer) entry.getValue()), BooleanClause.Occur.MUST);
    }
}

From source file: lucenesearch.Mallet.java

public void getMalletOutput() throws IOException {
    int hitsPerPage = 10000000;

    String index = new Searcher().getPostIndexPath();
    IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(index)));
    IndexSearcher searcher = new IndexSearcher(reader);
    BooleanQuery.Builder booleanQuery = new BooleanQuery.Builder();

    //booleanQuery.add(new QueryParser("Body", analyzer).parse(""), BooleanClause.Occur.MUST);
    booleanQuery.add(IntPoint.newExactQuery("PostTypeId", 2), BooleanClause.Occur.MUST);

    TopDocs results;

    results = searcher.search(booleanQuery.build(), hitsPerPage);

    ScoreDoc[] hits = results.scoreDocs;

    int numTotalHits = results.totalHits;
    System.out.println(numTotalHits + " total matching documents");

    int start = 0;
    int end = Math.min(numTotalHits, hitsPerPage);

    PrintWriter pw = new PrintWriter("./data/mallet.txt");

    StringBuilder sb = new StringBuilder();
    for (int i = start; i < end; i++) {
        System.out.println("Doc " + i);
        Document doc = searcher.doc(hits[i].doc);
        ArrayList<String> res = LuceneUtils.getAnalyzedRemoveHtml(doc.get("Body"));

        int id = Integer.parseInt(doc.get("SId"));
        sb = new StringBuilder();
        sb.append(id);
        sb.append("\t");
        for (String re : res) {
            re = re.replaceAll("\r\n", " ").replaceAll("\n", " ").replaceAll("<.+?>", "").replaceAll(" +", " ")
                    .replaceAll("[^\\x00-\\x7F]", " ").trim();
            sb.append(re).append(" ");
        }
        sb.append("\n");
        pw.print(sb.toString());

    }
    pw.close();

}

From source file: lucenesearch.NGram.java

public void getNGram(int n, int hitPP) throws IOException, ParseException {
    int hitsPerPage = hitPP;

    String index = new Searcher().getPostIndexPath();
    IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(index)));
    IndexSearcher searcher = new IndexSearcher(reader);
    BooleanQuery.Builder booleanQuery = new BooleanQuery.Builder();

    //booleanQuery.add(new QueryParser("Body", analyzer).parse(""), BooleanClause.Occur.MUST);
    booleanQuery.add(IntPoint.newExactQuery("PostTypeId", 2), BooleanClause.Occur.MUST);

    TopDocs results;

    results = searcher.search(booleanQuery.build(), hitsPerPage);

    ScoreDoc[] hits = results.scoreDocs;

    int numTotalHits = results.totalHits;
    System.out.println(numTotalHits + " total matching documents");

    int start = 0;
    int end = Math.min(numTotalHits, hitsPerPage);

    PrintWriter pw = new PrintWriter("./data/grams/" + n + "gram.csv");

    StringBuilder sb = new StringBuilder();
    for (int i = start; i < end; i++) {
        Document doc = searcher.doc(hits[i].doc);
        ArrayList<String[]> tmp = getNGrams(doc, new ExtendedDocument(hits[i].doc, reader), n);
        for (String[] ngrams : tmp) {
            sb = new StringBuilder();
            sb.append(doc.get("SId"));
            sb.append(",");
            sb.append(toTabbedStr(ngrams));
            sb.append(",");
            ArrayList<String> tagg = tags.get(Integer.parseInt(doc.get("SId")));
            sb.append(implodeTabbed(tagg));
            sb.append("\n");
            if (tagg.size() > 1)
                pw.print(sb.toString());
        }

    }
    pw.close();
}

From source file: lucenesearch.RelevantPostFinder.java

public void saveRelevantPost() throws SQLException, IOException, ParseException {
    String url = "jdbc:mysql://localhost:3306/sof17";
    String username = "root";
    String password = "root";
    String folderPath = "./data/rel_posts/";
    String dupNotFound = "./data/dup_not_exist.txt";
    int hitsPerPage = 10000;

    System.out.println("Connecting database...");

    Connection conn = DriverManager.getConnection(url, username, password);
    System.out.println("Database connected!");
    Statement stmt = conn.createStatement();
    String query = "select PostId,PostBody,OriginalPostId from java_test_data";
    ResultSet rs = stmt.executeQuery(query);

    String index = new Searcher().getPostIndexPath();

    IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(index)));
    IndexSearcher searcher = new IndexSearcher(reader);
    //        searcher.setSimilarity(new BM25Similarity(0.05f, 0.03f)); //!!!!!!!!!!!!!!
    searcher.setSimilarity(new BM25Similarity()); //!!!!!!!!!!!!!!

    Analyzer analyzer = new StandardAnalyzer();

    int cnt = 0;

    while (rs.next()) {
        System.out.println("Processing post " + (++cnt));

        int postid = rs.getInt("PostId");
        int dupId = rs.getInt("OriginalPostId");
        ArrayList<String> bd = LuceneUtils.getAnalyzedRemoveHtml(rs.getString("PostBody").replace(':', ' '));

        StringBuilder sb = new StringBuilder();
        int j = 0;
        for (String b : bd) {
            if (++j > 600)
                break;
            sb.append(b);
            sb.append(" ");
        }
        String body = sb.toString();

        BooleanQuery.Builder booleanQuery = new BooleanQuery.Builder();
        booleanQuery.add(IntPoint.newExactQuery("PostTypeId", 1), BooleanClause.Occur.MUST);
        booleanQuery.add(new QueryParser("Tags", analyzer).parse("java"), BooleanClause.Occur.MUST);
        booleanQuery.add(new QueryParser("Body", analyzer).parse(body), BooleanClause.Occur.MUST);

        TopDocs results;
        results = searcher.search(booleanQuery.build(), hitsPerPage);

        ScoreDoc[] hits = results.scoreDocs;

        int numTotalHits = results.totalHits;
        System.out.println(numTotalHits + " total matching documents");

        int start = 0;
        int end = Math.min(numTotalHits, hitsPerPage);

        PrintWriter out = new PrintWriter(folderPath + postid + ".txt");

        boolean isFound = false;

        for (int i = start; i < end; i++) {
            Document doc = searcher.doc(hits[i].doc);
            int id = Integer.parseInt(doc.get("SId"));
            String s = doc.get("Body");
            if (id == dupId)
                isFound = true;
            out.println(id);
        }
        out.close();

        if (!isFound) {
            System.out.println("Duplicate not found");
            PrintWriter out2 = new PrintWriter(
                    new FileOutputStream(new File(dupNotFound), true /* append = true */));
            out2.println(postid);
            out2.close();
        }

    }
    rs.close();
    stmt.close();
    conn.close();
}

From source file: lucenesearch.TagBodyCount.java

public void calculateWord(String[] bodyTerms, int N) throws IOException, ParseException {
    IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(new Searcher().getPostIndexPath())));
    IndexSearcher searcher = new IndexSearcher(reader);
    Analyzer analyzer = new StandardAnalyzer();

    HashSet<Integer> found = new HashSet<>();
    HashSet<Integer> self = new HashSet<>();

    System.out.println("Calculating word itself: " + searchTag);
    BooleanQuery.Builder booleanQuery = new BooleanQuery.Builder();
    booleanQuery.add(new QueryParser("Body", analyzer).parse(searchTag), BooleanClause.Occur.MUST);
    booleanQuery.add(IntPoint.newExactQuery("PostTypeId", 2), BooleanClause.Occur.MUST);

    TopDocs results;
    results = searcher.search(booleanQuery.build(), N);

    ScoreDoc[] hits = results.scoreDocs;

    int numTotalHits = results.totalHits;
    System.out.println(numTotalHits + " total matching documents");

    int start = 0;
    int end = Math.min(numTotalHits, N);

    int count = 0;
    int skip = 0;

    for (int i = start; i < end; i++) {
        Document doc = searcher.doc(hits[i].doc);
        if (doc.get("SId") == null) {
            skip++;
            continue;
        }

        int id = Integer.parseInt(doc.get("SId"));
        if (this.acceptedAnswers.contains(id)) {
            self.add(id);
            count++;
        }
    }

    System.out.println("Total Post Cnt = " + count + "/" + this.acceptedAnswers.size());
    System.out.println("Total skipped Post = " + skip);

    for (String bodyTerm : bodyTerms) {
        System.out.println("Query for: " + bodyTerm);
        booleanQuery = new BooleanQuery.Builder();
        booleanQuery.add(new QueryParser("Body", analyzer).parse(bodyTerm), BooleanClause.Occur.MUST);
        //        booleanQuery.add(new QueryParser("Tags", analyzer).parse(this.searchTag), BooleanClause.Occur.MUST);
        booleanQuery.add(IntPoint.newExactQuery("PostTypeId", 2), BooleanClause.Occur.MUST);

        results = searcher.search(booleanQuery.build(), N);

        hits = results.scoreDocs;

        numTotalHits = results.totalHits;
        System.out.println(numTotalHits + " total matching documents");

        start = 0;
        end = Math.min(numTotalHits, N);

        count = 0;
        skip = 0;

        for (int i = start; i < end; i++) {
            Document doc = searcher.doc(hits[i].doc);
            if (doc.get("SId") == null) {
                skip++;
                continue;
            }

            int id = Integer.parseInt(doc.get("SId"));
            if (this.acceptedAnswers.contains(id)) {
                found.add(id);
                count++;
            }
        }
        System.out.println("Total Post Cnt = " + count + "/" + this.acceptedAnswers.size());
        System.out.println("Total skipped Post = " + skip);
        System.out.println("-----------------");
    }
    System.out.println("Self Count = " + self.size() + "/" + this.acceptedAnswers.size());
    System.out.println("Final Count = " + found.size() + "/" + this.acceptedAnswers.size());

    HashSet<Integer> intersect = new HashSet<>();
    intersect.addAll(self);
    intersect.retainAll(found);
    HashSet<Integer> q_only = new HashSet<>();
    q_only.addAll(self);
    q_only.removeAll(found);
    System.out.println("Retrieved by normal query only," + q_only.size());
    HashSet<Integer> tr_only = new HashSet<>();
    tr_only.addAll(found);
    tr_only.removeAll(self);
    System.out.println("Retrieved by translations only," + tr_only.size());
    System.out.println("Retrieved by both methods," + intersect.size());
    HashSet<Integer> diff = new HashSet<>();
    diff.addAll(acceptedAnswers);
    diff.removeAll(self);
    diff.removeAll(found);
    System.out.println("Retrieved by no method," + diff.size());
}

From source file: lucenesearch.TagBodyCount.java

public void calculateCount(String[] bodyTerms, int N) throws IOException, ParseException {
    IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(new Searcher().getPostIndexPath())));
    IndexSearcher searcher = new IndexSearcher(reader);
    Analyzer analyzer = new StandardAnalyzer();

    HashSet<Integer> found = new HashSet<>();
    HashSet<Integer> self = new HashSet<>();

    System.out.println("Calculating word itself: " + searchTag);
    BooleanQuery.Builder booleanQuery = new BooleanQuery.Builder();
    booleanQuery.add(new QueryParser("Body", analyzer).parse(searchTag), BooleanClause.Occur.MUST);
    booleanQuery.add(IntPoint.newExactQuery("PostTypeId", 2), BooleanClause.Occur.MUST);

    TopDocs results;
    results = searcher.search(booleanQuery.build(), N);

    ScoreDoc[] hits = results.scoreDocs;

    int numTotalHits = results.totalHits;
    System.out.println(numTotalHits + " total matching documents");

    int start = 0;
    int end = Math.min(numTotalHits, N);

    int count = 0;
    int skip = 0;

    for (int i = start; i < end; i++) {
        Document doc = searcher.doc(hits[i].doc);
        if (doc.get("SId") == null) {
            skip++;
            continue;
        }

        int id = Integer.parseInt(doc.get("SId"));
        if (this.acceptedAnswers.contains(id)) {
            self.add(id);
            count++;
        }
    }

    System.out.println("Total Post Cnt = " + count + "/" + this.acceptedAnswers.size());
    System.out.println("Total skipped Post = " + skip);

    int[] counts = new int[bodyTerms.length];
    int[] accum_counts = new int[bodyTerms.length];
    int cnt = 0;
    for (String bodyTerm : bodyTerms) {
        HashSet<Integer> temp = new HashSet<>();
        System.out.println("Query for: " + bodyTerm);
        booleanQuery = new BooleanQuery.Builder();
        booleanQuery.add(new QueryParser("Body", analyzer).parse(bodyTerm), BooleanClause.Occur.MUST);
        booleanQuery.add(IntPoint.newExactQuery("PostTypeId", 2), BooleanClause.Occur.MUST);

        results = searcher.search(booleanQuery.build(), N);

        hits = results.scoreDocs;

        numTotalHits = results.totalHits;
        System.out.println(numTotalHits + " total matching documents");

        start = 0;
        end = Math.min(numTotalHits, N);

        count = 0;
        skip = 0;

        for (int i = start; i < end; i++) {
            Document doc = searcher.doc(hits[i].doc);
            if (doc.get("SId") == null) {
                skip++;
                continue;
            }

            int id = Integer.parseInt(doc.get("SId"));
            if (this.acceptedAnswers.contains(id)) {
                temp.add(id);
            }
        }
        HashSet<Integer> temp2 = new HashSet<>();
        temp2.addAll(temp);
        temp.removeAll(found);
        temp.removeAll(self);
        found.addAll(temp2);
        counts[cnt] = temp.size();
        accum_counts[cnt] = cnt == 0 ? temp.size() : accum_counts[cnt - 1] + temp.size();
        cnt++;
        System.out.println("Total Post Cnt = " + count + "/" + this.acceptedAnswers.size());
        System.out.println("Total skipped Post = " + skip);
        System.out.println("-----------------");
    }
    System.out.println("-----Final Count-----");
    System.out.println("Self," + ((double) self.size() / acceptedAnswers.size()) * 100);
    for (int i = 0; i < cnt; i++) {
        System.out.println("Tr" + (i + 1) + "," + ((double) counts[i] / acceptedAnswers.size()) * 100);
    }
    System.out.println("-----Final Accum Count-----");
    //        System.out.println("Self,"+((double)self.size()/acceptedAnswers.size())*100);
    //        for (int i = 0; i < cnt; i++)
    //        {
    //            System.out.println("Tr"+(i+1)+","+((double)accum_counts[i]/acceptedAnswers.size())*100);
    //        }
    System.out.println("Cnt,Method,Value");
    for (int i = 0; i < cnt; i++) {
        System.out.println((i + 1) + "," + "Translation" + ","
                + ((double) accum_counts[i] / acceptedAnswers.size()) * 100);
        System.out
                .println((i + 1) + "," + "self" + "," + ((double) self.size() / acceptedAnswers.size()) * 100);
    }

}

From source file: lucenesearch.TagBodyCount.java

public void calculatePR(String[] bodyTerms, int N) throws IOException, ParseException {
    IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(new Searcher().getPostIndexPath())));
    IndexSearcher searcher = new IndexSearcher(reader);
    Analyzer analyzer = new StandardAnalyzer();

    HashSet<Integer> found = new HashSet<>();
    HashSet<Integer> total = new HashSet<>();

    System.out.println("Calculating word itself: " + searchTag);
    BooleanQuery.Builder booleanQuery = new BooleanQuery.Builder();
    booleanQuery.add(new QueryParser("Body", analyzer).parse(searchTag), BooleanClause.Occur.MUST);
    booleanQuery.add(IntPoint.newExactQuery("PostTypeId", 2), BooleanClause.Occur.MUST);

    TopDocs results;
    results = searcher.search(booleanQuery.build(), N);

    ScoreDoc[] hits = results.scoreDocs;

    int numTotalHits = results.totalHits;
    System.out.println(numTotalHits + " total matching documents");

    int start = 0;
    int end = Math.min(numTotalHits, N);

    int count_r = 0;
    int count_n = 0;
    int skip = 0;

    for (int i = start; i < end; i++) {
        Document doc = searcher.doc(hits[i].doc);
        if (doc.get("SId") == null) {
            skip++;
            continue;
        }

        int id = Integer.parseInt(doc.get("SId"));

        if (!hasTag(id, mainTag)) {
            continue;
        }

        if (this.acceptedAnswers.contains(id)) {
            found.add(id);
            count_r++;
        } else {
            count_n++;
        }
        total.add(id);
    }

    System.out.println("Total Post Cnt = " + count_r + "/" + this.acceptedAnswers.size());
    System.out.println("Total skipped Post = " + skip);

    double[] P = new double[bodyTerms.length + 1];
    double[] R = new double[bodyTerms.length + 1];
    int cnt = 0;
    P[cnt] = (double) (count_r) / (count_r + count_n);
    R[cnt] = (double) count_r / (acceptedAnswers.size());
    cnt++;

    for (String bodyTerm : bodyTerms) {
        HashSet<Integer> temp = new HashSet<>();
        System.out.println("Query for: " + bodyTerm);
        booleanQuery = new BooleanQuery.Builder();
        booleanQuery.add(new QueryParser("Body", analyzer).parse(bodyTerm), BooleanClause.Occur.MUST);
        booleanQuery.add(IntPoint.newExactQuery("PostTypeId", 2), BooleanClause.Occur.MUST);

        results = searcher.search(booleanQuery.build(), N);

        hits = results.scoreDocs;

        numTotalHits = results.totalHits;
        System.out.println(numTotalHits + " total matching documents");

        start = 0;
        end = Math.min(numTotalHits, N);

        count_r = 0;
        count_n = 0;
        skip = 0;

        for (int i = start; i < end; i++) {
            Document doc = searcher.doc(hits[i].doc);
            if (doc.get("SId") == null) {
                skip++;
                continue;
            }

            int id = Integer.parseInt(doc.get("SId"));

            if (!hasTag(id, searchTag)) {
                skip++;
                continue;
            }

            if (this.acceptedAnswers.contains(id)) {
                found.add(id);
                count_r++;
            } else {
                count_n++;
            }
            total.add(id);
        }
        P[cnt] = (double) found.size() / total.size();
        R[cnt] = (double) found.size() / (acceptedAnswers.size());
        cnt++;
        System.out.println("Total Post Cnt = " + count_r + "/" + count_n + "/" + this.acceptedAnswers.size());
        System.out.println("Total skipped Post = " + skip);
        System.out.println("-----------------");
    }
    //        System.out.println("-----Final Count-----");
    //        System.out.println("Self,"+((double)self.size()/acceptedAnswers.size())*100);
    //        for (int i = 0; i < cnt; i++)
    //        {
    //            System.out.println("Tr"+(i+1)+","+((double)counts[i]/acceptedAnswers.size())*100);
    //        }
    System.out.println("-----Final Accum Count-----");
    //        System.out.println("Self,"+((double)self.size()/acceptedAnswers.size())*100);
    //        for (int i = 0; i < cnt; i++)
    //        {
    //            System.out.println("Tr"+(i+1)+","+((double)accum_counts[i]/acceptedAnswers.size())*100);
    //        }
    System.out.println("Cnt,Method,Value");
    for (int i = 0; i < cnt; i++) {
        System.out.println((i) + "," + "Precision" + "," + P[i] * 100);
        System.out.println((i) + "," + "Recall" + "," + R[i] * 100);
    }

}

From source file: lucenesearch.TagBodyCount.java

public void calculateVenn(String[] bodyTerms, int N) throws IOException, ParseException {
    IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(new Searcher().getPostIndexPath())));
    IndexSearcher searcher = new IndexSearcher(reader);
    Analyzer analyzer = new StandardAnalyzer();

    ArrayList<HashSet<Integer>> sets = new ArrayList<>();
    //        HashSet<?>[] sets = new HashSet<?>[bodyTerms.length + 1];

    System.out.println("Calculating word itself: " + searchTag);
    BooleanQuery.Builder booleanQuery = new BooleanQuery.Builder();
    booleanQuery.add(new QueryParser("Body", analyzer).parse(searchTag), BooleanClause.Occur.MUST);
    booleanQuery.add(IntPoint.newExactQuery("PostTypeId", 2), BooleanClause.Occur.MUST);

    TopDocs results;
    results = searcher.search(booleanQuery.build(), N);

    ScoreDoc[] hits = results.scoreDocs;

    int numTotalHits = results.totalHits;
    System.out.println(numTotalHits + " total matching documents");

    int start = 0;
    int end = Math.min(numTotalHits, N);

    int count = 0;
    int skip = 0;

    sets.add(0, acceptedAnswers);

    HashSet<Integer> temp = new HashSet<Integer>();
    sets.add(1, new HashSet<>());

    for (int i = start; i < end; i++) {
        Document doc = searcher.doc(hits[i].doc);
        if (doc.get("SId") == null) {
            skip++;
            continue;
        }

        int id = Integer.parseInt(doc.get("SId"));
        if (this.acceptedAnswers.contains(id)) {
            sets.get(1).add(id);
        }
    }

    System.out.println("Total Post Cnt = " + count + "/" + this.acceptedAnswers.size());
    System.out.println("Total skipped Post = " + skip);

    int[] counts = new int[bodyTerms.length];
    int[] accum_counts = new int[bodyTerms.length];
    int cnt = 0;
    int arrayIndex = 2;
    for (String bodyTerm : bodyTerms) {
        sets.add(arrayIndex, new HashSet<>());
        System.out.println("Query for: " + bodyTerm);
        booleanQuery = new BooleanQuery.Builder();
        booleanQuery.add(new QueryParser("Body", analyzer).parse(bodyTerm), BooleanClause.Occur.MUST);
        booleanQuery.add(IntPoint.newExactQuery("PostTypeId", 2), BooleanClause.Occur.MUST);

        results = searcher.search(booleanQuery.build(), N);

        hits = results.scoreDocs;

        numTotalHits = results.totalHits;
        System.out.println(numTotalHits + " total matching documents");

        start = 0;
        end = Math.min(numTotalHits, N);

        count = 0;
        skip = 0;

        for (int i = start; i < end; i++) {
            Document doc = searcher.doc(hits[i].doc);
            if (doc.get("SId") == null) {
                skip++;
                continue;
            }

            int id = Integer.parseInt(doc.get("SId"));
            if (this.acceptedAnswers.contains(id)) {
                sets.get(arrayIndex).add(id);
            }
        }
        arrayIndex++;
        counts[cnt] = temp.size();
        accum_counts[cnt] = cnt == 0 ? temp.size() : accum_counts[cnt - 1] + temp.size();
        cnt++;
        //            System.out.println("Total Post Cnt = " + count + "/" + this.acceptedAnswers.size());
        //            System.out.println("Total skipped Post = " + skip);
        System.out.println("-----------------");
    }

    System.out.println("-------------------\nFinal Res\n-------------\n");
    int pow = 1;
    for (int i = 0; i < bodyTerms.length + 1; i++)
        pow *= 2;

    HashSet<Integer> temp2 = new HashSet<>();
    for (HashSet<Integer> hs : sets) {
        temp2.addAll(hs);
    }
    int size = temp2.size();
    for (int i = 1; i <= pow - 1; i++) {
        ArrayList<Integer> numbers = new ArrayList<>();
        //                int rem = 2;
        int dig = 2;
        int n = i;
        while (n != 0) {
            if (n % 2 == 1) {
                numbers.add(dig);
            }
            n /= 2;
            dig++;
        }
        //            System.out.println(numbers);
        temp = new HashSet<>();
        temp.addAll(sets.get(numbers.get(0) - 2));
        for (Integer number : numbers) {
            temp.retainAll(sets.get(number - 2)); //-1 to include self translation and accepted
        }
        String s = "";
        if (numbers.size() == 1)
            s = "area";
        else
            s = "n";
        for (Integer number : numbers) {
            s = s + (number - 1);
        }
        //            s += "="+((double)temp.size() / acceptedAnswers.size())+",";
        s += "=" + (temp.size()) + ",";
        System.out.println(s);
    }
    String s = "category = c(\"All\",\"" + this.searchTag + "\",";
    for (String t : bodyTerms) {
        s = s + "\"" + t + "\",";
    }
    s += "),";
    System.out.println(s);

    //        System.out.println("-----Final Count-----");
    //        System.out.println("Self," + ((double) self.size() / acceptedAnswers.size()) * 100);
    //        for (int i = 0; i < cnt; i++)
    //        {
    //            System.out.println("Tr" + (i + 1) + "," + ((double) counts[i] / acceptedAnswers.size()) * 100);
    //        }
    //        System.out.println("-----Final Accum Count-----");
    //        System.out.println("Self,"+((double)self.size()/acceptedAnswers.size())*100);
    //        for (int i = 0; i < cnt; i++)
    //        {
    //            System.out.println("Tr"+(i+1)+","+((double)accum_counts[i]/acceptedAnswers.size())*100);
    //        }
    //        System.out.println("Cnt,Method,Value");
    //        for (int i = 0; i < cnt; i++)
    //        {
    //            System.out.println((i + 1) + "," + "Translation" + "," + ((double) accum_counts[i] / acceptedAnswers.size()) * 100);
    //            System.out.println((i + 1) + "," + "self" + "," + ((double) self.size() / acceptedAnswers.size()) * 100);
    //        }

}