List of usage examples for org.apache.lucene.index DirectoryReader open
public static DirectoryReader open(final Directory directory) throws IOException
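Before the collected examples, here is a minimal sketch of the common pattern, assuming a Lucene 6+ style API; the index path ("/tmp/index"), field name ("contents"), and query string are illustrative placeholders, not taken from the examples below.

import java.nio.file.Paths;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.FSDirectory;

public class OpenExample {
    public static void main(String[] args) throws Exception {
        // DirectoryReader.open(Directory) returns a point-in-time view of the index.
        // try-with-resources closes the reader (and releases the underlying files) when done.
        try (DirectoryReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("/tmp/index")))) {
            IndexSearcher searcher = new IndexSearcher(reader);
            QueryParser parser = new QueryParser("contents", new StandardAnalyzer());
            TopDocs hits = searcher.search(parser.parse("lucene"), 10);
            System.out.println(hits.totalHits + " matching documents");
        }
    }
}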
From source file:ch.algotrader.rest.index.SecurityIndexer.java
License:Open Source License
public List<SecurityVO> search(String queryStr) throws ParseException {
    try (IndexReader reader = DirectoryReader.open(index)) {
        IndexSearcher searcher = new IndexSearcher(reader);
        QueryParser queryParser = new MultiFieldQueryParser(FIELDS, new StandardAnalyzer());
        queryParser.setAllowLeadingWildcard(true);
        Query query = queryParser.parse(queryStr);
        TopDocs results = searcher.search(query, 10);
        return Arrays.asList(results.scoreDocs).stream().map(sd -> searchDocument(searcher, sd))
                .mapToLong(d -> d.getField("id").numericValue().longValue()).mapToObj(securityCache::get)
                .collect(Collectors.toList());
    } catch (IOException ioe) {
        throw new UnrecoverableCoreException("Unexpected I/O error accessing security index", ioe);
    }
}
From source file:ch.ksfx.web.services.lucene.ObservationSearch.java
License:Open Source License
public void prepare(String allQuery, String scalarValueQuery, Map<String, String> complexValueQuery,
        Map<String, String> metaDataQuery, Date dateFrom, Date dateTo, String seriesId) {
    try {
        IndexReader reader = DirectoryReader
                .open(FSDirectory.open(Paths.get(systemEnvironment.getApplicationIndexfilePath())));
        searcher = new IndexSearcher(reader);
        Analyzer analyzer = new StandardAnalyzer();
        QueryParser parser = new QueryParser("catch_all", analyzer);
        System.out.println("Complex value query: " + complexValueQuery);
        String luceneQuery = buildQuery(allQuery, scalarValueQuery, complexValueQuery, metaDataQuery,
                dateFrom, dateTo, seriesId);
        System.out.println("Lucene query: " + luceneQuery);
        query = parser.parse(luceneQuery);
    } catch (Exception e) {
        e.printStackTrace();
        throw new RuntimeException("Error in Lucene query PREPARE");
    }
}
From source file:ci6226.eval_index_reader.java
public eval_index_reader(Analyzer _analyzer, String _dir, String[] _searchList, int _topn)
        throws IOException, org.apache.lucene.queryparser.classic.ParseException, InvalidTokenOffsetsException {
    String indexdir = "./" + _dir;
    String field = "text";
    IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(indexdir)));
    IndexSearcher searcher = new IndexSearcher(reader);
    PrintWriter writer = new PrintWriter(_dir + ".csv", "UTF-8");
    Searchit(reader, searcher, _analyzer, field, _searchList, _topn, writer);
    searcher.setSimilarity(new similarity_tf_rm());
    Searchit(reader, searcher, _analyzer, field, _searchList, _topn, writer);
    searcher.setSimilarity(new similiarty_queryNorm());
    Searchit(reader, searcher, _analyzer, field, _searchList, _topn, writer);
    writer.close();
    reader.close();
    // searcher.setSimilarity(null);
}
From source file:ci6226.facetsearch.java
public static void main(String[] args) throws Exception {
    String index = "./myindex";
    String field = "text";
    String queries = null;
    int hitsPerPage = 10;
    boolean raw = false;
    // http://lucene.apache.org/core/4_0_0/facet/org/apache/lucene/facet/doc-files/userguide.html#facet_accumulation
    IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(index)));
    IndexSearcher searcher = new IndexSearcher(reader);
    // :Post-Release-Update-Version.LUCENE_XY:
    // TODO: use the same analyzer that was used to build the index
    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_47);
    BufferedReader in = null;
    if (queries != null) {
        in = new BufferedReader(new InputStreamReader(new FileInputStream(queries), "UTF-8"));
    } else {
        in = new BufferedReader(new InputStreamReader(System.in, "UTF-8"));
    }
    // :Post-Release-Update-Version.LUCENE_XY:
    QueryParser parser = new QueryParser(Version.LUCENE_47, field, analyzer);
    while (true) {
        System.out.println("Enter query: ");
        String line = in.readLine();
        line = line.trim();
        if (line.length() == 0) {
            break;
        }
        Query query = parser.parse(line);
        System.out.println("Searching for: " + query.toString(field));
        Date start = new Date();
        searcher.search(query, null, 100);
        Date end = new Date();
        System.out.println("Time: " + (end.getTime() - start.getTime()) + "ms");
        TopDocs results = searcher.search(query, 5 * hitsPerPage);
        ScoreDoc[] hits = results.scoreDocs;
        int numTotalHits = results.totalHits;
        // N = max docs, df = total matched docs, idf = log(N/df)
        for (int i = 0; i < hits.length; i++) {
            Document doc = searcher.doc(hits[i].doc);
            System.out.println(ANSI_BLUE + (i + 1) + ANSI_RESET + "\nScore=\t" + hits[i].score);
            String rtext = doc.get(field);
            System.out.println("Text=\t" + rtext);
            Terms vector = reader.getTermVector(i, "text");
            if (vector == null) {
                continue;
            }
            // System.out.println(vector.getSumDocFreq());
            // Terms vector = reader.getTermVector(hits[i].doc, field); // hits[i].doc = docID
            TermsEnum termsEnum = vector.iterator(null);
            termsEnum = vector.iterator(termsEnum);
            Map<String, Integer> frequencies = new HashMap<>();
            BytesRef text = null;
            while ((text = termsEnum.next()) != null) {
                String term = text.utf8ToString();
                int freq = (int) termsEnum.totalTermFreq();
                frequencies.put(term, freq);
            }
        }
        // String[] facetCatlog = {""};
        System.out.println(numTotalHits + " total matching documents");
    }
    reader.close();
}
From source file:ci6226.loadIndex.java
public static void main(String[] args) throws Exception {
    String usage = "Usage:\tjava org.apache.lucene.demo.SearchFiles [-index dir] [-field f] [-repeat n] [-queries file] [-query string] [-raw] [-paging hitsPerPage]\n\nSee http://lucene.apache.org/core/4_1_0/demo/ for details.";
    if (args.length > 0 && ("-h".equals(args[0]) || "-help".equals(args[0]))) {
        System.out.println(usage);
        System.exit(0);
    }
    String index = "./myindex";
    String field = "text";
    String queries = null;
    int hitsPerPage = 10;
    boolean raw = false;
    IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(index)));
    IndexSearcher searcher = new IndexSearcher(reader);
    // :Post-Release-Update-Version.LUCENE_XY:
    // TODO: use the same analyzer that was used to build the index
    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_47);
    BufferedReader in = null;
    if (queries != null) {
        in = new BufferedReader(new InputStreamReader(new FileInputStream(queries), "UTF-8"));
    } else {
        in = new BufferedReader(new InputStreamReader(System.in, "UTF-8"));
    }
    // :Post-Release-Update-Version.LUCENE_XY:
    QueryParser parser = new QueryParser(Version.LUCENE_47, field, analyzer);
    while (true) {
        System.out.println("Enter query: ");
        String line = in.readLine();
        line = line.trim();
        if (line.length() == 0) {
            break;
        }
        Query query = parser.parse(line);
        System.out.println("Searching for: " + query.toString(field));
        Date start = new Date();
        searcher.search(query, null, 100);
        Date end = new Date();
        System.out.println("Time: " + (end.getTime() - start.getTime()) + "ms");
        doPagingSearch(in, searcher, query, hitsPerPage, raw, true, analyzer);
    }
    reader.close();
}
From source file:cn.codepub.redis.directory.Main.java
License:Apache License
public static void testRedisDirectoryWithShardedJedisPool() throws IOException {
    long start = System.currentTimeMillis();
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(new WhitespaceAnalyzer())
            .setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    //indexWriterConfig.setInfoStream(System.out);
    //indexWriterConfig.setRAMBufferSizeMB(2048);
    //LogByteSizeMergePolicy logByteSizeMergePolicy = new LogByteSizeMergePolicy();
    //logByteSizeMergePolicy.setMinMergeMB(1);
    //logByteSizeMergePolicy.setMaxMergeMB(64);
    //logByteSizeMergePolicy.setMaxCFSSegmentSizeMB(64);
    //indexWriterConfig.setRAMBufferSizeMB(1024).setMergePolicy(logByteSizeMergePolicy).setUseCompoundFile(false);
    //GenericObjectPoolConfig genericObjectPoolConfig = new GenericObjectPoolConfig();
    //genericObjectPoolConfig.setMaxWaitMillis(3000);
    List<JedisShardInfo> shards = new ArrayList<>();
    JedisShardInfo si = new JedisShardInfo("localhost", 6379, Constants.TIME_OUT);
    //JedisShardInfo si2 = new JedisShardInfo("localhost", 6380);
    shards.add(si);
    //shards.add(si2);
    JedisPoolConfig jedisPoolConfig = new JedisPoolConfig();
    ShardedJedisPool shardedJedisPool = new ShardedJedisPool(jedisPoolConfig, shards);
    RedisDirectory redisDirectory = new RedisDirectory(new ShardedJedisPoolStream(shardedJedisPool));
    IndexWriter indexWriter = new IndexWriter(redisDirectory, indexWriterConfig);
    for (int i = 0; i < 10000000; i++) {
        indexWriter.addDocument(addDocument(i));
    }
    indexWriter.commit();
    indexWriter.close();
    redisDirectory.close();
    long end = System.currentTimeMillis();
    log.error("RedisDirectoryWithShardedJedisPool consumes {}s!", (end - start) / 1000);
    shardedJedisPool = new ShardedJedisPool(jedisPoolConfig, shards);
    start = System.currentTimeMillis();
    IndexSearcher indexSearcher = new IndexSearcher(
            DirectoryReader.open(new RedisDirectory(new ShardedJedisPoolStream(shardedJedisPool))));
    int total = 0;
    for (int i = 0; i < 10000000; i++) {
        TermQuery key1 = new TermQuery(new Term("key1", "key" + i));
        TopDocs search = indexSearcher.search(key1, 10);
        total += search.totalHits;
    }
    System.out.println(total);
    end = System.currentTimeMillis();
    log.error("RedisDirectoryWithShardedJedisPool search consumes {}ms!", (end - start));
}
From source file:cn.fql.blogspider.SearchMain.java
License:Open Source License
public static void main(String[] args) throws Exception {
    String usage = "Usage:\tjava org.apache.lucene.demo.SearchFiles [-index dir] [-field f] [-repeat n] [-queries file] [-query string] [-raw] [-paging hitsPerPage]\n\nSee http://lucene.apache.org/core/4_1_0/demo/ for details.";
    if (args.length > 0 && ("-h".equals(args[0]) || "-help".equals(args[0]))) {
        System.out.println(usage);
        System.exit(0);
    }
    String index = "D:\\test\\index";
    String field = "contents";
    String queries = null;
    String queryString = null;
    int hitsPerPage = 10;
    // Command-line argument parsing (-index, -field, -queries, -query, -repeat, -raw, -paging) is commented out
    // in the original source and omitted here.
    IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(index)));
    IndexSearcher searcher = new IndexSearcher(reader);
    //Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_40);
    Analyzer analyzer = new IKAnalyzer();
    BufferedReader in = null;
    if (queries != null) {
        in = new BufferedReader(new InputStreamReader(new FileInputStream(queries), "UTF-8"));
    } else {
        in = new BufferedReader(new InputStreamReader(System.in, "UTF-8"));
    }
    QueryParser parser = new QueryParser(Version.LUCENE_40, field, analyzer);
    while (true) {
        if (queries == null && queryString == null) {
            System.out.println("Enter query: ");
        }
        String line = (queryString != null) ? queryString : in.readLine();
        if (line == null) {
            break;
        }
        line = line.trim();
        if (line.length() == 0) {
            break;
        }
        Query query = parser.parse(line);
        System.out.println("Searching for: " + query.toString(field));
        doPagingSearch(in, searcher, query, hitsPerPage, (queries == null) && (queryString == null));
        if (queryString != null) {
            break;
        }
    }
    reader.close();
}
From source file:cn.hbu.cs.esearch.index.IndexReaderDispenser.java
License:Apache License
/**
 * Constructs a new IndexReader instance over the index managed by the given DirectoryManager.
 *
 * @return the constructed IndexReader instance, or null if no index exists yet
 * @throws java.io.IOException
 */
private EsearchMultiReader<R> newReader(DirectoryManager dirMgr, IndexReaderDecorator<R> decorator,
        IndexSignature signature) throws IOException {
    if (!dirMgr.exists()) {
        return null;
    }
    Directory dir = dirMgr.getDirectory();
    if (!DirectoryReader.indexExists(dir)) {
        return null;
    }
    int numTries = INDEX_OPEN_NUM_RETRIES;
    EsearchMultiReader<R> reader = null;
    // Try a bounded number of times; the segments file might be in the middle of being updated.
    while (reader == null) {
        if (numTries == 0) {
            LOGGER.error("Problem refreshing disk index, all attempts failed.");
            throw new IOException("problem opening new index");
        }
        numTries--;
        try {
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("opening index reader at: " + dirMgr.getPath());
            }
            DirectoryReader srcReader = DirectoryReader.open(dir);
            try {
                reader = new EsearchMultiReader<R>(srcReader, decorator);
                _currentSignature = signature;
            } catch (IOException ioe) {
                // Close the source reader if EsearchMultiReader construction fails.
                if (srcReader != null) {
                    srcReader.close();
                }
                throw ioe;
            }
        } catch (IOException ioe) {
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                LOGGER.warn("thread interrupted.");
                continue;
            }
        }
    }
    return reader;
}
From source file:cn.hbu.cs.esearch.index.RAMSearchIndex.java
License:Apache License
private EsearchMultiReader<R> openIndexReaderInternal() throws IOException {
    if (DirectoryReader.indexExists(_directory)) {
        DirectoryReader srcReader = null;
        EsearchMultiReader<R> finalReader = null;
        try {
            // For RAM indexes, just get a new index reader.
            srcReader = DirectoryReader.open(_directory);
            finalReader = new EsearchMultiReader<R>(srcReader, _decorator);
            DocIDMapper mapper = _idxMgr._docIDMapperFactory.getDocIDMapper(finalReader);
            finalReader.setDocIDMapper(mapper);
            return finalReader;
        } catch (IOException ioe) {
            // If reader decoration fails, still need to close the source reader.
            if (srcReader != null) {
                srcReader.close();
            }
            throw ioe;
        }
    } else {
        // null indicates no index exists, following the contract
        return null;
    }
}
From source file:cn.util.test.java
License:Open Source License
public static void main(String[] args) throws IOException {
    String lirePath = "F:\\Lire-0.9.5\\demo\\";
    String indexPath = lirePath + "index";
    String picsPath = lirePath + "pics";
    String reco = lirePath + "test.jpg";
    // Check that the query file exists and can be read as an image.
    BufferedImage img = null;
    File f = new File(reco);
    if (f.exists()) {
        try {
            img = ImageIO.read(f);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
    IndexReader ir = DirectoryReader.open(FSDirectory.open(Paths.get(indexPath)));
    ImageSearcher searcher = new GenericFastImageSearcher(5, CEDD.class);
    // ImageSearcher searcher = new GenericFastImageSearcher(30, AutoColorCorrelogram.class);
    // searching with an image file ...
    ImageSearchHits hits = searcher.search(img, ir);
    // searching with a Lucene document instance ...
    // ImageSearchHits hits = searcher.search(ir.document(0), ir);
    for (int i = 0; i < hits.length(); i++) {
        String fileName = ir.document(hits.documentID(i)).getValues(DocumentBuilder.FIELD_NAME_IDENTIFIER)[0];
        if (hits.score(i) > 10) {
            System.out.println(hits.score(i) + ": \t" + fileName);
        }
    }
}