List of usage examples for org.apache.lucene.index.IndexWriter.close()
@Override public void close() throws IOException
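IndexWriter implements java.io.Closeable, so close() flushes buffered documents, commits pending changes by default, and releases the index write lock. Before the project examples below, here is a minimal sketch of that pattern (not taken from any of the source files on this page), assuming a recent Lucene release (5.x or later, where FSDirectory.open takes a java.nio.file.Path) and a hypothetical "example-index" directory; try-with-resources calls close() automatically:

import java.nio.file.Paths;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class IndexWriterCloseSketch {
    public static void main(String[] args) throws Exception {
        // "example-index" is a hypothetical path used only for illustration
        try (Directory dir = FSDirectory.open(Paths.get("example-index"));
                IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            Document doc = new Document();
            doc.add(new TextField("f", "hello lucene", Store.YES));
            writer.addDocument(doc);
            // close() runs implicitly at the end of this block: it flushes buffered
            // documents, commits by default, and releases the index write lock
        }
    }
}

The examples below show the same close() call in older, explicit styles: close() in-line after indexing, close() in a finally block, or close() immediately after creating an empty index.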
From source file:com.shaie.PhraseVsSpanQuery.java
License:Apache License
@SuppressWarnings("resource") public static void main(String[] args) throws Exception { final Directory dir = new RAMDirectory(); final IndexWriterConfig conf = new IndexWriterConfig(new WhitespaceAnalyzer()); final IndexWriter writer = new IndexWriter(dir, conf); final Document doc = new Document(); doc.add(new TextField("f", new TokenStream() { final PositionIncrementAttribute pos = addAttribute(PositionIncrementAttribute.class); final CharTermAttribute term = addAttribute(CharTermAttribute.class); boolean first = true, done = false; @Override//w w w .j av a2s .com public boolean incrementToken() throws IOException { if (done) { return false; } if (first) { term.setEmpty().append("a"); pos.setPositionIncrement(1); first = false; } else { term.setEmpty().append("b"); pos.setPositionIncrement(0); done = true; } return true; } })); writer.addDocument(doc); writer.close(); final DirectoryReader reader = DirectoryReader.open(dir); final IndexSearcher searcher = new IndexSearcher(reader); final LeafReader ar = reader.leaves().get(0).reader(); final TermsEnum te = ar.terms("f").iterator(); BytesRef scratch = new BytesRef(); while ((scratch = te.next()) != null) { System.out.println(scratch.utf8ToString()); final PostingsEnum dape = ar.postings(new Term("f", scratch.utf8ToString())); System.out.println(" doc=" + dape.nextDoc() + ", pos=" + dape.nextPosition()); } System.out.println(); // try a phrase query with a slop final PhraseQuery pqNoSlop = buildPhraseQuery(0); System.out.println("searching for \"a b\"; num results = " + searcher.search(pqNoSlop, 10).totalHits); final PhraseQuery pqSlop1 = buildPhraseQuery(1); System.out.println("searching for \"a b\"~1; num results = " + searcher.search(pqSlop1, 10).totalHits); final PhraseQuery pqSlop3 = buildPhraseQuery(3); System.out.println("searching for \"a b\"~3; num results = " + searcher.search(pqSlop3, 10).totalHits); final SpanNearQuery snqUnOrdered = new SpanNearQuery( new SpanQuery[] { new SpanTermQuery(new Term("f", "a")), new SpanTermQuery(new Term("f", "b")) }, 1, false); System.out.println("searching for SpanNearUnordered('a', 'b'), slop=1; num results = " + searcher.search(snqUnOrdered, 10).totalHits); final SpanNearQuery snqOrdered = new SpanNearQuery( new SpanQuery[] { new SpanTermQuery(new Term("f", "a")), new SpanTermQuery(new Term("f", "b")) }, 1, true); System.out.println("searching for SpanNearOrdered('a', 'b'), slop=1; num results = " + searcher.search(snqOrdered, 10).totalHits); reader.close(); }
From source file:com.shaie.UTF8Indexing.java
License:Apache License
@SuppressWarnings("resource") public static void main(String[] args) throws Exception { final Directory dir = new RAMDirectory(); final StandardAnalyzer analyzer = new StandardAnalyzer(); final IndexWriterConfig conf = new IndexWriterConfig(analyzer); final IndexWriter writer = new IndexWriter(dir, conf); final Document doc = new Document(); doc.add(new TextField("f", "Russia\u2013United States relations", Store.YES)); writer.addDocument(doc);//from w w w.ja v a 2 s. c om writer.close(); final DirectoryReader reader = DirectoryReader.open(dir); final IndexSearcher searcher = new IndexSearcher(reader); final QueryParser qp = new QueryParser("f", analyzer); search(searcher, qp, "Russia United States relations"); search(searcher, qp, "\"Russia United states relations\""); search(searcher, qp, "\"Russia-United states relations\""); search(searcher, qp, "\"Russia\u2013United states relations\""); reader.close(); dir.close(); }
From source file:com.shmsoft.dmass.main.FileProcessor.java
License:Apache License
/**
 * Search metadata and file contents
 *
 * @param metadata
 * @return true if match is found else false
 */
private boolean isResponsive(Metadata metadata) {
    // set true if search finds a match
    boolean isResponsive = false;
    // get culling parameters
    String queryString = Project.getProject().getCullingAsTextBlock();
    // TODO parse important parameters to mappers and reducers individually, not globally
    IndexWriter writer = null;
    RAMDirectory idx = null;
    try {
        // construct a RAMDirectory to hold the in-memory representation of the index
        idx = new RAMDirectory();
        // make a writer to create the index
        writer = new IndexWriter(idx, new StandardAnalyzer(Version.LUCENE_30), true,
                IndexWriter.MaxFieldLength.UNLIMITED);
        writer.addDocument(createDocument(metadata));
        // optimize and close the writer to finish building the index
        writer.optimize();
        writer.close();
        // add the built index to the filesystem index
        if (Project.getProject().isLuceneFSIndexEnabled() && luceneIndex != null) {
            luceneIndex.addToIndex(idx);
        }
        SolrIndex.getInstance().addBatchData(metadata);
        if (queryString == null || queryString.trim().isEmpty()) {
            return true;
        }
        // build an IndexSearcher using the in-memory index
        Searcher searcher = new IndexSearcher(idx);
        // search directory
        isResponsive = search(searcher, queryString);
        searcher.close();
    } catch (Exception e) {
        // TODO handle this better
        // if anything happens - don't stop processing
        e.printStackTrace(System.out);
    } finally {
        try {
            if (writer != null) {
                writer.close();
            }
            if (idx != null) {
                idx.close();
            }
        } catch (Exception e) {
            // swallow exception, what else can you do now?
        }
    }
    return isResponsive;
}
From source file:com.silverwrist.dynamo.index.IndexServiceImpl.java
License:Mozilla Public License
IndexServiceImpl(IndexManagerObject base, QualifiedNameKey identity, int ndx, IndexOps ops, ReferenceQueue rq,
        boolean create) throws DatabaseException, IndexException {
    m_analyzer = createAnalyzer(ops.getAnalyzerClassName(ndx));
    m_base = base;
    m_identity = identity;
    m_directory = new IndexDirectoryImpl(ndx, ops);
    m_cleanup = new DirectoryAutoCleanup(this, m_directory, rq);
    if (create) { // create the new index
        try { // Use an IndexWriter to create the index for the first time.
            IndexWriter iwr = new IndexWriter(m_directory, m_analyzer, true);
            iwr.close();
        } // end try
        catch (IOException e) { // translate Lucene's IOException here
            IndexException ie = new IndexException(IndexServiceImpl.class, "IndexMessages", "indexCreate.fail", e);
            ie.setParameter(0, m_identity.toString());
            throw ie;
        } // end catch
    } // end if
}
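The constructor above creates an empty index simply by opening an IndexWriter with the pre-4.0 (directory, analyzer, create) constructor and closing it right away. A sketch of the same open-and-close step with the config-based API that replaced that constructor, assuming a recent Lucene (5.x or later) and that m_directory and m_analyzer are still a Lucene Directory and Analyzer:

// Sketch only: "create an empty index, then close" with IndexWriterConfig.
IndexWriterConfig cfg = new IndexWriterConfig(m_analyzer);
cfg.setOpenMode(IndexWriterConfig.OpenMode.CREATE); // wipe/create, like the old 'true' flag
IndexWriter iwr = new IndexWriter(m_directory, cfg);
iwr.close(); // closing immediately leaves an empty but valid index behind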
From source file:com.silverwrist.dynamo.index.IndexServiceImpl.java
License:Mozilla Public License
public void addItem(String item_namespace, String item_name, Object item, String scope, java.util.Date date,
        DynamoUser owner, String text) throws IndexException {
    // Create a new Lucene Document containing the item information.
    Document doc = new Document();
    doc.add(Field.Keyword("id", createTag(item_namespace, item_name, item)));
    doc.add(Field.Keyword("date", date));
    doc.add(Field.Keyword("owner", owner.getName()));
    doc.add(Field.Keyword("scope", scope));
    doc.add(Field.UnStored("text", text));
    try { // Use an IndexWriter to write it to the index.
        IndexWriter iwr = new IndexWriter(m_directory, m_analyzer, false);
        iwr.addDocument(doc);
        iwr.close();
    } // end try
    catch (IOException e) { // translate Lucene's IOException here
        IndexException ie = new IndexException(IndexServiceImpl.class, "IndexMessages", "addItem.fail", e);
        ie.setParameter(0, item_namespace);
        ie.setParameter(1, item_name);
        ie.setParameter(2, m_identity.toString());
        throw ie;
    } // end catch
}
From source file:com.slieer.app.lecene3x.IndexFiles.java
License:Apache License
/** Index all text files under a directory. */
public static void main(String[] args) {
    String usage = "java org.apache.lucene.demo.IndexFiles"
            + " [-index INDEX_PATH] [-docs DOCS_PATH] [-update]\n\n"
            + "This indexes the documents in DOCS_PATH, creating a Lucene index "
            + "in INDEX_PATH that can be searched with SearchFiles";
    String indexPath = "index";
    String docsPath = null;
    boolean create = true;
    for (int i = 0; i < args.length; i++) {
        if ("-index".equals(args[i])) {
            indexPath = args[i + 1];
            i++;
        } else if ("-docs".equals(args[i])) {
            docsPath = args[i + 1];
            i++;
        } else if ("-update".equals(args[i])) {
            create = false;
        }
    }

    if (docsPath == null) {
        System.err.println("Usage: " + usage);
        System.exit(1);
    }

    final File docDir = new File(docsPath);
    if (!docDir.exists() || !docDir.canRead()) {
        System.out.println("Document directory '" + docDir.getAbsolutePath()
                + "' does not exist or is not readable, please check the path");
        System.exit(1);
    }

    Date start = new Date();
    try {
        System.out.println("Indexing to directory '" + indexPath + "'...");

        Directory dir = FSDirectory.open(new File(indexPath));
        // :Post-Release-Update-Version.LUCENE_XY:
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_4_9);
        IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_4_9, analyzer);

        if (create) {
            // Create a new index in the directory, removing any
            // previously indexed documents:
            iwc.setOpenMode(OpenMode.CREATE);
        } else {
            // Add new documents to an existing index:
            iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);
        }

        // Optional: for better indexing performance, if you
        // are indexing many documents, increase the RAM
        // buffer. But if you do this, increase the max heap
        // size to the JVM (eg add -Xmx512m or -Xmx1g):
        //
        // iwc.setRAMBufferSizeMB(256.0);

        IndexWriter writer = new IndexWriter(dir, iwc);
        indexDocs(writer, docDir);

        // NOTE: if you want to maximize search performance,
        // you can optionally call forceMerge here. This can be
        // a terribly costly operation, so generally it's only
        // worth it when your index is relatively static (ie
        // you're done adding documents to it):
        //
        // writer.forceMerge(1);

        writer.close();

        Date end = new Date();
        System.out.println(end.getTime() - start.getTime() + " total milliseconds");
    } catch (IOException e) {
        System.out.println(" caught a " + e.getClass() + "\n with message: " + e.getMessage());
    }
}
From source file:com.slieer.app.lecene3x.LuceneIndexAndSearchDemo.java
License:Apache License
/**
 * Demo: index a few sample documents with IKAnalyzer, then search them.
 *
 * @param args
 */
public static void main(String[] args) {
    // name of the indexed text field
    String fieldName = "text";
    // sample texts (the original Chinese strings were not preserved)
    String text = "IK Analyzer???????";
    String text1 = "? (Chinese Word Segmentation) ???????????";
    String text2 = "?????,,??,?";

    // IKAnalyzer in smart mode
    Analyzer analyzer = new IKAnalyzer(true);
    Directory directory = null;
    IndexWriter iwriter = null;
    IndexReader ireader = null;
    IndexSearcher isearcher = null;
    try {
        // build an in-memory index
        directory = new RAMDirectory();

        // configure the IndexWriter
        IndexWriterConfig iwConfig = new IndexWriterConfig(Version.LUCENE_4_9, analyzer);
        iwConfig.setOpenMode(OpenMode.CREATE_OR_APPEND);
        iwriter = new IndexWriter(directory, iwConfig);

        // add the documents
        Document doc = new Document();
        // document.add(new Field("content", content, Field.Store.YES, Field.Index.ANALYZED));
        Field strField = new StringField("ID", "10000", Field.Store.YES);
        Field textFild = new StringField(fieldName, text, Field.Store.YES);
        // textFild.setBoost(2);
        doc.add(strField);
        doc.add(textFild);
        iwriter.addDocument(doc);

        doc = new Document();
        strField = new StringField("ID", "10001", Field.Store.YES);
        textFild = new StringField(fieldName, text1, Field.Store.YES);
        // strField.setBoost(1);
        doc.add(strField);
        doc.add(textFild);
        iwriter.addDocument(doc);

        doc = new Document();
        strField = new StringField("ID", "10002", Field.Store.YES);
        // textFild = new TextField(fieldName, text2, Field.Store.YES);
        textFild = new StringField(fieldName, text2, Field.Store.YES);
        // strField.setBoost(1);
        doc.add(strField);
        doc.add(textFild);
        iwriter.addDocument(doc);

        iwriter.close();

        // ********************** search **********************
        ireader = DirectoryReader.open(directory);
        isearcher = new IndexSearcher(ireader);

        String keyword = "?";
        // build a Query with QueryParser
        QueryParser qp = new QueryParser(Version.LUCENE_4_9, fieldName, analyzer);
        qp.setDefaultOperator(QueryParser.AND_OPERATOR);
        Query query = qp.parse(keyword);
        System.out.println("Query = " + query);

        // fetch the top 5 hits
        TopDocs topDocs = isearcher.search(query, 5);
        System.out.println("" + topDocs.totalHits);
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        for (int i = 0; i < topDocs.totalHits; i++) {
            Document targetDoc = isearcher.doc(scoreDocs[i].doc);
            System.out.println("" + targetDoc.toString());
        }
    } catch (CorruptIndexException e) {
        e.printStackTrace();
    } catch (LockObtainFailedException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    } catch (ParseException e) {
        e.printStackTrace();
    } finally {
        if (ireader != null) {
            try {
                ireader.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        if (directory != null) {
            try {
                directory.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}
From source file:com.slieer.lucene.apachedemo.IndexFiles.java
License:Apache License
/** Index all text files under a directory. */
public static void main(String[] args) {
    String usage = "java org.apache.lucene.demo.IndexFiles"
            + " [-index INDEX_PATH] [-docs DOCS_PATH] [-update]\n\n"
            + "This indexes the documents in DOCS_PATH, creating a Lucene index "
            + "in INDEX_PATH that can be searched with SearchFiles";
    String indexPath = "index";
    String docsPath = null;
    boolean create = true;
    for (int i = 0; i < args.length; i++) {
        if ("-index".equals(args[i])) {
            indexPath = args[i + 1];
            i++;
        } else if ("-docs".equals(args[i])) {
            docsPath = args[i + 1];
            i++;
        } else if ("-update".equals(args[i])) {
            create = false;
        }
    }

    if (docsPath == null) {
        System.err.println("Usage: " + usage);
        System.exit(1);
    }

    final File docDir = new File(docsPath);
    if (!docDir.exists() || !docDir.canRead()) {
        System.out.println("Document directory '" + docDir.getAbsolutePath()
                + "' does not exist or is not readable, please check the path");
        System.exit(1);
    }

    Date start = new Date();
    try {
        System.out.println("Indexing to directory '" + indexPath + "'...");

        Directory dir = FSDirectory.open(new File(indexPath));
        // :Post-Release-Update-Version.LUCENE_XY:
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_47);
        IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_47, analyzer);

        if (create) {
            // Create a new index in the directory, removing any
            // previously indexed documents:
            iwc.setOpenMode(OpenMode.CREATE);
        } else {
            // Add new documents to an existing index:
            iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);
        }

        // Optional: for better indexing performance, if you
        // are indexing many documents, increase the RAM
        // buffer. But if you do this, increase the max heap
        // size to the JVM (eg add -Xmx512m or -Xmx1g):
        //
        // iwc.setRAMBufferSizeMB(256.0);

        IndexWriter writer = new IndexWriter(dir, iwc);
        indexDocs(writer, docDir);

        // NOTE: if you want to maximize search performance,
        // you can optionally call forceMerge here. This can be
        // a terribly costly operation, so generally it's only
        // worth it when your index is relatively static (ie
        // you're done adding documents to it):
        //
        // writer.forceMerge(1);

        writer.close();

        Date end = new Date();
        System.out.println(end.getTime() - start.getTime() + " total milliseconds");
    } catch (IOException e) {
        System.out.println(" caught a " + e.getClass() + "\n with message: " + e.getMessage());
    }
}
From source file:com.soebes.supose.core.index.IndexHelper.java
License:Open Source License
/**
 * Merge all given indexes together to a single index.
 *
 * @param destination
 *            This will define the destination directory of the index where
 *            all other indexes will be merged to.
 * @param indexList
 *            This is the list of indexes which are merged into the
 *            destination index.
 */
public static void mergeIndex(File destination, List<File> indexList) {
    LOGGER.debug("We are trying to merge indexes to the destination: " + destination);
    Index index = new Index();
    // We assume an existing index...
    index.setCreate(false);
    IndexWriter indexWriter = index.createIndexWriter(destination.getAbsolutePath());
    try {
        LOGGER.info("Merging of indexes started.");
        FSDirectory[] fsDirs = new FSDirectory[indexList.size()];
        for (int i = 0; i < indexList.size(); i++) {
            fsDirs[i] = FSDirectory.getDirectory(indexList.get(i));
        }
        indexWriter.addIndexesNoOptimize(fsDirs);
        indexWriter.optimize();
        indexWriter.close();
        LOGGER.info("Merging of indexes successful.");
    } catch (Exception e) {
        LOGGER.error("Something wrong during merge of index: ", e);
    }
}
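The addIndexesNoOptimize() and optimize() calls above belong to the Lucene 2.x/3.x API. A rough sketch of the same merge-then-close flow against a newer API (assuming Lucene 5.x or later, opening the writer directly instead of through the project's Index helper, and reusing the destination and indexList parameters); treat it as an illustration rather than a drop-in replacement:

// Sketch under assumptions (Lucene 5.x+ API); addIndexes replaces addIndexesNoOptimize
// and forceMerge replaces optimize.
try (Directory destinationDir = FSDirectory.open(destination.toPath());
        IndexWriter writer = new IndexWriter(destinationDir, new IndexWriterConfig(new StandardAnalyzer()))) {
    Directory[] sources = new Directory[indexList.size()];
    for (int i = 0; i < indexList.size(); i++) {
        sources[i] = FSDirectory.open(indexList.get(i).toPath());
    }
    writer.addIndexes(sources); // merge the source indexes into the destination
    writer.forceMerge(1);       // optional and potentially expensive, like optimize() was
    // writer.close() runs via try-with-resources and commits the merged index
}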
From source file:com.soebes.supose.core.InitRepository.java
License:Open Source License
public void scanRepos() throws SVNException {
    Index index = new Index();
    // We will create a new one if --create is given on the command line,
    // otherwise we will append to the existing index.
    Analyzer analyzer = AnalyzerFactory.createInstance();
    index.setAnalyzer(analyzer);
    // For the test we always create the index.
    index.setCreate(true);
    IndexWriter indexWriter = index.createIndexWriter(getIndexDirectory());

    ISVNAuthenticationManager authManager = SVNWCUtil.createDefaultAuthenticationManager("", "");
    String repositoryDir = getRepositoryDirectory();
    SVNURL url = SVNURL.fromFile(new File(repositoryDir));
    Repository repository = new Repository("file://" + url.getURIEncodedPath(), authManager);
    scanRepository.setRepository(repository);

    // We start with the revision which is given on the command line.
    // If it is not given we will start with revision 1.
    scanRepository.setStartRevision(1);
    // We will scan the repository to the current HEAD of the repository.
    scanRepository.setEndRevision(-1);

    InputStream filter = InitRepository.class.getResourceAsStream("/filter.xml");
    Filter filterConfiguration = null;
    try {
        filterConfiguration = FilterFile.getFilter(filter);
    } catch (FileNotFoundException e) {
        LOGGER.error("FileNotFoundException", e);
    } catch (IOException e) {
        LOGGER.error("IOException", e);
    } catch (XmlPullParserException e) {
        LOGGER.error("XmlPullParserException", e);
    }
    Filtering filtering = new Filtering(filterConfiguration);
    scanRepository.setFiltering(filtering);

    LOGGER.info("Scanning started.");
    scanRepository.scan(indexWriter);
    LOGGER.info("Scanning ready.");

    try {
        indexWriter.optimize();
        indexWriter.close();
    } catch (CorruptIndexException e) {
        LOGGER.error("CorruptIndexException: Error during optimization of index: ", e);
    } catch (IOException e) {
        LOGGER.error("IOException: Error during optimization of index: ", e);
    }
}