Example usage for org.apache.lucene.index IndexWriter addDocument

List of usage examples for org.apache.lucene.index IndexWriter addDocument

Introduction

On this page you can find example usages for org.apache.lucene.index IndexWriter addDocument.

Prototype

public long addDocument(Iterable<? extends IndexableField> doc) throws IOException 

Source Link

Document

Adds a document to this index and returns the sequence number for the operation.
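
Before the collected examples below, a minimal sketch of a typical call may help orient the reader. It assumes a recent Lucene release (6.2 or later, matching the long return type in the prototype above); the index path and field names are illustrative placeholders, not taken from any of the examples on this page.

import java.nio.file.Paths;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class AddDocumentSketch {
    public static void main(String[] args) throws Exception {
        // Open (or create) an index directory and configure the writer.
        IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer());
        try (Directory dir = FSDirectory.open(Paths.get("/tmp/example-index"));
                IndexWriter writer = new IndexWriter(dir, config)) {
            // Build a document with one exact-match field and one analyzed field.
            Document doc = new Document();
            doc.add(new StringField("id", "doc-1", Field.Store.YES));
            doc.add(new TextField("content", "hello lucene", Field.Store.NO));
            // addDocument returns the sequence number of this operation.
            long seqNo = writer.addDocument(doc);
            writer.commit();
            System.out.println("Added document, sequence number: " + seqNo);
        }
    }
}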

Usage

From source file:com.nokia.vikram.lire.LireIndexer.java

License:Open Source License

public static void indexImages(String inputFramesRootDir, String indexDirectory) {
    try {
        // Getting all images from a directory and its sub directories.
        ArrayList<String> images = FileUtils.getAllImages(new File(inputFramesRootDir), true);

        // Creating a CEDD document builder and indexing all files.
        DocumentBuilder builder = DocumentBuilderFactory.getCEDDDocumentBuilder();

        // Creating a Lucene IndexWriter
        IndexWriterConfig conf = new IndexWriterConfig(LuceneUtils.LUCENE_VERSION,
                new WhitespaceAnalyzer(LuceneUtils.LUCENE_VERSION));
        conf.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);

        IndexWriter iw = new IndexWriter(FSDirectory.open(new File(indexDirectory)), conf);

        // Iterating through images building the low level features
        for (Iterator<String> it = images.iterator(); it.hasNext();) {
            String imageFilePath = it.next();
            System.out.println("Indexing image: " + imageFilePath);
            try {
                BufferedImage img = ImageIO.read(new FileInputStream(imageFilePath));
                Document document = builder.createDocument(img, imageFilePath);
                iw.addDocument(document);
            } catch (Exception e) {
                System.err.println("ERR: Could not read image or index it!");
                e.printStackTrace();
            }
        }

        // Close the IndexWriter
        iw.close();
        System.out.println("Indexing completed.");
    } catch (IOException e) {
        System.out.println("ERR: IOException while attempting to build index!");
        e.printStackTrace();
    }
}

From source file:com.nuvolect.deepdive.lucene.Index.java

public static JSONObject index(final String volumeId, final String searchPath, final boolean forceIndex) {

    if (m_interrupt[0]) {

        LogUtil.log(LogUtil.LogType.INDEX, "Index canceled post interrupt");

        m_interrupt[0] = false;
        return responseInterruptIndexing();
    }

    OmniFile cacheDir = IndexUtil.getCacheDir(volumeId, searchPath);
    boolean cacheDirCreated = false;
    try {
        cacheDirCreated = OmniUtil.forceMkdir(cacheDir);
    } catch (IOException e) {
        return responseFolderCreateError(searchPath);
    }

    final String luceneDirPath = cacheDir.getAbsolutePath();

    boolean cacheDirExists = !cacheDirCreated;
    boolean indexingOngoing = m_indexThread != null && m_indexThread.isAlive();
    boolean indexingRequired = !cacheDirExists || forceIndex;

    synchronized (m_lock) {

        if (indexingOngoing) {

            if (m_fileTreeActive)
                m_index_state = INDEX_STATE.filetree;
            else
                m_index_state = INDEX_STATE.indexing;
        } else {
            if (indexingRequired)
                m_index_state = INDEX_STATE.indexing;
            else
                m_index_state = INDEX_STATE.complete;
        }
    }

    if (indexingRequired || indexingOngoing) {

        if (indexingOngoing) {

            // Nothing to do, let the background process run. Monitor m_indexedDocs for progress.
        } else {

            synchronized (m_lock) {
                m_index_state = INDEX_STATE.filetree;
                m_totalDocs[0] = 0;
                m_indexedDocs[0] = 0;
                m_error[0] = "";
            }
            m_threadGroup = new ThreadGroup(INDEX_THREAD_GROUP);
            m_indexThread = new Thread(m_threadGroup, new Runnable() {
                @Override
                public void run() {

                    //                        Analyzer analyzer = new org.apache.lucene.analysis.core.WhitespaceAnalyzer();
                    //                        Analyzer analyzer = new org.apache.lucene.analysis.core.KeywordAnalyzer();
                    //                        Analyzer analyzer = new org.apache.lucene.analysis.standard.StandardAnalyzer();
                    Analyzer analyzer = new org.apache.lucene.analysis.core.SimpleAnalyzer();
                    IndexWriterConfig config = new IndexWriterConfig(analyzer);
                    IndexWriter iwriter = null;

                    try {
                        Directory m_directory = FSDirectory.open(Paths.get(luceneDirPath));
                        iwriter = new IndexWriter(m_directory, config);
                        iwriter.deleteAll();
                        iwriter.commit();
                    } catch (IOException e) {
                        LogUtil.logException(LogUtil.LogType.INDEX, e);
                        m_error[0] = "IndexWriter constructor exception";
                    }

                    synchronized (m_lock) {
                        m_fileTreeActive = true;
                        m_index_state = INDEX_STATE.filetree;
                    }
                    Collection<OmniFile> files = IndexUtil.getFilePaths(volumeId, searchPath);

                    synchronized (m_lock) {
                        m_index_state = INDEX_STATE.indexing;
                        m_fileTreeActive = false;
                        m_totalDocs[0] = files.size();
                        m_indexedDocs[0] = 0;
                    }

                    try {

                        for (OmniFile file : files) {

                            if (m_interrupt[0]) {
                                LogUtil.log(LogUtil.LogType.INDEX, "Iterator loop canceled");
                                break;
                            }

                            String path = file.getPath();

                            //                                LogUtil.log(LogUtil.LogType.INDEX, "indexing: " + path);// this is a bit excessive
                            iwriter.addDocument(makeDoc(volumeId, path));
                            synchronized (m_lock) {
                                ++m_indexedDocs[0];
                            }
                        }

                        iwriter.commit();
                        iwriter.close();
                        synchronized (m_lock) {
                            m_index_state = m_interrupt[0] ? INDEX_STATE.interrupted : INDEX_STATE.complete;
                            m_totalDocs[0] = m_indexedDocs[0];
                        }

                    } catch (Exception e) {
                        LogUtil.logException(LogUtil.LogType.INDEX, e);
                        m_error[0] = "IndexWriter addDocument exception";
                    }
                }
            }, INDEX_THREAD, STACK_SIZE);

            m_indexThread.setPriority(Thread.MAX_PRIORITY);
            m_indexThread.start();
        }
    } else {

        // Indexing is complete
        // Get number of documents indexed
        try {
            Directory directory = FSDirectory.open(Paths.get(luceneDirPath));
            DirectoryReader ireader = DirectoryReader.open(directory);
            synchronized (m_lock) {
                m_indexedDocs[0] = ireader.numDocs();
                m_totalDocs[0] = m_indexedDocs[0];
                m_index_state = INDEX_STATE.complete;
            }
            ireader.close();
            directory.close();
        } catch (IOException e) {
            LogUtil.logException(LogUtil.LogType.INDEX, e);
        }
    }

    JSONObject result = new JSONObject();
    try {
        synchronized (m_lock) {
            result.put("index_state", m_index_state.toString());
            result.put("error", m_error[0]);
            result.put("indexed_docs", m_indexedDocs[0]);
            result.put("total_docs", m_totalDocs[0]);
            //                result.put("full_path", cacheDir.getAbsolutePath());
            result.put("search_path", searchPath);
        }
    } catch (JSONException e) {
        e.printStackTrace();
    }

    return result;
}

From source file:com.o19s.solr.swan.highlight.TermVectorFun.java

License:Apache License

@Test
public void testBlah() throws IOException {
    RAMDirectory ramDir = new RAMDirectory();
    // Index some made up content
    IndexWriterConfig iwf = new IndexWriterConfig(Version.LUCENE_47, new StandardAnalyzer(Version.LUCENE_47));
    IndexWriter writer = new IndexWriter(ramDir, iwf);
    FieldType ft = new FieldType();
    ft.setIndexed(true);
    ft.setTokenized(true);
    ft.setStored(true);
    ft.setStoreTermVectorOffsets(true);
    ft.setStoreTermVectors(true);
    ft.setStoreTermVectorPositions(true);
    ft.freeze();
    for (int i = 0; i < DOCS.length; i++) {
        Document doc = new Document();
        StringField id = new StringField("id", "doc_" + i, StringField.Store.YES);
        doc.add(id);
        // Store both position and offset information
        Field text = new Field("content", DOCS[i], ft);
        //               Field.Index.ANALYZED,
        //               Field.TermVector.WITH_POSITIONS_OFFSETS);
        doc.add(text);
        writer.addDocument(doc);
    }
    //writer.close();
    // Get a searcher
    AtomicReader dr = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(writer, true));
    IndexSearcher searcher = new IndexSearcher(dr);
    // Do a search using SpanQuery
    SpanTermQuery fleeceQ = new SpanTermQuery(new Term("content", "fleece"));
    TopDocs results = searcher.search(fleeceQ, 10);
    for (int i = 0; i < results.scoreDocs.length; i++) {
        ScoreDoc scoreDoc = results.scoreDocs[i];
        System.out.println("Score Doc: " + scoreDoc);
    }
    IndexReader reader = searcher.getIndexReader();
    Bits acceptDocs = null;
    Map<Term, TermContext> termContexts = new HashMap<Term, TermContext>();
    Spans spans = fleeceQ.getSpans(dr.getContext(), acceptDocs, termContexts);

    while (spans.next()) {
        System.out.println("Doc: " + spans.doc() + " Start: " + spans.start() + " End: " + spans.end());
        DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor("content");
        reader.document(spans.doc(), visitor);
        Terms terms = reader.getTermVector(spans.doc(), "content");
        TermsEnum tenum = terms.iterator(null);
        //         AttributeSource as = tenum.attributes();

        while (tenum.next() != null) {
            System.out.println(tenum.term().utf8ToString());
        }
        for (long pos = 0L; pos < spans.end(); pos++) {
            //            tenum.next();
            //            if (tenum.ord()<pos) continue;
            //            System.out.println(tenum.term());
            //            
        }

        reader.document(spans.doc(), visitor);
        //         String[] values = visitor.getDocument().getValues("content");
        //         List<String> a = new ArrayList<String>();
        //         // build up the window
        //         tvm.start = spans.start() - window;
        //         tvm.end = spans.end() + window;
        //         reader.getTermFreqVector(spans.doc(), "content", tvm);
        //         for (WindowEntry entry : tvm.entries.values()) {
        //            System.out.println("Entry: " + entry);
        //         }
        //         // clear out the entries for the next round
        //         tvm.entries.clear();
    }
}

From source file:com.orientechnologies.lucene.test.LuceneNativeFacet.java

License:Apache License

/** Build the example index. */
private void index() throws IOException {
    IndexWriter indexWriter = new IndexWriter(indexDir,
            new IndexWriterConfig(Version.LUCENE_47, new WhitespaceAnalyzer(Version.LUCENE_47))
                    .setOpenMode(OpenMode.CREATE));

    // Writes facet ords to a separate directory from the main index
    DirectoryTaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);

    Document doc = new Document();
    doc.add(new FacetField("Author", "Bob"));
    doc.add(new FacetField("Publish Date", "2010", "10", "15"));
    indexWriter.addDocument(config.build(taxoWriter, doc));

    doc = new Document();
    doc.add(new FacetField("Author", "Lisa"));
    doc.add(new FacetField("Publish Date", "2010", "10", "20"));
    indexWriter.addDocument(config.build(taxoWriter, doc));

    doc = new Document();
    doc.add(new FacetField("Author", "Lisa"));
    doc.add(new FacetField("Publish Date", "2012", "1", "1"));
    indexWriter.addDocument(config.build(taxoWriter, doc));

    doc = new Document();
    doc.add(new FacetField("Author", "Susan"));
    doc.add(new FacetField("Publish Date", "2012", "1", "7"));
    indexWriter.addDocument(config.build(taxoWriter, doc));

    doc = new Document();
    doc.add(new FacetField("Author", "Frank"));
    doc.add(new FacetField("Publish Date", "1999", "5", "5"));
    indexWriter.addDocument(config.build(taxoWriter, doc));

    indexWriter.close();
    taxoWriter.close();
}

From source file:com.orientechnologies.spatial.sandbox.LuceneGeoTest.java

License:Apache License

@Test
public void geoIntersectTest() throws IOException, ParseException {

    RecursivePrefixTreeStrategy strategy = new RecursivePrefixTreeStrategy(
            new GeohashPrefixTree(JtsSpatialContext.GEO, 11), "location");

    strategy.setDistErrPct(0);

    IndexWriterConfig conf = new IndexWriterConfig(new StandardAnalyzer());
    final RAMDirectory directory = new RAMDirectory();
    final IndexWriter writer = new IndexWriter(directory, conf);

    Shape point = JtsSpatialContext.GEO.getWktShapeParser().parse("POINT (9.4714708 47.6819432)");

    Shape polygon = JtsSpatialContext.GEO.getWktShapeParser().parse(
            "POLYGON((9.481201171875 47.64885294675266,9.471416473388672 47.65128140482982,9.462661743164062 47.64781214443791,9.449443817138672 47.656947367880335,9.445838928222656 47.66110972448931,9.455795288085938 47.667352637215,9.469013214111328 47.67255449415724,9.477081298828125 47.679142768657066,9.490299224853516 47.678680460743834,9.506263732910156 47.679258344995326,9.51364517211914 47.68191653011071,9.518795013427734 47.677177931734406,9.526691436767578 47.679489496903706,9.53390121459961 47.67139857075435,9.50918197631836 47.66180341832901,9.50815200805664 47.6529003141482,9.51192855834961 47.64654002455372,9.504375457763672 47.64237650648966,9.49270248413086 47.649662445325035,9.48617935180664 47.65151268066222,9.481201171875 47.64885294675266))");

    Document doc = new Document();

    Assert.assertNotEquals(point.relate(polygon), SpatialRelation.INTERSECTS);
    for (IndexableField f : strategy.createIndexableFields(point)) {
        doc.add(f);
    }

    writer.addDocument(doc);
    writer.commit();

    SpatialArgs args = new SpatialArgs(SpatialOperation.Intersects, polygon.getBoundingBox());
    Filter filter = strategy.makeFilter(args);
    IndexReader reader = DirectoryReader.open(directory);

    IndexSearcher searcher = new IndexSearcher(reader);

    TopDocs search = searcher.search(new MatchAllDocsQuery(), filter, 1000);
    Assert.assertEquals(search.totalHits, 0);

    reader.close();
    writer.close();
}

From source file:com.orientechnologies.spatial.sandbox.LuceneGeoTest.java

License:Apache License

@Test
public void geoSpeedTest() throws IOException, ParseException {

    RecursivePrefixTreeStrategy strategy = new RecursivePrefixTreeStrategy(
            new GeohashPrefixTree(JtsSpatialContext.GEO, 11), "location");

    IndexWriterConfig conf = new IndexWriterConfig(new StandardAnalyzer());
    final RAMDirectory directory = new RAMDirectory();
    final IndexWriter writer = new IndexWriter(directory, conf);

    Shape multiPolygon = JtsSpatialContext.GEO.getWktShapeParser().parse(
            "MULTIPOLYGON (((15.520376 38.231155, 15.160243 37.444046, 15.309898 37.134219, 15.099988 36.619987, 14.335229 36.996631, 13.826733 37.104531, 12.431004 37.61295, 12.570944 38.126381, 13.741156 38.034966, 14.761249 38.143874, 15.520376 38.231155)), ((9.210012 41.209991, 9.809975 40.500009, 9.669519 39.177376, 9.214818 39.240473, 8.806936 38.906618, 8.428302 39.171847, 8.388253 40.378311, 8.159998 40.950007, 8.709991 40.899984, 9.210012 41.209991)), ((12.376485 46.767559, 13.806475 46.509306, 13.69811 46.016778, 13.93763 45.591016, 13.141606 45.736692, 12.328581 45.381778, 12.383875 44.885374, 12.261453 44.600482, 12.589237 44.091366, 13.526906 43.587727, 14.029821 42.761008, 15.14257 41.95514, 15.926191 41.961315, 16.169897 41.740295, 15.889346 41.541082, 16.785002 41.179606, 17.519169 40.877143, 18.376687 40.355625, 18.480247 40.168866, 18.293385 39.810774, 17.73838 40.277671, 16.869596 40.442235, 16.448743 39.795401, 17.17149 39.4247, 17.052841 38.902871, 16.635088 38.843572, 16.100961 37.985899, 15.684087 37.908849, 15.687963 38.214593, 15.891981 38.750942, 16.109332 38.964547, 15.718814 39.544072, 15.413613 40.048357, 14.998496 40.172949, 14.703268 40.60455, 14.060672 40.786348, 13.627985 41.188287, 12.888082 41.25309, 12.106683 41.704535, 11.191906 42.355425, 10.511948 42.931463, 10.200029 43.920007, 9.702488 44.036279, 8.888946 44.366336, 8.428561 44.231228, 7.850767 43.767148, 7.435185 43.693845, 7.549596 44.127901, 7.007562 44.254767, 6.749955 45.028518, 7.096652 45.333099, 6.802355 45.70858, 6.843593 45.991147, 7.273851 45.776948, 7.755992 45.82449, 8.31663 46.163642, 8.489952 46.005151, 8.966306 46.036932, 9.182882 46.440215, 9.922837 46.314899, 10.363378 46.483571, 10.442701 46.893546, 11.048556 46.751359, 11.164828 46.941579, 12.153088 47.115393, 12.376485 46.767559)))");

    Document doc = new Document();

    for (IndexableField f : strategy.createIndexableFields(multiPolygon)) {
        doc.add(f);
    }

    writer.addDocument(doc);
    writer.commit();

    writer.close();
}

From source file:com.orientechnologies.spatial.sandbox.LuceneGeoTest.java

License:Apache License

@Test
public void geoSpeedTestInternal() throws IOException, ParseException {

    RecursivePrefixTreeStrategy strategy = new RecursivePrefixTreeStrategy(
            new GeohashPrefixTree(JtsSpatialContext.GEO, 11), "location");

    IndexWriterConfig conf = new IndexWriterConfig(new StandardAnalyzer());
    final RAMDirectory directory = new RAMDirectory();
    final IndexWriter writer = new IndexWriter(directory, conf);

    ODocument entries = loadMultiPolygon();

    OMultiPolygonShapeBuilder builder = new OMultiPolygonShapeBuilder();

    Shape multiPolygon = builder.fromDoc(entries);

    Document doc = new Document();

    for (IndexableField f : strategy.createIndexableFields(multiPolygon)) {
        doc.add(f);
    }

    writer.addDocument(doc);
    writer.commit();

    writer.close();
}

From source file:com.paladin.common.LuceneHelper.java

License:Apache License

/**
 * Indexes all rows of the given database table into the Lucene index.
 *
 * @param writer the IndexWriter that receives the documents
 * @param table  the name of the database table to index
 */
private static void indexTable(IndexWriter writer, String table) throws IOException {
    String sql = "SELECT ID, TITLE, CONTENT, TAG, CREATE_DATE FROM " + table.toUpperCase();

    if (table.equalsIgnoreCase("motto"))
        sql = "SELECT ID, CONTENT, TAG, CREATE_DATE FROM " + table.toUpperCase();

    List<Map<String, Object>> blogs = QueryHelper.queryList(sql);

    for (Map<String, Object> blog : blogs) {
        Document doc = new Document();
        Field id_field = new Field("id", blog.get("ID").toString(), Field.Store.YES,
                Field.Index.NOT_ANALYZED_NO_NORMS);
        // Build the combined searchable text: title (or content for motto), content, and tag
        StringBuilder builder = new StringBuilder();

        if (table.equalsIgnoreCase("motto"))
            builder.append(blog.get("CONTENT"));
        else
            builder.append(blog.get("TITLE"));
        builder.append(Constants.LUCENE_FIELD_SEP);
        builder.append(blog.get("CONTENT"));
        builder.append(Constants.LUCENE_FIELD_SEP);
        builder.append(blog.get("TAG"));

        Field t_c_t_field = new Field("title_content_tag", builder.toString(), Field.Store.YES,
                Field.Index.ANALYZED);

        doc.add(id_field);
        doc.add(t_c_t_field);

        if (writer.getConfig().getOpenMode() == IndexWriterConfig.OpenMode.CREATE)
            writer.addDocument(doc);
        else // otherwise update the existing document, matched by its id
            writer.updateDocument(new Term("id", blog.get("ID").toString()), doc);
    }
}

From source file:com.paladin.sys.lucene.IndexFiles.java

License:Apache License

/**
 * Indexes the given file using the given writer, or if a directory is given,
 * recurses over files and directories found under the given directory.
 * <p/>
 * NOTE: This method indexes one document per input file.  This is slow.  For good
 * throughput, put multiple documents into your input file(s).  An example of this is
 * in the benchmark module, which can create "line doc" files, one document per line,
 * using the
 * <a href="../../../../../contrib-benchmark/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTask.html"
 * >WriteLineDocTask</a>.
 *
 * @param writer Writer to the index where the given file/dir info will be stored
 * @param file   The file to index, or the directory to recurse into to find files to index
 * @throws IOException
 */

static void indexDocs(IndexWriter writer, File file) throws IOException {
    // do not try to index files that cannot be read
    if (!file.canRead())
        return;

    if (file.isDirectory() && file.list() != null) {
        String[] files = file.list();
        for (int i = 0; i < files.length; i++)
            indexDocs(writer, new File(file, files[i]));
    } else {
        FileInputStream fis;
        try {
            fis = new FileInputStream(file);
        } catch (FileNotFoundException e) {
            // at least on windows, some temporary files raise this exception with an "access denied" message
            // checking if the file can be read doesn't help
            return;
        }

        try {
            Document doc = new Document();
            // Add the path of the file as a field named "path".  Use a field that is indexed (i.e. searchable), but don't tokenize
            // the field into separate words and don't index term frequency or positional information:
            Field pathField = new Field("path", file.getPath(), Field.Store.YES,
                    Field.Index.NOT_ANALYZED_NO_NORMS);
            pathField.setOmitTermFreqAndPositions(true);
            doc.add(pathField);

            // Add the last modified date of the file as a field named "modified". Use a NumericField that is indexed (i.e. efficiently filterable with
            // NumericRangeFilter).  This indexes to milli-second resolution, which is often too fine.  You could instead create a number based on
            // year/month/day/hour/minutes/seconds, down to the resolution you require. For example the long value 2011021714 would mean
            // February 17, 2011, 2-3 PM.
            NumericField modifiedField = new NumericField("modified");
            modifiedField.setLongValue(file.lastModified());
            doc.add(modifiedField);

            // Add the contents of the file to a field named "contents".  Specify a Reader, so that the text of the file is tokenized and indexed, but not stored.
            // Note that the reader is opened with UTF-8 encoding. If the file is not UTF-8, searching for special characters will fail.
            doc.add(new Field("contents", new BufferedReader(new InputStreamReader(fis, "UTF-8"))));

            if (writer.getConfig().getOpenMode() == OpenMode.CREATE) {
                out.println("adding " + file);
                writer.addDocument(doc);
            } else {
                out.println("updating " + file);
                writer.updateDocument(new Term("path", file.getPath()), doc);
            }
        } finally {
            fis.close();
        }
    }
}

From source file:com.parc.uplib.indexing.UpLibQueryParser.java

License:Open Source License

private static void update(File index_file, File doc_root_dir, String[] ids, int start) {

    ExtractIndexingInfo.DocumentIterator docit;
    String number;

    remove(index_file, ids, start);

    try {

        // Now add the documents to the index
        IndexWriter writer = new IndexWriter(index_file, new StandardAnalyzer(), !index_file.exists());
        if (debug_mode)
            writer.setInfoStream(System.err);
        writer.setMaxFieldLength(Integer.MAX_VALUE);

        try {
            for (int i = start; i < ids.length; i++) {
                docit = build_document_iterator(doc_root_dir, ids[i]);
                int count = 0;
                while (docit.hasNext()) {
                    writer.addDocument((Document) (docit.next()));
                    count += 1;
                }
                System.out.println("Added " + docit.id + " (" + count + " versions)");
                System.out.flush();
            }
        } finally {
            // And close the index
            System.out.println("Optimizing...");
            // See http://www.gossamer-threads.com/lists/lucene/java-dev/47895 about optimize
            // Can fail if low on disk space
            writer.optimize();
            writer.close();
        }

    } catch (Exception e) {
        if (debug_mode) {
            e.printStackTrace(System.err);
        } else {
            System.out.println(
                    "* Lucene search engine raised " + e.getClass() + " with message " + e.getMessage());
            System.err.println(" 'update' caught a " + e.getClass() + "\n with message: " + e.getMessage());
            System.out.flush();
        }
        System.exit(JAVA_EXCEPTION);
    }
    System.out.flush();
}