Example usage for org.apache.lucene.index IndexWriter close

List of usage examples for org.apache.lucene.index IndexWriter close

Introduction

On this page you can find example usage of org.apache.lucene.index.IndexWriter.close.

Prototype

@Override
public void close() throws IOException 

Source Link

Document

Closes all open resources and releases the write lock.

Usage

From source file:com.soebes.supose.core.jobs.RepositoryScanJob.java

License:Open Source License

/**
 * Performs one scheduled scan run for the repository configured in the job's
 * data map: determines the revision range that has not been indexed yet,
 * scans it into a per-job Lucene index, merges that index into the shared
 * result index, and persists the new revision watermark in the job's .ini file.
 *
 * @param context Quartz context; its JobDataMap must contain the repository,
 *                its configuration and the base directory (put there by the
 *                scheduler setup in SuposeCLI)
 * @throws Exception if scanning, merging or saving the configuration fails
 */
private void subexecute(JobExecutionContext context) throws Exception {
    LOGGER.info("[" + context.getJobDetail().getName() + "/" + context.getJobDetail().getFullName() + "]");
    // Ok we get the JobConfiguration information.
    JobDataMap jobDataMap = context.getJobDetail().getJobDataMap();

    // Get the Repository object which has been initialized in SuposeCLI
    // (runSchedule)
    Repository repos = (Repository) jobDataMap.get(JobDataNames.REPOSITORY);
    RepositoryConfiguration reposConfig = (RepositoryConfiguration) jobDataMap
            .get(JobDataNames.REPOSITORYCONFIGURATION);

    String baseDir = (String) jobDataMap.get(JobDataNames.BASEDIR);

    LOGGER.info("baseDir:" + baseDir + " URL: " + repos.getUrl() + " Name: " + reposConfig.getRepositoryName());

    // Read the job configuration, create it if it hasn't existed before...
    jobConfig = new RepositoryJobConfiguration(
            baseDir + File.separator + reposConfig.getRepositoryName() + ".ini", reposConfig);

    // Per-repository scratch index, and the shared result index it is merged into.
    String jobIndexName = baseDir + File.separator + "index." + reposConfig.getRepositoryName();
    String resultIndexName = baseDir + File.separator + reposConfig.getResultIndex();

    LOGGER.info("Repository Revision: " + repos.getRepository().getLatestRevision()
            + " Configuration File FromRev:" + jobConfig.getConfigData().getFromrev());
    long fromRev = Long.parseLong(jobConfig.getConfigData().getFromrev());
    if (repos.getRepository().getLatestRevision() > fromRev) {

        // First run starts at the configured start revision; later runs
        // resume one revision after the last one that was indexed.
        long startRev = 0;
        if (jobConfig.isNewCreated()) {
            LOGGER.info("This is the first time we scan the repository.");
            startRev = jobConfig.getReposConfig().getFromRev();
        } else {
            LOGGER.info("This is n'th time we scan the repository.");
            startRev = fromRev + 1;
        }
        long endRev = repos.getRepository().getLatestRevision();
        scanRepos.setRepository(repos);
        scanRepos.setStartRevision(startRev);
        scanRepos.setEndRevision(endRev);
        scanRepos.setName(reposConfig.getRepositoryName());

        LOGGER.info("Scanning: startRev:" + startRev + " endRev:" + endRev);

        ScheduleInterceptor interceptor = new ScheduleInterceptor();
        scanRepos.registerScanInterceptor(interceptor);

        SchedulerLogEntryInterceptor logEntryInterceptor = new SchedulerLogEntryInterceptor();
        scanRepos.registerLogEntryInterceptor(logEntryInterceptor);

        // CLIChangeSetInterceptor changeSetInterceptor = new
        // CLIChangeSetInterceptor();
        // scanRepository.registerChangeSetInterceptor(changeSetInterceptor);

        Index index = new Index();
        // We will always create a new (scratch) index for this run.
        index.setCreate(true);
        IndexWriter indexWriter = index.createIndexWriter(jobIndexName);

        // New revisions exist since the last scanning...
        // scan the content
        scanRepos.scan(indexWriter);

        // The last step after scanning will be to optimize this index and
        // close it (closing releases the Lucene write lock).
        try {
            indexWriter.optimize();
            indexWriter.close();
        } catch (CorruptIndexException e) {
            LOGGER.error("Corrupted index: ", e);
        } catch (IOException e) {
            LOGGER.error("IOException during closing of index: ", e);
        }

        // Merge the created index into the target index...
        IndexHelper.mergeIndex(new File(resultIndexName), new File(jobIndexName));

        // save the configuration file with the new revision numbers.
        jobConfig.getConfigData().setFromrev(Long.toString(endRev));
        // store the changed configuration items.

        LOGGER.info("Revision: FromRev:" + jobConfig.getConfigData().getFromrev() + " ToRev:"
                + jobConfig.getConfigData().getTorev());
        jobConfig.save();
    } else {
        LOGGER.info("Nothing to do, cause no changes had been made at the repository.");
        // Nothing to do, cause no new revision are existing...
    }
    LOGGER.info("RepositoryScanJob: scanning repository done...");
}

From source file:com.soebes.supose.core.lucene.LuceneTest.java

License:Open Source License

/**
 * Builds a small in-memory test index with four known documents and opens
 * a searcher over it. The repeated document-creation stanzas of the
 * original are factored into {@link #addTestDocument}.
 *
 * @throws CorruptIndexException if the index is corrupt
 * @throws LockObtainFailedException if the write lock cannot be obtained
 * @throws IOException on any other index I/O failure
 */
@BeforeClass
public void beforeClass() throws CorruptIndexException, LockObtainFailedException, IOException {
    Analyzer analyzer = AnalyzerFactory.createInstance();

    // To store an index on disk, use this instead:
    // Directory directory = FSDirectory.getDirectory("/tmp/testindex");
    IndexWriter iwriter = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED);
    iwriter.setMaxFieldLength(25000);

    addTestDocument(iwriter, 1, "This is the text to be indexed.", "/trunk/doc/testXML.doc");
    addTestDocument(iwriter, 2, "This is different text.", "/tags/docs/XYZabc.java");
    addTestDocument(iwriter, 3, "This is more different text.", "/tags/docs/SCMPlan.doc");
    addTestDocument(iwriter, 4, "This is the third text.", "/trunk/subdir/elviraXML.doc");

    iwriter.optimize();
    iwriter.close();

    isearcher = new IndexSearcher(directory);
}

/**
 * Creates a document with revision, contents and filename fields and adds
 * it to the given writer.
 */
private void addTestDocument(IndexWriter writer, int revision, String text, String filename)
        throws CorruptIndexException, IOException {
    Document doc = new Document();
    addUnTokenizedField(doc, FieldNames.REVISION.getValue(), NumberUtils.pad(revision));
    addTokenizedField(doc, FieldNames.CONTENTS.getValue(), text);
    addUnTokenizedField(doc, FieldNames.FILENAME.getValue(), filename);
    writer.addDocument(doc);
}

From source file:com.soebes.supose.core.scan.IndexMergeTest.java

License:Open Source License

/** Writes one document with two "revision" values into the "index1" test index. */
public void testIndex1() throws Exception {
    Index index = new Index();
    IndexWriter writer = index.createIndexWriter("index1");
    Document document = new Document();
    addTokenizedField(document, "revision", "1");
    addTokenizedField(document, "revision", "2");
    writer.addDocument(document);
    writer.close();
}

From source file:com.soebes.supose.core.scan.IndexMergeTest.java

License:Open Source License

/** Writes one document with two "revision" values into the "index2" test index. */
public void testIndex2() throws Exception {
    Index index = new Index();
    IndexWriter writer = index.createIndexWriter("index2");
    Document document = new Document();
    addTokenizedField(document, "revision", "3");
    addTokenizedField(document, "revision", "4");
    writer.addDocument(document);
    writer.close();
}

From source file:com.soebes.supose.core.scan.IndexMergeTest.java

License:Open Source License

/**
 * Merges the indexes produced by testIndex1/testIndex2 into a "result"
 * index. The writer is closed in a finally block so the Lucene write lock
 * is released even if the merge or the optimize step throws.
 */
@Test(dependsOnMethods = { "testIndex1", "testIndex2" })
public void testMergeIndexes() throws Exception {
    Index index = new Index();

    IndexWriter indexWriter = index.createIndexWriter("result");
    try {
        FSDirectory fsDirs[] = { FSDirectory.getDirectory("index1"), FSDirectory.getDirectory("index2") };

        indexWriter.addIndexesNoOptimize(fsDirs);
        indexWriter.optimize();
    } finally {
        // Original leaked the writer (and its write lock) on failure.
        indexWriter.close();
    }
}

From source file:com.soebes.supose.core.scan.ScanSingleRepository.java

License:Open Source License

/**
 * Scans a single repository into the given Lucene index directory.
 *
 * @param scanRepository the configured repository scanner to run
 * @param indexDirectory directory of the Lucene index to write into
 * @param create true to create a fresh index (--create on the command
 *               line), false to append to an existing one
 */
public static void scanSingleRepos(ScanRepository scanRepository, String indexDirectory, boolean create) {
    // BLOCK ANFANG

    Index index = new Index();
    //We will create a new one if --create is given on command line
    //otherwise we will append to the existing index.
    Analyzer analyzer = AnalyzerFactory.createInstance();
    index.setAnalyzer(analyzer);

    index.setCreate(create);
    IndexWriter indexWriter = index.createIndexWriter(indexDirectory);

    try {
        LOGGER.info("Scanning started.");
        scanRepository.scan(indexWriter);
        LOGGER.info("Scanning ready.");
        long startTime = System.currentTimeMillis();
        LOGGER.info("Index optimizing started.");
        indexWriter.optimize();
        long stopTime = System.currentTimeMillis();
        LOGGER.info("Index optimizing done.");
        long ms = (stopTime - startTime);
        long seconds = ms / 1000;
        LOGGER.info("The Index optimizing has taken " + seconds + " seconds.");
    } catch (CorruptIndexException e) {
        LOGGER.error("CorruptIndexException: Error during optimization of index: ", e);
    } catch (SVNAuthenticationException svnae) {
        LOGGER.error("Authentication has failed. ", svnae);
    } catch (IOException e) {
        LOGGER.error("IOException: Error during optimization of index: ", e);
    } catch (Exception e) {
        LOGGER.error("Something unexpected went wrong ", e);
    } finally {
        // Original never closed the writer when scan() threw (e.g. on an
        // authentication failure), leaving the Lucene write lock held.
        try {
            indexWriter.close();
        } catch (IOException e) {
            LOGGER.error("IOException: Error during closing of index: ", e);
        }
    }
}

From source file:com.spike.text.lucene.util.LuceneTestBookIndexingUtil.java

License:Apache License

/**
 * Indexes every book file found under the configured data directory into
 * the configured index directory.
 *
 * @param args unused
 * @throws IOException if opening the directory or writing the index fails
 */
public static void main(String[] args) throws IOException {
    String dataDir = LuceneAppConstants.BOOK_DATA_DIR;
    String indexDir = LuceneAppConstants.BOOK_INDEX_DIR;

    List<File> results = new ArrayList<File>();
    findFiles(results, new File(dataDir));
    System.out.println(results.size() + " books to index");

    // try-with-resources guarantees both the writer and the directory are
    // closed (releasing the write lock) even if indexing a document fails;
    // the original leaked both on any exception in the loop.
    try (Directory directory = FSDirectory.open(Paths.get(indexDir))) {
        IndexWriterConfig config = new IndexWriterConfig(new MyStandardAnalyzer());
        config.setCommitOnClose(true);
        try (IndexWriter indexWriter = new IndexWriter(directory, config)) {
            for (File file : results) {
                Document document = getDocument(dataDir, file);
                indexWriter.addDocument(document);
            }
        }
    }
}

From source file:com.study.lucene.IndexFiles.java

License:Apache License

/**
 * Index all text files under a directory.
 *
 * <p>Command line: {@code [-index INDEX_PATH] [-docs DOCS_PATH] [-update]}.
 * The writer is managed with try-with-resources so the Lucene write lock is
 * released even when {@code indexDocs} throws (the original leaked it).
 */
public static void main(String[] args) {
    String usage = "java org.apache.lucene.demo.IndexFiles"
            + " [-index INDEX_PATH] [-docs DOCS_PATH] [-update]\n\n"
            + "This indexes the documents in DOCS_PATH, creating a Lucene index"
            + "in INDEX_PATH that can be searched with SearchFiles";
    String indexPath = "index";
    String docsPath = null;
    boolean create = true;
    // Minimal hand-rolled option parsing: -index and -docs consume the next arg.
    for (int i = 0; i < args.length; i++) {
        if ("-index".equals(args[i])) {
            indexPath = args[i + 1];
            i++;
        } else if ("-docs".equals(args[i])) {
            docsPath = args[i + 1];
            i++;
        } else if ("-update".equals(args[i])) {
            create = false;
        }
    }

    if (docsPath == null) {
        System.err.println("Usage: " + usage);
        System.exit(1);
    }

    final Path docDir = Paths.get(docsPath);
    if (!Files.isReadable(docDir)) {
        System.out.println("Document directory '" + docDir.toAbsolutePath()
                + "' does not exist or is not readable, please check the path");
        System.exit(1);
    }

    Date start = new Date();
    try (Directory dir = FSDirectory.open(Paths.get(indexPath))) {
        System.out.println("Indexing to directory '" + indexPath + "'...");

        Analyzer analyzer = new StandardAnalyzer();
        IndexWriterConfig iwc = new IndexWriterConfig(analyzer);
        iwc.setInfoStream(System.out);
        if (create) {
            // Create a new index in the directory, removing any
            // previously indexed documents:
            iwc.setOpenMode(OpenMode.CREATE);
        } else {
            // Add new documents to an existing index:
            iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);
        }

        // Optional: for better indexing performance, if you
        // are indexing many documents, increase the RAM
        // buffer.  But if you do this, increase the max heap
        // size to the JVM (eg add -Xmx512m or -Xmx1g):
        //
        // iwc.setRAMBufferSizeMB(256.0);

        // try-with-resources closes the writer even if indexDocs throws.
        try (IndexWriter writer = new IndexWriter(dir, iwc)) {
            indexDocs(writer, docDir);

            // NOTE: if you want to maximize search performance,
            // you can optionally call forceMerge here.  This can be
            // a terribly costly operation, so generally it's only
            // worth it when your index is relatively static (ie
            // you're done adding documents to it):
            //
            // writer.forceMerge(1);
        }

        Date end = new Date();
        System.out.println(end.getTime() - start.getTime() + " total milliseconds");

    } catch (IOException e) {
        System.out.println(" caught a " + e.getClass() + "\n with message: " + e.getMessage());
    }
}

From source file:com.sun.javaee.blueprints.carstore.search.UpdateIndex.java

License:Berkeley License

/**
 * Re-tags the document identified by {@code sxDocId}: removes it from the
 * index, sets or extends its tag field, and re-adds it (Lucene has no
 * in-place update).
 *
 * @param indexFile  path of the Lucene index directory
 * @param sxTagField name of the field carrying the tags
 * @param tagString  tag value to set or append
 * @param sxDocId    uid of the document to re-tag
 * @param type       {@code APPEND_FIELD} to append to an existing tag field
 *                   (space separated); any other value replaces it
 * @throws IOException if reading or writing the index fails
 */
public void updateDocTag(String indexFile, String sxTagField, String tagString, String sxDocId, String type)
        throws IOException {
    if (bDebug)
        System.out.println("Tagging document:" + sxDocId + " with \"" + sxTagField + " - " + tagString + "\"");
    // deleteIndex removes the old document and hands back its fields.
    Document doc = deleteIndex(indexFile, sxDocId);

    // update document with tag information or add to tag that exists
    // NOTE: The tag information should be persisted in another place,
    // in case indexes need to be rebuilt
    Field field = doc.getField(sxTagField);
    if (field == null) {
        // create new tag field
        field = new Field(sxTagField, tagString, Field.Store.YES, Field.Index.TOKENIZED);
    } else {
        if (type.equals(APPEND_FIELD)) {
            // get existing field and append new tag with space
            tagString = field.stringValue() + " " + tagString;
        }
        doc.removeField(sxTagField);
        field = new Field(sxTagField, tagString, Field.Store.YES, Field.Index.TOKENIZED);
    }

    doc.add(field);
    if (bDebug)
        System.out.println("Added field \n" + field + " doc to index = \n" + doc);
    // open writer to re-add document (no update in Lucene)
    Analyzer analyzer = new StandardAnalyzer();
    IndexWriter writer = new IndexWriter(indexFile, analyzer, false);
    try {
        if (bDebug)
            System.out.println("Before optimize = " + writer.docCount());
        writer.optimize();
        if (bDebug)
            System.out.println("Before add = " + writer.docCount());
        writer.addDocument(doc);
        if (bDebug)
            System.out.println("after add = " + writer.docCount());
    } finally {
        // Original leaked the writer (and the write lock) when optimize()
        // or addDocument() threw.
        writer.close();
    }
}

From source file:com.sun.socialsite.business.impl.LuceneSearchManagerImpl.java

License:Open Source License

/**
 * Initializes the search index: creates (or validates) the index directory,
 * opens and immediately closes an IndexWriter to create/check the index,
 * then starts the foreground and/or background bulk indexers as configured.
 *
 * @throws InitializationException if the index is corrupt or cannot be created
 */
public void initialize() throws InitializationException {

    String s = Config.getProperty("socialsite.search.writer.timeout");
    if (s != null) {
        IndexWriter.setDefaultWriteLockTimeout(Long.parseLong(s));
    }

    indexDir = new File(Config.getProperty("socialsite.search.index.path"));
    boolean needNewIndex = false;
    if (!indexDir.exists()) {
        log.info("Creating new indexDir: " + indexDir.getAbsolutePath());
        indexDir.mkdir();
        needNewIndex = true;
    } else if (indexDir.list().length == 0) {
        log.info("Populating empty indexDir: " + indexDir.getAbsolutePath());
        needNewIndex = true;
    } else {
        log.info("Using existing indexDir: " + indexDir.getAbsolutePath());
    }

    // Open the writer once just to create/validate the index, then close it
    // so the write lock is free for the indexers started below.
    IndexWriter writer = null;
    try {
        writer = new IndexWriter(indexDir, new StandardAnalyzer(), needNewIndex);
    } catch (CorruptIndexException ex) {
        throw new InitializationException("Corrupt search index", ex);
    } catch (IOException ex) {
        throw new InitializationException("Failed to create search index", ex);
    } finally {
        if (writer != null) {
            try {
                writer.close();
            } catch (Exception ignored) {
                // Best effort: the index itself was created/validated above;
                // a close failure here should not abort initialization, but
                // it must not vanish silently either (original swallowed it).
                log.info("Could not close index writer cleanly: " + ignored);
            }
        }
    }

    int frequency = Config.getIntProperty("socialsite.search.indexer.pass.frequency");
    int itemsPerPass = Config.getIntProperty("socialsite.search.indexer.pass.items");

    // If creating a new index, make sure it's populated
    if ((needNewIndex) && (itemsPerPass > 0)) {
        log.info("Starting one-time foreground indexer");
        BulkIndexer tempIndexer = new BulkIndexer(this);
        tempIndexer.start();
        tempIndexer.run();
        tempIndexer.stop();
    }

    if ((frequency > 0) && (itemsPerPass > 0)) {
        log.trace("Starting ongoing background indexer");
        backgroundIndexer = new BulkIndexer(this);
        scheduler.scheduleWithFixedDelay(backgroundIndexer, 0, frequency, TimeUnit.SECONDS);
    }
}