Example usage for org.apache.lucene.index IndexWriterConfig setOpenMode

Introduction

This page collects usage examples for org.apache.lucene.index.IndexWriterConfig.setOpenMode.

Prototype

public IndexWriterConfig setOpenMode(OpenMode openMode) 

Document

Specifies OpenMode of the index.
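
The open mode takes effect when the IndexWriter is constructed from the config. Below is a minimal, self-contained sketch of the call, assuming a recent Lucene release where IndexWriterConfig takes only an Analyzer; the index path and class name are placeholders, not part of any example on this page:

import java.nio.file.Paths;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.store.FSDirectory;

public class OpenModeExample {
    public static void main(String[] args) throws Exception {
        // Placeholder path: any writable directory works.
        try (FSDirectory dir = FSDirectory.open(Paths.get("/tmp/example-index"))) {
            IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer());
            // OpenMode.CREATE           : build a new index, overwriting any existing one
            // OpenMode.APPEND           : open an existing index, failing if none exists
            // OpenMode.CREATE_OR_APPEND : append if an index exists, otherwise create one (the default)
            iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);
            try (IndexWriter writer = new IndexWriter(dir, iwc)) {
                writer.commit();
            }
        }
    }
}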

Usage

From source file:di.uniba.it.nlpita.index.BuildSeoDwarfIndex.java

public void start() throws Exception {
    long t = System.currentTimeMillis();
    //loadBasicTypesMapping();
    System.out.println(System.currentTimeMillis() - t + " ms.");
    t = System.currentTimeMillis();
    loadTriples();
    System.out.println(System.currentTimeMillis() - t + " ms.");
    t = System.currentTimeMillis();
    loadPropertyLabels();
    System.out.println(System.currentTimeMillis() - t + " ms.");
    t = System.currentTimeMillis();
    loadClassLabels();
    System.out.println(System.currentTimeMillis() - t + " ms.");
    t = System.currentTimeMillis();
    loadClassHierarchy();
    System.out.println(System.currentTimeMillis() - t + " ms.");
    t = System.currentTimeMillis();
    loadEntityLabels();
    System.out.println(System.currentTimeMillis() - t + " ms.");
    t = System.currentTimeMillis();
    loadEntityClasses();
    System.out.println(System.currentTimeMillis() - t + " ms.");

    t = System.currentTimeMillis();
    entityIdFromUriWithPrefix = null;
    classIdFromUri = null;
    propertyIdFromUri = null;
    System.gc();
    processTriples();
    System.out.println(System.currentTimeMillis() - t + " ms.");

    t = System.currentTimeMillis();
    HashMap<String, Analyzer> analyzerMap = new HashMap<>();
    analyzerMap.put("label", new EnglishAnalyzer(CharArraySet.EMPTY_SET));
    analyzerMap.put("id", new WhitespaceAnalyzer());
    analyzerMap.put("type", new WhitespaceAnalyzer());
    analyzerMap.put("domainOfProperty", new WhitespaceAnalyzer());
    analyzerMap.put("rangeOfProperty", new WhitespaceAnalyzer());
    analyzerMap.put("propertyDomain", new WhitespaceAnalyzer());
    Analyzer analyzer = new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(), analyzerMap);
    HashMap<Integer, IndexedToken> elements = new HashMap<>();
    try (FSDirectory directory = FSDirectory.open(Paths.get(basePathOutput + "lucene"))) {
        IndexWriterConfig iwc = new IndexWriterConfig(analyzer);
        iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
        try (IndexWriter writer = new IndexWriter(directory, iwc)) {
            System.out.println("Indexing entities");
            indexEntities(writer, elements);
            System.out.println(System.currentTimeMillis() - t);
            t = System.currentTimeMillis();
            System.out.println("Indexing classes");
            indexClasses(writer, elements);
            System.out.println(System.currentTimeMillis() - t);
            t = System.currentTimeMillis();
            System.out.println("Indexing propertys");
            indexProperties(writer, elements);
            System.out.println(System.currentTimeMillis() - t);
            t = System.currentTimeMillis();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    //save elements to file
    System.out.println("Creating the trie");
    Trie trie = new Trie();

    int c = 0;
    for (IndexedToken it : elements.values()) {
        trie.add(it.getText());
        c++;
        if (c % 100000 == 0) {
            System.out.println(c + " elements added to the trie");
        }
    }
    System.out.println(c + " elements added to the trie");
    c = 0;
    for (IndexedToken it : elements.values()) {
        String suffix = trie.getOneSuffix(it.getText());
        if (suffix != null) {
            it.setPrefix(true);
            c++;
        }
    }
    System.out.println(c + " are prefix of another element");
    System.out.println("Serializing the tokens");
    try (ObjectOutputStream oos = new ObjectOutputStream(new FileOutputStream(basePathOutput + "elements"))) {
        oos.writeObject(elements);
        oos.writeInt(IndexedToken.counter);
    }
}
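
A hedged sketch of how the index built above could later be opened for queries. The searchLabels helper and the query-parser setup are assumptions for illustration, not part of the original class; the "label" field and its EnglishAnalyzer come from the analyzer map in the example, and basePathOutput is the same field used above:

void searchLabels(String queryText) throws Exception {
    try (FSDirectory directory = FSDirectory.open(Paths.get(basePathOutput + "lucene"));
            DirectoryReader reader = DirectoryReader.open(directory)) {
        IndexSearcher searcher = new IndexSearcher(reader);
        // Parse against the "label" field with the same analyzer used at indexing time.
        Query query = new QueryParser("label", new EnglishAnalyzer(CharArraySet.EMPTY_SET)).parse(queryText);
        TopDocs hits = searcher.search(query, 10);
        System.out.println(hits.scoreDocs.length + " results for: " + queryText);
    }
}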

From source file:di.uniba.it.tee2.index.TemporalEventIndexing.java

License:Open Source License

/**
 * @param lang
 * @param mainDir
 * @throws IOException
 *
 */
public void init(String lang, String mainDir) throws IOException {
    tempExtractor = new TemporalExtractor(lang);
    tempExtractor.init();
    time_index = FSDirectory.open(new File(mainDir + "/time"));
    doc_index = FSDirectory.open(new File(mainDir + "/doc"));
    docrep_index = FSDirectory.open(new File(mainDir + "/repo"));
    switch (lang) {
    case "italian":
        analyzer = new ItalianNoStemAnalyzer(Version.LUCENE_48);
        break;
    case "english":
        analyzer = new EnglishNoStemAnalyzer(Version.LUCENE_48);
        break;
    default:
        analyzer = new StandardAnalyzer(Version.LUCENE_48);
        break;
    }
    IndexWriterConfig configTime = new IndexWriterConfig(Version.LUCENE_48, analyzer);
    configTime.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    time_writer = new IndexWriter(time_index, configTime);
    IndexWriterConfig configDoc = new IndexWriterConfig(Version.LUCENE_48, analyzer);
    configDoc.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    doc_writer = new IndexWriter(doc_index, configDoc);
    IndexWriterConfig configDocRep = new IndexWriterConfig(Version.LUCENE_48, analyzer);
    configDocRep.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    docrep_writer = new IndexWriter(docrep_index, configDocRep);

}
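
The init method above repeats the same configure-and-open sequence for each of the three writers, which invites copy-paste slips. A small helper keeps that logic in one place; this is only a sketch against the Lucene 4.x API the class already uses, and openWriter is an invented name, not part of the original source:

private IndexWriter openWriter(Directory dir, Analyzer analyzer) throws IOException {
    // Each IndexWriter gets its own IndexWriterConfig instance.
    IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_48, analyzer);
    config.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    return new IndexWriter(dir, config);
}

init could then reduce to three calls, e.g. time_writer = openWriter(time_index, analyzer), and likewise for doc_writer and docrep_writer.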

From source file:di.uniba.it.tee2.index.TemporalEventIndexingTS.java

License:Open Source License

/**
 * @param lang
 * @param mainDir
 * @throws IOException
 *
 */
public void init(String lang, String mainDir) throws IOException {
    //tempExtractor = new TemporalExtractor(lang);
    //tempExtractor.init();
    this.lang = lang;
    time_index = FSDirectory.open(new File(mainDir + "/time"));
    doc_index = FSDirectory.open(new File(mainDir + "/doc"));
    docrep_index = FSDirectory.open(new File(mainDir + "/repo"));
    switch (lang) {
    case "italian":
        analyzer = new ItalianNoStemAnalyzer(Version.LUCENE_48);
        break;
    case "english":
        analyzer = new EnglishNoStemAnalyzer(Version.LUCENE_48);
        break;
    default:
        analyzer = new StandardAnalyzer(Version.LUCENE_48);
        break;
    }
    IndexWriterConfig configTime = new IndexWriterConfig(Version.LUCENE_48, analyzer);
    configTime.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    time_writer = new IndexWriter(time_index, configTime);
    IndexWriterConfig configDoc = new IndexWriterConfig(Version.LUCENE_48, analyzer);
    configDoc.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    doc_writer = new IndexWriter(doc_index, configDoc);
    IndexWriterConfig configDocRep = new IndexWriterConfig(Version.LUCENE_48, analyzer);
    configDocRep.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    docrep_writer = new IndexWriter(docrep_index, configDocRep);

}

From source file:dk.defxws.fgslucene.IndexWriterCache.java

License:Open Source License

/**
 * Gets the IndexWriter for the given index and puts it into the cache.
 *
 * @param indexName
 *            name of the index to open.
 * @param create
 *            if true the index is opened with OpenMode.CREATE, otherwise with OpenMode.CREATE_OR_APPEND.
 * @param config
 *            gsearch config-Object.
 * @throws GenericSearchException
 *             if the IndexWriter cannot be created.
 */
private IndexWriter getIndexWriter(final String indexName, final boolean create, final Config config)
        throws GenericSearchException {
    if (indexWriters.get(indexName) == null) {
        IndexWriter iw = null;
        try {
            IndexWriterConfig indexWriterConfig = new IndexWriterConfig(Constants.LUCENE_VERSION,
                    getAnalyzer(config.getAnalyzer(indexName)));
            if (create) {
                indexWriterConfig.setOpenMode(OpenMode.CREATE);
            } else {
                indexWriterConfig.setOpenMode(OpenMode.CREATE_OR_APPEND);
            }
            if (config.getMaxBufferedDocs(indexName) > 1) {
                indexWriterConfig.setMaxBufferedDocs(config.getMaxBufferedDocs(indexName));
            }
            if (config.getRamBufferSizeMb(indexName) > 1) {
                indexWriterConfig.setRAMBufferSizeMB(config.getRamBufferSizeMb(indexName));
            }

            if (config.getMergeFactor(indexName) > 1 || config.getMaxMergeDocs(indexName) > 1
                    || config.getMaxMergeMb(indexName) > 1) {
                LogByteSizeMergePolicy logMergePolicy = new LogByteSizeMergePolicy();
                if (config.getMergeFactor(indexName) > 1) {
                    logMergePolicy.setMergeFactor(config.getMergeFactor(indexName));
                }
                if (config.getMaxMergeDocs(indexName) > 1) {
                    logMergePolicy.setMaxMergeDocs(config.getMaxMergeDocs(indexName));
                }
                if (config.getMaxMergeMb(indexName) > 1) {
                    logMergePolicy.setMaxMergeMB(config.getMaxMergeMb(indexName));
                }
                indexWriterConfig.setMergePolicy(logMergePolicy);
            }
            if (config.getDefaultWriteLockTimeout(indexName) > 1) {
                indexWriterConfig.setWriteLockTimeout(config.getDefaultWriteLockTimeout(indexName));
            }
            if (config.getLuceneDirectoryImplementation(indexName) != null) {
                // Initialize IndexWriter with configured FSDirectory
                FSDirectory directory = getDirectoryImplementation(
                        config.getLuceneDirectoryImplementation(indexName),
                        new File(config.getIndexDir(indexName)));
                iw = new IndexWriter(directory, indexWriterConfig);
            } else {
                // Initialize IndexWriter with default FSDirectory
                iw = new IndexWriter(FSDirectory.open(new File(config.getIndexDir(indexName))),
                        indexWriterConfig);
            }
            if (config.getMaxChunkSize(indexName) > 1) {
                if (iw.getDirectory() instanceof MMapDirectory) {
                    ((MMapDirectory) iw.getDirectory()).setMaxChunkSize(config.getMaxChunkSize(indexName));
                }
            }
        } catch (Exception e) {
            iw = null;
            throw new GenericSearchException(
                    "Error creating IndexWriter for index indexName=" + indexName + ":\n", e);
        }
        indexWriters.put(indexName, iw);
        if (logger.isDebugEnabled())
            logger.debug("getIndexWriter put to map " + iw);
        return iw;
    }
    return indexWriters.get(indexName);
}

From source file:dk.dma.msinm.lucene.AbstractLuceneIndex.java

License:Open Source License

/**
 * Creates and returns a Lucene writer.
 */
public IndexWriter getNewWriter() throws IOException {

    StandardAnalyzer analyzer = new StandardAnalyzer(LuceneUtils.LUCENE_VERSION);
    IndexWriterConfig iwc = new IndexWriterConfig(LuceneUtils.LUCENE_VERSION, analyzer);
    // Add new documents to an existing index:
    iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);

    Path indexFolder = getIndexFolder();
    try {
        Directory dir = FSDirectory.open(indexFolder.toFile());
        return new IndexWriter(dir, iwc);
    } catch (IOException ex) {
        log.error("Failed to create Customer Lucene Index in folder " + indexFolder, ex);
        throw ex;
    }
}
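
A hedged usage sketch of the method above; addEntry and the field names are assumptions for illustration, not part of the original class. Because the writer is opened with CREATE_OR_APPEND, repeated calls keep extending the same index rather than rebuilding it:

public void addEntry(String id, String title) throws IOException {
    try (IndexWriter writer = getNewWriter()) {
        Document doc = new Document();
        doc.add(new StringField("id", id, Field.Store.YES));
        doc.add(new TextField("title", title, Field.Store.YES));
        writer.addDocument(doc);
        writer.commit();
    }
}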

From source file:dk.dma.msinm.lucene.CommitUserDataTest.java

License:Open Source License

@Test
public void test() throws IOException {

    File indexFolder = Files.createTempDir();
    Directory directory = FSDirectory.open(indexFolder);

    // Create an index writer
    IndexWriterConfig iwc = new IndexWriterConfig(LuceneUtils.LUCENE_VERSION,
            new StandardAnalyzer(LuceneUtils.LUCENE_VERSION));
    iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    IndexWriter indexWriter = new IndexWriter(directory, iwc);

    // Write a document
    Document doc = new Document();
    doc.add(new IntField("id", 100, Field.Store.YES));
    indexWriter.addDocument(doc);

    // Add user data
    Map<String, String> userData = new HashMap<>();
    userData.put("A", "B");
    indexWriter.setCommitData(userData);
    indexWriter.close();

    // Check if we can read user data
    DirectoryReader indexReader = DirectoryReader.open(FSDirectory.open(indexFolder));
    assertEquals("B", indexReader.getIndexCommit().getUserData().get("A"));

}

From source file:Dl4j.Doc2VecWithAutoEncoder.java

public static void main(String[] args) throws FileNotFoundException, IOException {

    if (args.length < 1) {
        args = new String[1];
        args[0] = "/home/procheta/NetBeansProjects/Dl4jTest/src/dl4jtest/init.properties";
    }
    String[] docs = { "The cat sat on the mat", "The dog sat on the mat", "The chicken ate the corn",
            "The corn was sweet", "The milk was sweet", "The dog sat on the mat", "The cat drank the milk",
            "The dog ate the bone" };

    try {
        Properties prop = new Properties();
        prop.load(new FileReader(args[0]));
        LuceneDocFetcher luceneDocFetcher;

        // test loading a simple collection of docs...
        // Create in-memory index
        RAMDirectory ramdir = new RAMDirectory();

        IndexWriterConfig iwcfg = new IndexWriterConfig(new EnglishAnalyzer());
        iwcfg.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
        IndexWriter writer = new IndexWriter(ramdir, iwcfg);
        for (String doc : docs) {
            try {
                Document lDoc = new Document();
                lDoc.add(new Field(LuceneDocFetcher.CONTENET_FIELD_NAME, doc, Field.Store.NO,
                        Field.Index.ANALYZED, Field.TermVector.YES));
                writer.addDocument(lDoc);
            } catch (Exception e) {
                // skip documents that fail to index
            }
        }
        writer.close();
        Path path = Paths.get(prop.getProperty("index"));
        Directory dir = FSDirectory.open(path);

        Doc2VecWithAutoEncoder dva = new Doc2VecWithAutoEncoder();
        System.out.println(prop.getProperty("depth"));
        ArrayList<String> docIds;
        dva.getDocIds(prop.getProperty("qid"), prop.getProperty("qrel"));
        //   docIds = dva.subsample(Integer.parseInt(prop.getProperty("depth")), prop.getProperty("fileList"), prop.getProperty("qid"), prop.getProperty("folderPath"));
        //  dva.saveSampleDocId(docIds, prop.getProperty("sampleOutput"));
        // pass the in-mem index reader to the vectorizer
        //  luceneDocFetcher = new LuceneDocFetcher(dir, dva.docIds);
        luceneDocFetcher = new LuceneDocFetcher(dir, dva.docIds, dva.labels);

        DataSetIterator iter = new BaseDatasetIterator(1, 50, luceneDocFetcher);
        while (iter.hasNext()) {
            DataSet v = iter.next();

            System.out.println(v.getFeatures());
        }

        // test auto-encoding
        final int vocabSize = luceneDocFetcher.getDimension();
        //int seed = Random.nextInt(vocabSize);
        int iterations = 2;
        int listenerFreq = iterations / 5;

        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                //.seed(seed)
                .iterations(iterations).optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                .list(2)
                .layer(0,
                        new RBM.Builder().nIn(vocabSize).nOut(5)
                                .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
                .layer(1,
                        new RBM.Builder().nIn(5).nOut(10).lossFunction(LossFunctions.LossFunction.RMSE_XENT)
                                .build())
                //.pretrain(true)
                //.backprop(true)

                //.layer(2, new RBM.Builder().nIn(500).nOut(250).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
                //.layer(3, new RBM.Builder().nIn(250).nOut(100).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
                //.layer(4, new RBM.Builder().nIn(100).nOut(30).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build()) 

                /*
                 //encoding stops
                 .layer(5, new RBM.Builder().nIn(30).nOut(100).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())    
                        
                 //decoding starts
                 .layer(6, new RBM.Builder().nIn(100).nOut(250).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
                 .layer(7, new RBM.Builder().nIn(250).nOut(500).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
                 .layer(8, new RBM.Builder().nIn(500).nOut(1000).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
                 .layer(9, new OutputLayer.Builder(LossFunctions.LossFunction.RMSE_XENT).nIn(1000).nOut(vocabSize).build())
                 .pretrain(true).backprop(true)
                 */
                .build();

        MultiLayerNetwork model = new MultiLayerNetwork(conf);
        model.init();

        model.setListeners(Arrays.asList((IterationListener) new ScoreIterationListener(listenerFreq)));
        model.fit(iter);

        System.out.println("Output layer: ");
        iter.reset();
        while (iter.hasNext()) {
            DataSet v = iter.next();

            // System.out.println(model.output(v.getFeatures()));
        }
        //++Procheta
        iter.reset();
        dva.saveModel(iter, prop.getProperty("output"), model);
    } catch (Exception ex) {
        ex.printStackTrace();
    }

}

From source file:edu.albany.ir.example.IndexFiles.java

License:Apache License

/** Index all text files under a directory. */
public static void main(String[] args) {
    String usage = "java org.apache.lucene.demo.IndexFiles"
            + " [-index INDEX_PATH] [-docs DOCS_PATH] [-update]\n\n"
            + "This indexes the documents in DOCS_PATH, creating a Lucene index"
            + "in INDEX_PATH that can be searched with SearchFiles";
    String indexPath = "index";
    String docsPath = "TREC";
    boolean create = true;
    for (int i = 0; i < args.length; i++) {
        if ("-index".equals(args[i])) {
            indexPath = args[i + 1];
            i++;
        } else if ("-docs".equals(args[i])) {
            docsPath = args[i + 1];
            i++;
        } else if ("-update".equals(args[i])) {
            create = false;
        }
    }

    if (docsPath == null) {
        System.err.println("Usage: " + usage);
        System.exit(1);
    }

    final File docDir = new File(docsPath);
    if (!docDir.exists() || !docDir.canRead()) {
        System.out.println("Document directory '" + docDir.getAbsolutePath()
                + "' does not exist or is not readable, please check the path");
        System.exit(1);
    }

    Date start = new Date();
    try {
        System.out.println("Indexing to directory '" + indexPath + "'...");

        Directory dir = FSDirectory.open(new File(indexPath));
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_31);
        IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_31, analyzer);
        create = true; // note: this overrides the -update flag, so the index is always rebuilt

        if (create) {
            // Create a new index in the directory, removing any
            // previously indexed documents:
            iwc.setOpenMode(OpenMode.CREATE);
        } else {
            // Add new documents to an existing index:
            iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);
        }

        // Optional: for better indexing performance, if you
        // are indexing many documents, increase the RAM
        // buffer. But if you do this, increase the max heap
        // size to the JVM (eg add -Xmx512m or -Xmx1g):
        //
        // iwc.setRAMBufferSizeMB(256.0);

        IndexWriter writer = new IndexWriter(dir, iwc);
        indexDocs(writer, docDir);

        // NOTE: if you want to maximize search performance,
        // you can optionally call optimize here. This can be
        // a costly operation, so generally it's only worth
        // it when your index is relatively static (ie you're
        // done adding documents to it):
        //
        // writer.optimize();

        writer.close();

        Date end = new Date();
        System.out.println(end.getTime() - start.getTime() + " total milliseconds");

    } catch (IOException e) {
        System.out.println(" caught a " + e.getClass() + "\n with message: " + e.getMessage());
    }
}

From source file:edu.cmu.cs.in.hoop.hoops.save.HoopDocumentIndexer.java

License:Open Source License

private void initSearch() {
    debug("initSearch ()");

    if (searchStore.isEmpty() == true) {
        searchStore = getProjectPath() + "/system/search";

        File checker = new File(searchStore);

        if (checker.exists() == false) {
            if (HoopLink.fManager.createDirectory(searchStore) == false) {
                debug("Error creating search directory: " + searchStore);
                return;
            }
        } else
            debug("Document search directory exists, excellent");
    }

    debug("Indexing to directory '" + searchStore + "'...");

    if (writer == null) {
        Directory dir = null;

        try {
            dir = FSDirectory.open(new File(searchStore));
        } catch (IOException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
            return;
        }

        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_40);
        IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_40, analyzer);

        if (create) {
            // Create a new index in the directory, removing any
            // previously indexed documents:
            iwc.setOpenMode(OpenMode.CREATE);
        } else {
            // Add new documents to an existing index:
            iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);
        }

        // Optional: for better indexing performance, if you
        // are indexing many documents, increase the RAM
        // buffer.  But if you do this, increase the max heap
        // size to the JVM (eg add -Xmx512m or -Xmx1g):
        //
        // iwc.setRAMBufferSizeMB(256.0);

        try {
            writer = new IndexWriter(dir, iwc);
        } catch (IOException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
            writer = null; // Disable
        }
    }
}

From source file:edu.cmu.cs.in.search.HoopLuceneIndex.java

License:Apache License

/** Index all text files under a directory. */
public static void main(String[] args) {
    String usage = "java org.apache.lucene.demo.IndexFiles"
            + " [-index INDEX_PATH] [-docs DOCS_PATH] [-update]\n\n"
            + "This indexes the documents in DOCS_PATH, creating a Lucene index"
            + "in INDEX_PATH that can be searched with SearchFiles";
    String indexPath = "index";
    String docsPath = null;
    boolean create = true;

    for (int i = 0; i < args.length; i++) {
        if ("-index".equals(args[i])) {
            indexPath = args[i + 1];
            i++;
        } else if ("-docs".equals(args[i])) {
            docsPath = args[i + 1];
            i++;
        } else if ("-update".equals(args[i])) {
            create = false;
        }
    }

    if (docsPath == null) {
        System.err.println("Usage: " + usage);
        System.exit(1);
    }

    final File docDir = new File(docsPath);

    if (!docDir.exists() || !docDir.canRead()) {
        System.out.println("Document directory '" + docDir.getAbsolutePath()
                + "' does not exist or is not readable, please check the path");
        System.exit(1);
    }

    Date start = new Date();

    try {
        System.out.println("Indexing to directory '" + indexPath + "'...");

        Directory dir = FSDirectory.open(new File(indexPath));
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_40);
        IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_40, analyzer);

        if (create) {
            // Create a new index in the directory, removing any
            // previously indexed documents:
            iwc.setOpenMode(OpenMode.CREATE);
        } else {
            // Add new documents to an existing index:
            iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);
        }

        // Optional: for better indexing performance, if you
        // are indexing many documents, increase the RAM
        // buffer.  But if you do this, increase the max heap
        // size to the JVM (eg add -Xmx512m or -Xmx1g):
        //
        // iwc.setRAMBufferSizeMB(256.0);

        IndexWriter writer = new IndexWriter(dir, iwc);
        indexDocs(writer, docDir);

        // NOTE: if you want to maximize search performance,
        // you can optionally call forceMerge here.  This can be
        // a terribly costly operation, so generally it's only
        // worth it when your index is relatively static (ie
        // you're done adding documents to it):
        //
        // writer.forceMerge(1);

        writer.close();

        Date end = new Date();
        System.out.println(end.getTime() - start.getTime() + " total milliseconds");
    } catch (IOException e) {
        System.out.println(" caught a " + e.getClass() + "\n with message: " + e.getMessage());
    }
}
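
Both IndexFiles-style examples above delegate the actual document creation to an indexDocs helper that is not shown on this page. A hedged sketch of what such a helper typically looks like against the Lucene 4.x classes used in the example above; the field names and recursion strategy are assumptions, not the original code:

static void indexDocs(IndexWriter writer, File file) throws IOException {
    if (!file.canRead()) {
        return;
    }
    if (file.isDirectory()) {
        String[] children = file.list();
        if (children != null) {
            for (String child : children) {
                indexDocs(writer, new File(file, child)); // recurse into subdirectories
            }
        }
        return;
    }
    try (BufferedReader contents = new BufferedReader(
            new InputStreamReader(new FileInputStream(file), StandardCharsets.UTF_8))) {
        Document doc = new Document();
        // Store the path so search results can point back to the file.
        doc.add(new StringField("path", file.getPath(), Field.Store.YES));
        // Tokenize the file contents; this field is indexed but not stored.
        doc.add(new TextField("contents", contents));
        // With OpenMode.CREATE the directory starts empty, so addDocument is enough; with
        // CREATE_OR_APPEND, writer.updateDocument(new Term("path", file.getPath()), doc)
        // would replace any previously indexed copy of the same file.
        writer.addDocument(doc);
    }
}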