Example usage for org.apache.lucene.index IndexWriterConfig setOpenMode

List of usage examples for org.apache.lucene.index IndexWriterConfig setOpenMode

Introduction

On this page you can find example usage for org.apache.lucene.index IndexWriterConfig setOpenMode.

Prototype

public IndexWriterConfig setOpenMode(OpenMode openMode) 

Document

Specifies the OpenMode of the index, i.e. whether the IndexWriter creates a new index, appends to an existing one, or creates it only if none exists.

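Before the individual examples, here is a minimal, self-contained sketch of the call in isolation. It assumes a Lucene 5.x or later API (where IndexWriterConfig takes only an Analyzer); the class name and the "example-index" path are placeholders, not from any of the sources below.

import java.nio.file.Paths;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class SetOpenModeExample {
    public static void main(String[] args) throws Exception {
        IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer());

        // CREATE           : create a new index, overwriting any existing one
        // APPEND           : open an existing index, failing if none exists
        // CREATE_OR_APPEND : open an existing index, or create one if missing (the default)
        config.setOpenMode(OpenMode.CREATE_OR_APPEND);

        // "example-index" is a placeholder path
        try (Directory dir = FSDirectory.open(Paths.get("example-index"));
                IndexWriter writer = new IndexWriter(dir, config)) {
            // add or update documents here
        }
    }
}

The open mode only matters at the moment the IndexWriter is constructed; the examples below mostly pick CREATE_OR_APPEND for incremental indexing and CREATE when rebuilding an index from scratch.
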
Usage

From source file: com.plug.Plug_8_5_2.java

License: Apache License

private IndexWriter getIndexWriter(boolean p_create) throws IOException {
    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_44, m_analyzer);
    OpenMode om = p_create ? OpenMode.CREATE : OpenMode.CREATE_OR_APPEND;
    conf.setOpenMode(om);

    IndexWriter result = null;
    boolean deleteFiles = false;
    try {
        result = new IndexWriter(m_fsDir, conf);
    } catch (EOFException eofe) {
        deleteFiles = true;
    } catch (IndexFormatTooOldException ie) {
        deleteFiles = true;
    }

    if (deleteFiles) {
        deleteFile(m_directory);

        result = new IndexWriter(m_fsDir, conf);
    }

    return result;
}

From source file: com.qwazr.search.bench.LuceneCommonIndex.java

License: Apache License

LuceneCommonIndex(final Path rootDirectory, final String schemaName, final String indexName,
        final double ramBufferSize, final boolean useCompoundFile) throws IOException {

    final Path schemaDirectory = Files.createDirectory(rootDirectory.resolve(schemaName));
    this.indexDirectory = Files.createDirectory(schemaDirectory.resolve(indexName));
    this.luceneDirectory = indexDirectory.resolve("data");
    this.dataDirectory = FSDirectory.open(luceneDirectory);
    final IndexWriterConfig indexWriterConfig = new IndexWriterConfig(
            new PerFieldAnalyzerWrapper(new StandardAnalyzer()));
    indexWriterConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    indexWriterConfig.setRAMBufferSizeMB(ramBufferSize);

    final ConcurrentMergeScheduler mergeScheduler = new ConcurrentMergeScheduler();
    mergeScheduler.setMaxMergesAndThreads(MAX_SSD_MERGE_THREADS, MAX_SSD_MERGE_THREADS);
    indexWriterConfig.setMergeScheduler(mergeScheduler);
    indexWriterConfig.setUseCompoundFile(useCompoundFile);

    final TieredMergePolicy mergePolicy = new TieredMergePolicy();
    indexWriterConfig.setMergePolicy(mergePolicy);

    // We use snapshots deletion policy
    final SnapshotDeletionPolicy snapshotDeletionPolicy = new SnapshotDeletionPolicy(
            indexWriterConfig.getIndexDeletionPolicy());
    indexWriterConfig.setIndexDeletionPolicy(snapshotDeletionPolicy);

    this.indexWriter = new IndexWriter(this.dataDirectory, indexWriterConfig);
    this.localReplicator = new LocalReplicator();
}

From source file: com.qwazr.search.index.IndexInstance.java

License: Apache License

/**
 * @param schema
 * @param indexDirectory
 * @return
 */
final static IndexInstance newInstance(SchemaInstance schema, File indexDirectory,
        IndexSettingsDefinition settings)
        throws ServerException, IOException, ReflectiveOperationException, InterruptedException {
    UpdatableAnalyzer indexAnalyzer = null;
    UpdatableAnalyzer queryAnalyzer = null;
    IndexWriter indexWriter = null;
    Directory dataDirectory = null;
    try {

        if (!indexDirectory.exists())
            indexDirectory.mkdir();
        if (!indexDirectory.isDirectory())
            throw new IOException(
                    "This name is not valid. No directory exists for this location: " + indexDirectory);

        FileSet fileSet = new FileSet(indexDirectory);

        //Loading the settings
        if (settings == null) {
            settings = fileSet.settingsFile.exists()
                    ? JsonMapper.MAPPER.readValue(fileSet.settingsFile, IndexSettingsDefinition.class)
                    : IndexSettingsDefinition.EMPTY;
        } else {
            JsonMapper.MAPPER.writeValue(fileSet.settingsFile, settings);
        }

        //Loading the fields
        File fieldMapFile = new File(indexDirectory, FIELDS_FILE);
        LinkedHashMap<String, FieldDefinition> fieldMap = fieldMapFile.exists()
                ? JsonMapper.MAPPER.readValue(fieldMapFile, FieldDefinition.MapStringFieldTypeRef)
                : new LinkedHashMap<>();

        //Loading the analyzers
        File analyzerMapFile = new File(indexDirectory, ANALYZERS_FILE);
        LinkedHashMap<String, AnalyzerDefinition> analyzerMap = analyzerMapFile.exists()
                ? JsonMapper.MAPPER.readValue(analyzerMapFile, AnalyzerDefinition.MapStringAnalyzerTypeRef)
                : new LinkedHashMap<>();

        AnalyzerContext context = new AnalyzerContext(analyzerMap, fieldMap);
        indexAnalyzer = new UpdatableAnalyzer(context, context.indexAnalyzerMap);
        queryAnalyzer = new UpdatableAnalyzer(context, context.queryAnalyzerMap);

        // Open and lock the data directory
        dataDirectory = FSDirectory.open(fileSet.dataDirectory.toPath());

        // Set up the IndexWriterConfig
        IndexWriterConfig indexWriterConfig = new IndexWriterConfig(indexAnalyzer);
        if (settings != null && settings.similarity_class != null)
            indexWriterConfig.setSimilarity(IndexUtils.findSimilarity(settings.similarity_class));
        indexWriterConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
        SnapshotDeletionPolicy snapshotDeletionPolicy = new SnapshotDeletionPolicy(
                indexWriterConfig.getIndexDeletionPolicy());
        indexWriterConfig.setIndexDeletionPolicy(snapshotDeletionPolicy);
        indexWriter = new IndexWriter(dataDirectory, indexWriterConfig);
        if (indexWriter.hasUncommittedChanges())
            indexWriter.commit();

        // Finally we build the SearcherManager
        SearcherManager searcherManager = new SearcherManager(indexWriter, true, null);

        return new IndexInstance(schema, dataDirectory, settings, analyzerMap, fieldMap, fileSet, indexWriter,
                searcherManager, queryAnalyzer);
    } catch (IOException | ServerException | ReflectiveOperationException | InterruptedException e) {
        // We failed in opening the index. We close everything we can
        if (queryAnalyzer != null)
            IOUtils.closeQuietly(queryAnalyzer);
        if (indexAnalyzer != null)
            IOUtils.closeQuietly(indexAnalyzer);
        if (indexWriter != null)
            IOUtils.closeQuietly(indexWriter);
        if (dataDirectory != null)
            IOUtils.closeQuietly(dataDirectory);
        throw e;
    }
}

From source file: com.radialpoint.word2vec.lucene.IndexFiles.java

License: Open Source License

/** Index all text files under a directory. */
@SuppressWarnings("deprecation")
public static void main(String[] args) {
    String usage = "java org.apache.lucene.demo.IndexFiles"
            + " [-index INDEX_PATH] [-docs DOCS_PATH] [-update]\n\n"
            + "This indexes the documents in DOCS_PATH, creating a Lucene index"
            + "in INDEX_PATH that can be searched with SearchFiles";
    String indexPath = "index";
    String docsPath = null;
    boolean create = true;
    for (int i = 0; i < args.length; i++) {
        if ("-index".equals(args[i])) {
            indexPath = args[i + 1];
            i++;
        } else if ("-docs".equals(args[i])) {
            docsPath = args[i + 1];
            i++;
        } else if ("-update".equals(args[i])) {
            create = false;
        }
    }

    if (docsPath == null) {
        System.err.println("Usage: " + usage);
        System.exit(1);
    }

    final File docDir = new File(docsPath);
    if (!docDir.exists() || !docDir.canRead()) {
        System.out.println("Document directory '" + docDir.getAbsolutePath()
                + "' does not exist or is not readable, please check the path");
        System.exit(1);
    }

    Date start = new Date();
    try {
        System.out.println("Indexing to directory '" + indexPath + "'...");

        Directory dir = FSDirectory.open(new File(indexPath));
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_40);
        IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_40, analyzer);

        if (create) {
            // Create a new index in the directory, removing any
            // previously indexed documents:
            iwc.setOpenMode(OpenMode.CREATE);
        } else {
            // Add new documents to an existing index:
            iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);
        }

        // Optional: for better indexing performance, if you
        // are indexing many documents, increase the RAM
        // buffer.  But if you do this, increase the max heap
        // size to the JVM (eg add -Xmx512m or -Xmx1g):
        //
        // iwc.setRAMBufferSizeMB(256.0);

        IndexWriter writer = new IndexWriter(dir, iwc);
        indexDocs(writer, docDir);

        // NOTE: if you want to maximize search performance,
        // you can optionally call forceMerge here.  This can be
        // a terribly costly operation, so generally it's only
        // worth it when your index is relatively static (ie
        // you're done adding documents to it):
        //
        // writer.forceMerge(1);

        writer.close();

        Date end = new Date();
        System.out.println(end.getTime() - start.getTime() + " total milliseconds");

    } catch (IOException e) {
        System.out.println(" caught a " + e.getClass() + "\n with message: " + e.getMessage());
    }
}

From source file: com.rapidminer.search.GlobalSearchIndexer.java

License: Open Source License

/**
 * Creates an instance of {@link IndexWriter}.
 *
 * @return the writer, never {@code null}
 * @throws IOException
 *       if something goes wrong
 */
private IndexWriter createIndexWriter() throws IOException {
    Directory dir = FSDirectory.open(indexDirectoryPath);
    IndexWriterConfig config = new IndexWriterConfig(GlobalSearchUtilities.ANALYZER);
    config.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    return new IndexWriter(dir, config);
}

From source file: com.ricky.codelab.lucene.LuceneIndexAndSearchDemo.java

License: Apache License

/**
 * Indexes a sample document with IKAnalyzer and then searches it.
 * @param args
 */
public static void main(String[] args) {
    //Lucene Document field name
    String fieldName = "text";
    //Sample text to index
    String text = "IK Analyzer???????";

    //Build the analyzer (IKAnalyzer in smart segmentation mode)
    Analyzer analyzer = new IKAnalyzer(true);

    Directory directory = null;
    IndexWriter iwriter = null;
    IndexReader ireader = null;
    IndexSearcher isearcher = null;
    try {
        //Use an in-memory index
        directory = new RAMDirectory();

        //Configure the IndexWriterConfig
        IndexWriterConfig iwConfig = new IndexWriterConfig(analyzer);
        iwConfig.setOpenMode(OpenMode.CREATE_OR_APPEND);
        iwriter = new IndexWriter(directory, iwConfig);
        //Build and index a single document
        Document doc = new Document();
        doc.add(new StringField("ID", "10000", Field.Store.YES));
        doc.add(new TextField(fieldName, text, Field.Store.YES));
        iwriter.addDocument(doc);
        iwriter.close();

        //Search the index we just built
        ireader = DirectoryReader.open(directory);
        isearcher = new IndexSearcher(ireader);

        String keyword = "?";
        //Build the Query with QueryParser
        QueryParser qp = new QueryParser(fieldName, analyzer);
        qp.setDefaultOperator(QueryParser.AND_OPERATOR);
        Query query = qp.parse(keyword);
        System.out.println("Query = " + query);

        //Retrieve the top 5 matching documents
        TopDocs topDocs = isearcher.search(query, 5);
        System.out.println("" + topDocs.totalHits);
        //
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        for (int i = 0; i < topDocs.totalHits; i++) {
            Document targetDoc = isearcher.doc(scoreDocs[i].doc);
            System.out.println("" + targetDoc.toString());
        }

    } catch (CorruptIndexException e) {
        e.printStackTrace();
    } catch (LockObtainFailedException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    } catch (ParseException e) {
        e.printStackTrace();
    } finally {
        if (ireader != null) {
            try {
                ireader.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        if (directory != null) {
            try {
                directory.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}

From source file: com.rocana.lucene.codec.v1.TestBlockPostingsFormat2.java

License: Apache License

@Override
public void tearDown() throws Exception {
    iw.close();
    TestUtil.checkIndex(dir); // for some extra coverage, checkIndex before we forceMerge
    IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
    iwc.setCodec(TestUtil.alwaysPostingsFormat(new RocanaLucene50PostingsFormat()));
    iwc.setOpenMode(OpenMode.APPEND);
    IndexWriter iw = new IndexWriter(dir, iwc);
    iw.forceMerge(1);
    iw.close();
    dir.close(); // just force a checkindex for now
    super.tearDown();
}

From source file: com.rocana.lucene.codec.v1.TestBlockPostingsFormat3.java

License: Apache License

public void test() throws Exception {
    Directory dir = newDirectory();
    Analyzer analyzer = new Analyzer(Analyzer.PER_FIELD_REUSE_STRATEGY) {
        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
            Tokenizer tokenizer = new MockTokenizer();
            if (fieldName.contains("payloadsFixed")) {
                TokenFilter filter = new MockFixedLengthPayloadFilter(new Random(0), tokenizer, 1);
                return new TokenStreamComponents(tokenizer, filter);
            } else if (fieldName.contains("payloadsVariable")) {
                TokenFilter filter = new MockVariableLengthPayloadFilter(new Random(0), tokenizer);
                return new TokenStreamComponents(tokenizer, filter);
            } else {
                return new TokenStreamComponents(tokenizer);
            }
        }
    };
    IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
    iwc.setCodec(TestUtil.alwaysPostingsFormat(new RocanaLucene50PostingsFormat()));
    // TODO we could actually add more fields implemented with different PFs
    // or, just put this test into the usual rotation?
    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
    Document doc = new Document();
    FieldType docsOnlyType = new FieldType(TextField.TYPE_NOT_STORED);
    // turn this on for a cross-check
    docsOnlyType.setStoreTermVectors(true);
    docsOnlyType.setIndexOptions(IndexOptions.DOCS);

    FieldType docsAndFreqsType = new FieldType(TextField.TYPE_NOT_STORED);
    // turn this on for a cross-check
    docsAndFreqsType.setStoreTermVectors(true);
    docsAndFreqsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS);

    FieldType positionsType = new FieldType(TextField.TYPE_NOT_STORED);
    // turn these on for a cross-check
    positionsType.setStoreTermVectors(true);
    positionsType.setStoreTermVectorPositions(true);
    positionsType.setStoreTermVectorOffsets(true);
    positionsType.setStoreTermVectorPayloads(true);
    FieldType offsetsType = new FieldType(positionsType);
    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
    Field field1 = new Field("field1docs", "", docsOnlyType);
    Field field2 = new Field("field2freqs", "", docsAndFreqsType);
    Field field3 = new Field("field3positions", "", positionsType);
    Field field4 = new Field("field4offsets", "", offsetsType);
    Field field5 = new Field("field5payloadsFixed", "", positionsType);
    Field field6 = new Field("field6payloadsVariable", "", positionsType);
    Field field7 = new Field("field7payloadsFixedOffsets", "", offsetsType);
    Field field8 = new Field("field8payloadsVariableOffsets", "", offsetsType);
    doc.add(field1);
    doc.add(field2);
    doc.add(field3);
    doc.add(field4);
    doc.add(field5);
    doc.add(field6);
    doc.add(field7);
    doc.add(field8);
    for (int i = 0; i < MAXDOC; i++) {
        String stringValue = Integer.toString(i) + " verycommon " + English.intToEnglish(i).replace('-', ' ')
                + " " + TestUtil.randomSimpleString(random());
        field1.setStringValue(stringValue);
        field2.setStringValue(stringValue);
        field3.setStringValue(stringValue);
        field4.setStringValue(stringValue);
        field5.setStringValue(stringValue);
        field6.setStringValue(stringValue);
        field7.setStringValue(stringValue);
        field8.setStringValue(stringValue);
        iw.addDocument(doc);
    }
    iw.close();
    verify(dir);
    TestUtil.checkIndex(dir); // for some extra coverage, checkIndex before we forceMerge
    iwc = newIndexWriterConfig(analyzer);
    iwc.setCodec(TestUtil.alwaysPostingsFormat(new RocanaLucene50PostingsFormat()));
    iwc.setOpenMode(OpenMode.APPEND);
    IndexWriter iw2 = new IndexWriter(dir, iwc);
    iw2.forceMerge(1);
    iw2.close();
    verify(dir);
    dir.close();
}

From source file: com.rocana.lucene.codec.v1.TestRocanaPerFieldPostingsFormat2.java

License: Apache License

@Test
public void testStressPerFieldCodec() throws IOException {
    Directory dir = newDirectory(random());
    final int docsPerRound = 97;
    int numRounds = atLeast(1);
    for (int i = 0; i < numRounds; i++) {
        int num = TestUtil.nextInt(random(), 30, 60);
        IndexWriterConfig config = newIndexWriterConfig(random(), new MockAnalyzer(random()));
        config.setOpenMode(OpenMode.CREATE_OR_APPEND);
        IndexWriter writer = newWriter(dir, config);
        for (int j = 0; j < docsPerRound; j++) {
            final Document doc = new Document();
            for (int k = 0; k < num; k++) {
                FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
                customType.setTokenized(random().nextBoolean());
                customType.setOmitNorms(random().nextBoolean());
                Field field = newField("" + k, TestUtil.randomRealisticUnicodeString(random(), 128),
                        customType);
                doc.add(field);
            }
            writer.addDocument(doc);
        }
        if (random().nextBoolean()) {
            writer.forceMerge(1);
        }
        writer.commit();
        assertEquals((i + 1) * docsPerRound, writer.maxDoc());
        writer.close();
    }
    dir.close();
}

From source file: com.searchcode.app.service.CodeIndexer.java

License: Open Source License

/**
 * Deletes all files that belong to a repository.
 * TODO I don't think this clears anything from the facets, which it should
 */
public synchronized void deleteByReponame(String repoName) throws IOException {
    Directory dir = FSDirectory.open(Paths
            .get(Properties.getProperties().getProperty(Values.INDEXLOCATION, Values.DEFAULTINDEXLOCATION)));

    Analyzer analyzer = new CodeAnalyzer();
    IndexWriterConfig iwc = new IndexWriterConfig(analyzer);
    iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);

    IndexWriter writer = new IndexWriter(dir, iwc);

    writer.deleteDocuments(new Term(Values.REPONAME, repoName));
    writer.close();
}