Example usage for org.apache.lucene.index DirectoryReader getIndexCommit

Introduction

On this page you can find example usages of org.apache.lucene.index.DirectoryReader.getIndexCommit().

Prototype

public abstract IndexCommit getIndexCommit() throws IOException;

Document

Expert: return the IndexCommit that this reader has opened.
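Before the project examples below, here is a minimal, self-contained sketch of the call. It is an illustration only: it assumes a Lucene 5.x-or-later API (where FSDirectory.open takes a java.nio.file.Path) and a hypothetical existing index at /path/to/index.

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class GetIndexCommitExample {
    public static void main(String[] args) throws IOException {
        // Hypothetical path; point this at an existing Lucene index.
        try (Directory dir = FSDirectory.open(Paths.get("/path/to/index"));
             DirectoryReader reader = DirectoryReader.open(dir)) {
            // The commit point this reader was opened on.
            IndexCommit commit = reader.getIndexCommit();
            System.out.println("generation = " + commit.getGeneration());
            System.out.println("segments file = " + commit.getSegmentsFileName());
            System.out.println("user data = " + commit.getUserData());
        }
    }
}

As the examples below show, the returned IndexCommit is most often used for its generation (to track or pin a commit point) and for its user-data map (to read metadata stored at commit time via the IndexWriter).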

Usage

From source file:com.github.flaxsearch.api.IndexData.java

License:Apache License

public IndexData(String indexpath, ReaderManager readerManager) throws IOException {
    this.indexpath = indexpath;
    DirectoryReader reader = (DirectoryReader) readerManager.getIndexReader();
    this.generation = reader.getIndexCommit().getGeneration();
    this.numDocs = reader.numDocs();
    this.numDeletedDocs = reader.numDeletedDocs();

    segments = new ArrayList<>();
    for (LeafReaderContext ctx : readerManager.getIndexReader().leaves()) {
        segments.add(new SegmentData(ctx));
    }
}

From source file:com.github.rnewson.couchdb.lucene.DatabaseIndexer.java

License:Apache License

public void info(final HttpServletRequest req, final HttpServletResponse resp)
        throws IOException, JSONException {
    final IndexState state = getState(req, resp);
    if (state == null)
        return;
    final DirectoryReader reader = state.borrowReader(true);
    try {
        final JSONObject result = new JSONObject();
        result.put("current", reader.isCurrent());
        result.put("disk_size", Utils.directorySize(reader.directory()));
        result.put("doc_count", reader.numDocs());
        result.put("doc_del_count", reader.numDeletedDocs());
        result.put("uuid", state.getUuid());
        result.put("digest", state.getDigest());
        result.put("update_seq", getUpdateSequence(reader.getIndexCommit().getUserData()));
        final JSONArray fields = new JSONArray();
        for (AtomicReaderContext leaf : reader.leaves()) {
            for (FieldInfo info : leaf.reader().getFieldInfos()) {
                if (info.name.startsWith("_")) {
                    continue;
                }
                if (info.isIndexed()) {
                    fields.put(info.name);
                }
            }
        }
        result.put("fields", fields);
        result.put("version", reader.getVersion());
        result.put("ref_count", reader.getRefCount());

        final JSONObject info = new JSONObject();
        info.put("code", 200);
        info.put("json", result);

        ServletUtils.setResponseContentTypeAndEncoding(req, resp);
        final Writer writer = resp.getWriter();
        try {
            writer.write(result.toString());
        } finally {
            writer.close();
        }
    } finally {
        state.returnReader(reader);
    }
}

From source file:dk.dma.msinm.lucene.AbstractLuceneIndex.java

License:Open Source License

/**
 * Returns the last updated time
 * @return the last updated time
 */
private Date getLastUpdated() {
    try {
        DirectoryReader reader = getIndexReader();
        if (reader.getIndexCommit().getUserData().containsKey(LAST_UPDATE)) {
            return new Date(Long.valueOf(reader.getIndexCommit().getUserData().get(LAST_UPDATE)));
        }
    } catch (Exception e) {
        log.debug("Could not get last-updated flag from index reader");
    }
    return new Date(0);
}

From source file:dk.dma.msinm.lucene.CommitUserDataTest.java

License:Open Source License

@Test
public void test() throws IOException {

    File indexFolder = Files.createTempDir();
    Directory directory = FSDirectory.open(indexFolder);

    // Create an index writer
    IndexWriterConfig iwc = new IndexWriterConfig(LuceneUtils.LUCENE_VERSION,
            new StandardAnalyzer(LuceneUtils.LUCENE_VERSION));
    iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    IndexWriter indexWriter = new IndexWriter(directory, iwc);

    // Write a document
    Document doc = new Document();
    doc.add(new IntField("id", 100, Field.Store.YES));
    indexWriter.addDocument(doc);

    // Add user data
    Map<String, String> userData = new HashMap<>();
    userData.put("A", "B");
    indexWriter.setCommitData(userData);
    indexWriter.close();

    // Check if we can read user data
    DirectoryReader indexReader = DirectoryReader.open(FSDirectory.open(indexFolder));
    assertEquals("B", indexReader.getIndexCommit().getUserData().get("A"));

}

From source file:org.apache.blur.index.IndexDeletionPolicyReader.java

License:Apache License

public DirectoryReader register(DirectoryReader reader) throws IOException {
    final long generation = reader.getIndexCommit().getGeneration();
    register(generation);
    reader.addReaderClosedListener(new ReaderClosedListener() {
        @Override
        public void onClose(IndexReader reader) {
            unregister(generation);
        }
    });
    return reader;
}

From source file:org.apache.blur.manager.writer.SnapshotIndexDeletionPolicy.java

License:Apache License

public void createSnapshot(String name, DirectoryReader reader, String context) throws IOException {
    if (_namesToGenerations.containsKey(name)) {
        throw new IOException("Snapshot [" + name + "] already exists.");
    }
    LOG.info("Creating snapshot [{0}] in [{1}].", name, context);
    IndexCommit indexCommit = reader.getIndexCommit();
    long generation = indexCommit.getGeneration();
    _namesToGenerations.put(name, generation);
    Set<String> names = _generationsToNames.get(generation);
    if (names == null) {
        names = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
        _generationsToNames.put(generation, names);
    }
    names.add(name);
    storeGenerations();
}

From source file:org.apache.blur.store.BaseDirectoryTestSuite.java

License:Apache License

@Test
public void testCreateIndex() throws IOException {
    long s = System.nanoTime();
    IndexWriterConfig conf = new IndexWriterConfig(LuceneVersionConstant.LUCENE_VERSION, new KeywordAnalyzer());
    IndexDeletionPolicyReader indexDeletionPolicy = new IndexDeletionPolicyReader(
            new KeepOnlyLastCommitDeletionPolicy());
    conf.setIndexDeletionPolicy(indexDeletionPolicy);
    FSDirectory control = FSDirectory.open(fileControl);
    Directory dir = getControlDir(control, directory);
    // The serial merge scheduler can be useful for debugging.
    // conf.setMergeScheduler(new SerialMergeScheduler());
    IndexWriter writer = new IndexWriter(dir, conf);
    int numDocs = 1000;
    DirectoryReader reader = null;
    long gen = 0;
    for (int i = 0; i < 100; i++) {
        if (reader == null) {
            reader = DirectoryReader.open(writer, true);
            gen = reader.getIndexCommit().getGeneration();
            indexDeletionPolicy.register(gen);
        } else {
            DirectoryReader old = reader;
            reader = DirectoryReader.openIfChanged(old, writer, true);
            if (reader == null) {
                reader = old;
            } else {
                long newGen = reader.getIndexCommit().getGeneration();
                indexDeletionPolicy.register(newGen);
                indexDeletionPolicy.unregister(gen);
                old.close();
                gen = newGen;
            }
        }
        assertEquals(i * numDocs, reader.numDocs());
        IndexSearcher searcher = new IndexSearcher(reader);
        NumericRangeQuery<Integer> query = NumericRangeQuery.newIntRange("id", 42, 42, true, true);
        TopDocs topDocs = searcher.search(query, 10);
        assertEquals(i, topDocs.totalHits);
        addDocuments(writer, numDocs);
    }
    writer.close(false);
    reader.close();
    long e = System.nanoTime();
    System.out.println("Total time [" + (e - s) / 1000000.0 + " ms]");
}

From source file:org.apache.solr.handler.admin.LukeRequestHandler.java

License:Apache License

public static SimpleOrderedMap<Object> getIndexInfo(DirectoryReader reader) throws IOException {
    Directory dir = reader.directory();
    SimpleOrderedMap<Object> indexInfo = new SimpleOrderedMap<Object>();

    indexInfo.add("numDocs", reader.numDocs());
    indexInfo.add("maxDoc", reader.maxDoc());
    indexInfo.add("deletedDocs", reader.maxDoc() - reader.numDocs());
    indexInfo.add("indexHeapUsageBytes", getIndexHeapUsed(reader));

    indexInfo.add("version", reader.getVersion()); // TODO? Is this different then: IndexReader.getCurrentVersion( dir )?
    indexInfo.add("segmentCount", reader.leaves().size());
    indexInfo.add("current", reader.isCurrent());
    indexInfo.add("hasDeletions", reader.hasDeletions());
    indexInfo.add("directory", dir);
    indexInfo.add("userData", reader.getIndexCommit().getUserData());
    String s = reader.getIndexCommit().getUserData().get(SolrIndexWriter.COMMIT_TIME_MSEC_KEY);
    if (s != null) {
        indexInfo.add("lastModified", new Date(Long.parseLong(s)));
    }
    return indexInfo;
}

From source file:org.apache.solr.handler.component.AlfrescoLukeRequestHandler.java

License:Open Source License

public static SimpleOrderedMap<Object> getIndexInfo(DirectoryReader reader) throws IOException {
    Directory dir = reader.directory();
    SimpleOrderedMap<Object> indexInfo = new SimpleOrderedMap<>();

    indexInfo.add("numDocs", reader.numDocs());
    indexInfo.add("maxDoc", reader.maxDoc());
    indexInfo.add("deletedDocs", reader.maxDoc() - reader.numDocs());
    indexInfo.add("indexHeapUsageBytes", getIndexHeapUsed(reader));

    indexInfo.add("version", reader.getVersion()); // TODO? Is this
    // different then:
    // IndexReader.getCurrentVersion(
    // dir )?
    indexInfo.add("segmentCount", reader.leaves().size());
    indexInfo.add("current", reader.isCurrent());
    indexInfo.add("hasDeletions", reader.hasDeletions());
    indexInfo.add("directory", dir);
    indexInfo.add("userData", reader.getIndexCommit().getUserData());
    String s = reader.getIndexCommit().getUserData().get(SolrIndexWriter.COMMIT_TIME_MSEC_KEY);
    if (s != null) {
        indexInfo.add("lastModified", new Date(Long.parseLong(s)));
    }
    return indexInfo;
}

From source file:org.apache.solr.handler.ReplicationHandler.java

License:Apache License

@Override
@SuppressWarnings("unchecked")
public void inform(SolrCore core) {
    this.core = core;
    registerFileStreamResponseWriter();
    registerCloseHook();
    Object nbtk = initArgs.get(NUMBER_BACKUPS_TO_KEEP_INIT_PARAM);
    if (nbtk != null) {
        numberBackupsToKeep = Integer.parseInt(nbtk.toString());
    } else {
        numberBackupsToKeep = 0;
    }
    NamedList slave = (NamedList) initArgs.get("slave");
    boolean enableSlave = isEnabled(slave);
    if (enableSlave) {
        tempSnapPuller = snapPuller = new SnapPuller(slave, this, core);
        isSlave = true;
    }
    NamedList master = (NamedList) initArgs.get("master");
    boolean enableMaster = isEnabled(master);

    if (enableMaster || enableSlave) {
        if (core.getCoreDescriptor().getCoreContainer().getZkController() != null) {
            LOG.warn("SolrCloud is enabled for core " + core.getName()
                    + " but so is old-style replication. Make sure you"
                    + " intend this behavior, it usually indicates a mis-configuration. Master setting is "
                    + Boolean.toString(enableMaster) + " and slave setting is "
                    + Boolean.toString(enableSlave));
        }
    }

    if (!enableSlave && !enableMaster) {
        enableMaster = true;
        master = new NamedList<Object>();
    }

    if (enableMaster) {
        includeConfFiles = (String) master.get(CONF_FILES);
        if (includeConfFiles != null && includeConfFiles.trim().length() > 0) {
            List<String> files = Arrays.asList(includeConfFiles.split(","));
            for (String file : files) {
                if (file.trim().length() == 0)
                    continue;
                String[] strs = file.trim().split(":");
                // add the alias if one was given; otherwise store null
                confFileNameAlias.add(strs[0], strs.length > 1 ? strs[1] : null);
            }
            LOG.info("Replication enabled for following config files: " + includeConfFiles);
        }
        List backup = master.getAll("backupAfter");
        boolean backupOnCommit = backup.contains("commit");
        boolean backupOnOptimize = !backupOnCommit && backup.contains("optimize");
        List replicateAfter = master.getAll(REPLICATE_AFTER);
        replicateOnCommit = replicateAfter.contains("commit");
        replicateOnOptimize = !replicateOnCommit && replicateAfter.contains("optimize");

        if (!replicateOnCommit && !replicateOnOptimize) {
            replicateOnCommit = true;
        }

        // if we only want to replicate on optimize, we need the deletion policy to
        // save the last optimized commit point.
        if (replicateOnOptimize) {
            IndexDeletionPolicyWrapper wrapper = core.getDeletionPolicy();
            IndexDeletionPolicy policy = wrapper == null ? null : wrapper.getWrappedDeletionPolicy();
            if (policy instanceof SolrDeletionPolicy) {
                SolrDeletionPolicy solrPolicy = (SolrDeletionPolicy) policy;
                if (solrPolicy.getMaxOptimizedCommitsToKeep() < 1) {
                    solrPolicy.setMaxOptimizedCommitsToKeep(1);
                }
            } else {
                LOG.warn("Replication can't call setMaxOptimizedCommitsToKeep on " + policy);
            }
        }

        if (replicateOnOptimize || backupOnOptimize) {
            core.getUpdateHandler()
                    .registerOptimizeCallback(getEventListener(backupOnOptimize, replicateOnOptimize));
        }
        if (replicateOnCommit || backupOnCommit) {
            replicateOnCommit = true;
            core.getUpdateHandler().registerCommitCallback(getEventListener(backupOnCommit, replicateOnCommit));
        }
        if (replicateAfter.contains("startup")) {
            replicateOnStart = true;
            RefCounted<SolrIndexSearcher> s = core.getNewestSearcher(false);
            try {
                DirectoryReader reader = s == null ? null : s.get().getIndexReader();
                if (reader != null && reader.getIndexCommit() != null
                        && reader.getIndexCommit().getGeneration() != 1L) {
                    try {
                        if (replicateOnOptimize) {
                            Collection<IndexCommit> commits = DirectoryReader.listCommits(reader.directory());
                            for (IndexCommit ic : commits) {
                                if (ic.getSegmentCount() == 1) {
                                    if (indexCommitPoint == null
                                            || indexCommitPoint.getGeneration() < ic.getGeneration())
                                        indexCommitPoint = ic;
                                }
                            }
                        } else {
                            indexCommitPoint = reader.getIndexCommit();
                        }
                    } finally {
                        // We don't need to save commit points for replication, the SolrDeletionPolicy
                        // always saves the last commit point (and the last optimized commit point, if needed)
                        /***
                        if(indexCommitPoint != null){
                          core.getDeletionPolicy().saveCommitPoint(indexCommitPoint.getGeneration());
                        }
                        ***/
                    }
                }

                // ensure the writer is init'd so that we have a list of commit points
                RefCounted<IndexWriter> iw = core.getUpdateHandler().getSolrCoreState().getIndexWriter(core);
                iw.decref();

            } catch (IOException e) {
                LOG.warn("Unable to get IndexCommit on startup", e);
            } finally {
                if (s != null)
                    s.decref();
            }
        }
        String reserve = (String) master.get(RESERVE);
        if (reserve != null && !reserve.trim().equals("")) {
            reserveCommitDuration = SnapPuller.readInterval(reserve);
        }
        LOG.info("Commits will be reserved for  " + reserveCommitDuration);
        isMaster = true;
    }
}