Example usage for org.apache.lucene.index DirectoryReader directory

Introduction

This page collects example usages of org.apache.lucene.index.DirectoryReader's directory, the Directory an index resides in.

Prototype

Directory directory


Document

The index directory. The examples below reach this field through the public directory() accessor, which returns the Directory the reader was opened on.
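
Before the full examples, here is a minimal sketch of retrieving the Directory from an open reader. It assumes the Lucene 4.x API used throughout this page, and the index path is a placeholder:

import java.io.File;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class DirectoryAccessExample {
    public static void main(String[] args) throws Exception {
        // Placeholder path; point it at an existing Lucene index.
        try (Directory dir = FSDirectory.open(new File("/tmp/example-index"));
                DirectoryReader reader = DirectoryReader.open(dir)) {
            // directory() returns the Directory this reader was opened on.
            Directory indexDir = reader.directory();
            System.out.println("index directory: " + indexDir);
        }
    }
}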

Usage

From source file:com.github.rnewson.couchdb.lucene.DatabaseIndexer.java

License:Apache License

public void info(final HttpServletRequest req, final HttpServletResponse resp)
        throws IOException, JSONException {
    final IndexState state = getState(req, resp);
    if (state == null)
        return;
    final DirectoryReader reader = state.borrowReader(true);
    try {
        final JSONObject result = new JSONObject();
        result.put("current", reader.isCurrent());
        result.put("disk_size", Utils.directorySize(reader.directory()));
        result.put("doc_count", reader.numDocs());
        result.put("doc_del_count", reader.numDeletedDocs());
        result.put("uuid", state.getUuid());
        result.put("digest", state.getDigest());
        result.put("update_seq", getUpdateSequence(reader.getIndexCommit().getUserData()));
        final JSONArray fields = new JSONArray();
        for (AtomicReaderContext leaf : reader.leaves()) {
            for (FieldInfo info : leaf.reader().getFieldInfos()) {
                if (info.name.startsWith("_")) {
                    continue;
                }
                if (info.isIndexed()) {
                    fields.put(info.name);
                }
            }
        }
        result.put("fields", fields);
        result.put("version", reader.getVersion());
        result.put("ref_count", reader.getRefCount());

        // Note: this wrapper object is built but never written; the raw result is sent below.
        final JSONObject info = new JSONObject();
        info.put("code", 200);
        info.put("json", result);

        ServletUtils.setResponseContentTypeAndEncoding(req, resp);
        final Writer writer = resp.getWriter();
        try {
            writer.write(result.toString());
        } finally {
            writer.close();
        }
    } finally {
        state.returnReader(reader);
    }
}
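
Utils.directorySize above is a couchdb-lucene helper, not a Lucene API. A minimal equivalent (a sketch, not the project's actual implementation) sums the length of every file the Directory lists:

import java.io.IOException;

import org.apache.lucene.store.Directory;

public final class DirectorySize {
    // Sketch of a directory-size helper: sums the length of every file
    // currently listed by the Directory.
    public static long directorySize(final Directory dir) throws IOException {
        long size = 0;
        for (final String name : dir.listAll()) {
            size += dir.fileLength(name);
        }
        return size;
    }
}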

From source file:com.lucid.solr.sidecar.SidecarIndexReader.java

License:Apache License

public SidecarIndexReader(SidecarIndexReaderFactory factory, DirectoryReader main,
        AtomicReader[] sidecarReaders, AtomicReader[] parallelReaders, String boostData, File sidecarDir)
        throws IOException {
    super(main.directory(), parallelReaders);
    assert assertSaneReaders(parallelReaders);
    //LOG.info("SidecarIndexReader: new " + this);
    this.factory = factory;
    this.main = main;
    this.parallelReaders = parallelReaders;
    this.sidecarReaders = sidecarReaders;
    //this.parallel = parallel;
    this.mainReaders = getSequentialSubReaders(main);
    resourcesLastModified = main.getVersion();
    this.version = resourcesLastModified;
    this.boostData = boostData;
    this.dir = main.directory();
    this.sidecarDir = sidecarDir;
}

From source file:com.lucid.solr.sidecar.SidecarIndexReaderFactory.java

License:Apache License

DirectoryReader buildParallelReader(DirectoryReader main, SolrIndexSearcher source, boolean rebuild) {
    try {
        if (source == null) {
            throw new Exception("Source collection is missing.");
        }
        // create as a sibling path of the main index
        Directory d = main.directory();
        File primaryDir = null;
        if (d instanceof FSDirectory) {
            String path = ((FSDirectory) d).getDirectory().getPath();
            primaryDir = new File(path);
            sidecarIndex = new File(primaryDir.getParentFile(), sidecarIndexLocation);
        } else {
            String secondaryPath = System.getProperty("java.io.tmpdir") + File.separator + sidecarIndexLocation
                    + "-" + System.currentTimeMillis();
            sidecarIndex = new File(secondaryPath);
        }
        // create a new tmp dir for the secondary indexes
        File secondaryIndex = new File(sidecarIndex, System.currentTimeMillis() + "-index");
        if (rebuild) {
            safeDelete(sidecarIndex);
        }
        parallelFields.addAll(source.getFieldNames());
        parallelFields.remove("id");
        LOG.debug("building a new index");
        Directory dir = FSDirectory.open(secondaryIndex);
        if (IndexWriter.isLocked(dir)) {
            // try forcing unlock
            try {
                IndexWriter.unlock(dir);
            } catch (Exception e) {
                LOG.warn("Failed to unlock " + secondaryIndex);
            }
        }
        int[] mergeTargets;
        AtomicReader[] subReaders = SidecarIndexReader.getSequentialSubReaders(main);
        if (subReaders == null || subReaders.length == 0) {
            mergeTargets = new int[] { main.maxDoc() };
        } else {
            mergeTargets = new int[subReaders.length];
            for (int i = 0; i < subReaders.length; i++) {
                mergeTargets[i] = subReaders[i].maxDoc();
            }
        }
        Version ver = currentCore.getLatestSchema().getDefaultLuceneMatchVersion();
        IndexWriterConfig cfg = new IndexWriterConfig(ver, currentCore.getLatestSchema().getAnalyzer());
        //cfg.setInfoStream(System.err);
        cfg.setMergeScheduler(new SerialMergeScheduler());
        cfg.setMergePolicy(new SidecarMergePolicy(mergeTargets, false));
        IndexWriter iw = new IndexWriter(dir, cfg);
        LOG.info("processing " + main.maxDoc() + " docs / " + main.numDeletedDocs() + " dels in main index");
        int boostedDocs = 0;
        Bits live = MultiFields.getLiveDocs(main);

        int targetPos = 0;
        int nextTarget = mergeTargets[targetPos];
        BytesRef idRef = new BytesRef();
        for (int i = 0; i < main.maxDoc(); i++) {
            if (i == nextTarget) {
                iw.commit();
                nextTarget = nextTarget + mergeTargets[++targetPos];
            }
            if (live != null && !live.get(i)) {
                addDummy(iw); // this is required to preserve doc numbers.
                continue;
            } else {
                DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(docIdField);
                main.document(i, visitor);
                Document doc = visitor.getDocument();
                // get docId
                String id = doc.get(docIdField);
                if (id == null) {
                    LOG.debug("missing id, docNo=" + i);
                    addDummy(iw);
                    continue;
                } else {
                    // find the data, if any
                    doc = lookup(source, id, idRef, parallelFields);
                    if (doc == null) {
                        LOG.debug("missing boost data, docId=" + id);
                        addDummy(iw);
                        continue;
                    } else {
                        LOG.debug("adding boost data, docId=" + id + ", b=" + doc);
                        iw.addDocument(doc);
                        boostedDocs++;
                    }
                }
            }
        }
        iw.close();
        DirectoryReader other = DirectoryReader.open(dir);
        LOG.info("SidecarIndexReader with " + boostedDocs + " boosted documents.");
        SidecarIndexReader pr = createSidecarIndexReader(main, other, sourceCollection, secondaryIndex);
        return pr;
    } catch (Exception e) {
        LOG.warn("Unable to build parallel index: " + e.toString(), e);
        LOG.warn("Proceeding with single main index.");
        try {
            return new SidecarIndexReader(this, main, null, SidecarIndexReader.getSequentialSubReaders(main),
                    sourceCollection, null);
        } catch (Exception e1) {
            LOG.warn("Unexpected exception, returning single main index", e1);
            return main;
        }
    }
}

From source file:org.apache.blur.command.TableCopyCommand.java

License:Apache License

private Directory getDirectory(IndexReader indexReader) throws IOException {
    if (indexReader instanceof DirectoryReader) {
        DirectoryReader reader = (DirectoryReader) indexReader;
        return getStorageDir(reader.directory());
    }
    throw new IOException("Reader is not a DirectoryReader.");
}

From source file:org.apache.blur.mapreduce.lib.BlurInputFormatSplitCommand.java

License:Apache License

private Directory getDirectory(IndexReader indexReader) {
    // Unchecked cast: callers must pass a DirectoryReader, otherwise this
    // throws ClassCastException at runtime; compare the guarded version above.
    DirectoryReader directoryReader = (DirectoryReader) indexReader;
    return directoryReader.directory();
}

From source file:org.apache.solr.handler.admin.LukeRequestHandler.java

License:Apache License

public static SimpleOrderedMap<Object> getIndexInfo(DirectoryReader reader) throws IOException {
    Directory dir = reader.directory();
    SimpleOrderedMap<Object> indexInfo = new SimpleOrderedMap<Object>();

    indexInfo.add("numDocs", reader.numDocs());
    indexInfo.add("maxDoc", reader.maxDoc());
    indexInfo.add("deletedDocs", reader.maxDoc() - reader.numDocs());
    indexInfo.add("indexHeapUsageBytes", getIndexHeapUsed(reader));

    indexInfo.add("version", reader.getVersion()); // TODO? Is this different then: IndexReader.getCurrentVersion( dir )?
    indexInfo.add("segmentCount", reader.leaves().size());
    indexInfo.add("current", reader.isCurrent());
    indexInfo.add("hasDeletions", reader.hasDeletions());
    indexInfo.add("directory", dir);
    indexInfo.add("userData", reader.getIndexCommit().getUserData());
    String s = reader.getIndexCommit().getUserData().get(SolrIndexWriter.COMMIT_TIME_MSEC_KEY);
    if (s != null) {
        indexInfo.add("lastModified", new Date(Long.parseLong(s)));
    }
    return indexInfo;
}

From source file:org.apache.solr.handler.component.AlfrescoLukeRequestHandler.java

License:Open Source License

public static SimpleOrderedMap<Object> getIndexInfo(DirectoryReader reader) throws IOException {
    Directory dir = reader.directory();
    SimpleOrderedMap<Object> indexInfo = new SimpleOrderedMap<>();

    indexInfo.add("numDocs", reader.numDocs());
    indexInfo.add("maxDoc", reader.maxDoc());
    indexInfo.add("deletedDocs", reader.maxDoc() - reader.numDocs());
    indexInfo.add("indexHeapUsageBytes", getIndexHeapUsed(reader));

    indexInfo.add("version", reader.getVersion()); // TODO? Is this
    // different then:
    // IndexReader.getCurrentVersion(
    // dir )?//from w  ww. ja  v  a  2s  . c o m
    indexInfo.add("segmentCount", reader.leaves().size());
    indexInfo.add("current", reader.isCurrent());
    indexInfo.add("hasDeletions", reader.hasDeletions());
    indexInfo.add("directory", dir);
    indexInfo.add("userData", reader.getIndexCommit().getUserData());
    String s = reader.getIndexCommit().getUserData().get(SolrIndexWriter.COMMIT_TIME_MSEC_KEY);
    if (s != null) {
        indexInfo.add("lastModified", new Date(Long.parseLong(s)));
    }
    return indexInfo;
}

From source file:org.apache.solr.handler.ReplicationHandler.java

License:Apache License

@Override
@SuppressWarnings("unchecked")
public void inform(SolrCore core) {
    this.core = core;
    registerFileStreamResponseWriter();
    registerCloseHook();
    Object nbtk = initArgs.get(NUMBER_BACKUPS_TO_KEEP_INIT_PARAM);
    if (nbtk != null) {
        numberBackupsToKeep = Integer.parseInt(nbtk.toString());
    } else {
        numberBackupsToKeep = 0;
    }
    NamedList slave = (NamedList) initArgs.get("slave");
    boolean enableSlave = isEnabled(slave);
    if (enableSlave) {
        tempSnapPuller = snapPuller = new SnapPuller(slave, this, core);
        isSlave = true;
    }
    NamedList master = (NamedList) initArgs.get("master");
    boolean enableMaster = isEnabled(master);

    if (enableMaster || enableSlave) {
        if (core.getCoreDescriptor().getCoreContainer().getZkController() != null) {
            LOG.warn("SolrCloud is enabled for core " + core.getName()
                    + " but so is old-style replication. Make sure you"
                    + " intend this behavior, it usually indicates a mis-configuration. Master setting is "
                    + Boolean.toString(enableMaster) + " and slave setting is "
                    + Boolean.toString(enableSlave));
        }
    }

    if (!enableSlave && !enableMaster) {
        enableMaster = true;
        master = new NamedList<Object>();
    }

    if (enableMaster) {
        includeConfFiles = (String) master.get(CONF_FILES);
        if (includeConfFiles != null && includeConfFiles.trim().length() > 0) {
            List<String> files = Arrays.asList(includeConfFiles.split(","));
            for (String file : files) {
                if (file.trim().length() == 0)
                    continue;
                String[] strs = file.trim().split(":");
                // if there is an alias add it or it is null
                confFileNameAlias.add(strs[0], strs.length > 1 ? strs[1] : null);
            }
            LOG.info("Replication enabled for following config files: " + includeConfFiles);
        }
        List backup = master.getAll("backupAfter");
        boolean backupOnCommit = backup.contains("commit");
        boolean backupOnOptimize = !backupOnCommit && backup.contains("optimize");
        List replicateAfter = master.getAll(REPLICATE_AFTER);
        replicateOnCommit = replicateAfter.contains("commit");
        replicateOnOptimize = !replicateOnCommit && replicateAfter.contains("optimize");

        if (!replicateOnCommit && !replicateOnOptimize) {
            replicateOnCommit = true;
        }

        // if we only want to replicate on optimize, we need the deletion policy to
        // save the last optimized commit point.
        if (replicateOnOptimize) {
            IndexDeletionPolicyWrapper wrapper = core.getDeletionPolicy();
            IndexDeletionPolicy policy = wrapper == null ? null : wrapper.getWrappedDeletionPolicy();
            if (policy instanceof SolrDeletionPolicy) {
                SolrDeletionPolicy solrPolicy = (SolrDeletionPolicy) policy;
                if (solrPolicy.getMaxOptimizedCommitsToKeep() < 1) {
                    solrPolicy.setMaxOptimizedCommitsToKeep(1);
                }
            } else {
                LOG.warn("Replication can't call setMaxOptimizedCommitsToKeep on " + policy);
            }
        }

        if (replicateOnOptimize || backupOnOptimize) {
            core.getUpdateHandler()
                    .registerOptimizeCallback(getEventListener(backupOnOptimize, replicateOnOptimize));
        }
        if (replicateOnCommit || backupOnCommit) {
            replicateOnCommit = true;
            core.getUpdateHandler().registerCommitCallback(getEventListener(backupOnCommit, replicateOnCommit));
        }
        if (replicateAfter.contains("startup")) {
            replicateOnStart = true;
            RefCounted<SolrIndexSearcher> s = core.getNewestSearcher(false);
            try {
                DirectoryReader reader = s == null ? null : s.get().getIndexReader();
                if (reader != null && reader.getIndexCommit() != null
                        && reader.getIndexCommit().getGeneration() != 1L) {
                    try {
                        if (replicateOnOptimize) {
                            Collection<IndexCommit> commits = DirectoryReader.listCommits(reader.directory());
                            for (IndexCommit ic : commits) {
                                if (ic.getSegmentCount() == 1) {
                                    if (indexCommitPoint == null
                                            || indexCommitPoint.getGeneration() < ic.getGeneration())
                                        indexCommitPoint = ic;
                                }
                            }
                        } else {
                            indexCommitPoint = reader.getIndexCommit();
                        }
                    } finally {
                        // We don't need to save commit points for replication, the SolrDeletionPolicy
                        // always saves the last commit point (and the last optimized commit point, if needed)
                        /***
                        if(indexCommitPoint != null){
                          core.getDeletionPolicy().saveCommitPoint(indexCommitPoint.getGeneration());
                        }
                        ***/
                    }
                }

                // ensure the writer is init'd so that we have a list of commit points
                RefCounted<IndexWriter> iw = core.getUpdateHandler().getSolrCoreState().getIndexWriter(core);
                iw.decref();

            } catch (IOException e) {
                LOG.warn("Unable to get IndexCommit on startup", e);
            } finally {
                if (s != null)
                    s.decref();
            }
        }
        String reserve = (String) master.get(RESERVE);
        if (reserve != null && !reserve.trim().equals("")) {
            reserveCommitDuration = SnapPuller.readInterval(reserve);
        }
        LOG.info("Commits will be reserved for  " + reserveCommitDuration);
        isMaster = true;
    }
}
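
The directory() call near the end of this example feeds DirectoryReader.listCommits, which returns the commit points the deletion policy has retained. In isolation (a sketch; the index path is a placeholder):

import java.io.File;
import java.util.List;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class ListCommitsExample {
    public static void main(String[] args) throws Exception {
        try (Directory dir = FSDirectory.open(new File("/tmp/example-index"));
                DirectoryReader reader = DirectoryReader.open(dir)) {
            // listCommits returns every commit point the deletion policy has
            // kept, oldest first; a freshly created index has exactly one.
            List<IndexCommit> commits = DirectoryReader.listCommits(reader.directory());
            for (IndexCommit commit : commits) {
                System.out.println("generation=" + commit.getGeneration()
                        + " segments=" + commit.getSegmentCount());
            }
        }
    }
}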

From source file:org.apache.solr.update.DirectUpdateHandlerTest.java

License:Apache License

@Test
public void testPrepareCommit() throws Exception {
    assertU(adoc("id", "999"));
    assertU(optimize()); // make sure there's just one segment
    assertU(commit()); // commit a second time to make sure index files aren't still referenced by the old searcher

    SolrQueryRequest sr = req();
    DirectoryReader r = sr.getSearcher().getIndexReader();
    Directory d = r.directory();

    log.info("FILES before addDoc=" + Arrays.asList(d.listAll()));
    assertU(adoc("id", "1"));

    int nFiles = d.listAll().length;
    log.info("FILES before prepareCommit=" + Arrays.asList(d.listAll()));

    updateJ("", params("prepareCommit", "true"));

    log.info("FILES after prepareCommit=" + Arrays.asList(d.listAll()));
    assertTrue(d.listAll().length > nFiles); // make sure new index files were actually written

    assertJQ(req("q", "id:1"), "/response/numFound==0");

    updateJ("", params("rollback", "true"));
    assertU(commit());

    assertJQ(req("q", "id:1"), "/response/numFound==0");

    assertU(adoc("id", "1"));
    updateJ("", params("prepareCommit", "true"));

    assertJQ(req("q", "id:1"), "/response/numFound==0");

    assertU(commit());

    assertJQ(req("q", "id:1"), "/response/numFound==1");

    sr.close();
}

From source file:org.apache.solr.update.MergeIndexesCommand.java

License:Apache License

@Override
public String toString() {
    StringBuilder sb = new StringBuilder(super.toString());
    Joiner joiner = Joiner.on(",");
    Iterable<String> directories = Iterables.transform(readers, new Function<DirectoryReader, String>() {
        public String apply(DirectoryReader reader) {
            return reader.directory().toString();
        }
    });
    joiner.skipNulls().join(sb, directories); // note: depending on the Guava version, appendTo(sb, parts) may be the variant that actually writes into the builder
    sb.append('}');
    return sb.toString();
}