Example usage for org.apache.lucene.index SegmentReader getSegmentName

List of usage examples for org.apache.lucene.index SegmentReader getSegmentName

Introduction

On this page you can find example usage for org.apache.lucene.index SegmentReader getSegmentName.

Prototype

public String getSegmentName() 

Document

Return the name of the segment this reader is reading.
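
The snippets on this page target the Lucene 4.x API, where the leaves of a composite reader are AtomicReaders and, for an on-disk index, typically SegmentReader instances. As a minimal, hypothetical sketch (the class name and index path are placeholders, not taken from any of the projects below), the following program opens an index, walks the reader's leaves, and prints each segment's name:

import java.io.File;
import java.io.IOException;

import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.SegmentReader;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class SegmentNameExample {
    public static void main(String[] args) throws IOException {
        // placeholder path; point this at an existing Lucene 4.x index
        Directory directory = FSDirectory.open(new File("/path/to/index"));
        DirectoryReader reader = DirectoryReader.open(directory);
        try {
            for (AtomicReaderContext ctx : reader.leaves()) {
                AtomicReader atomicReader = ctx.reader();
                // leaves of a reader over an on-disk index are usually SegmentReaders
                if (atomicReader instanceof SegmentReader) {
                    SegmentReader segmentReader = (SegmentReader) atomicReader;
                    System.out.println(segmentReader.getSegmentName());
                }
            }
        } finally {
            reader.close();
            directory.close();
        }
    }
}

As the examples below show, matching leaves by segment name is a common way to correlate per-segment data structures (geo files, caches, traces) with the reader that produced them.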

Usage

From source file:cn.hbu.cs.esearch.document.UIDFilter.java

License:Apache License

@Override
public DocIdSet getDocIdSet(AtomicReaderContext ctx, Bits acceptDocs) throws IOException {
    SegmentReader reader = (SegmentReader) (ctx.reader());
    int idx = -1;
    for (int i = 0; i < subReaders.length; ++i) {
        if (subReaders[i].getSegmentName().equals(reader.getSegmentName())) {
            idx = i;
            break;
        }
    }
    if (idx == -1) {
        throw new IOException("Can't find sub-reader");
    }
    return new UIDDocIdSet(filteredIDs, subReaders[idx].getDocIDMapper());
}

From source file:com.browseengine.bobo.geosearch.index.impl.GeoIndexReader.java

License:Apache License

private List<GeoSegmentReader<CartesianGeoRecord>> buildGeoSegmentReaders(GeoSearchConfig geoSearchConfig)
        throws IOException {
    geoSegmentReaders = new ArrayList<GeoSegmentReader<CartesianGeoRecord>>();
    if (subGeoReaders == null || subGeoReaders.size() == 0) {
        if (in instanceof SegmentReader) {
            SegmentReader segmentReader = (SegmentReader) in;
            int maxDoc = segmentReader.maxDoc();
            String segmentName = segmentReader.getSegmentName();
            String geoSegmentName = geoSearchConfig.getGeoFileName(segmentName);
            GeoSegmentReader<CartesianGeoRecord> geoSegmentReader = new GeoSegmentReader<CartesianGeoRecord>(
                    directory(), geoSegmentName, maxDoc, DEFAULT_BUFFER_SIZE_PER_SEGMENT, geoRecordSerializer,
                    geoRecordComparator);
            geoSegmentReaders.add(geoSegmentReader);
        }
    } else {
        for (GeoIndexReader subReader : subGeoReaders) {
            for (GeoSegmentReader<CartesianGeoRecord> geoSegmentReader : subReader.getGeoSegmentReaders()) {
                geoSegmentReaders.add(geoSegmentReader);
            }
        }
    }

    return geoSegmentReaders;
}

From source file:com.browseengine.bobo.geosearch.merge.impl.BufferedGeoMerger.java

License:Apache License

@Override
//TODO:  Handle more frequent checkAborts
public void merge(IGeoMergeInfo geoMergeInfo, GeoSearchConfig config) throws IOException {
    IGeoConverter geoConverter = config.getGeoConverter();
    int bufferSizePerGeoReader = config.getBufferSizePerGeoSegmentReader();

    Directory directory = geoMergeInfo.getDirectory();
    List<SegmentReader> readers = geoMergeInfo.getReaders();
    List<SegmentInfo> segments = geoMergeInfo.getSegmentsToMerge();

    List<BTree<CartesianGeoRecord>> mergeInputBTrees = new ArrayList<BTree<CartesianGeoRecord>>(
            segments.size());
    List<BitVector> deletedDocsList = new ArrayList<BitVector>(segments.size());
    boolean success = false;
    try {
        assert (readers.size() == segments.size());

        IFieldNameFilterConverter fieldNameFilterConverter = config.getGeoConverter()
                .makeFieldNameFilterConverter();

        boolean hasFieldNameFilterConverter = false;
        for (SegmentReader reader : readers) {
            String geoFileName = config.getGeoFileName(reader.getSegmentName());

            BTree<CartesianGeoRecord> segmentBTree = getInputBTree(directory, geoFileName,
                    bufferSizePerGeoReader);
            mergeInputBTrees.add(segmentBTree);

            BitVector deletedDocs = buildDeletedDocsForSegment(reader);
            deletedDocsList.add(deletedDocs);

            //just take the first fieldNameFilterConverter for now.  Don't worry about merging them.
            if (!hasFieldNameFilterConverter) {
                hasFieldNameFilterConverter = loadFieldNameFilterConverter(directory, geoFileName,
                        fieldNameFilterConverter);
            }
        }

        if (!hasFieldNameFilterConverter) {
            // we are merging a bunch of segments, none of which have a corresponding .geo file
            // so there is nothing to do, it is okay if the outcome of this merge continues to 
            // not have a .geo file.
            LOGGER.warn("nothing to do during geo merge, no .geo files found for segments");
            success = true;
            return;
        }

        int newSegmentSize = calculateMergedSegmentSize(deletedDocsList, mergeInputBTrees, geoConverter);

        buildMergedSegment(mergeInputBTrees, deletedDocsList, newSegmentSize, geoMergeInfo, config,
                fieldNameFilterConverter);
        success = true;

    } finally {
        // see https://issues.apache.org/jira/browse/LUCENE-3405
        if (success) {
            IOUtils.close(mergeInputBTrees);
        } else {
            IOUtils.closeWhileHandlingException(mergeInputBTrees);
        }
    }
}

From source file:org.apache.blur.filter.FilterCache.java

License:Apache License

@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
    AtomicReader reader = context.reader();
    Object key = reader.getCoreCacheKey();
    DocIdSet docIdSet = _cache.get(key);
    if (docIdSet != null) {
        _hits.incrementAndGet();
        return BitsFilteredDocIdSet.wrap(docIdSet, acceptDocs);
    }
    // This will only allow a single instance to be created per reader per filter
    Object lock = getLock(key);
    synchronized (lock) {
        SegmentReader segmentReader = getSegmentReader(reader);
        if (segmentReader == null) {
            LOG.warn("Could not find SegmentReader from [{0}]", reader);
            return _filter.getDocIdSet(context, acceptDocs);
        }
        Directory directory = getDirectory(segmentReader);
        if (directory == null) {
            LOG.warn("Could not find Directory from [{0}]", segmentReader);
            return _filter.getDocIdSet(context, acceptDocs);
        }
        _misses.incrementAndGet();
        String segmentName = segmentReader.getSegmentName();
        docIdSet = docIdSetToCache(_filter.getDocIdSet(context, null), reader, segmentName, directory);
        _cache.put(key, docIdSet);
        return BitsFilteredDocIdSet.wrap(docIdSet, acceptDocs);
    }
}

From source file:org.apache.blur.lucene.warmup.IndexTracer.java

License:Apache License

public void initTrace(SegmentReader segmentReader, String field, boolean hasPositions, boolean hasPayloads,
        boolean hasOffsets) {
    _segmentReader = segmentReader;
    _hasPositions = hasPositions;
    _hasPayloads = hasPayloads;
    _hasOffsets = hasOffsets;
    _liveDocs = _segmentReader.getLiveDocs();
    _result = new IndexTracerResult(segmentReader.getSegmentName(), field);
}

From source file:org.apache.blur.lucene.warmup.IndexWarmup.java

License:Apache License

private Directory getDirectory(IndexReader reader, String segmentName, String context) {
    if (reader instanceof AtomicReader) {
        return getDirectory((AtomicReader) reader, segmentName, context);
    }
    for (IndexReaderContext ctext : reader.getContext().leaves()) {
        if (_isClosed.get()) {
            LOG.info("Context [{0}] index closed", context);
            return null;
        }
        AtomicReaderContext atomicReaderContext = (AtomicReaderContext) ctext;
        AtomicReader atomicReader = atomicReaderContext.reader();
        if (atomicReader instanceof SegmentReader) {
            SegmentReader segmentReader = (SegmentReader) atomicReader;
            if (segmentReader.getSegmentName().equals(segmentName)) {
                return segmentReader.directory();
            }
        }
    }
    return null;
}

From source file:org.apache.blur.lucene.warmup.IndexWarmup.java

License:Apache License

private Directory getDirectory(AtomicReader atomicReader, String segmentName, String context) {
    if (atomicReader instanceof SegmentReader) {
        SegmentReader segmentReader = (SegmentReader) atomicReader;
        if (segmentReader.getSegmentName().equals(segmentName)) {
            return segmentReader.directory();
        }
    }
    return null;
}

From source file:org.apache.blur.lucene.warmup.IndexWarmup.java

License:Apache License

public Map<String, List<IndexTracerResult>> sampleIndex(AtomicReader atomicReader, String context)
        throws IOException {
    Map<String, List<IndexTracerResult>> results = new HashMap<String, List<IndexTracerResult>>();
    if (atomicReader instanceof SegmentReader) {
        SegmentReader segmentReader = (SegmentReader) atomicReader;
        Directory directory = segmentReader.directory();
        if (!(directory instanceof TraceableDirectory)) {
            LOG.info("Context [{1}] cannot warmup directory [{0}] needs to be a TraceableDirectory.", directory,
                    context);
            return results;
        }
        IndexTracer tracer = new IndexTracer((TraceableDirectory) directory, _maxSampleSize);
        String fileName = getSampleFileName(segmentReader.getSegmentName());
        List<IndexTracerResult> segmentTraces = new ArrayList<IndexTracerResult>();
        if (directory.fileExists(fileName)) {
            IndexInput input = directory.openInput(fileName, IOContext.READONCE);
            segmentTraces = read(input);
            input.close();
        } else {
            Fields fields = atomicReader.fields();
            for (String field : fields) {
                LOG.debug("Context [{1}] sampling field [{0}].", field, context);
                Terms terms = fields.terms(field);
                boolean hasOffsets = terms.hasOffsets();
                boolean hasPayloads = terms.hasPayloads();
                boolean hasPositions = terms.hasPositions();

                tracer.initTrace(segmentReader, field, hasPositions, hasPayloads, hasOffsets);
                IndexTracerResult result = tracer.runTrace(terms);
                segmentTraces.add(result);
            }
            if (_isClosed.get()) {
                LOG.info("Context [{0}] index closed", context);
                return null;
            }
            IndexOutput output = directory.createOutput(fileName, IOContext.DEFAULT);
            write(segmentTraces, output);
            output.close();
        }
        results.put(segmentReader.getSegmentName(), segmentTraces);
    }
    return results;
}

From source file:org.apache.solr.codecs.onsql.ONSQLStoredFieldsWriter.java

License:Apache License

@Override
public int merge(MergeState mergeState) throws IOException {
    log.debug("merge has been called");
    // check for our primary key completeness
    /*
     * instead of copying stored fields we need to update the mapping table and remove non-existing entries
     * several cases: a doc may be deleted and no longer exist in the new segment;
     * a doc may be deleted, yet a similar doc might be found.
     * thing is, these fields might be updated, so using linkup is not so productive,
     * because in fact there can be two different versions of a document present after the update:
     * one old version, now deleted, and one new version.
     * hmmm, since we're using a custom primary key, on update the entry with our fields will be overwritten.
     * also worth considering: the merge procedure only creates a new segment based on previous ones;
     * deletion of old segments happens later, via the Directory.deleteFile API
     * have to check on this.
     * 
     * first, since it's a merge, we assume entries in the kvstore already exist
     * so, first we search for our segment key: segID-docID->customPK, then copy it
     * all other fields will be left unchanged
     * 
     * have to consider the failure case, where the merge might be aborted in the middle;
     * since the merge first copies data to the new segment, we are safe here, as in the worst case we will lose just the links
     * for this new segment
     * */

    int docCount = 0;
    int idx = 0;
    String new_segment_kvstore_key_part = Base62Converter
            .fromBase10(mergeState.segmentInfo.name.concat(STORED_FIELDS_EXTENSION).hashCode());
    for (AtomicReader reader : mergeState.readers) {
        final SegmentReader seg_reader = mergeState.matchingSegmentReaders[idx++];
        ONSQLStoredFieldsReader fields_reader = null;
        if (seg_reader != null) {
            final StoredFieldsReader fieldsReader = seg_reader.getFieldsReader();
            // we can do the merge only if the matching reader is also an ONSQLStoredFieldsReader
            if (fieldsReader instanceof ONSQLStoredFieldsReader) {
                fields_reader = (ONSQLStoredFieldsReader) fieldsReader;
            } else {
                throw new IllegalStateException("incorrect fieldsreader class at merge procedure, is "
                        + (fieldsReader == null ? "null" : fieldsReader.getClass().getName())
                        + ", only ONSQLStoredFieldsReader is accepted");
            }
        }
        // guard against a non-matching (null) segment reader before dereferencing it below
        if (seg_reader == null) {
            throw new IllegalStateException("no matching segment reader at merge procedure");
        }
        String current_segment = seg_reader.getSegmentName().concat(STORED_FIELDS_EXTENSION);
        log.debug("current segment name = " + seg_reader.getSegmentName());
        // we assume the reader always uses an FSDirectory instance, so that we can extract the directory path
        String dir = ((FSDirectory) seg_reader.directory()).getDirectory().getAbsolutePath();
        final int maxDoc = reader.maxDoc();
        final Bits liveDocs = reader.getLiveDocs();
        boolean canmerge = ONSQLKVstoreHandler.getInstance().getAllowWriting(this.tdir);
        for (int i = nextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; i = nextLiveDoc(i + 1, liveDocs, maxDoc)) {
            ++docCount;
            if (canmerge) {
                // retrieve link using our doc id
                Key doc_key = Key.createKey(Arrays.asList(Base62Converter.fromBase10(dir.hashCode()),
                        Base62Converter.fromBase10(current_segment.hashCode()), Base62Converter.fromBase10(i)));
                Iterator<Key> kv_it = kvstore.multiGetKeysIterator(Direction.FORWARD, 1, doc_key, null,
                        Depth.PARENT_AND_DESCENDANTS);
                if (!kv_it.hasNext())
                    throw new IllegalStateException(
                            "unable to get doc segment key using key id=" + doc_key.toString());
                Key entry_key = kv_it.next();
                // create link to doc id for new segment 
                Key link_key = Key.createKey(Arrays.asList(Base62Converter.fromBase10(dir.hashCode()),
                        new_segment_kvstore_key_part, Base62Converter.fromBase10(numDocsWritten)),
                        entry_key.getMinorPath());
                log.debug("putting link key=" + link_key.toString());
                kvstore.put(link_key, Value.EMPTY_VALUE);
                // next add backref
                Key backref_key = Key.createKey(entry_key.getMinorPath(),
                        Arrays.asList("_1", Base62Converter.fromBase10(dir.hashCode()),
                                new_segment_kvstore_key_part, Base62Converter.fromBase10(numDocsWritten)));
                kvstore.put(backref_key, Value.EMPTY_VALUE);
                log.debug("putting backref key=" + backref_key.toString());
                //addDocument(doc, mergeState.fieldInfos);
            } else
                log.debug("merging is not allowed, skipping doc with internal id=" + i);
            ++numDocsWritten;
            mergeState.checkAbort.work(300);
        }

    }

    finish(mergeState.fieldInfos, docCount);
    return docCount;
}

From source file:org.apache.solr.request.uninverted.UnInvertedField.java

License:Apache License

public static UnInvertedField getUnInvertedField(final DocSet baseAdvanceDocs, final String field,
        final SegmentReader reader, String partion, final IndexSchema schema, final boolean isreadDouble)
        throws IOException {
    final ILruMemSizeKey key = new GrobalCache.StringKey("seg@" + String.valueOf(isreadDouble) + "@" + field
            + "@" + reader.getStringCacheKey() + "@" + reader.getSegmentName());
    ExecutorCompletionService<UnivertPool> submit = new ExecutorCompletionService<UnivertPool>(SUBMIT_POOL);
    final long t0 = System.currentTimeMillis();

    Callable<UnivertPool> task = new Callable<UnivertPool>() {
        public UnivertPool call() throws Exception {
            UnivertPool rtnuif = new UnivertPool();
            try {
                long t1 = System.currentTimeMillis();
                Cache<ILruMemSizeKey, ILruMemSizeCache> cache = GrobalCache.fieldValueCache;
                final Object lockthr = UnInvertedFieldUtils.getLock(key);
                synchronized (lockthr) {
                    rtnuif.uni = (UnInvertedField) cache.get(key);
                    BitDocSet clonebitset = cloneBitset(baseAdvanceDocs, reader);
                    long t2 = System.currentTimeMillis();

                    if (rtnuif.uni == null || rtnuif.uni.isShutDown()) {
                        rtnuif.uni = new UnInvertedField();
                        boolean issucecess = MakeUnivertedFieldBySigment.makeInit(rtnuif.uni, clonebitset,
                                field, reader, schema, isreadDouble);
                        if (!issucecess) {
                            MakeUnivertedFieldByIndex forjoin = new MakeUnivertedFieldByIndex(rtnuif.uni);
                            forjoin.makeInit(clonebitset, field, schema, reader);
                        }
                        cache.put(key, rtnuif.uni);
                    } else {
                        boolean issucecess = MakeUnivertedFieldBySigment.addDoclist(rtnuif.uni, clonebitset,
                                field, reader, schema, isreadDouble);
                        if (!issucecess) {
                            MakeUnivertedFieldByIndex forjoin = new MakeUnivertedFieldByIndex(rtnuif.uni);
                            forjoin.addDoclist(clonebitset, field, reader);
                        }
                    }
                    long t3 = System.currentTimeMillis();

                    log.info("####timetaken####:" + (t3 - t2) + "@" + (t2 - t1) + "@" + (t1 - t0) + ","
                            + String.valueOf(rtnuif.uni));
                }
            } catch (IOException e) {
                rtnuif.e = e;
            }
            return rtnuif;
        }
    };
    submit.submit(task);

    UnInvertedField uif = UnInvertedFieldUtils.takeUnf(submit);
    uif.refCnt.incrementAndGet();
    long t4 = System.currentTimeMillis();
    log.info("####timetaken all####:" + (t4 - t0) + "," + String.valueOf(uif));
    return uif;
}