Example usage for org.apache.lucene.util IOUtils closeWhileHandlingException

List of usage examples for org.apache.lucene.util IOUtils closeWhileHandlingException

Introduction

In this page you can find the example usage for org.apache.lucene.util IOUtils closeWhileHandlingException.

Prototype

public static void closeWhileHandlingException(Iterable<? extends Closeable> objects) 

Source Link

Document

Closes all given Closeables, suppressing all thrown non-VirtualMachineError exceptions.

Usage

From source file:com.browseengine.bobo.geosearch.index.impl.GeoIndexer.java

License:Apache License

@Override
public void flush(Directory directory, String segmentName) throws IOException {
    // Atomically swap out the accumulated per-field state under the tree lock
    // so indexing can continue on fresh structures while we flush the snapshot.
    final Set<CartesianGeoRecord> pendingTree;
    final Set<String> pendingFieldNames;
    synchronized (treeLock) {
        pendingFieldNames = fieldNames;
        fieldNames = new HashSet<String>();

        pendingTree = fieldTree;
        fieldTree = geoUtil.getBinaryTreeOrderedByBitMag();
    }

    GeoSegmentInfo segmentInfo = buildGeoSegmentInfo(pendingFieldNames, segmentName);
    String geoFileName = config.getGeoFileName(segmentName);

    GeoSegmentWriter<CartesianGeoRecord> writer = null;
    boolean wroteCleanly = false;
    try {
        // The writer persists the snapshot to the segment's .geo file on construction.
        writer = new GeoSegmentWriter<CartesianGeoRecord>(pendingTree, directory, geoFileName,
                segmentInfo, geoRecordSerializer);
        wroteCleanly = true;
    } finally {
        // see https://issues.apache.org/jira/browse/LUCENE-3405:
        // propagate close() failures only on the success path; otherwise
        // suppress them so the original exception is not masked.
        if (wroteCleanly) {
            IOUtils.close(writer);
        } else {
            IOUtils.closeWhileHandlingException(writer);
        }
    }
}

From source file:com.browseengine.bobo.geosearch.merge.impl.BufferedGeoMerger.java

License:Apache License

@Override
//TODO:  Handle more frequent checkAborts
public void merge(IGeoMergeInfo geoMergeInfo, GeoSearchConfig config) throws IOException {
    // Merges the .geo side-files of all segments being merged into a single
    // .geo file for the new segment, skipping deleted documents.
    IGeoConverter geoConverter = config.getGeoConverter();
    int bufferSizePerGeoReader = config.getBufferSizePerGeoSegmentReader();

    Directory directory = geoMergeInfo.getDirectory();
    List<SegmentReader> readers = geoMergeInfo.getReaders();
    List<SegmentInfo> segments = geoMergeInfo.getSegmentsToMerge();

    List<BTree<CartesianGeoRecord>> mergeInputBTrees = new ArrayList<BTree<CartesianGeoRecord>>(
            segments.size());
    List<BitVector> deletedDocsList = new ArrayList<BitVector>(segments.size());
    boolean success = false;
    try {
        assert (readers.size() == segments.size());

        IFieldNameFilterConverter fieldNameFilterConverter = config.getGeoConverter()
                .makeFieldNameFilterConverter();

        // Open one input B-tree (and deleted-docs vector) per source segment.
        boolean hasFieldNameFilterConverter = false;
        for (SegmentReader reader : readers) {
            String geoFileName = config.getGeoFileName(reader.getSegmentName());

            BTree<CartesianGeoRecord> segmentBTree = getInputBTree(directory, geoFileName,
                    bufferSizePerGeoReader);
            mergeInputBTrees.add(segmentBTree);

            BitVector deletedDocs = buildDeletedDocsForSegment(reader);
            deletedDocsList.add(deletedDocs);

            //just take the first fieldNameFilterConverter for now.  Don't worry about merging them.
            if (!hasFieldNameFilterConverter) {
                hasFieldNameFilterConverter = loadFieldNameFilterConverter(directory, geoFileName,
                        fieldNameFilterConverter);
            }
        }

        if (!hasFieldNameFilterConverter) {
            // we are merging a bunch of segments, none of which have a corresponding .geo file
            // so there is nothing to do, it is okay if the outcome of this merge continues to 
            // not have a .geo file.
            // NOTE: success is set before returning so the finally block closes
            // the (empty) input trees on the non-suppressing path.
            LOGGER.warn("nothing to do during geo merge, no .geo files found for segments");
            success = true;
            return;
        }

        int newSegmentSize = calculateMergedSegmentSize(deletedDocsList, mergeInputBTrees, geoConverter);

        buildMergedSegment(mergeInputBTrees, deletedDocsList, newSegmentSize, geoMergeInfo, config,
                fieldNameFilterConverter);
        success = true;

    } finally {
        // see https://issues.apache.org/jira/browse/LUCENE-3405:
        // on failure, close quietly so the pending exception is not masked.
        if (success) {
            IOUtils.close(mergeInputBTrees);
        } else {
            IOUtils.closeWhileHandlingException(mergeInputBTrees);
        }
    }
}

From source file:com.browseengine.bobo.geosearch.merge.impl.BufferedGeoMerger.java

License:Apache License

/**
 * Writes the merged .geo output file for the new segment from the already
 * opened input B-trees.
 */
private void buildMergedSegment(List<BTree<CartesianGeoRecord>> mergeInputBTrees,
        List<BitVector> deletedDocsList, int newSegmentSize, IGeoMergeInfo geoMergeInfo, GeoSearchConfig config,
        IFieldNameFilterConverter fieldNameFilterConverter) throws IOException {
    // Resolve the output location and metadata for the merged segment.
    Directory dir = geoMergeInfo.getDirectory();
    IGeoConverter converter = config.getGeoConverter();
    String newSegmentName = geoMergeInfo.getNewSegment().name;
    String geoFileName = config.getGeoFileName(newSegmentName);
    GeoSegmentInfo segmentInfo = buildGeoSegmentInfo(newSegmentName, fieldNameFilterConverter);

    // One iterator chaining every input tree, skipping deleted documents.
    Iterator<CartesianGeoRecord> mergedRecords = new ChainedConvertedGeoRecordIterator(converter,
            mergeInputBTrees, deletedDocsList, BUFFER_CAPACITY);

    BTree<CartesianGeoRecord> outputTree = null;
    boolean wroteCleanly = false;
    try {
        outputTree = getOutputBTree(newSegmentSize, mergedRecords, dir, geoFileName, segmentInfo);
        wroteCleanly = true;
    } finally {
        // see https://issues.apache.org/jira/browse/LUCENE-3405:
        // suppress close() failures on the error path so the original
        // exception propagates.
        if (wroteCleanly) {
            IOUtils.close(outputTree);
        } else {
            IOUtils.closeWhileHandlingException(outputTree);
        }
    }
}

From source file:com.github.cstoku.neologd.unidic.lucene.analysis.ja.dict.CharacterDefinition.java

License:Apache License

/**
 * Loads the character-category table and the per-class invoke/group flags
 * from the bundled binary resource.
 *
 * @throws IOException if the resource is missing or has a bad header
 */
private CharacterDefinition() throws IOException {
    InputStream stream = null;
    boolean loaded = false;
    try {
        stream = BinaryDictionary.getClassResource(getClass(), FILENAME_SUFFIX);
        stream = new BufferedInputStream(stream);
        final DataInput input = new InputStreamDataInput(stream);
        CodecUtil.checkHeader(input, HEADER, VERSION, VERSION);
        input.readBytes(characterCategoryMap, 0, characterCategoryMap.length);
        // Each class stores two flags packed into one byte:
        // bit 0 = invoke, bit 1 = group.
        for (int classId = 0; classId < CLASS_COUNT; classId++) {
            final byte flags = input.readByte();
            invokeMap[classId] = (flags & 0x01) != 0;
            groupMap[classId] = (flags & 0x02) != 0;
        }
        loaded = true;
    } finally {
        // Suppress close() failures on the error path (LUCENE-3405 pattern).
        if (loaded) {
            IOUtils.close(stream);
        } else {
            IOUtils.closeWhileHandlingException(stream);
        }
    }
}

From source file:com.github.cstoku.neologd.unidic.lucene.analysis.ja.dict.ConnectionCosts.java

License:Apache License

/**
 * Loads the connection-cost matrix from the bundled binary resource.
 *
 * @throws IOException if the resource is missing or has a bad header
 */
private ConnectionCosts() throws IOException {
    InputStream stream = null;
    short[][] matrix = null;
    boolean loaded = false;
    try {
        stream = BinaryDictionary.getClassResource(getClass(), FILENAME_SUFFIX);
        stream = new BufferedInputStream(stream);
        final DataInput input = new InputStreamDataInput(stream);
        CodecUtil.checkHeader(input, HEADER, VERSION, VERSION);
        final int forwardSize = input.readVInt();
        final int backwardSize = input.readVInt();
        matrix = new short[backwardSize][forwardSize];
        // Costs are delta-encoded across the whole matrix in row-major order;
        // accumulate to recover the absolute values.
        int runningTotal = 0;
        for (short[] row : matrix) {
            for (int col = 0; col < row.length; col++) {
                runningTotal += input.readZInt();
                row[col] = (short) runningTotal;
            }
        }
        loaded = true;
    } finally {
        // Suppress close() failures on the error path (LUCENE-3405 pattern).
        if (loaded) {
            IOUtils.close(stream);
        } else {
            IOUtils.closeWhileHandlingException(stream);
        }
    }

    this.costs = matrix;
}

From source file:com.github.cstoku.neologd.unidic.lucene.analysis.ja.dict.TokenInfoDictionary.java

License:Apache License

/**
 * Loads the token-info FST from the bundled resource and wraps it in a
 * {@link TokenInfoFST}.
 *
 * @throws IOException if the resource is missing or unreadable
 */
private TokenInfoDictionary() throws IOException {
    super();
    InputStream stream = null;
    FST<Long> rawFst = null;
    boolean loaded = false;
    try {
        stream = getResource(FST_FILENAME_SUFFIX);
        stream = new BufferedInputStream(stream);
        rawFst = new FST<>(new InputStreamDataInput(stream), PositiveIntOutputs.getSingleton());
        loaded = true;
    } finally {
        // Suppress close() failures on the error path (LUCENE-3405 pattern).
        if (loaded) {
            IOUtils.close(stream);
        } else {
            IOUtils.closeWhileHandlingException(stream);
        }
    }
    // TODO: some way to configure?
    this.fst = new TokenInfoFST(rawFst, true);
}

From source file:com.globalsight.ling.lucene.Index.java

License:Apache License

/**
 * Adds one document built from the given ids and text to the index.
 * Acquires the write lock for the duration of the write and always closes
 * the temporary writer (quietly, so a pending write exception is not masked).
 *
 * @throws IOException if the index is not open, the write fails, or the
 *         thread is interrupted while waiting for the write lock
 */
public void addDocument(long p_mainId, long p_subId, String p_text) throws IOException {
    synchronized (m_state) {
        if (m_state != STATE_OPENED) {
            throw new IOException("index is not available");
        }
    }

    // clean cache if have
    LuceneCache.cleanLuceneCache(m_directory);

    try {
        m_lock.writeLock().acquire();

        IndexWriter tempWriter = null;
        try {
            tempWriter = getIndexWriter(false);
            Document doc = getDocument(p_mainId, p_subId, p_text);
            tempWriter.addDocument(doc);
        } finally {
            m_lock.writeLock().release();
            IOUtils.closeWhileHandlingException(tempWriter);
        }
    } catch (InterruptedException ex) {
        // Fix: restore the interrupt status and chain the cause instead of
        // discarding it via ex.getMessage() (which loses the stack trace).
        Thread.currentThread().interrupt();
        throw new IOException(ex);
    }
}

From source file:com.globalsight.ling.tm2.lucene.LuceneIndexWriter.java

License:Apache License

/**
 * The constructor gets a lock on the index directory.  If the
 * directory doesn't exist yet, it is created. When an operation
 * is done, close() method must be called to remove the lock.
 *
 * @param p_tmId TM id
 * @param p_locale locale of the index
 * @param p_isFirst whether this is the first writer for the index
 * @throws IOException if the index directory is already locked for write
 */
public LuceneIndexWriter(long p_tmId, GlobalSightLocale p_locale, boolean p_isFirst) throws Exception {
    m_tmId = p_tmId;
    m_analyzer = new GsPerFieldAnalyzer(p_locale);
    m_isFirst = p_isFirst;

    m_indexDir = LuceneUtil.getGoldTmIndexDirectory(p_tmId, p_locale, true);

    // get the directory. Note that the directory cannot be
    // created before getting a lock. Note2:
    // FSDirectory.getDirectory(dir, true) doesn't really create
    // index files. It just clear out the old index files and lock
    // file.
    m_directory = FSDirectory.open(m_indexDir);

    // get a lock on the directory
    m_lock = m_directory.makeLock(LOCK_NAME);
    if (!m_lock.obtain(180000L)) {
        m_lock = null;
        // Fix: the old message appended m_lock AFTER nulling it, so it always
        // read "Index locked for write: null"; report the directory instead.
        throw new IOException("Index locked for write: " + m_indexDir);
    }

    // only after gettting a lock, create the initial index files
    // if it doesn't exist.
    if (!DirectoryReader.indexExists(m_directory)) {
        IndexWriterConfig conf = new IndexWriterConfig(LuceneUtil.VERSION, m_analyzer);
        conf.setOpenMode(OpenMode.CREATE_OR_APPEND);
        boolean initSuccess = false;
        IndexWriter writer = null;
        try {
            writer = new IndexWriter(m_directory, conf);
            initSuccess = true;
        } catch (IndexFormatTooOldException ie) {
            // delete too old index files (everything except the lock file),
            // then retry creating a fresh index
            File[] files = m_indexDir.listFiles();
            if (files != null && files.length > 0) {
                for (int i = 0; i < files.length; i++) {
                    File oneFile = files[i];
                    if (!LuceneIndexWriter.LOCK_NAME.equals(oneFile.getName())) {
                        oneFile.delete();
                    }
                }
            }

            writer = new IndexWriter(m_directory, conf);
            initSuccess = true;
        } finally {
            // release the lock if initialization failed; always close the
            // bootstrap writer quietly so a pending exception is not masked
            if (!initSuccess) {
                m_lock.release();
            }
            IOUtils.closeWhileHandlingException(writer);
        }
    }
}

From source file:com.lucure.core.codec.CompressingStoredFieldsWriter.java

License:Apache License

/** Sole constructor. */
public CompressingStoredFieldsWriter(Directory directory, SegmentInfo si, String segmentSuffix,
        IOContext context, String formatName, CompressionMode compressionMode, int chunkSize)
        throws IOException {
    assert directory != null;
    this.directory = directory;
    this.segment = si.name;
    this.segmentSuffix = segmentSuffix;
    this.compressionMode = compressionMode;
    this.compressor = compressionMode.newCompressor();
    this.chunkSize = chunkSize;
    this.docBase = 0;
    this.bufferedDocs = new GrowableByteArrayDataOutput(chunkSize);
    this.numStoredFields = new int[16];
    this.endOffsets = new int[16];
    this.numBufferedDocs = 0;

    boolean success = false;
    // Created before the try so the finally block can close it on failure.
    IndexOutput indexStream = directory.createOutput(
            IndexFileNames.segmentFileName(segment, segmentSuffix, FIELDS_INDEX_EXTENSION), context);
    try {
        fieldsStream = directory.createOutput(
                IndexFileNames.segmentFileName(segment, segmentSuffix, FIELDS_EXTENSION), context);

        final String codecNameIdx = formatName + CODEC_SFX_IDX;
        final String codecNameDat = formatName + CODEC_SFX_DAT;
        CodecUtil.writeHeader(indexStream, codecNameIdx, VERSION_CURRENT);
        CodecUtil.writeHeader(fieldsStream, codecNameDat, VERSION_CURRENT);
        assert CodecUtil.headerLength(codecNameDat) == fieldsStream.getFilePointer();
        assert CodecUtil.headerLength(codecNameIdx) == indexStream.getFilePointer();

        // Ownership of indexStream transfers to indexWriter here; null the
        // local so the finally block won't double-close it on later failures.
        indexWriter = new CompressingStoredFieldsIndexWriter(indexStream);
        indexStream = null;

        fieldsStream.writeVInt(chunkSize);
        fieldsStream.writeVInt(PackedInts.VERSION_CURRENT);

        success = true;
    } finally {
        if (!success) {
            // Close whatever this constructor still owns, then abort() to
            // close fieldsStream/indexWriter and delete the partial files.
            IOUtils.closeWhileHandlingException(indexStream);
            abort();
        }
    }
}

From source file:com.lucure.core.codec.CompressingStoredFieldsWriter.java

License:Apache License

@Override
public void abort() {
    // Close all open outputs, ignoring secondary failures, then delete the
    // partially written fields data and index files.
    IOUtils.closeWhileHandlingException(this);
    final String dataFile = IndexFileNames.segmentFileName(segment, segmentSuffix, FIELDS_EXTENSION);
    final String indexFile = IndexFileNames.segmentFileName(segment, segmentSuffix,
            FIELDS_INDEX_EXTENSION);
    IOUtils.deleteFilesIgnoringExceptions(directory, dataFile, indexFile);
}