Example usage for org.apache.lucene.store IndexInput readBytes

List of usage examples for org.apache.lucene.store IndexInput readBytes

Introduction

On this page you can find example usage of org.apache.lucene.store IndexInput readBytes.

Prototype

public abstract void readBytes(byte[] b, int offset, int len) throws IOException;

Source Link

Document

Reads a specified number of bytes into an array at the specified offset.

Usage

From source file:axiom.objectmodel.dom.LuceneManager.java

License:Open Source License

/**
 * Copies the contents of the segments.new file from the given Lucene directory
 * into the relational "Lucene" table: first invalidates the currently-valid row
 * for this application's db home, then inserts a new valid row holding the raw
 * segment bytes. The segments.new file is deleted afterwards in all cases.
 *
 * NOTE(review): the caller appears to be responsible for the JDBC transaction
 * on {@code conn} (no commit/rollback here) — confirm against call sites.
 *
 * @param segmentsNew name of the segments file to read; if null, falls back to
 *                    TransFSDirectory.SEGMENTS_NEW
 * @param conn        open JDBC connection to the database holding the Lucene table
 * @param app         application whose db home name identifies the row to update
 * @param dir         Lucene directory containing the segments file
 * @throws TransactionException if the file cannot be read, contains no data,
 *                              or the database update/insert fails
 */
public static void commitSegments(String segmentsNew, Connection conn, Application app, Directory dir) {
    byte[] segmentContents = null;
    if (segmentsNew == null) {
        segmentsNew = TransFSDirectory.SEGMENTS_NEW;//TODO:IndexFileNames.getSegmentsNewFileName();
    }
    IndexInput input = null;
    try {
        input = dir.openInput(segmentsNew);
        int length = (int) input.length();
        segmentContents = new byte[length];
        try {
            input.readBytes(segmentContents, 0, length);
        } catch (IOException ioe) {
            // A failed read is surfaced by the empty-contents check below.
            segmentContents = null;
        }
    } catch (Exception ex) {
        app.logError(ErrorReporter.errorMsg(LuceneManager.class, "commitSegments"), ex);
        // NOTE(review): only ex.getMessage() is propagated; the cause is lost
        // unless TransactionException has a cause-taking ctor — worth checking.
        throw new TransactionException("LuceneTransaction.executeSubTransaction(): " + ex.getMessage());
    } finally {
        if (input != null) {
            try {
                input.close();
            } catch (Exception ignore) {
                // Best-effort close; nothing useful to do on failure.
            }
            input = null;
        }
    }

    if (segmentContents == null || segmentContents.length == 0) {
        throw new TransactionException("LuceneTransaction.executeSubTransaction(): "
                + "The segments.new file does not contain any data to save.");
    }

    PreparedStatement pstmt = null;
    ByteArrayInputStream bais = null;
    boolean exceptionOccured = false;

    try {
        // Invalidate the currently-valid row for this db home.
        String sql = "UPDATE Lucene SET valid = ?, version = ? " + "WHERE valid = ? AND db_home = ?";
        pstmt = conn.prepareStatement(sql);
        int count = 1;
        pstmt.setBoolean(count++, false);
        pstmt.setInt(count++, getLuceneVersion());
        pstmt.setBoolean(count++, true);
        pstmt.setString(count++, app.getDbDir().getName());
        pstmt.executeUpdate();
        pstmt.close();
        pstmt = null;

        // Insert the new valid row carrying the segment bytes.
        sql = "INSERT INTO Lucene (valid, db_home, segments, version) " + "VALUES (?,?,?,?)";
        pstmt = conn.prepareStatement(sql);
        count = 1;
        pstmt.setBoolean(count++, true);
        pstmt.setString(count++, app.getDbDir().getName());
        bais = new ByteArrayInputStream(segmentContents);
        pstmt.setBinaryStream(count++, bais, segmentContents.length);
        pstmt.setInt(count++, getLuceneVersion());
        int rows = pstmt.executeUpdate();
        if (rows < 1) {
            throw new Exception(
                    "LuceneTransactionManager.executeTransaction(): update didn't affect any rows in the database");
        }
    } catch (Exception ex) {
        exceptionOccured = true;
        throw new TransactionException(ex.getMessage());
    } finally {
        try {
            dir.deleteFile(segmentsNew);
        } catch (IOException ioex) {
            // A leftover segments.new file is harmless; it is overwritten by
            // the next Lucene write operation.
            app.logEvent(ErrorReporter.warningMsg(LuceneManager.class, "commitSegments") + "Could not delete "
                    + segmentsNew);
        }

        if (bais != null) {
            try {
                bais.close();
            } catch (Exception ignoreit) {
                // ByteArrayInputStream.close() is a no-op; nothing to handle.
            }
            bais = null;
        }
        segmentContents = null;

        if (pstmt != null) {
            try {
                pstmt.close();
            } catch (SQLException sqle) {
                // Don't mask an exception already in flight from the try block.
                if (!exceptionOccured) {
                    throw new TransactionException(sqle.getMessage());
                }
            }
            pstmt = null;
        }
    }
}

From source file:axiom.objectmodel.dom.LuceneManager.java

License:Open Source License

/**
 * Copies the contents of the segments.new file from the given Lucene directory
 * into the relational "Lucene" table: first invalidates the currently-valid row
 * for this db home, then inserts a new valid row holding the raw segment bytes.
 * The segments.new file is deleted afterwards in all cases.
 *
 * NOTE(review): the caller appears to be responsible for the JDBC transaction
 * on {@code conn} (no commit/rollback here) — confirm against call sites.
 *
 * @param segmentsNew name of the segments file to read; if null, falls back to
 *                    TransFSDirectory.SEGMENTS_NEW
 * @param conn        open JDBC connection to the database holding the Lucene table
 * @param dbhome      db home directory whose name identifies the row to update
 * @param dir         Lucene directory containing the segments file
 * @throws TransactionException if the file cannot be read, contains no data,
 *                              or the database update/insert fails
 */
public static void commitSegments(String segmentsNew, Connection conn, File dbhome, Directory dir) {
    byte[] segmentContents = null;
    if (segmentsNew == null) {
        segmentsNew = TransFSDirectory.SEGMENTS_NEW;//TODO:IndexFileNames.getSegmentsNewFileName();
    }
    IndexInput input = null;
    try {
        input = dir.openInput(segmentsNew);
        int length = (int) input.length();
        segmentContents = new byte[length];
        try {
            input.readBytes(segmentContents, 0, length);
        } catch (IOException ioe) {
            // A failed read is surfaced by the empty-contents check below.
            segmentContents = null;
        }
    } catch (Exception ex) {
        throw new TransactionException("LuceneTransaction.executeSubTransaction(): " + ex.getMessage());
    } finally {
        if (input != null) {
            try {
                input.close();
            } catch (Exception ignore) {
                // Best-effort close; nothing useful to do on failure.
            }
            input = null;
        }
    }

    if (segmentContents == null || segmentContents.length == 0) {
        throw new TransactionException("LuceneTransaction.executeSubTransaction(): "
                + "The segments.new file does not contain any data to save.");
    }

    PreparedStatement pstmt = null;
    ByteArrayInputStream bais = null;
    boolean exceptionOccured = false;

    try {
        // Invalidate the currently-valid row for this db home.
        String sql = "UPDATE Lucene SET valid = ?, version = ? " + "WHERE valid = ? AND db_home = ?";
        pstmt = conn.prepareStatement(sql);
        int count = 1;
        pstmt.setBoolean(count++, false);
        pstmt.setInt(count++, getLuceneVersion());
        pstmt.setBoolean(count++, true);
        pstmt.setString(count++, dbhome.getName());
        pstmt.executeUpdate();
        pstmt.close();
        pstmt = null;

        // Insert the new valid row carrying the segment bytes.
        sql = "INSERT INTO Lucene (valid, db_home, segments, version) " + "VALUES (?,?,?,?)";
        pstmt = conn.prepareStatement(sql);
        count = 1;
        pstmt.setBoolean(count++, true);
        pstmt.setString(count++, dbhome.getName());
        bais = new ByteArrayInputStream(segmentContents);
        pstmt.setBinaryStream(count++, bais, segmentContents.length);
        pstmt.setInt(count++, getLuceneVersion());
        // Debug System.out.println removed; row count is validated below.
        int rows = pstmt.executeUpdate();
        if (rows < 1) {
            throw new Exception(
                    "LuceneTransactionManager.executeTransaction(): update didn't affect any rows in the database");
        }
    } catch (Exception ex) {
        exceptionOccured = true;
        throw new TransactionException(ex.getMessage());
    } finally {
        try {
            dir.deleteFile(segmentsNew);
        } catch (IOException ioex) {
            // A leftover segments.new file is harmless; it is overwritten by
            // the next Lucene write operation.
        }

        if (bais != null) {
            try {
                bais.close();
            } catch (Exception ignoreit) {
                // ByteArrayInputStream.close() is a no-op; nothing to handle.
            }
            bais = null;
        }
        segmentContents = null;

        if (pstmt != null) {
            try {
                pstmt.close();
            } catch (SQLException sqle) {
                // Don't mask an exception already in flight from the try block.
                if (!exceptionOccured) {
                    throw new TransactionException(sqle.getMessage());
                }
            }
            pstmt = null;
        }
    }
}

From source file:com.bah.lucene.BaseDirectoryTestSuite.java

License:Apache License

/**
 * Reads random slices of the named file from both directories and asserts the
 * bytes match at every position of each slice.
 *
 * @param name  file to compare (must exist in both directories)
 * @param fsDir baseline (filesystem) directory
 * @param hdfs  directory under test
 * @throws IOException if either input fails
 */
private void assertInputsEquals(String name, Directory fsDir, Directory hdfs) throws IOException {
    int reads = random.nextInt(MAX_NUMBER_OF_READS);
    IndexInput fsInput = fsDir.openInput(name, IOContext.DEFAULT);
    IndexInput hdfsInput = hdfs.openInput(name, IOContext.DEFAULT);
    assertEquals(fsInput.length(), hdfsInput.length());
    int fileLength = (int) fsInput.length();
    for (int i = 0; i < reads; i++) {
        byte[] fsBuf = new byte[random.nextInt(Math.min(MAX_BUFFER_SIZE - MIN_BUFFER_SIZE, fileLength))
                + MIN_BUFFER_SIZE];
        byte[] hdfsBuf = new byte[fsBuf.length];
        int offset = random.nextInt(fsBuf.length);
        int length = random.nextInt(fsBuf.length - offset);
        // NOTE(review): nextInt throws if fileLength == length; assumes test
        // files are larger than any slice — confirm fixture sizes.
        int pos = random.nextInt(fileLength - length);
        fsInput.seek(pos);
        fsInput.readBytes(fsBuf, offset, length);
        hdfsInput.seek(pos);
        hdfsInput.readBytes(hdfsBuf, offset, length);
        // BUGFIX: compare the full slice [offset, offset + length). The old
        // bound (f < length) skipped the tail bytes whenever offset > 0.
        for (int f = offset; f < offset + length; f++) {
            if (fsBuf[f] != hdfsBuf[f]) {
                fail();
            }
        }
    }
    fsInput.close();
    hdfsInput.close();
}

From source file:com.bah.lucene.blockcache.BlockDirectoryTest.java

License:Apache License

/**
 * Reads random slices of the named file from both directories and asserts the
 * bytes match at every position of each slice. Skips the comparison entirely
 * for zero-length files.
 *
 * @param name  file to compare (must exist in both directories)
 * @param fsDir baseline (filesystem) directory
 * @param hdfs  directory under test
 * @throws IOException if either input fails
 */
private void assertInputsEquals(String name, Directory fsDir, Directory hdfs) throws IOException {
    int reads = random.nextInt(MAX_NUMBER_OF_READS);
    IndexInput fsInput = fsDir.openInput(name, IOContext.DEFAULT);
    IndexInput hdfsInput = hdfs.openInput(name, IOContext.DEFAULT);
    assertEquals(fsInput.length(), hdfsInput.length());
    int fileLength = (int) fsInput.length();
    if (fileLength != 0) {
        for (int i = 0; i < reads; i++) {
            byte[] fsBuf = new byte[random.nextInt(Math.min(MAX_BUFFER_SIZE - MIN_BUFFER_SIZE, fileLength))
                    + MIN_BUFFER_SIZE];
            byte[] hdfsBuf = new byte[fsBuf.length];
            int offset = random.nextInt(fsBuf.length);
            int length = random.nextInt(fsBuf.length - offset);
            // NOTE(review): nextInt throws if fileLength == length; assumes
            // test files are larger than any slice — confirm fixture sizes.
            int pos = random.nextInt(fileLength - length);
            fsInput.seek(pos);
            fsInput.readBytes(fsBuf, offset, length);
            hdfsInput.seek(pos);
            hdfsInput.readBytes(hdfsBuf, offset, length);
            // BUGFIX: compare the full slice [offset, offset + length). The
            // old bound (f < length) skipped the tail whenever offset > 0.
            for (int f = offset; f < offset + length; f++) {
                if (fsBuf[f] != hdfsBuf[f]) {
                    fail(Long.toString(seed) + " read [" + i + "]");
                }
            }
        }
    }
    fsInput.close();
    hdfsInput.close();
}

From source file:com.bah.lucene.blockcache_v2.CacheIndexInputTest.java

License:Apache License

/**
 * Compares two IndexInputs by reading {@code sampleSize} random slices from
 * each at identical positions/offsets and asserting the buffers are equal.
 * Both buffers start zero-filled, so bytes outside [offset, offset+len) are
 * trivially equal and the whole-array comparison is safe.
 *
 * @param baseInput  reference input
 * @param testInput  input under test (must have the same length)
 * @param random     source of positions, buffer sizes, and offsets
 * @param sampleSize number of random reads to perform
 * @param maxBufSize upper bound (exclusive-ish) for the slice buffer size
 * @param maxOffset  upper bound for the read offset within the buffer
 * @throws IOException If there is a low-level I/O error
 */
public static void readRandomData(IndexInput baseInput, IndexInput testInput, Random random, int sampleSize,
        int maxBufSize, int maxOffset) throws IOException {
    assertEquals(baseInput.length(), testInput.length());
    int fileLength = (int) baseInput.length();
    for (int i = 0; i < sampleSize; i++) {
        int position = random.nextInt(fileLength - maxBufSize);
        int bufSize = random.nextInt(maxBufSize - maxOffset) + 1;
        byte[] buf1 = new byte[bufSize];
        byte[] buf2 = new byte[bufSize];

        int offset = random.nextInt(Math.min(maxOffset, bufSize));
        // Clamp len so the read never runs past the end of the file.
        int len = Math.min(random.nextInt(bufSize - offset), fileLength - position);

        baseInput.seek(position);
        baseInput.readBytes(buf1, offset, len);
        testInput.seek(position);
        testInput.readBytes(buf2, offset, len);
        assertArrayEquals("Read [" + i + "] The position is [" + position + "] and bufSize [" + bufSize + "]",
                buf1, buf2);
    }
}

From source file:com.bah.lucene.blockcache_v2.CacheIndexOutputTest.java

License:Apache License

/**
 * Round-trip test for CacheIndexOutput: writes two single bytes plus 16000
 * random bytes through the caching output, then reads them back via a plain
 * RAMDirectory input and verifies length and contents.
 */
@Test
public void test1() throws IOException {
    // Seeded Random keeps the test reproducible across runs.
    Random random = new Random(seed);
    RAMDirectory directory = new RAMDirectory();
    IndexOutput output = directory.createOutput("test", IOContext.DEFAULT);

    Cache cache = CacheIndexInputTest.getCache();
    CacheIndexOutput indexOutput = new CacheIndexOutput(null, "test", output, cache);
    indexOutput.writeByte((byte) 1);
    indexOutput.writeByte((byte) 2);
    byte[] b = new byte[16000];
    random.nextBytes(b);
    indexOutput.writeBytes(b, 16000);
    indexOutput.close();

    IndexInput input = directory.openInput("test", IOContext.DEFAULT);
    // 2 single bytes + 16000 bulk bytes.
    assertEquals(16002, input.length());
    assertEquals(1, input.readByte());
    assertEquals(2, input.readByte());

    byte[] buf = new byte[16000];
    input.readBytes(buf, 0, 16000);
    input.close();
    assertArrayEquals(b, buf);
    directory.close();
}

From source file:com.github.lucene.store.jdbc.index.AbstractIndexInputOutputITest.java

License:Apache License

/**
 * Verifies the fixture file "value1" written by the test setup: checks its
 * existence and length, then reads back the expected ints, longs, and byte
 * runs in order, and spot-checks two bytes via seek.
 *
 * NOTE(review): the expected values (length 36, -1/10/0/0 header, 1..8 bytes)
 * mirror whatever the corresponding write path produced — confirm against the
 * setup method, which is outside this view.
 */
private void verifyData() throws IOException {
    final byte[] test = new byte[] { 1, 2, 3, 4, 5, 6, 7, 8 };
    Assert.assertTrue(jdbcDirectory.fileExists("value1"));
    Assert.assertEquals(36, jdbcDirectory.fileLength("value1"));

    final IndexInput indexInput = jdbcDirectory.openInput("value1", new IOContext());
    Assert.assertEquals(-1, indexInput.readInt());
    Assert.assertEquals(10, indexInput.readLong());
    Assert.assertEquals(0, indexInput.readInt());
    Assert.assertEquals(0, indexInput.readInt());
    // Bulk read overwrites the first 8 bytes of `test`.
    indexInput.readBytes(test, 0, 8);
    Assert.assertEquals((byte) 1, test[0]);
    Assert.assertEquals((byte) 8, test[7]);
    // Second bulk read overwrites only the first 5 bytes.
    indexInput.readBytes(test, 0, 5);
    Assert.assertEquals((byte) 1, test[0]);
    Assert.assertEquals((byte) 5, test[4]);

    // Random-access checks via seek.
    indexInput.seek(28);
    Assert.assertEquals((byte) 1, indexInput.readByte());
    indexInput.seek(30);
    Assert.assertEquals((byte) 3, indexInput.readByte());

    indexInput.close();
}

From source file:com.liferay.portal.search.lucene.dump.IndexCommitSerializationUtil.java

License:Open Source License

/**
 * Streams {@code length} bytes from {@code indexInput} to {@code outputStream}
 * in fixed-size chunks. The index input is always closed on exit; the output
 * stream is left open for the caller.
 *
 * @param indexInput   source to read from (closed by this method)
 * @param length       total number of bytes to copy
 * @param outputStream destination for the copied bytes
 * @throws IOException if reading or writing fails
 */
private static void serializeSegment(IndexInput indexInput, long length, OutputStream outputStream)
        throws IOException {

    byte[] chunk = new byte[_BUFFER_SIZE];

    try {
        long remaining = length;

        while (remaining > 0) {
            // Last chunk may be shorter than the buffer.
            int toCopy = (int) Math.min(remaining, _BUFFER_SIZE);

            indexInput.readBytes(chunk, 0, toCopy);
            outputStream.write(chunk, 0, toCopy);

            remaining -= toCopy;
        }
    } finally {
        indexInput.close();
    }
}

From source file:com.lucure.core.codec.ForUtil.java

License:Apache License

/**
 * Read the next block of data (<code>For</code> format).
 *
 * Layout per block: a single byte giving the bit width, then either a VInt
 * (when all values are equal) or {@code encodedSizes[numBits]} packed bytes.
 *
 * @param in        the input to use to read data
 * @param encoded   a buffer that can be used to store encoded data
 * @param decoded   where to write decoded data
 * @throws IOException If there is a low-level I/O error
 */
void readBlock(IndexInput in, byte[] encoded, int[] decoded) throws IOException {
    final int numBits = in.readByte();
    assert numBits <= 32 : numBits;

    if (numBits == ALL_VALUES_EQUAL) {
        // Run-length special case: one VInt fills the whole block.
        final int value = in.readVInt();
        Arrays.fill(decoded, 0, BLOCK_SIZE, value);
        return;
    }

    // Bulk-read exactly the number of bytes this bit width occupies.
    final int encodedSize = encodedSizes[numBits];
    in.readBytes(encoded, 0, encodedSize);

    final PackedInts.Decoder decoder = decoders[numBits];
    final int iters = iterations[numBits];
    assert iters * decoder.byteValueCount() >= BLOCK_SIZE;

    decoder.decode(encoded, 0, decoded, 0, iters);
}

From source file:com.nearinfinity.blur.mapreduce.BlurReducer.java

License:Apache License

/**
 * Copies {@code numBytes} bytes from {@code in} to {@code out} through the
 * shared {@code _copyBuf}, periodically reporting progress to the MapReduce
 * context so the task is not killed for inactivity.
 *
 * @param in               source index input
 * @param out              destination index output
 * @param numBytes         number of bytes to copy
 * @param context          MapReduce context used for progress/reporting
 * @param totalBytesCopied bytes copied before this call (for reporting)
 * @param totalBytesToCopy overall copy size (for reporting)
 * @param startTime        overall copy start time (for reporting)
 * @param src              label of the source, passed through to report()
 * @return the number of bytes copied by this call (equals numBytes on success)
 * @throws IOException if reading or writing fails
 */
protected long copyBytes(IndexInput in, IndexOutput out, long numBytes, Context context, long totalBytesCopied,
        long totalBytesToCopy, long startTime, String src) throws IOException {
    if (_copyBuf == null) {
        // Lazily allocated and reused across calls.
        _copyBuf = new byte[BufferedIndexInput.BUFFER_SIZE];
    }
    long start = System.currentTimeMillis();
    long copied = 0;
    while (numBytes > 0) {
        // Throttled progress report, at most once per REPORT_PERIOD.
        if (start + REPORT_PERIOD < System.currentTimeMillis()) {
            report(context, totalBytesCopied + copied, totalBytesToCopy, startTime, src);
            start = System.currentTimeMillis();
        }
        // Last chunk may be shorter than the buffer.
        final int toCopy = (int) (numBytes > _copyBuf.length ? _copyBuf.length : numBytes);
        in.readBytes(_copyBuf, 0, toCopy);
        out.writeBytes(_copyBuf, 0, toCopy);
        numBytes -= toCopy;
        copied += toCopy;
        context.progress();
    }
    return copied;
}