Usage examples for org.apache.lucene.store.IndexOutput.writeBytes
public abstract void writeBytes(byte[] b, int offset, int length) throws IOException;
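Before the collected examples, here is a minimal, self-contained sketch of the call itself. It is not taken from any of the sources below: the class name WriteBytesSketch, the file name "example.bin", and the use of RAMDirectory are illustrative assumptions, written against the Lucene 4.x-style createOutput(String, IOContext) API that most of the examples below use.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMDirectory;

public class WriteBytesSketch {
    public static void main(String[] args) throws IOException {
        Directory dir = new RAMDirectory();
        IndexOutput out = dir.createOutput("example.bin", IOContext.DEFAULT);
        try {
            byte[] data = "hello, lucene".getBytes(StandardCharsets.UTF_8);
            // Write the entire buffer: start at offset 0, copy data.length bytes.
            out.writeBytes(data, 0, data.length);
            // Write a slice of the buffer: 6 bytes starting at offset 7 ("lucene").
            out.writeBytes(data, 7, 6);
        } finally {
            out.close();
        }
        dir.close();
    }
}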
From source file:cc.solr.lucene.store.CustomBufferedIndexInput.java
License:Apache License
/**
 * Flushes the in-memory buffer to the given output, copying at most
 * <code>numBytes</code>.
 * <p>
 * <b>NOTE:</b> this method does not refill the buffer, however it does
 * advance the buffer position.
 *
 * @return the number of bytes actually flushed from the in-memory buffer.
 */
protected int flushBuffer(IndexOutput out, long numBytes) throws IOException {
    int toCopy = bufferLength - bufferPosition;
    if (toCopy > numBytes) {
        toCopy = (int) numBytes;
    }
    if (toCopy > 0) {
        out.writeBytes(buffer, bufferPosition, toCopy);
        bufferPosition += toCopy;
    }
    return toCopy;
}
From source file:com.bah.lucene.BaseDirectoryTestSuite.java
License:Apache License
private void createFile(String name, Directory fsDir, Directory hdfs) throws IOException {
    int writes = random.nextInt(MAX_NUMBER_OF_WRITES);
    int fileLength = random.nextInt(MAX_FILE_SIZE - MIN_FILE_SIZE) + MIN_FILE_SIZE;
    IndexOutput fsOutput = fsDir.createOutput(name, IOContext.DEFAULT);
    fsOutput.setLength(fileLength);
    IndexOutput hdfsOutput = hdfs.createOutput(name, IOContext.DEFAULT);
    hdfsOutput.setLength(fileLength);
    for (int i = 0; i < writes; i++) {
        byte[] buf = new byte[random.nextInt(Math.min(MAX_BUFFER_SIZE - MIN_BUFFER_SIZE, fileLength)) + MIN_BUFFER_SIZE];
        random.nextBytes(buf);
        int offset = random.nextInt(buf.length);
        int length = random.nextInt(buf.length - offset);
        fsOutput.writeBytes(buf, offset, length);
        hdfsOutput.writeBytes(buf, offset, length);
    }
    fsOutput.close();
    hdfsOutput.close();
}
From source file:com.bah.lucene.BaseDirectoryTestSuite.java
License:Apache License
private Directory getControlDir(final Directory control, final Directory test) {
    return new Directory() {

        @Override
        public Lock makeLock(String name) {
            return control.makeLock(name);
        }

        @Override
        public void clearLock(String name) throws IOException {
            control.clearLock(name);
        }

        @Override
        public void setLockFactory(LockFactory lockFactory) throws IOException {
            control.setLockFactory(lockFactory);
        }

        @Override
        public LockFactory getLockFactory() {
            return control.getLockFactory();
        }

        @Override
        public String getLockID() {
            return control.getLockID();
        }

        @Override
        public void copy(Directory to, String src, String dest, IOContext context) throws IOException {
            control.copy(to, src, dest, context);
        }

        @Override
        public IndexInputSlicer createSlicer(String name, IOContext context) throws IOException {
            return control.createSlicer(name, context);
        }

        @Override
        public IndexOutput createOutput(final String name, IOContext context) throws IOException {
            final IndexOutput testOutput = test.createOutput(name, context);
            final IndexOutput controlOutput = control.createOutput(name, context);
            // Mirror every write to both outputs and report any divergence.
            return new IndexOutput() {

                @Override
                public void flush() throws IOException {
                    testOutput.flush();
                    controlOutput.flush();
                }

                @Override
                public void close() throws IOException {
                    testOutput.close();
                    controlOutput.close();
                }

                @Override
                public long getFilePointer() {
                    long filePointer = testOutput.getFilePointer();
                    long controlFilePointer = controlOutput.getFilePointer();
                    if (controlFilePointer != filePointer) {
                        System.err.println("Output Name [" + name + "] with filePointer [" + filePointer
                                + "] and control filePointer [" + controlFilePointer + "] does not match");
                    }
                    return filePointer;
                }

                @SuppressWarnings("deprecation")
                @Override
                public void seek(long pos) throws IOException {
                    testOutput.seek(pos);
                    controlOutput.seek(pos);
                }

                @Override
                public long length() throws IOException {
                    long length = testOutput.length();
                    long controlLength = controlOutput.length();
                    if (controlLength != length) {
                        System.err.println("Output Name [" + name + "] with length [" + length
                                + "] and control length [" + controlLength + "] does not match");
                    }
                    return length;
                }

                @Override
                public void writeByte(byte b) throws IOException {
                    testOutput.writeByte(b);
                    controlOutput.writeByte(b);
                }

                @Override
                public void writeBytes(byte[] b, int offset, int length) throws IOException {
                    testOutput.writeBytes(b, offset, length);
                    controlOutput.writeBytes(b, offset, length);
                }
            };
        }

        @Override
        public IndexInput openInput(final String name, IOContext context) throws IOException {
            final IndexInput testInput = test.openInput(name, context);
            final IndexInput controlInput = control.openInput(name, context);
            return new IndexInputCompare(name, testInput, controlInput);
        }

        @Override
        public String[] listAll() throws IOException {
            return test.listAll();
        }

        @Override
        public boolean fileExists(String name) throws IOException {
            return test.fileExists(name);
        }

        @Override
        public void deleteFile(String name) throws IOException {
            test.deleteFile(name);
            control.deleteFile(name);
        }

        @Override
        public long fileLength(String name) throws IOException {
            long fileLength = test.fileLength(name);
            long controlFileLength = control.fileLength(name);
            if (controlFileLength != fileLength) {
                System.err.println("Input Name [" + name + "] with length [" + fileLength
                        + "] and control length [" + controlFileLength + "] does not match");
            }
            return fileLength;
        }

        @Override
        public void sync(Collection<String> names) throws IOException {
            test.sync(names);
            control.sync(names);
        }

        @Override
        public void close() throws IOException {
            test.close();
            control.close();
        }
    };
}
From source file:com.bah.lucene.blockcache.BlockDirectoryTest.java
License:Apache License
private void createFile(String name, Directory fsDir, Directory hdfs) throws IOException {
    int writes = random.nextInt(MAX_NUMBER_OF_WRITES);
    int fileLength = random.nextInt(MAX_FILE_SIZE - MIN_FILE_SIZE) + MIN_FILE_SIZE;
    IndexOutput fsOutput = fsDir.createOutput(name, IOContext.DEFAULT);
    IndexOutput hdfsOutput = hdfs.createOutput(name, IOContext.DEFAULT);
    for (int i = 0; i < writes; i++) {
        byte[] buf = new byte[random.nextInt(Math.min(MAX_BUFFER_SIZE - MIN_BUFFER_SIZE, fileLength)) + MIN_BUFFER_SIZE];
        random.nextBytes(buf);
        int offset = random.nextInt(buf.length);
        int length = random.nextInt(buf.length - offset);
        fsOutput.writeBytes(buf, offset, length);
        hdfsOutput.writeBytes(buf, offset, length);
    }
    fsOutput.close();
    hdfsOutput.close();
}
From source file:com.bah.lucene.buffer.ReusedBufferedIndexInput.java
License:Apache License
/**
 * Flushes the in-memory buffer to the given output, copying at most
 * <code>numBytes</code>.
 * <p>
 * <b>NOTE:</b> this method does not refill the buffer, however it does
 * advance the buffer position.
 *
 * @return the number of bytes actually flushed from the in-memory buffer.
 */
protected final int flushBuffer(IndexOutput out, long numBytes) throws IOException {
    int toCopy = bufferLength - bufferPosition;
    if (toCopy > numBytes) {
        toCopy = (int) numBytes;
    }
    if (toCopy > 0) {
        out.writeBytes(buffer, bufferPosition, toCopy);
        bufferPosition += toCopy;
    }
    return toCopy;
}
From source file:com.nearinfinity.blur.mapreduce.BlurReducer.java
License:Apache License
protected long copyBytes(IndexInput in, IndexOutput out, long numBytes, Context context, long totalBytesCopied,
        long totalBytesToCopy, long startTime, String src) throws IOException {
    if (_copyBuf == null) {
        _copyBuf = new byte[BufferedIndexInput.BUFFER_SIZE];
    }
    long start = System.currentTimeMillis();
    long copied = 0;
    while (numBytes > 0) {
        if (start + REPORT_PERIOD < System.currentTimeMillis()) {
            report(context, totalBytesCopied + copied, totalBytesToCopy, startTime, src);
            start = System.currentTimeMillis();
        }
        final int toCopy = (int) (numBytes > _copyBuf.length ? _copyBuf.length : numBytes);
        in.readBytes(_copyBuf, 0, toCopy);
        out.writeBytes(_copyBuf, 0, toCopy);
        numBytes -= toCopy;
        copied += toCopy;
        context.progress();
    }
    return copied;
}
From source file:com.sensei.indexing.hadoop.reduce.RAMDirectoryUtil.java
License:Apache License
/**
 * Read a number of files from a data input to a RAM directory.
 * @param in the data input
 * @param dir the RAM directory
 * @throws IOException
 */
public static void readRAMFiles(DataInput in, RAMDirectory dir) throws IOException {
    int numFiles = in.readInt();
    for (int i = 0; i < numFiles; i++) {
        String name = Text.readString(in);
        long length = in.readLong();
        if (length > 0) {
            // can we avoid the extra copy?
            IndexOutput output = null;
            try {
                output = dir.createOutput(name);
                int position = 0;
                byte[] buffer = new byte[BUFFER_SIZE];
                while (position < length) {
                    int len = position + BUFFER_SIZE <= length ? BUFFER_SIZE : (int) (length - position);
                    in.readFully(buffer, 0, len);
                    output.writeBytes(buffer, 0, len);
                    position += len;
                }
            } finally {
                if (output != null) {
                    output.close();
                }
            }
        }
    }
}
From source file:com.xiaomi.linden.hadoop.indexing.reduce.RAMDirectoryUtil.java
License:Apache License
/**
 * Read a number of files from a data input to a RAM directory.
 * @param in the data input
 * @param dir the RAM directory
 * @throws IOException
 */
public static void readRAMFiles(DataInput in, RAMDirectory dir) throws IOException {
    int numFiles = in.readInt();
    for (int i = 0; i < numFiles; i++) {
        String name = Text.readString(in);
        long length = in.readLong();
        if (length > 0) {
            // can we avoid the extra copy?
            IndexOutput output = null;
            try {
                IOContext context = new IOContext();
                output = dir.createOutput(name, context);
                int position = 0;
                byte[] buffer = new byte[BUFFER_SIZE];
                while (position < length) {
                    int len = position + BUFFER_SIZE <= length ? BUFFER_SIZE : (int) (length - position);
                    in.readFully(buffer, 0, len);
                    output.writeBytes(buffer, 0, len);
                    position += len;
                }
            } finally {
                if (output != null) {
                    output.close();
                }
            }
        }
    }
}
From source file:org.apache.solr.store.hdfs.HdfsDirectoryTest.java
License:Apache License
private void createFile(String name, Directory fsDir, HdfsDirectory hdfs) throws IOException {
    int writes = random.nextInt(MAX_NUMBER_OF_WRITES);
    int fileLength = random.nextInt(MAX_FILE_SIZE - MIN_FILE_SIZE) + MIN_FILE_SIZE;
    IndexOutput fsOutput = fsDir.createOutput(name, new IOContext());
    fsOutput.setLength(fileLength);
    IndexOutput hdfsOutput = hdfs.createOutput(name, new IOContext());
    hdfsOutput.setLength(fileLength);
    for (int i = 0; i < writes; i++) {
        byte[] buf = new byte[random.nextInt(Math.min(MAX_BUFFER_SIZE - MIN_BUFFER_SIZE, fileLength)) + MIN_BUFFER_SIZE];
        random.nextBytes(buf);
        int offset = random.nextInt(buf.length);
        int length = random.nextInt(buf.length - offset);
        fsOutput.writeBytes(buf, offset, length);
        hdfsOutput.writeBytes(buf, offset, length);
    }
    fsOutput.close();
    hdfsOutput.close();
}
From source file:org.elasticsearch.common.compress.snappy.xerial.XerialSnappyCompressedIndexOutput.java
License:Apache License
@Override
protected void compress(byte[] data, int offset, int len, IndexOutput out) throws IOException {
    int compressedLength = Snappy.rawCompress(data, offset, len, compressedBuffer, 0);
    // Store the input uncompressed if compression saves less than 12.5%,
    // i.e. the compressed length is at least len - len/8 (87.5% of len).
    if (compressedLength >= (len - (len / 8))) {
        out.writeByte((byte) 0);
        out.writeVInt(len);
        out.writeBytes(data, offset, len);
    } else {
        out.writeByte((byte) 1);
        out.writeVInt(compressedLength);
        out.writeBytes(compressedBuffer, 0, compressedLength);
    }
}
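For context, the block layout written above (one flag byte, a vInt length, then the stored bytes) can be read back symmetrically. The following is a hedged sketch, not part of the Elasticsearch source: the method name readBlock is hypothetical, and it assumes snappy-java's Snappy.uncompress for the compressed branch.

static byte[] readBlock(IndexInput in) throws IOException {
    byte flag = in.readByte();        // 0 = stored uncompressed, 1 = Snappy-compressed
    int storedLength = in.readVInt(); // number of bytes that follow on disk
    byte[] stored = new byte[storedLength];
    in.readBytes(stored, 0, storedLength);
    if (flag == 0) {
        return stored;
    }
    return Snappy.uncompress(stored);
}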