List of usage examples for the org.apache.hadoop.io.DataInputBuffer constructor
public DataInputBuffer()
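DataInputBuffer is a reusable, in-memory DataInput: you point it at a byte array with reset() and read primitives or Writables from it, instead of allocating a new ByteArrayInputStream per record. Before the project examples below, here is a minimal sketch of the usual round trip with its counterpart DataOutputBuffer (all calls are standard Hadoop io API; the values are illustrative):

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public class DataInputBufferExample {
    public static void main(String[] args) throws Exception {
        // Serialize a couple of values into an in-memory buffer
        DataOutputBuffer out = new DataOutputBuffer();
        out.writeLong(42L);
        out.writeUTF("hello");

        // Point a DataInputBuffer at the backing array and read the values back.
        // getData() returns the (possibly oversized) internal array, so always
        // bound the read with getLength().
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), 0, out.getLength());
        long v = in.readLong();   // 42
        String s = in.readUTF();  // "hello"
        System.out.println(v + " " + s);

        // Both buffers can be reset and reused for the next record
        out.reset();
        in.reset(out.getData(), 0, out.getLength());
    }
}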
From source file:edu.uci.ics.hyracks.dataflow.hadoop.mapreduce.KVIterator.java
License:Apache License
public KVIterator(IHyracksTaskContext ctx, HadoopHelper helper, RecordDescriptor recordDescriptor) {
    this.helper = helper;
    accessor = new FrameTupleAccessor(ctx.getFrameSize(), recordDescriptor);
    kBuffer = new DataInputBuffer();
    vBuffer = new DataInputBuffer();
}
From source file:FormatStorage.Unit.java
License:Open Source License
// Shift every stored meta offset by the delta between the old and new unit offset.
public void transfer(long newOffset) throws Exception {
    long adjust = newOffset - offset;

    boolean isVar = segment.formatData.isVar();
    if (isVar) {
        if (!compressed) {
            // Sanity check: the metas buffer must hold exactly one long per record
            int tnum = ((DataOutputBuffer) metasBuffer).getLength() / ConstVar.Sizeof_Long;
            if (tnum != recordNum) {
                throw new SEException.InnerException("tnum != recordNum");
            }

            // Read each packed long, add the adjustment, and collect the results
            DataOutputBuffer tmpOutputBuffer = new DataOutputBuffer();
            DataInputBuffer tmpInput = new DataInputBuffer();
            tmpInput.reset(((DataOutputBuffer) metasBuffer).getData(), 0,
                    ((DataOutputBuffer) metasBuffer).getLength());
            for (int i = 0; i < recordNum; i++) {
                long value = tmpInput.readLong() + adjust;
                tmpOutputBuffer.writeLong(value);
            }

            // Rewind the metas buffer and refill it with the adjusted values
            tmpInput.reset(tmpOutputBuffer.getData(), 0, tmpOutputBuffer.getLength());
            ((DataOutputBuffer) metasBuffer).reset();
            for (int i = 0; i < recordNum; i++) {
                ((DataOutputBuffer) metasBuffer).writeLong(tmpInput.readLong());
            }
        } else {
            // Same rewrite, but the metas buffer holds compressed data: decompress,
            // adjust each offset, then recompress into the rewound buffer.
            compressedMetasOutput.finish();

            InputStream tmpMetasInputStream = new DataInputBuffer();
            ((DataInputBuffer) tmpMetasInputStream).reset(((DataOutputBuffer) metasBuffer).getData(), 0,
                    ((DataOutputBuffer) metasBuffer).getLength());
            CompressionInputStream tmpCompressedMetasInput = codec.createInputStream(tmpMetasInputStream);

            DataOutputBuffer tmpOutputBuffer = new DataOutputBuffer();
            for (int i = 0; i < recordNum; i++) {
                int count = 0;
                try {
                    count = tmpCompressedMetasInput.read(metaOffsetBytes, 0, ConstVar.Sizeof_Long);
                    long meta = Util.bytes2long(metaOffsetBytes, 0, ConstVar.Sizeof_Long) + adjust;
                    tmpOutputBuffer.writeLong(meta);
                } catch (Exception e) {
                    e.printStackTrace();
                    System.out.println("i:" + i + ",count:" + count);
                    throw e;
                }
            }

            ((DataOutputBuffer) metasBuffer).reset();
            compressedMetasOutput.resetState();

            DataInputBuffer tmpInputBuffer = new DataInputBuffer();
            tmpInputBuffer.reset(tmpOutputBuffer.getData(), 0, tmpOutputBuffer.getLength());
            for (int i = 0; i < recordNum; i++) {
                long newMeta = tmpInputBuffer.readLong();
                Util.long2bytes(metaOffsetBytes, newMeta);
                compressedMetasOutput.write(metaOffsetBytes, 0, ConstVar.Sizeof_Long);
            }
        }
    }

    metaOffset += adjust;
    setOffset(newOffset);
}
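The method above is easier to follow once its core pattern is isolated: stream the packed longs out of the metadata buffer through a DataInputBuffer, add the offset delta, and write them back. A stripped-down sketch of just that pattern, as a hypothetical helper (shiftPackedLongs is not part of the project above; metasBuffer, recordNum, and adjust stand in for its fields):

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

class MetaOffsetShift {
    // Hypothetical helper distilling the uncompressed branch of transfer():
    // re-read recordNum packed longs, shift each by adjust, write them back.
    static void shiftPackedLongs(DataOutputBuffer metasBuffer, int recordNum, long adjust) throws IOException {
        DataInputBuffer in = new DataInputBuffer();
        in.reset(metasBuffer.getData(), 0, metasBuffer.getLength());

        DataOutputBuffer shifted = new DataOutputBuffer();
        for (int i = 0; i < recordNum; i++) {
            shifted.writeLong(in.readLong() + adjust);
        }

        // Rewind the original buffer and refill it with the adjusted values
        metasBuffer.reset();
        in.reset(shifted.getData(), 0, shifted.getLength());
        for (int i = 0; i < recordNum; i++) {
            metasBuffer.writeLong(in.readLong());
        }
    }
}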
From source file:hamr.core.general.group.GeneralGroupComparator.java
License:Open Source License
protected GeneralGroupComparator() {
    buffer = new DataInputBuffer();
    // super(conf.getMapOutputKeyClass().asSubclass(WritableComparable.class), true);
}
From source file:io.github.dlmarion.clowncar.hdfs.TestBloscCompressorDecompressor.java
License:Apache License
@Test
public void testCompressorDecompressorLogicWithCompressionStreams() {
    DataOutputStream deflateOut = null;
    DataInputStream inflateIn = null;
    int BYTE_SIZE = 1024 * 100;
    byte[] bytes = generate(BYTE_SIZE);
    int bufferSize = 262144;
    int compressionOverhead = (bufferSize / 6) + 32;
    try {
        Configuration conf = new Configuration(false);
        conf.set(BloscCompressor.COMPRESSOR_NAME_KEY, compressor);
        conf.set(BloscCompressor.COMPRESSION_LEVEL_KEY, Integer.toString(level));
        conf.set(BloscCompressor.BYTES_FOR_TYPE_KEY, Integer.toString(Integer.BYTES));
        conf.set(BloscCompressor.SHUFFLE_TYPE_KEY, Integer.toString(shuffle));
        conf.set(BloscCompressor.NUM_THREADS_KEY, Integer.toString(threads));

        // Compress the generated bytes into an in-memory DataOutputBuffer
        DataOutputBuffer compressedDataBuffer = new DataOutputBuffer();
        CompressionOutputStream deflateFilter = new BlockCompressorStream(compressedDataBuffer,
                new BloscCompressor(bufferSize, conf), bufferSize, compressionOverhead);
        deflateOut = new DataOutputStream(new BufferedOutputStream(deflateFilter));
        deflateOut.write(bytes, 0, bytes.length);
        deflateOut.flush();
        deflateFilter.finish();

        // Decompress straight back out of the same backing array
        DataInputBuffer deCompressedDataBuffer = new DataInputBuffer();
        deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0, compressedDataBuffer.getLength());
        CompressionInputStream inflateFilter = new BlockDecompressorStream(deCompressedDataBuffer,
                new BloscDecompressor(bufferSize), bufferSize);
        inflateIn = new DataInputStream(new BufferedInputStream(inflateFilter));

        byte[] result = new byte[BYTE_SIZE];
        // readFully guarantees the whole buffer is filled; a bare read() may return early
        inflateIn.readFully(result);

        assertArrayEquals("original array not equal to compressed/decompressed array", bytes, result);
    } catch (IOException e) {
        e.printStackTrace();
        fail("testCompressorDecompressorLogicWithCompressionStreams threw an unexpected IOException");
    } finally {
        try {
            if (deflateOut != null) {
                deflateOut.close();
            }
            if (inflateIn != null) {
                inflateIn.close();
            }
        } catch (Exception e) {
            // ignore close failures
        }
    }
}
From source file:io.hops.metadata.adaptor.INodeDALAdaptor.java
License:Apache License
@Override
public org.apache.hadoop.hdfs.server.namenode.INode convertDALtoHDFS(INode hopINode) throws StorageException {
    org.apache.hadoop.hdfs.server.namenode.INode inode = null;
    if (hopINode != null) {
        DataInputBuffer buffer = new DataInputBuffer();
        buffer.reset(hopINode.getPermission(), hopINode.getPermission().length);
        PermissionStatus ps = null;
        try {
            ps = PermissionStatus.read(buffer);
        } catch (IOException e) {
            throw new StorageException(e);
        }
        if (hopINode.isDir()) {
            if (hopINode.isDirWithQuota()) {
                inode = new INodeDirectoryWithQuota(hopINode.getName(), ps);
            } else {
                String iname = (hopINode.getName().length() == 0) ? INodeDirectory.ROOT_NAME
                        : hopINode.getName();
                inode = new INodeDirectory(iname, ps);
            }
            inode.setAccessTimeNoPersistance(hopINode.getAccessTime());
            inode.setModificationTimeNoPersistance(hopINode.getModificationTime());
        } else if (hopINode.getSymlink() != null) {
            inode = new INodeSymlink(hopINode.getSymlink(), hopINode.getModificationTime(),
                    hopINode.getAccessTime(), ps);
        } else {
            if (hopINode.isUnderConstruction()) {
                DatanodeID dnID = (hopINode.getClientNode() == null || hopINode.getClientNode().isEmpty())
                        ? null
                        : new DatanodeID(hopINode.getClientNode());
                inode = new INodeFileUnderConstruction(ps, INodeFile.getBlockReplication(hopINode.getHeader()),
                        INodeFile.getPreferredBlockSize(hopINode.getHeader()), hopINode.getModificationTime(),
                        hopINode.getClientName(), hopINode.getClientMachine(), dnID);
                inode.setAccessTimeNoPersistance(hopINode.getAccessTime());
            } else {
                inode = new INodeFile(ps, hopINode.getHeader(), hopINode.getModificationTime(),
                        hopINode.getAccessTime());
            }
            ((INodeFile) inode).setGenerationStampNoPersistence(hopINode.getGenerationStamp());
        }
        inode.setIdNoPersistance(hopINode.getId());
        inode.setLocalNameNoPersistance(hopINode.getName());
        inode.setParentIdNoPersistance(hopINode.getParentId());
        inode.setSubtreeLocked(hopINode.isSubtreeLocked());
        inode.setSubtreeLockOwner(hopINode.getSubtreeLockOwner());
    }
    return inode;
}
From source file:net.sf.katta.zk.ZKClient.java
License:Apache License
private Writable readWritable(final Writable writable, byte[] data) throws KattaException {
    final DataInputBuffer buffer = new DataInputBuffer();
    buffer.reset(data, data.length);
    try {
        writable.readFields(buffer);
    } catch (final IOException e) {
        throw new KattaException("unable to read data into Writable", e);
    } finally {
        try {
            buffer.close();
        } catch (IOException e) {
            LOG.warn("could not close data buffer", e);
        }
    }
    return writable;
}
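readWritable() is the read half of a symmetric pair: the byte[] it consumes would have been produced by serializing the Writable into a DataOutputBuffer. A minimal sketch of that write side (writeWritable is a hypothetical name, not part of ZKClient; only the Hadoop buffer calls are real API):

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Writable;

class WritableBytes {
    // Hypothetical counterpart to readWritable(): serialize a Writable into
    // exactly the byte[] form that readWritable() expects back.
    static byte[] writeWritable(Writable writable) throws IOException {
        DataOutputBuffer out = new DataOutputBuffer();
        writable.write(out);
        // getData() is the (possibly oversized) internal array; trim to getLength()
        return Arrays.copyOf(out.getData(), out.getLength());
    }
}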
From source file:org.apache.accumulo.core.replication.ReplicationTarget.java
License:Apache License
/**
 * Deserialize a ReplicationTarget
 *
 * @param t
 *          Serialized copy
 * @return the deserialized version
 */
public static ReplicationTarget from(Text t) {
    ReplicationTarget target = new ReplicationTarget();
    DataInputBuffer buffer = new DataInputBuffer();
    buffer.reset(t.getBytes(), t.getLength());
    try {
        target.readFields(buffer);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    return target;
}
From source file:org.apache.accumulo.core.replication.ReplicationTarget.java
License:Apache License
/**
 * Deserialize a ReplicationTarget
 *
 * @param s
 *          Serialized copy
 * @return the deserialized version
 */
public static ReplicationTarget from(String s) {
    ReplicationTarget target = new ReplicationTarget();
    DataInputBuffer buffer = new DataInputBuffer();
    // Use the UTF-8 byte length, not s.length(): the two differ for non-ASCII strings
    byte[] bytes = s.getBytes(UTF_8);
    buffer.reset(bytes, bytes.length);
    try {
        target.readFields(buffer);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    return target;
}
From source file:org.apache.accumulo.core.replication.ReplicationTargetTest.java
License:Apache License
@Test
public void writableOut() throws Exception {
    ReplicationTarget expected = new ReplicationTarget("foo", "bar", "1");
    DataOutputBuffer buffer = new DataOutputBuffer();
    expected.write(buffer);

    DataInputBuffer input = new DataInputBuffer();
    input.reset(buffer.getData(), buffer.getLength());
    ReplicationTarget actual = new ReplicationTarget();
    actual.readFields(input);

    // Verify the round trip actually reproduced the original target
    assertEquals(expected, actual);
}
From source file:org.apache.accumulo.core.replication.ReplicationTargetTest.java
License:Apache License
@Test
public void writableOutWithNulls() throws Exception {
    ReplicationTarget expected = new ReplicationTarget(null, null, null);
    DataOutputBuffer buffer = new DataOutputBuffer();
    expected.write(buffer);

    DataInputBuffer input = new DataInputBuffer();
    input.reset(buffer.getData(), buffer.getLength());
    ReplicationTarget actual = new ReplicationTarget();
    actual.readFields(input);

    // The round trip should preserve null fields as well
    assertEquals(expected, actual);
}