Example usage for org.apache.hadoop.io.compress BlockCompressorStream BlockCompressorStream

List of usage examples for org.apache.hadoop.io.compress BlockCompressorStream BlockCompressorStream

Introduction

On this page you can find an example usage for org.apache.hadoop.io.compress BlockCompressorStream BlockCompressorStream.

Prototype

public BlockCompressorStream(OutputStream out, Compressor compressor, int bufferSize, int compressionOverhead) 

Source Link

Document

Create a BlockCompressorStream.

Usage

From source file:com.hadoop.compression.fourmc.Lz4Codec.java

License:BSD License

public CompressionOutputStream createOutputStream(OutputStream out, Compressor compressor) throws IOException {
    // This codec is backed by native code; refuse to operate without it.
    if (!isNativeLoaded(conf)) {
        throw new RuntimeException("native hadoop-4mc library not available");
    }

    // compressBound() reports the worst-case compressed size of a full block;
    // BlockCompressorStream wants only the overhead beyond the block size.
    final int worstCaseSize = Lz4Compressor.compressBound(LZ4_BUFFER_SIZE);
    final int overhead = worstCaseSize - LZ4_BUFFER_SIZE;
    return new BlockCompressorStream(out, compressor, LZ4_BUFFER_SIZE, overhead);
}

From source file:com.hadoop.compression.fourmc.Lz4HighCodec.java

License:BSD License

public CompressionOutputStream createOutputStream(OutputStream out, Compressor compressor) throws IOException {
    // Native bindings are mandatory for this codec — fail fast if missing.
    if (!isNativeLoaded(conf)) {
        throw new RuntimeException("native hadoop-4mc library not available");
    }

    // Overhead = worst-case compressed size of one full block minus the block size.
    final int worstCaseSize = Lz4HighCompressor.compressBound(LZ4_BUFFER_SIZE);
    final int overhead = worstCaseSize - LZ4_BUFFER_SIZE;
    return new BlockCompressorStream(out, compressor, LZ4_BUFFER_SIZE, overhead);
}

From source file:com.hadoop.compression.fourmc.Lz4MediumCodec.java

License:BSD License

public CompressionOutputStream createOutputStream(OutputStream out, Compressor compressor) throws IOException {
    // Bail out early when the native 4mc library cannot be loaded.
    if (!isNativeLoaded(conf)) {
        throw new RuntimeException("native hadoop-4mc library not available");
    }

    // BlockCompressorStream takes the block size plus the extra space an
    // incompressible block may need (worst-case bound minus block size).
    final int worstCaseSize = Lz4MediumCompressor.compressBound(LZ4_BUFFER_SIZE);
    final int overhead = worstCaseSize - LZ4_BUFFER_SIZE;
    return new BlockCompressorStream(out, compressor, LZ4_BUFFER_SIZE, overhead);
}

From source file:com.hadoop.compression.fourmc.Lz4UltraCodec.java

License:BSD License

public CompressionOutputStream createOutputStream(OutputStream out, Compressor compressor) throws IOException {
    // Compression is delegated to native code; it must be present.
    if (!isNativeLoaded(conf)) {
        throw new RuntimeException("native hadoop-4mc library not available");
    }

    // The stream's fourth argument is the per-block expansion allowance:
    // worst-case compressed size of a full block, minus the block size itself.
    final int worstCaseSize = Lz4UltraCompressor.compressBound(LZ4_BUFFER_SIZE);
    final int overhead = worstCaseSize - LZ4_BUFFER_SIZE;
    return new BlockCompressorStream(out, compressor, LZ4_BUFFER_SIZE, overhead);
}

From source file:com.hadoop.compression.fourmc.ZstdCodec.java

License:BSD License

public CompressionOutputStream createOutputStream(OutputStream out, Compressor compressor) throws IOException {
    // Zstd support lives in the native 4mc library — fail fast without it.
    if (!isNativeLoaded(conf)) {
        throw new RuntimeException("native hadoop-4mc library not available");
    }

    // Overhead passed to BlockCompressorStream is the worst-case growth of
    // one full block beyond its uncompressed size.
    final int worstCaseSize = ZstdCompressor.compressBound(ZSTD_BUFFER_SIZE);
    final int overhead = worstCaseSize - ZSTD_BUFFER_SIZE;
    return new BlockCompressorStream(out, compressor, ZSTD_BUFFER_SIZE, overhead);
}

From source file:com.hadoop.compression.fourmc.ZstdHighCodec.java

License:BSD License

public CompressionOutputStream createOutputStream(OutputStream out, Compressor compressor) throws IOException {
    // No pure-Java fallback exists; the native library is required.
    if (!isNativeLoaded(conf)) {
        throw new RuntimeException("native hadoop-4mc library not available");
    }

    // Per-block overhead = worst-case compressed size - block size.
    final int worstCaseSize = ZstdHighCompressor.compressBound(ZSTD_BUFFER_SIZE);
    final int overhead = worstCaseSize - ZSTD_BUFFER_SIZE;
    return new BlockCompressorStream(out, compressor, ZSTD_BUFFER_SIZE, overhead);
}

From source file:com.hadoop.compression.fourmc.ZstdMediumCodec.java

License:BSD License

public CompressionOutputStream createOutputStream(OutputStream out, Compressor compressor) throws IOException {
    // Guard: the native 4mc bindings must be loadable before we hand out a stream.
    if (!isNativeLoaded(conf)) {
        throw new RuntimeException("native hadoop-4mc library not available");
    }

    // Size the stream for the worst case: an incompressible block may grow,
    // and BlockCompressorStream wants that growth expressed as an overhead.
    final int worstCaseSize = ZstdMediumCompressor.compressBound(ZSTD_BUFFER_SIZE);
    final int overhead = worstCaseSize - ZSTD_BUFFER_SIZE;
    return new BlockCompressorStream(out, compressor, ZSTD_BUFFER_SIZE, overhead);
}

From source file:com.hadoop.compression.fourmc.ZstdUltraCodec.java

License:BSD License

public CompressionOutputStream createOutputStream(OutputStream out, Compressor compressor) throws IOException {
    // Fail fast when the native 4mc bindings are not available.
    if (!isNativeLoaded(conf)) {
        throw new RuntimeException("native hadoop-4mc library not available");
    }

    // FIX: this codec previously sized its blocks with LZ4_BUFFER_SIZE, unlike
    // every other Zstd codec in this family (ZstdCodec, ZstdHighCodec,
    // ZstdMediumCodec), which all use ZSTD_BUFFER_SIZE — an apparent
    // copy-paste slip from the LZ4 codecs. Using the LZ4 constant here can
    // mis-size the worst-case bound for zstd blocks.
    // NOTE(review): confirm ZSTD_BUFFER_SIZE is declared in this class, as it
    // is in the sibling Zstd codecs.
    final int worstCaseSize = ZstdUltraCompressor.compressBound(ZSTD_BUFFER_SIZE);
    final int overhead = worstCaseSize - ZSTD_BUFFER_SIZE;
    return new BlockCompressorStream(out, compressor, ZSTD_BUFFER_SIZE, overhead);
}

From source file:com.hadoop.compression.lzo.LzoCodec.java

License:Open Source License

public CompressionOutputStream createOutputStream(OutputStream out, Compressor compressor) throws IOException {
    // Ensure the native-lzo library has been loaded and initialized first.
    if (!isNativeLzoLoaded(conf)) {
        throw new RuntimeException("native-lzo library not available");
    }

    /*
     * Worst-case expansion of incompressible data, per the LZO FAQ
     * (http://www.oberhumer.com/opensource/lzo/lzofaq.php):
     *
     *   LZO1 family (LZO1, LZO1A, LZO1B, LZO1C, LZO1F, LZO1X, LZO1Y, LZO1Z):
     *       output_block_size = input_block_size + (input_block_size / 16) + 64 + 3
     *       (about 106% for a large block size)
     *
     *   LZO2A:
     *       output_block_size = input_block_size + (input_block_size / 8) + 128 + 3
     */

    // Resolve the configured strategy (default LZO1X_1) and block size.
    LzoCompressor.CompressionStrategy strategy = LzoCompressor.CompressionStrategy.valueOf(
            conf.get("io.compression.codec.lzo.compressor", LzoCompressor.CompressionStrategy.LZO1X_1.name()));
    int bufferSize = conf.getInt("io.compression.codec.lzo.buffersize", 64 * 1024);

    // Apply the matching formula above; >> 4 is /16 and >> 3 is /8.
    int compressionOverhead;
    if (strategy.name().contains("LZO1")) {
        compressionOverhead = (bufferSize >> 4) + 64 + 3;
    } else {
        compressionOverhead = (bufferSize >> 3) + 128 + 3;
    }

    return new BlockCompressorStream(out, compressor, bufferSize, compressionOverhead);
}

From source file:io.github.dlmarion.clowncar.hdfs.TestBloscCompressorDecompressor.java

License:Apache License

@Test
public void testCompressorDecompressorEmptyStreamLogic() {
    // Verifies that compressing an empty stream yields only the 4-byte block
    // header, and that decompressing that output immediately reports EOF.
    ByteArrayInputStream bytesIn = null;
    ByteArrayOutputStream bytesOut = null;
    byte[] buf = null;
    BlockDecompressorStream blockDecompressorStream = null;
    try {
        Configuration conf = new Configuration(false);
        conf.set(BloscCompressor.COMPRESSOR_NAME_KEY, compressor);
        conf.set(BloscCompressor.COMPRESSION_LEVEL_KEY, Integer.toString(level));
        conf.set(BloscCompressor.BYTES_FOR_TYPE_KEY, Integer.toString(Integer.BYTES));
        conf.set(BloscCompressor.SHUFFLE_TYPE_KEY, Integer.toString(shuffle));
        conf.set(BloscCompressor.NUM_THREADS_KEY, Integer.toString(threads));
        // compress empty stream
        bytesOut = new ByteArrayOutputStream();
        BlockCompressorStream blockCompressorStream = new BlockCompressorStream(bytesOut,
                new BloscCompressor(conf), 1024, 0);
        // close without write
        blockCompressorStream.close();
        // check compressed output
        buf = bytesOut.toByteArray();
        assertEquals("empty stream compressed output size != 4", 4, buf.length);
        // use compressed output as input for decompression
        bytesIn = new ByteArrayInputStream(buf);
        // create decompression stream
        blockDecompressorStream = new BlockDecompressorStream(bytesIn, new BloscDecompressor(), 1024);
        // no byte is available because stream was closed
        assertEquals("return value is not -1", -1, blockDecompressorStream.read());
    } catch (Exception e) {
        e.printStackTrace();
        fail("testCompressorDecompressorEmptyStreamLogic ex error !!!" + e.getMessage());
    } finally {
        // FIX: the original cleanup closed all three resources in a single
        // try guarded by blockDecompressorStream != null, so (a) a failure
        // closing one stream skipped the rest, and (b) nothing was closed
        // when construction of the decompressor stream itself failed.
        // Close each resource independently instead.
        if (bytesIn != null) {
            try {
                bytesIn.close();
            } catch (IOException ignored) {
                // best-effort cleanup in a test; nothing useful to do here
            }
        }
        if (bytesOut != null) {
            try {
                bytesOut.close();
            } catch (IOException ignored) {
                // best-effort cleanup in a test
            }
        }
        if (blockDecompressorStream != null) {
            try {
                blockDecompressorStream.close();
            } catch (IOException ignored) {
                // best-effort cleanup in a test
            }
        }
    }
}