Example usage for java.util.zip GZIPOutputStream GZIPOutputStream(OutputStream)

Introduction

On this page you can find example usage of the java.util.zip GZIPOutputStream(OutputStream) constructor, collected from open-source projects.

Prototype

public GZIPOutputStream(OutputStream out) throws IOException 

Documentation

Creates a new output stream with a default buffer size.
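
Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the listed projects) showing the single-argument constructor wrapping an in-memory stream; the class name and sample data are placeholders.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.zip.GZIPOutputStream;

public class GzipExample {
    public static byte[] gzip(byte[] data) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        // The one-argument constructor uses the default internal buffer size.
        try (GZIPOutputStream gzos = new GZIPOutputStream(out)) {
            gzos.write(data);
        } // closing (or finishing) the stream writes the GZIP trailer
        return out.toByteArray();
    }

    public static void main(String[] args) throws IOException {
        byte[] compressed = gzip("hello gzip".getBytes(StandardCharsets.UTF_8));
        System.out.println("Compressed to " + compressed.length + " bytes");
    }
}

Note that the compressed data is only complete after close() or finish() has been called; reading the underlying stream earlier yields a truncated GZIP entry.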

Usage

From source file:com.amazonaws.services.logs.connectors.kinesis.KinesisTransformer.java

private static byte[] compress(byte[] data) throws IOException {
    byte[] buffer = new byte[1024];

    try (ByteArrayInputStream in = new ByteArrayInputStream(data)) {
        try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
            try (GZIPOutputStream gzos = new GZIPOutputStream(out)) {
                int len;
                while ((len = in.read(buffer)) > 0) {
                    gzos.write(buffer, 0, len);
                }
                gzos.flush();
            }
            return out.toByteArray();
        }
    }
}

From source file:NettyServerHandler.java

public static String compress(String str) throws IOException {
    if (str == null || str.length() == 0) {
        return str;
    }
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    GZIPOutputStream gzip = new GZIPOutputStream(out);
    gzip.write(str.getBytes());
    gzip.close();
    String outStr = out.toString("ISO-8859-1");
    return outStr;
}
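
For reference only (this method is not part of the original source file), a matching decompressor would convert the ISO-8859-1 string back to bytes and read it through a GZIPInputStream. ISO-8859-1 works here because it maps every byte value to a character one-to-one, so the compressed bytes survive the String round trip. A minimal sketch under that assumption:

public static String decompress(String str) throws IOException {
    if (str == null || str.length() == 0) {
        return str;
    }
    // ISO-8859-1 preserves all 256 byte values, so this recovers the exact compressed bytes.
    byte[] compressed = str.getBytes("ISO-8859-1");
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    try (GZIPInputStream gzip = new GZIPInputStream(new ByteArrayInputStream(compressed))) {
        byte[] buffer = new byte[1024];
        int len;
        while ((len = gzip.read(buffer)) > 0) {
            out.write(buffer, 0, len);
        }
    }
    // Matches the platform-default charset used by getBytes() in compress() above.
    return out.toString();
}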

From source file:com.epam.wilma.test.client.HttpRequestSender.java

private InputStream encode(final InputStream inputStream) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    GZIPOutputStream gout = new GZIPOutputStream(baos);
    //... Code to read from your original uncompressed data and write to gout.
    IOUtils.copy(inputStream, gout);
    gout.finish();
    //Convert to InputStream.
    return new ByteArrayInputStream(baos.toByteArray());
}

From source file:com.mulesoft.jockey.maven.GenerateMojo.java

private void createTarGz(File distDir) throws MojoExecutionException {
    File output = new File(buildDirectory, distributionName + ".tar.gz");
    try {
        final OutputStream out = new FileOutputStream(output);
        TarArchiveOutputStream os = new TarArchiveOutputStream(new GZIPOutputStream(out));
        os.setLongFileMode(TarArchiveOutputStream.LONGFILE_GNU);
        copyArchiveFile(distDir, os, false);

        os.finish();

        os.close();
        out.close();
    } catch (IOException e) {
        throw new MojoExecutionException("Could not create tar.gz file.", e);
    }
    projectHelper.attachArtifact(project, "tar.gz", "", output);
}

From source file:com.panet.imeta.trans.steps.sort.SortRows.java

private boolean addBuffer(RowMetaInterface rowMeta, Object[] r) throws KettleException {
    if (r != null) {
        // Do we need to convert binary string keys?
        //
        for (int i = 0; i < data.fieldnrs.length; i++) {
            if (data.convertKeysToNative[i]) {
                int index = data.fieldnrs[i];
                r[index] = rowMeta.getValueMeta(index).convertBinaryStringToNativeType((byte[]) r[index]);
            }
        }

        // Save row
        // 
        data.buffer.add(r);
    }
    if (data.files.size() == 0 && r == null) // No more records: sort buffer
    {
        quickSort(data.buffer);
    }

    // Check the free memory every 1000 rows...
    //
    data.freeCounter++;
    if (data.sortSize <= 0 && data.freeCounter >= 1000) {
        data.freeMemoryPct = Const.getPercentageFreeMemory();
        data.freeCounter = 0;

        if (log.isDetailed()) {
            data.memoryReporting++;
            if (data.memoryReporting >= 10) {
                logDetailed("Available memory : " + data.freeMemoryPct + "%");
                data.memoryReporting = 0;
            }
        }
    }

    boolean doSort = data.buffer.size() == data.sortSize; // Buffer is full: sort & dump to disk
    doSort |= data.files.size() > 0 && r == null && data.buffer.size() > 0; // No more records: join from disk 
    doSort |= data.freeMemoryPctLimit > 0 && data.freeMemoryPct < data.freeMemoryPctLimit
            && data.buffer.size() >= data.minSortSize;

    // time to sort the buffer and write the data to disk...
    //
    if (doSort) {
        // First sort the rows in buffer[]
        quickSort(data.buffer);

        // Then write them to disk...
        DataOutputStream dos;
        GZIPOutputStream gzos;
        int p;
        Object[] previousRow = null;

        try {
            FileObject fileObject = KettleVFS.createTempFile(meta.getPrefix(), ".tmp",
                    environmentSubstitute(meta.getDirectory()));

            data.files.add(fileObject); // Remember the files!
            OutputStream outputStream = KettleVFS.getOutputStream(fileObject, false);
            if (data.compressFiles) {
                gzos = new GZIPOutputStream(new BufferedOutputStream(outputStream));
                dos = new DataOutputStream(gzos);
            } else {
                dos = new DataOutputStream(new BufferedOutputStream(outputStream, 500000));
                gzos = null;
            }

            // Just write the data, nothing else
            if (meta.isOnlyPassingUniqueRows()) {
                int index = 0;
                while (index < data.buffer.size()) {
                    Object[] row = data.buffer.get(index);
                    if (previousRow != null) {
                        int result = data.outputRowMeta.compare(row, previousRow, data.fieldnrs);
                        if (result == 0) {
                            data.buffer.remove(index); // remove this duplicate element as requested
                            if (log.isRowLevel())
                                logRowlevel("Duplicate row removed: " + data.outputRowMeta.getString(row));
                        } else {
                            index++;
                        }
                    } else {
                        index++;
                    }
                    previousRow = row;
                }
            }

            // How many records do we have left?
            data.bufferSizes.add(data.buffer.size());

            for (p = 0; p < data.buffer.size(); p++) {
                data.outputRowMeta.writeData(dos, data.buffer.get(p));
            }

            if (data.sortSize < 0) {
                if (data.buffer.size() > data.minSortSize) {
                    data.minSortSize = data.buffer.size(); // if we did it once, we can do it again.

                    // Memory usage goes up over time, even with garbage collection
                    // We need pointers, file handles, etc.
                    // As such, we're going to lower the min sort size a bit
                    //
                    data.minSortSize = (int) Math.round((double) data.minSortSize * 0.90);
                }
            }

            // Clear the list
            data.buffer.clear();

            // Close temp-file
            dos.close(); // close data stream
            if (gzos != null) {
                gzos.close(); // close gzip stream
            }
            outputStream.close(); // close file stream

            // How much memory do we have left?
            //
            data.freeMemoryPct = Const.getPercentageFreeMemory();
            data.freeCounter = 0;
            if (data.sortSize <= 0) {
                if (log.isDetailed())
                    logDetailed("Available memory : " + data.freeMemoryPct + "%");
            }

        } catch (Exception e) {
            throw new KettleException("Error processing temp-file!", e);
        }

        data.getBufferIndex = 0;
    }

    return true;
}

From source file:net.daboross.bukkitdev.skywars.world.providers.ProtobufStorageProvider.java

@Override
public void loadArena(final SkyArenaConfig arena, final boolean forceReload) throws IOException {
    if (forceReload || cache.containsKey(arena.getArenaName())) {
        plugin.getLogger().log(Level.WARNING, "Updating arena blocks cache for arena ''{0}''.",
                arena.getArenaName());
    }
    boolean createdNewCache = false;
    Path cachePath = plugin.getArenaPath().resolve(arena.getArenaName() + ".blocks");
    BlockStorage.BlockArea area = null;
    if (!forceReload) {
        try (InputStream inputStream = new FileInputStream(cachePath.toFile())) {
            try (GZIPInputStream gzipInputStream = new GZIPInputStream(inputStream)) {
                area = BlockStorage.BlockArea.parseFrom(gzipInputStream);
            }
        } catch (FileNotFoundException ignored) {
        }
    }
    if (area == null) {
        try {
            area = createCache(arena);
            createdNewCache = true;
        } catch (IllegalStateException ex1) {
            if (ex1.getMessage().contains("Origin location not listed in configuration")) {
                try (InputStream inputStream = plugin
                        .getResourceAsStream("arenas/" + arena.getArenaName() + ".blocks")) {
                    try (GZIPInputStream gzipInputStream = new GZIPInputStream(inputStream)) {
                        area = BlockStorage.BlockArea.parseFrom(gzipInputStream);
                    }
                    plugin.getLogger().log(Level.INFO, "Loaded pre-built blocks cache file for arena {0}.",
                            arena.getArenaName());
                } catch (FileNotFoundException ex) {
                    throw new IOException(
                            "No origin listed in configuration, but no blocks file found in SkyWars jar file either!",
                            ex);
                }
            } else {
                throw ex1;
            }
        }
        try (OutputStream outputStream = new FileOutputStream(cachePath.toFile())) {
            try (GZIPOutputStream gzipOutputStream = new GZIPOutputStream(outputStream)) {
                area.writeTo(gzipOutputStream);
            }
        }
    }

    // We turn the BlockStorage.BlockArea into a MemoryBlockArea here, not above, because MemoryBlockArea can't write to a file.
    MemoryBlockArea memoryBlockArea = new MemoryBlockArea(area);

    if (createdNewCache || arena.getChestConfiguration() == null) {
        loadChests(arena, memoryBlockArea);
    }
    cache.put(arena.getArenaName(), memoryBlockArea);
}

From source file:com.nextdoor.bender.ipc.es.ElasticSearchTransporterTest.java

@Test(expected = TransportException.class)
public void testGzipErrorsResponse() throws TransportException, IOException {
    byte[] respPayload = getResponse().getBytes(StandardCharsets.UTF_8);

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    GZIPOutputStream os = new GZIPOutputStream(baos);
    os.write(respPayload);
    os.close();
    byte[] compressedResponse = baos.toByteArray();

    HttpClient client = getMockClientWithResponse(compressedResponse, ContentType.DEFAULT_BINARY,
            HttpStatus.SC_OK);
    ElasticSearchTransport transport = new ElasticSearchTransport(client, true);

    try {
        transport.sendBatch("foo".getBytes());
    } catch (Exception e) {
        assertEquals("es call failed because expected failure", e.getCause().getMessage());
        throw e;
    }
}

From source file:org.icgc.dcc.release.job.document.task.CreateVCFFileTask.java

@SneakyThrows
private static OutputStream createOutputStream(TaskContext taskContext) {
    val vcfPath = resolveVcfPath(taskContext.getJobContext());

    return new GZIPOutputStream(new BufferedOutputStream(taskContext.getFileSystem().create(vcfPath)));
}

From source file:dk.netarkivet.common.utils.IntegrityTester.java

/**
 * This tests that we are actually able to write and read more than 4GB worth of data using GZip.
 *
 * @throws IOException
 */
@Test
public void failingTestGzipLargeFile() throws IOException {
    byte[] block = new byte[BLOCKSIZE];
    File largeFile = new File(WORKING, LARGE_FILE);
    OutputStream os = new GZIPOutputStream(new BufferedOutputStream(new FileOutputStream(largeFile)));
    System.out.println("Creating " + 5 * LARGE + " bytes file " + "- this will take a long time");
    block[1] = 'a';
    block[2] = 'b';
    for (long l = 0; l < 5 * LARGE / ((long) BLOCKSIZE) + 1L; l++) {
        os.write(block);
    }
    os.close();

    InputStream is = new LargeFileGZIPInputStream(new BufferedInputStream(new FileInputStream(largeFile)));
    System.out.println("Reading " + 5 * LARGE + " bytes file " + "- this will take a long time");
    byte[] buf = new byte[BLOCKSIZE];
    for (long l = 0; l < 5 * LARGE / ((long) BLOCKSIZE) + 1L; l++) {
        int totalRead = 0;
        int read = 0;
        while (totalRead != block.length && read != -1) {
            read = is.read(buf, totalRead, buf.length - totalRead);
            totalRead += read;
        }
        assertEquals("Should have read full length of block " + l, block.length, totalRead);
        for (int i = 0; i < 8; i++) {
            assertEquals("Read block " + l + " should be equals at " + i, block[i], buf[i]);
        }
    }
    assertEquals("This should be the end of the stream.", -1, is.read());
    is.close();
}

From source file:com.ebay.erl.mobius.core.mapred.ConfigurableJob.java

private static void writePartitionFile(JobConf job, Sampler sampler) {
    try {
        ////////////////////////////////////////////////
        // first, getting samples from the data sources
        ////////////////////////////////////////////////
        LOGGER.info("Running local sampling for job [" + job.getJobName() + "]");
        InputFormat inf = job.getInputFormat();
        Object[] samples = sampler.getSample(inf, job);
        LOGGER.info("Samples retrieved, sorting...");

        ////////////////////////////////////////////////
        // sort the samples
        ////////////////////////////////////////////////
        RawComparator comparator = job.getOutputKeyComparator();
        Arrays.sort(samples, comparator);

        if (job.getBoolean("mobius.print.sample", false)) {
            PrintWriter pw = new PrintWriter(
                    new OutputStreamWriter(new GZIPOutputStream(new BufferedOutputStream(new FileOutputStream(
                            new File(job.get("mobius.sample.file", "./samples.txt.gz")))))));
            for (Object obj : samples) {
                pw.println(obj);
            }
            pw.flush();
            pw.close();
        }

        ////////////////////////////////////////////////
        // start to write partition files
        ////////////////////////////////////////////////

        FileSystem fs = FileSystem.get(job);
        Path partitionFile = fs.makeQualified(new Path(TotalOrderPartitioner.getPartitionFile(job)));
        while (fs.exists(partitionFile)) {
            partitionFile = new Path(partitionFile.toString() + "." + System.currentTimeMillis());
        }
        fs.deleteOnExit(partitionFile);
        TotalOrderPartitioner.setPartitionFile(job, partitionFile);
        LOGGER.info("write partition file to:" + partitionFile.toString());

        int reducersNbr = job.getNumReduceTasks();
        Set<Object> wroteSamples = new HashSet<Object>();

        SequenceFile.Writer writer = SequenceFile.createWriter(fs, job, partitionFile, Tuple.class,
                NullWritable.class);

        float avgReduceSize = samples.length / reducersNbr;

        int lastBegin = 0;
        for (int i = 0; i < samples.length;) {
            // trying to distribute the load for every reducer evenly,
            // dividing the <code>samples</code> into a set of blocks
            // separated by boundaries, objects that selected from the
            // <code>samples</code> array, and each blocks should have
            // about the same size.

            // find the last index of element that equals to samples[i], as
            // such element might appear multiple times in the samples.
            int upperBound = Util.findUpperBound(samples, samples[i], comparator);

            int lowerBound = i;//Util.findLowerBound(samples, samples[i], comparator);

            // the repeat time of samples[i], if the key itself is too big
            // select it as boundary
            int currentElemSize = upperBound - lowerBound + 1;

            if (currentElemSize > avgReduceSize * 2) // greater than two times of average reducer size
            {
                // the current element is too big, greater than
                // two times of the <code>avgReduceSize</code>, 
                // put itself as boundary
                writer.append(((DataJoinKey) samples[i]).getKey(), NullWritable.get());
                wroteSamples.add(((DataJoinKey) samples[i]).getKey());
                //pw.println(samples[i]);

                // immediate put the next element to the boundary,
                // the next element starts at <code> upperBound+1
                // </code>, to prevent the current one consume even 
                // more.
                if (upperBound + 1 < samples.length) {
                    writer.append(((DataJoinKey) samples[upperBound + 1]).getKey(), NullWritable.get());
                    wroteSamples.add(((DataJoinKey) samples[upperBound + 1]).getKey());
                    //pw.println(samples[upperBound+1]);

                    // move on to the next element of <code>samples[upperBound+1]/code>
                    lastBegin = Util.findUpperBound(samples, samples[upperBound + 1], comparator) + 1;
                    i = lastBegin;
                } else {
                    break;
                }
            } else {
                // current element is small enough to be consider
                // with previous group
                int size = upperBound - lastBegin;
                if (size > avgReduceSize) {
                    // by including the current elements, we have
                    // found a block that's big enough, select it
                    // as boundary
                    writer.append(((DataJoinKey) samples[i]).getKey(), NullWritable.get());
                    wroteSamples.add(((DataJoinKey) samples[i]).getKey());
                    //pw.println(samples[i]);

                    i = upperBound + 1;
                    lastBegin = i;
                } else {
                    i = upperBound + 1;
                }
            }
        }

        writer.close();

        // if the number of wrote samples doesn't equals to number of
        // reducer minus one, then it means the key spaces is too small
        // hence TotalOrderPartitioner won't work, it works only if 
        // the partition boundaries are distinct.
        //
        // we need to change the number of reducers
        if (wroteSamples.size() + 1 != reducersNbr) {
            LOGGER.info("Write complete, but key space is too small, sample size=" + wroteSamples.size()
                    + ", reducer size:" + (reducersNbr));
            LOGGER.info("Set the reducer size to:" + (wroteSamples.size() + 1));

            // add 1 because the wrote samples define boundary, ex, if
            // the sample size is two with two elements [300, 1000], then
            // there should be 3 reducers: one for handling i<300, one
            // for 300<=i<1000, and another one for 1000<=i.
            job.setNumReduceTasks((wroteSamples.size() + 1));
        }

        samples = null;
    } catch (IOException e) {
        LOGGER.error(e.getMessage(), e);
        throw new RuntimeException(e);
    }
}