Example usage for java.util.zip GZIPOutputStream GZIPOutputStream

Introduction

On this page you can find usage examples for the java.util.zip GZIPOutputStream(OutputStream out) constructor.

Prototype

public GZIPOutputStream(OutputStream out) throws IOException 

Document

Creates a new output stream with a default buffer size.
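A minimal sketch of typical usage (the file name and payload below are illustrative only): wrap any OutputStream, and let try-with-resources close the GZIPOutputStream so the gzip trailer is written.

import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.zip.GZIPOutputStream;

public class GzipExample {
    public static void main(String[] args) throws IOException {
        // Closing the GZIPOutputStream finishes the deflate stream and writes the gzip trailer.
        try (GZIPOutputStream gzip = new GZIPOutputStream(new FileOutputStream("example.txt.gz"))) {
            gzip.write("hello, gzip".getBytes(StandardCharsets.UTF_8));
        }
    }
}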

Usage

From source file:com.yoncabt.ebr.logger.fs.FileSystemReportLogger.java

@Override
public void logReport(ReportRequest request, ReportOutputFormat outputFormat, InputStream reportData)
        throws IOException {
    String uuid = request.getUuid();
    Map<String, Object> reportParams = request.getReportParams();
    File saveDir = new File(EBRConf.INSTANCE.getValue(EBRParams.REPORT_LOGGER_FSLOGGER_PATH, "/tmp"));
    saveDir.mkdirs();
    boolean compress = EBRConf.INSTANCE.getValue(EBRParams.REPORT_LOGGER_FSLOGGER_COMPRESS, true);
    OutputStream osReport;
    OutputStream osParams;
    if (compress) {
        osReport = new GZIPOutputStream(new FileOutputStream(new File(saveDir, uuid + ".gz")));
        osParams = new GZIPOutputStream(new FileOutputStream(new File(saveDir, uuid + ".json.gz")));
    } else {
        osReport = new FileOutputStream(new File(saveDir, uuid));
        osParams = new FileOutputStream(new File(saveDir, uuid + ".json"));
    }
    IOUtils.copy(reportData, osReport);
    JSONObject jo = new JSONObject(reportParams);
    try (OutputStreamWriter osw = new OutputStreamWriter(osParams, "utf-8")) {
        jo.write(osw);
    }
    osReport.close();
    osParams.close();
}

From source file:ezbake.deployer.utilities.Utilities.java

public static void appendFilesInTarArchive(OutputStream output, Iterable<ArtifactDataEntry> filesToAdd)
        throws DeploymentException {
    ArchiveStreamFactory asf = new ArchiveStreamFactory();
    try (GZIPOutputStream gzs = new GZIPOutputStream(output)) {
        try (ArchiveOutputStream aos = asf.createArchiveOutputStream(ArchiveStreamFactory.TAR, gzs)) {
            for (ArtifactDataEntry entry : filesToAdd) {
                aos.putArchiveEntry(entry.getEntry());
                IOUtils.write(entry.getData(), aos);
                aos.closeArchiveEntry();
            }
        }
    } catch (ArchiveException | IOException e) {
        log.error(e.getMessage(), e);
        throw new DeploymentException(e.getMessage());
    }
}

From source file:fr.insalyon.creatis.vip.applicationimporter.server.business.TargzUtils.java

public static void createTargz(List<File> pathIn, String pathOut) throws BusinessException {
    try {

        FileOutputStream fos = new FileOutputStream(pathOut);
        TarArchiveOutputStream tos = new TarArchiveOutputStream(
                new GZIPOutputStream(new BufferedOutputStream(fos)));
        for (File entry : pathIn) {
            addFileToTarGz(tos, entry, null);
        }
        tos.finish();
        tos.close();
    } catch (IOException ex) {
        throw new BusinessException(ex);
    }
}

From source file:ml.shifu.shifu.core.dtrain.dt.BinaryDTSerializer.java

public static void save(ModelConfig modelConfig, List<ColumnConfig> columnConfigList,
        List<List<TreeNode>> baggingTrees, String loss, int inputCount, OutputStream output)
        throws IOException {
    DataOutputStream fos = null;

    try {
        fos = new DataOutputStream(new GZIPOutputStream(output));
        // version
        fos.writeInt(CommonConstants.TREE_FORMAT_VERSION);
        fos.writeUTF(modelConfig.getAlgorithm());
        fos.writeUTF(loss);
        fos.writeBoolean(modelConfig.isClassification());
        fos.writeBoolean(modelConfig.getTrain().isOneVsAll());
        fos.writeInt(inputCount);

        Map<Integer, String> columnIndexNameMapping = new HashMap<Integer, String>();
        Map<Integer, List<String>> columnIndexCategoricalListMapping = new HashMap<Integer, List<String>>();
        Map<Integer, Double> numericalMeanMapping = new HashMap<Integer, Double>();
        for (ColumnConfig columnConfig : columnConfigList) {
            if (columnConfig.isFinalSelect()) {
                columnIndexNameMapping.put(columnConfig.getColumnNum(), columnConfig.getColumnName());
            }
            if (columnConfig.isCategorical() && CollectionUtils.isNotEmpty(columnConfig.getBinCategory())) {
                columnIndexCategoricalListMapping.put(columnConfig.getColumnNum(),
                        columnConfig.getBinCategory());
            }

            if (columnConfig.isNumerical() && columnConfig.getMean() != null) {
                numericalMeanMapping.put(columnConfig.getColumnNum(), columnConfig.getMean());
            }
        }

        if (columnIndexNameMapping.size() == 0) {
            boolean hasCandidates = CommonUtils.hasCandidateColumns(columnConfigList);
            for (ColumnConfig columnConfig : columnConfigList) {
                if (CommonUtils.isGoodCandidate(columnConfig, hasCandidates)) {
                    columnIndexNameMapping.put(columnConfig.getColumnNum(), columnConfig.getColumnName());
                }
            }
        }

        // serialize numericalMeanMapping
        fos.writeInt(numericalMeanMapping.size());
        for (Entry<Integer, Double> entry : numericalMeanMapping.entrySet()) {
            fos.writeInt(entry.getKey());
            // some features have a null mean value (not selected); write 0d to avoid an NPE
            fos.writeDouble(entry.getValue() == null ? 0d : entry.getValue());
        }
        // serialize columnIndexNameMapping
        fos.writeInt(columnIndexNameMapping.size());
        for (Entry<Integer, String> entry : columnIndexNameMapping.entrySet()) {
            fos.writeInt(entry.getKey());
            fos.writeUTF(entry.getValue());
        }
        // serialize columnIndexCategoricalListMapping
        fos.writeInt(columnIndexCategoricalListMapping.size());
        for (Entry<Integer, List<String>> entry : columnIndexCategoricalListMapping.entrySet()) {
            List<String> categories = entry.getValue();
            if (categories != null) {
                fos.writeInt(entry.getKey());
                fos.writeInt(categories.size());
                for (String category : categories) {
                    // writeUTF() cannot encode arbitrarily long strings (its encoded length is capped),
                    // so for very long category values write a marker followed by the raw UTF-8 bytes;
                    // the read side must check the marker and use readByte instead of readUTF accordingly.
                    if (category.length() < Constants.MAX_CATEGORICAL_VAL_LEN) {
                        fos.writeUTF(category);
                    } else {
                        fos.writeShort(UTF_BYTES_MARKER); // marker here
                        byte[] bytes = category.getBytes("UTF-8");
                        fos.writeInt(bytes.length);
                        for (int i = 0; i < bytes.length; i++) {
                            fos.writeByte(bytes[i]);
                        }
                    }
                }
            }
        }

        Map<Integer, Integer> columnMapping = getColumnMapping(columnConfigList);
        fos.writeInt(columnMapping.size());
        for (Entry<Integer, Integer> entry : columnMapping.entrySet()) {
            fos.writeInt(entry.getKey());
            fos.writeInt(entry.getValue());
        }

        // since model version 4 (>= 4), IndependentTreeModel supports bagging; for plain RF/GBT the bagging size written here is 1
        fos.writeInt(baggingTrees.size());
        for (int i = 0; i < baggingTrees.size(); i++) {
            List<TreeNode> trees = baggingTrees.get(i);
            int treeLength = trees.size();
            fos.writeInt(treeLength);
            for (TreeNode treeNode : trees) {
                treeNode.write(fos);
            }
        }
    } catch (IOException e) {
        LOG.error("Error in writing output.", e);
    } finally {
        IOUtils.closeStream(fos);
    }
}

From source file:com.openshift.client.utils.TarFileTestUtils.java

/**
 * Replaces the given file(-name), that might exist anywhere nested in the
 * given archive, by a new entry with the given content. The replacement is
 * faked by adding a new entry into the archive which will overwrite the
 * existing (older one) on extraction.
 * 
 * @param name
 *            the name of the file to replace (no path required)
 * @param newContent
 *            the content of the replacement file
 * @param in
 * @return
 * @throws IOException
 * @throws ArchiveException
 * @throws CompressorException
 */
public static File fakeReplaceFile(String name, String newContent, InputStream in) throws IOException {
    Assert.notNull(name);
    Assert.notNull(in);

    File newArchive = FileUtils.createRandomTempFile(".tar.gz");
    newArchive.deleteOnExit();

    TarArchiveOutputStream newArchiveOut = new TarArchiveOutputStream(
            new GZIPOutputStream(new FileOutputStream(newArchive)));
    newArchiveOut.setLongFileMode(TarArchiveOutputStream.LONGFILE_GNU);

    TarArchiveInputStream archiveIn = new TarArchiveInputStream(new GZIPInputStream(in));
    String pathToReplace = null;
    try {
        // copy the existing entries
        for (ArchiveEntry nextEntry = null; (nextEntry = archiveIn.getNextEntry()) != null;) {
            if (nextEntry.getName().endsWith(name)) {
                pathToReplace = nextEntry.getName();
            }
            newArchiveOut.putArchiveEntry(nextEntry);
            IOUtils.copy(archiveIn, newArchiveOut);
            newArchiveOut.closeArchiveEntry();
        }

        if (pathToReplace == null) {
            throw new IllegalStateException("Could not find file " + name + " in the given archive.");
        }
        TarArchiveEntry newEntry = new TarArchiveEntry(pathToReplace);
        // the entry size must match the byte length of the content, not its character length
        byte[] newContentBytes = newContent.getBytes();
        newEntry.setSize(newContentBytes.length);
        newArchiveOut.putArchiveEntry(newEntry);
        IOUtils.copy(new ByteArrayInputStream(newContentBytes), newArchiveOut);
        newArchiveOut.closeArchiveEntry();

        return newArchive;
    } finally {
        newArchiveOut.finish();
        newArchiveOut.flush();
        StreamUtils.close(archiveIn);
        StreamUtils.close(newArchiveOut);
    }
}

From source file:org.jboss.capedwarf.connect.io.GzipContentProducer.java

public void writeTo(OutputStream outstream) throws IOException {
    if (GzipOptionalSerializator.isGzipEnabled()) {
        GZIPOutputStream gzip = new GZIPOutputStream(outstream);
        doWriteTo(gzip);
        gzip.finish();
    } else {
        doWriteTo(outstream);
    }
}

From source file:hr.fer.zemris.vhdllab.remoting.GzipHttpInvokerServiceExporter.java

@Override
protected OutputStream decorateOutputStream(HttpServletRequest request, HttpServletResponse response,
        OutputStream os) throws IOException {
    return new GZIPOutputStream(os);
}

From source file:com.epam.wilma.test.server.compress.gzip.GzipCompressor.java

/**
 * Compresses an {@link InputStream} object into gzip.
 * @param source the input stream that will be compressed.
 * @return a {@link ByteArrayOutputStream} containing the gzipped byte array.
 */
public ByteArrayOutputStream compress(final InputStream source) {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    try {
        GZIPOutputStream gout = new GZIPOutputStream(baos);
        // copy the original uncompressed data into the gzip stream
        IOUtils.copy(source, gout);
        gout.finish();
        gout.close();
    } catch (IOException e) {
        throw new SystemException("error", e);
    }
    return baos;
}

From source file:com.orange.clara.cloud.servicedbdumper.filer.compression.GzipCompressing.java

@Async
public Future<Boolean> gziptIt(InputStream inputStream, OutputStream outputStream) throws IOException {
    logger.debug("Start compressing...");
    GZIPOutputStream gout = new GZIPOutputStream(outputStream);
    ByteStreams.copy(inputStream, gout);
    gout.flush();
    gout.close();
    outputStream.flush();
    outputStream.close();
    inputStream.close();
    logger.debug("Finish compressing");
    return new AsyncResult<Boolean>(true);
}

From source file:com.cyberway.issue.io.arc.ARC2WCDX.java

public static Object[] createWcdx(ARCReader reader) {
    reader.setDigest(true);

    String wcdxPath = reader.getReaderIdentifier().replaceAll("\\.arc(\\.gz)?$", ".wcdx.gz");
    File wcdxFile = new File(wcdxPath + ".open");
    PrintStream writer = null;
    long count = 0;
    try {
        writer = new PrintStream(new GZIPOutputStream(new FileOutputStream(wcdxFile)));

        // write header: legend + timestamp
        StringBuilder legend = new StringBuilder();
        appendField(legend, "CDX");
        appendField(legend, "surt-uri");
        appendField(legend, "b"); // ARC timestamp
        appendField(legend, "http-date");
        appendField(legend, "s"); // status code
        appendField(legend, "m"); // media type
        appendField(legend, "sha1"); // content sha1
        appendField(legend, "g"); // ARC name
        appendField(legend, "V"); // start offset
        appendField(legend, "end-offset"); // TODO: implement
        appendField(legend, "n"); // ARC record length TODO: verify
        appendField(legend, "http-content-length");
        appendField(legend, "http-last-modified");
        appendField(legend, "http-expires");
        appendField(legend, "http-etag");
        appendField(legend, "http-location");
        appendField(legend, "e"); // IP
        appendField(legend, "a"); // original URL
        // WCDX version+creation time: crude version control
        appendField(legend, WCDX_VERSION + "@" + ArchiveUtils.get14DigitDate());
        writer.println(legend.toString());

        Iterator iter = reader.iterator();
        count = 0;
        while (iter.hasNext()) {
            ARCRecord record = (ARCRecord) iter.next();
            record.close();
            ARCRecordMetaData h = (ARCRecordMetaData) record.getHeader();
            Header[] httpHeaders = record.getHttpHeaders();
            if (httpHeaders == null) {
                httpHeaders = new Header[0];
            }
            HeaderGroup hg = new HeaderGroup();
            hg.setHeaders(httpHeaders);
            StringBuilder builder = new StringBuilder();

            // SURT-form URI
            appendField(builder, SURT.fromURI(h.getUrl()));
            // record timestamp ('b')
            appendField(builder, h.getDate());
            // http header date
            appendTimeField(builder, hg.getFirstHeader("Date"));
            // response code ('s')
            appendField(builder, h.getStatusCode());
            // media type ('m')
            appendField(builder, h.getMimetype());
            // content checksum (like 'c', but here Base32 SHA1)
            appendField(builder, record.getDigestStr());
            // arc name ('g')
            appendField(builder, reader.getFileName());
            // compressed start offset ('V')
            appendField(builder, h.getOffset());

            // compressed end offset (?)
            //            appendField(builder,
            //                    reader.getInputStream() instanceof RepositionableStream
            //                    ? ((GzippedInputStream)reader.getInputStream()).vPosition()
            //                    : "-");
            // TODO; leave unavail for now
            appendField(builder, "-");

            // uncompressed (declared in ARC headerline) record length
            appendField(builder, h.getLength());
            // http header content-length
            appendField(builder, hg.getFirstHeader("Content-Length"));

            // http header mod-date
            appendTimeField(builder, hg.getFirstHeader("Last-Modified"));
            // http header expires
            appendTimeField(builder, hg.getFirstHeader("Expires"));

            // http header etag
            appendField(builder, hg.getFirstHeader("ETag"));
            // http header redirect ('Location' header?)
            appendField(builder, hg.getFirstHeader("Location"));
            // ip ('e')
            appendField(builder, h.getIp());
            // original URI
            appendField(builder, h.getUrl());
            // TODO MAYBE - a title from inside content? 

            writer.println(builder.toString());
            count++;
        }
        wcdxFile.renameTo(new File(wcdxPath));
    } catch (IOException e) {
        // soldier on: but leave '.open' wcdx file as indicator of error
        if (!wcdxFile.exists()) {
            try {
                wcdxFile.createNewFile();
            } catch (IOException e1) {
                // TODO Auto-generated catch block
                throw new RuntimeException(e1);
            }
        }
    } catch (RuntimeException e) {
        // soldier on: but leave '.open' wcdx file as indicator of error
        if (!wcdxFile.exists()) {
            try {
                wcdxFile.createNewFile();
            } catch (IOException e1) {
                // TODO Auto-generated catch block
                throw new RuntimeException(e1);
            }
        }
    } finally {
        if (writer != null) {
            writer.close();
        }
    }

    return new Object[] { wcdxPath, count };
}