Example usage for java.util.zip CRC32 reset

Introduction

On this page you can find example usage of java.util.zip.CRC32.reset().

Prototype

@Override
public void reset() 

Document

Resets CRC-32 to its initial value.
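
As a minimal sketch (not taken from the usage examples below), reset() lets a single CRC32 instance be reused to checksum several independent inputs:

import java.util.zip.CRC32;

public class Crc32ResetExample {
    public static void main(String[] args) {
        CRC32 crc = new CRC32();
        for (String s : new String[] { "first", "second" }) {
            crc.reset(); // discard the state left over from the previous input
            crc.update(s.getBytes());
            System.out.println(s + " -> " + Long.toHexString(crc.getValue()));
        }
    }
}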

Usage

From source file:net.librec.util.FileUtil.java

/**
 * Zip a given folder.
 *
 * @param dirPath    a given folder; it must contain only files (no sub-folders)
 * @param filePath   path of the zip file to create
 * @throws Exception if an error occurs
 */
public static void zipFolder(String dirPath, String filePath) throws Exception {
    File outFile = new File(filePath);
    ZipOutputStream zos = new ZipOutputStream(new FileOutputStream(outFile));
    int bytesRead;
    byte[] buffer = new byte[1024];
    CRC32 crc = new CRC32();
    for (File file : listFiles(dirPath)) {
        BufferedInputStream bis = new BufferedInputStream(new FileInputStream(file));
        crc.reset();
        while ((bytesRead = bis.read(buffer)) != -1) {
            crc.update(buffer, 0, bytesRead);
        }
        bis.close();

        // Reopen the stream to read the file again from the beginning
        bis = new BufferedInputStream(new FileInputStream(file));
        ZipEntry entry = new ZipEntry(file.getName());
        entry.setMethod(ZipEntry.STORED);
        entry.setCompressedSize(file.length());
        entry.setSize(file.length());
        entry.setCrc(crc.getValue());
        zos.putNextEntry(entry);
        while ((bytesRead = bis.read(buffer)) != -1) {
            zos.write(buffer, 0, bytesRead);
        }
        bis.close();
    }
    zos.close();

    LOG.debug("Zip file created at: " + outFile.getPath());
}
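
Because each entry uses ZipEntry.STORED, the size, compressed size, and CRC-32 must be set on the entry before putNextEntry is called; ZipOutputStream cannot compute them itself for stored entries written to a plain stream. That is why each file is read twice: once (after crc.reset()) to compute the checksum, and once to copy the bytes into the archive.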

From source file:Main.java

public void doZip(String filename, String zipfilename) throws Exception {
    // Read the whole input file into memory and close the stream.
    FileInputStream fis = new FileInputStream(filename);
    byte[] buf = fis.readAllBytes();
    fis.close();

    CRC32 crc = new CRC32();
    ZipOutputStream s = new ZipOutputStream(new FileOutputStream(zipfilename));
    s.setLevel(6);

    ZipEntry entry = new ZipEntry(filename);
    entry.setSize((long) buf.length);
    crc.reset();
    crc.update(buf);
    entry.setCrc(crc.getValue());
    s.putNextEntry(entry);
    s.write(buf, 0, buf.length);
    s.finish();
    s.close();
}
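
In this and the two doZip variants below, the call to crc.reset() is technically redundant, because a freshly constructed CRC32 already starts at the initial value; it is harmless, though, and keeps the code correct if the CRC32 instance is ever reused.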

From source file:nl.nn.adapterframework.compression.ZipWriter.java

public void writeEntryWithCompletedHeader(String filename, Object contents, boolean close, String charset)
        throws CompressionException, IOException {
    if (StringUtils.isEmpty(filename)) {
        throw new CompressionException("filename cannot be empty");
    }

    byte[] contentBytes = null;
    BufferedInputStream bis = null;
    long size = 0;
    if (contents != null) {
        if (contents instanceof byte[]) {
            contentBytes = (byte[]) contents;
        } else if (contents instanceof InputStream) {
            contentBytes = Misc.streamToBytes((InputStream) contents);
        } else {
            contentBytes = contents.toString().getBytes(charset);
        }
        bis = new BufferedInputStream(new ByteArrayInputStream(contentBytes));
        size = bis.available(); // for a ByteArrayInputStream, the full remaining length
    } else {
        log.warn("contents of zip entry [" + filename + "] is null");
    }

    int bytesRead;
    byte[] buffer = new byte[1024];
    CRC32 crc = new CRC32();
    crc.reset();
    if (bis != null) {
        while ((bytesRead = bis.read(buffer)) != -1) {
            crc.update(buffer, 0, bytesRead);
        }
        bis.close();
    }
    if (contents != null) {
        bis = new BufferedInputStream(new ByteArrayInputStream(contentBytes));
    }
    ZipEntry entry = new ZipEntry(filename);
    entry.setMethod(ZipEntry.STORED);
    entry.setCompressedSize(size);
    entry.setSize(size);
    entry.setCrc(crc.getValue());
    getZipoutput().putNextEntry(entry);
    if (bis != null) {
        while ((bytesRead = bis.read(buffer)) != -1) {
            getZipoutput().write(buffer, 0, bytesRead);
        }
        bis.close();
    }
    getZipoutput().closeEntry();
}
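
As the method name suggests, using ZipEntry.STORED with size and CRC-32 set up front means the entry's local file header is already complete when the data is written, so no data descriptor has to follow it; consumers that parse the zip as a stream can then validate each entry as it arrives.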

From source file:Main.java

public void doZip(String filename, String zipfilename) throws Exception {
    // Read the whole input file into memory and close the stream.
    FileInputStream fis = new FileInputStream(filename);
    byte[] buf = fis.readAllBytes();
    fis.close();

    CRC32 crc = new CRC32();
    ZipOutputStream s = new ZipOutputStream(new FileOutputStream(zipfilename));
    s.setLevel(6);

    ZipEntry entry = new ZipEntry(filename);
    entry.setSize((long) buf.length);
    entry.setMethod(ZipEntry.DEFLATED);
    crc.reset();
    crc.update(buf);
    entry.setCrc(crc.getValue());
    s.putNextEntry(entry);
    s.write(buf, 0, buf.length);
    s.finish();
    s.close();
}

From source file:Main.java

public void doZip(String filename, String zipfilename) throws Exception {
    // Read the whole input file into memory and close the stream.
    FileInputStream fis = new FileInputStream(filename);
    byte[] buf = fis.readAllBytes();
    fis.close();

    CRC32 crc = new CRC32();
    ZipOutputStream s = new ZipOutputStream(new FileOutputStream(zipfilename));
    s.setLevel(6);

    ZipEntry entry = new ZipEntry(filename);
    entry.setSize((long) buf.length);
    entry.setTime(new Date().getTime());
    crc.reset();
    crc.update(buf);
    entry.setCrc(crc.getValue());
    s.putNextEntry(entry);
    s.write(buf, 0, buf.length);
    s.finish();
    s.close();
}

From source file:io.hops.erasure_coding.XORDecoder.java

@Override
protected long fixErasedBlockImpl(FileSystem fs, Path srcFile, FileSystem parityFs, Path parityFile,
        boolean fixSource, long blockSize, long errorOffset, long limit, boolean partial, OutputStream out,
        Progressable reporter, CRC32 crc) throws IOException {
    if (partial) {
        throw new IOException("We don't support partial reconstruction");
    }
    LOG.info("Fixing block at " + srcFile + ":" + errorOffset + ", limit " + limit);
    if (crc != null) {
        crc.reset();
    }
    FileStatus srcStat = fs.getFileStatus(srcFile);
    FSDataInputStream[] inputs = new FSDataInputStream[stripeSize + this.codec.parityLength];

    try {
        long errorBlockOffset = (errorOffset / blockSize) * blockSize;
        long[] srcOffsets = stripeOffsets(errorOffset, blockSize, fixSource);
        for (int i = 0; i < srcOffsets.length; i++) {
            if (fixSource && srcOffsets[i] == errorBlockOffset) {
                inputs[i] = new FSDataInputStream(new RaidUtils.ZeroInputStream(blockSize));
                LOG.info("Using zeros at " + srcFile + ":" + errorBlockOffset);
                continue;
            }
            if (srcOffsets[i] < srcStat.getLen()) {
                FSDataInputStream in = fs.open(srcFile);
                in.seek(srcOffsets[i]);
                inputs[i] = in;
            } else {
                inputs[i] = new FSDataInputStream(new RaidUtils.ZeroInputStream(blockSize));
                LOG.info("Using zeros at " + srcFile + ":" + errorBlockOffset);
            }
        }

        if (fixSource) {
            FSDataInputStream parityFileIn = parityFs.open(parityFile);
            parityFileIn.seek(parityOffset(errorOffset, blockSize));
            inputs[inputs.length - 1] = parityFileIn;
        } else {
            inputs[inputs.length - 1] = new FSDataInputStream(new RaidUtils.ZeroInputStream(blockSize));
            LOG.info("Using zeros at " + parityFile + ":" + errorBlockOffset);
        }
    } catch (IOException e) {
        RaidUtils.closeStreams(inputs);
        throw e;
    }

    int boundedBufferCapacity = 1;
    ParallelStreamReader parallelReader = new ParallelStreamReader(reporter, inputs, bufSize, parallelism,
            boundedBufferCapacity, blockSize);
    parallelReader.start();
    try {
        // Loop while the number of skipped + written bytes is less than the max.
        long written;
        for (written = 0; written < limit;) {
            ParallelStreamReader.ReadResult readResult;
            try {
                readResult = parallelReader.getReadResult();
            } catch (InterruptedException e) {
                throw new IOException("Interrupted while waiting for read result");
            }
            // Cannot tolerate any IO errors.
            IOException readEx = readResult.getException();
            if (readEx != null) {
                throw readEx;
            }

            int toWrite = (int) Math.min((long) bufSize, limit - written);

            XOREncoder.xor(readResult.readBufs, writeBufs[0]);

            out.write(writeBufs[0], 0, toWrite);
            if (crc != null) {
                crc.update(writeBufs[0], 0, toWrite);
            }
            written += toWrite;
        }
        return written;
    } finally {
        // Inputs will be closed by parallelReader.shutdown().
        parallelReader.shutdown();
    }
}
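
Here the CRC32 is supplied by the caller: fixErasedBlockImpl resets it before reconstruction begins and updates it with every byte written to out, so once the method returns the caller can compare crc.getValue() against the expected checksum of the recovered block. The ReedSolomonDecoder examples below follow the same pattern.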

From source file:com.sastix.cms.server.services.content.impl.HashedDirectoryServiceImpl.java

/**
 * Returns the CRC-32 hash of the input String.
 *
 * @param text the text to hash
 * @return the hash as a hexadecimal String
 */
@Override
public String hashText(final String text) {
    final CRC32 crc32 = new CRC32();
    crc32.reset();
    crc32.update(text.getBytes());
    return Long.toHexString(crc32.getValue());
}
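
Note that text.getBytes() uses the platform default charset, so the same input can hash differently across JVMs. A minimal charset-pinned variant (hypothetical, not part of the original class) might look like this:

import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;

// Hypothetical variant of hashText that pins the charset (not from the original source).
public static String hashTextUtf8(final String text) {
    final CRC32 crc32 = new CRC32();
    crc32.reset();
    crc32.update(text.getBytes(StandardCharsets.UTF_8)); // stable across platforms
    return Long.toHexString(crc32.getValue());
}

Also keep in mind that CRC-32 is a checksum, not a cryptographic hash: it is fine for bucketing and quick integrity checks, but offers no collision resistance against adversarial inputs.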

From source file:io.hops.erasure_coding.ReedSolomonDecoder.java

/**
 * Decode the inputs provided and write to the output.
 *
 * @param inputs
 *     array of inputs.
 * @param erasedLocations
 *     indexes in the inputs which are known to be erased.
 * @param erasedLocationToFix
 *     index in the inputs which needs to be fixed.
 * @param limit
 *     maximum number of bytes to be written.
 * @param out
 *     the output.
 * @return size of recovered bytes
 * @throws java.io.IOException
 */
long writeFixedBlock(FSDataInputStream[] inputs, int[] erasedLocations, int erasedLocationToFix, long limit,
        OutputStream out, Progressable reporter, ParallelStreamReader parallelReader, CRC32 crc)
        throws IOException {

    LOG.info("Need to write " + limit + " bytes for erased location index " + erasedLocationToFix);
    if (crc != null) {
        crc.reset();
    }
    int[] tmp = new int[inputs.length];
    int[] decoded = new int[erasedLocations.length];
    // Loop while the number of written bytes is less than the max.
    long written;
    for (written = 0; written < limit;) {
        erasedLocations = readFromInputs(inputs, erasedLocations, limit, reporter, parallelReader);
        if (decoded.length != erasedLocations.length) {
            decoded = new int[erasedLocations.length];
        }

        int toWrite = (int) Math.min((long) bufSize, limit - written);

        int partSize = (int) Math.ceil(bufSize * 1.0 / parallelism);
        try {
            long startTime = System.currentTimeMillis();
            for (int i = 0; i < parallelism; i++) {
                decodeOps.acquire(1);
                int start = i * partSize;
                int count = Math.min(bufSize - start, partSize);
                parallelDecoder.execute(
                        new DecodeOp(readBufs, writeBufs, start, count, erasedLocations, reedSolomonCode[i]));
            }
            decodeOps.acquire(parallelism);
            decodeOps.release(parallelism);
            decodeTime += (System.currentTimeMillis() - startTime);
        } catch (InterruptedException e) {
            throw new IOException("Interrupted while waiting for read result");
        }

        for (int i = 0; i < erasedLocations.length; i++) {
            if (erasedLocations[i] == erasedLocationToFix) {
                out.write(writeBufs[i], 0, toWrite);
                if (crc != null) {
                    crc.update(writeBufs[i], 0, toWrite);
                }
                written += toWrite;
                break;
            }
        }
    }
    return written;
}

From source file:org.apache.hadoop.raid.ReedSolomonDecoder.java

/**
 * Decode the inputs provided and write to the output.
 * @param inputs array of inputs.
 * @param erasedLocations indexes in the inputs which are known to be erased.
 * @param erasedLocationToFix index in the inputs which needs to be fixed.
 * @param limit maximum number of bytes to be written.
 * @param out the output.
 * @return size of recovered bytes
 * @throws IOException
 */
long writeFixedBlock(FSDataInputStream[] inputs, int[] erasedLocations, int erasedLocationToFix, long limit,
        OutputStream out, Progressable reporter, ParallelStreamReader parallelReader, CRC32 crc)
        throws IOException {

    LOG.info("Need to write " + limit + " bytes for erased location index " + erasedLocationToFix);
    if (crc != null) {
        crc.reset();
    }
    int[] tmp = new int[inputs.length];
    int[] decoded = new int[erasedLocations.length];
    // Loop while the number of written bytes is less than the max.
    long written;
    for (written = 0; written < limit;) {
        erasedLocations = readFromInputs(inputs, erasedLocations, limit, reporter, parallelReader);
        if (decoded.length != erasedLocations.length) {
            decoded = new int[erasedLocations.length];
        }

        int toWrite = (int) Math.min((long) bufSize, limit - written);
        int partSize = (int) Math.ceil(bufSize * 1.0 / parallelism);

        try {
            long startTime = System.currentTimeMillis();
            for (int i = 0; i < parallelism; i++) {
                decodeOps.acquire(1);
                int start = i * partSize;
                int count = Math.min(bufSize - start, partSize);
                parallelDecoder.execute(
                        new DecodeOp(readBufs, writeBufs, start, count, erasedLocations, reedSolomonCode[i]));
            }
            decodeOps.acquire(parallelism);
            decodeOps.release(parallelism);
            decodeTime += (System.currentTimeMillis() - startTime);
        } catch (InterruptedException e) {
            throw new IOException("Interrupted while waiting for read result");
        }

        for (int i = 0; i < erasedLocations.length; i++) {
            if (erasedLocations[i] == erasedLocationToFix) {
                out.write(writeBufs[i], 0, toWrite);
                if (crc != null) {
                    crc.update(writeBufs[i], 0, toWrite);
                }
                written += toWrite;
                break;
            }
        }
    }
    return written;
}

From source file:com.sastix.cms.server.services.cache.hazelcast.HazelcastCacheService.java

@Override
public String getUID(String region) {
    CRC32 CRC_32 = new CRC32();
    LOG.info("HazelcastCacheService->GET_UID");
    IdGenerator idGenerator = cm.getIdGenerator(region);
    final String uid = String.valueOf(idGenerator.newId()); //assures uniqueness during the life cycle of the cluster
    final String uuid = UUID.randomUUID().toString();
    String ret = new StringBuilder(uuid).append(region).append(uid).toString();
    CRC_32.reset();
    CRC_32.update(ret.getBytes());
    return Long.toHexString(CRC_32.getValue());
}
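
Note that the returned ID is only the CRC-32 of the UUID/region/sequence string: the inputs are unique, but the final value lives in a 32-bit space and can therefore collide. The scheme trades the uniqueness guarantees of its inputs for a short hexadecimal key.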