Example usage for java.util.zip Checksum update

Introduction

This page collects example usages of the java.util.zip Checksum.update method.

Prototype

default public void update(ByteBuffer buffer) 

Document

Updates the current checksum with the bytes from the specified buffer.
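
The javadoc above describes the ByteBuffer overload, a default method added in Java 9; most of the examples below use the older update(int b) and update(byte[] b, int off, int len) overloads instead. Below is a minimal sketch of the ByteBuffer variant; the file path and buffer size are only illustrative and not taken from the examples.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.zip.CRC32;
import java.util.zip.Checksum;

public static long crc32Of(Path path) throws IOException {
    Checksum cs = new CRC32();
    try (FileChannel ch = FileChannel.open(path, StandardOpenOption.READ)) {
        ByteBuffer buf = ByteBuffer.allocate(8192);
        while (ch.read(buf) != -1) {
            buf.flip();          // expose the bytes just read
            cs.update(buf);      // consumes the buffer from position to limit
            buf.clear();         // ready for the next read
        }
    }
    return cs.getValue();
}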

Usage

From source file:Main.java

public static void main(String[] args) throws IOException {
    FileInputStream fin = new FileInputStream("a.zip");
    Checksum cs = new CRC32();
    for (int b = fin.read(); b != -1; b = fin.read()) {
        cs.update(b);
    }
    System.out.println(cs.getValue());
    fin.close();
}

From source file:Main.java

public static long getCRC32(InputStream in) throws IOException {
    Checksum cs = new CRC32();

    for (int b = in.read(); b != -1; b = in.read()) {
        cs.update(b);
    }
    return cs.getValue();
}

From source file:MainClass.java

public static long getCRC32(InputStream in) throws IOException {

    Checksum cs = new CRC32();

    // Note: it would be more efficient to read chunks of data at a time (see the sketch after this example)
    for (int b = in.read(); b != -1; b = in.read()) {
        cs.update(b);
    }
    return cs.getValue();
}
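
As the comment above hints, reading one byte at a time calls update(int) once per byte; a chunked variant reads into a byte array and passes each filled region to update(byte[], int, int). This is a sketch in the same spirit as the examples above, not taken from any of the listed sources.

import java.io.IOException;
import java.io.InputStream;
import java.util.zip.CRC32;
import java.util.zip.Checksum;

public static long getBufferedCRC32(InputStream in) throws IOException {
    Checksum cs = new CRC32();
    byte[] buf = new byte[8192];                              // 8 KiB read buffer
    for (int n = in.read(buf); n != -1; n = in.read(buf)) {
        cs.update(buf, 0, n);                                 // only the bytes actually read
    }
    return cs.getValue();
}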

From source file:org.apache.cassandra.db.commitlog.CommitLog.java

public static int recover(File[] clogs) throws IOException {
    final Set<Table> tablesRecovered = new HashSet<Table>();
    List<Future<?>> futures = new ArrayList<Future<?>>();
    byte[] bytes = new byte[4096];
    Map<Integer, AtomicInteger> invalidMutations = new HashMap<Integer, AtomicInteger>();

    // count the number of replayed mutations. We don't really care about atomicity, but we need it to be a (mutable) reference so the replay tasks below can increment it.
    final AtomicInteger replayedCount = new AtomicInteger();

    // compute per-CF and global replay positions
    final Map<Integer, ReplayPosition> cfPositions = new HashMap<Integer, ReplayPosition>();
    for (ColumnFamilyStore cfs : ColumnFamilyStore.all()) {
        // it's important to call ReplayPosition.getReplayPosition per-cf, before aggregating all the positions w/ the Ordering.min call
        // below: gRP will return NONE if there are no flushed sstables, which is important to have in the
        // list (otherwise we'll just start replay from the first flush position that we do have, which is not correct).
        ReplayPosition rp = ReplayPosition.getReplayPosition(cfs.getSSTables());
        cfPositions.put(cfs.metadata.cfId, rp);
    }
    final ReplayPosition globalPosition = Ordering.from(ReplayPosition.comparator).min(cfPositions.values());

    for (final File file : clogs) {
        final long segment = CommitLogSegment.idFromFilename(file.getName());

        int bufferSize = (int) Math.min(Math.max(file.length(), 1), 32 * 1024 * 1024);
        BufferedRandomAccessFile reader = new BufferedRandomAccessFile(new File(file.getAbsolutePath()), "r",
                bufferSize, true);
        assert reader.length() <= Integer.MAX_VALUE;

        try {
            int replayPosition;
            if (globalPosition.segment < segment)
                replayPosition = 0;
            else if (globalPosition.segment == segment)
                replayPosition = globalPosition.position;
            else
                replayPosition = (int) reader.length();

            if (replayPosition < 0 || replayPosition >= reader.length()) {
                // replayPosition > reader.length() can happen if some data gets flushed before it is written to the commitlog
                // (see https://issues.apache.org/jira/browse/CASSANDRA-2285)
                logger.debug("skipping replay of fully-flushed {}", file);
                continue;
            }

            reader.seek(replayPosition);

            if (logger.isDebugEnabled())
                logger.debug("Replaying " + file + " starting at " + reader.getFilePointer());

            /* read the logs populate RowMutation and apply */
            while (!reader.isEOF()) {
                if (logger.isDebugEnabled())
                    logger.debug("Reading mutation at " + reader.getFilePointer());

                long claimedCRC32;
                Checksum checksum = new CRC32();
                int serializedSize;
                try {
                    // any of the reads may hit EOF
                    serializedSize = reader.readInt();
                    // RowMutation must be at LEAST 10 bytes:
                    // 3 each for a non-empty Table and Key (including the 2-byte length from
                    // writeUTF/writeWithShortLength) and 4 bytes for column count.
                    // This prevents the CRC from being fooled by special-case garbage in the file; see CASSANDRA-2128
                    if (serializedSize < 10)
                        break;
                    long claimedSizeChecksum = reader.readLong();
                    checksum.update(serializedSize);
                    if (checksum.getValue() != claimedSizeChecksum)
                        break; // entry wasn't synced correctly/fully.  that's ok.

                    if (serializedSize > bytes.length)
                        bytes = new byte[(int) (1.2 * serializedSize)];
                    reader.readFully(bytes, 0, serializedSize);
                    claimedCRC32 = reader.readLong();
                } catch (EOFException eof) {
                    break; // last CL entry didn't get completely written.  that's ok.
                }

                checksum.update(bytes, 0, serializedSize);
                if (claimedCRC32 != checksum.getValue()) {
                    // this entry must not have been fsynced.  probably the rest is bad too,
                    // but just in case there is no harm in trying them (since we still read on an entry boundary)
                    continue;
                }

                /* deserialize the commit log entry */
                ByteArrayInputStream bufIn = new ByteArrayInputStream(bytes, 0, serializedSize);
                RowMutation rm = null;
                try {
                    // assuming version here. We've gone to lengths to make sure what gets written to the CL is in
                    // the current version, so do make sure the CL is drained prior to upgrading a node.
                    rm = RowMutation.serializer().deserialize(new DataInputStream(bufIn),
                            MessagingService.version_, false);
                } catch (UnserializableColumnFamilyException ex) {
                    AtomicInteger i = invalidMutations.get(ex.cfId);
                    if (i == null) {
                        i = new AtomicInteger(1);
                        invalidMutations.put(ex.cfId, i);
                    } else
                        i.incrementAndGet();
                    continue;
                }

                if (logger.isDebugEnabled())
                    logger.debug(String.format("replaying mutation for %s.%s: %s", rm.getTable(),
                            ByteBufferUtil.bytesToHex(rm.key()),
                            "{" + StringUtils.join(rm.getColumnFamilies(), ", ") + "}"));

                final long entryLocation = reader.getFilePointer();
                final RowMutation frm = rm;
                Runnable runnable = new WrappedRunnable() {
                    public void runMayThrow() throws IOException {
                        if (DatabaseDescriptor.getKSMetaData(frm.getTable()) == null)
                            return;
                        final Table table = Table.open(frm.getTable());
                        RowMutation newRm = new RowMutation(frm.getTable(), frm.key());

                        // Rebuild the row mutation, omitting column families that a) have already been flushed,
                        // b) are part of a cf that was dropped. Keep in mind that cf.name() is suspect; do
                        // everything based on the cfId instead.
                        for (ColumnFamily columnFamily : frm.getColumnFamilies()) {
                            if (CFMetaData.getCF(columnFamily.id()) == null)
                                // null means the cf has been dropped
                                continue;

                            ReplayPosition rp = cfPositions.get(columnFamily.id());

                            // replay if current segment is newer than last flushed one or, if it is the last known
                            // segment, if we are after the replay position
                            if (segment > rp.segment
                                    || (segment == rp.segment && entryLocation > rp.position)) {
                                newRm.add(columnFamily);
                                replayedCount.incrementAndGet();
                            }
                        }
                        if (!newRm.isEmpty()) {
                            Table.open(newRm.getTable()).apply(newRm, false);
                            tablesRecovered.add(table);
                        }
                    }
                };
                futures.add(StageManager.getStage(Stage.MUTATION).submit(runnable));
                if (futures.size() > MAX_OUTSTANDING_REPLAY_COUNT) {
                    FBUtilities.waitOnFutures(futures);
                    futures.clear();
                }
            }
        } finally {
            FileUtils.closeQuietly(reader);
            logger.info("Finished reading " + file);
        }
    }

    for (Map.Entry<Integer, AtomicInteger> entry : invalidMutations.entrySet())
        logger.info(String.format("Skipped %d mutations from unknown (probably removed) CF with id %d",
                entry.getValue().intValue(), entry.getKey()));

    // wait for all the writes to finish on the mutation stage
    FBUtilities.waitOnFutures(futures);
    logger.debug("Finished waiting on mutations from recovery");

    // flush replayed tables
    futures.clear();
    for (Table table : tablesRecovered)
        futures.addAll(table.flush());
    FBUtilities.waitOnFutures(futures);

    return replayedCount.get();
}
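
Stripped of the replay machinery, each commit log entry above is framed as a size field guarded by a running CRC, followed by the payload guarded by the same, continued CRC. The helpers below are a hypothetical sketch of that framing; the method names and stream types are illustrative and not Cassandra APIs.

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.zip.CRC32;
import java.util.zip.Checksum;

// Hypothetical framing: [int size][long CRC of size][payload bytes][long CRC of size + payload]
static void writeEntry(DataOutputStream out, byte[] payload) throws IOException {
    Checksum cs = new CRC32();
    out.writeInt(payload.length);
    cs.update(payload.length);                 // update(int) hashes only the low 8 bits of the int
    out.writeLong(cs.getValue());              // guards the size field
    out.write(payload);
    cs.update(payload, 0, payload.length);     // same checksum, continued over the payload
    out.writeLong(cs.getValue());              // guards size + payload
}

static byte[] readEntry(DataInputStream in) throws IOException {
    Checksum cs = new CRC32();
    int size = in.readInt();
    long claimedSizeChecksum = in.readLong();
    cs.update(size);
    if (cs.getValue() != claimedSizeChecksum)
        return null;                           // size field was not written completely
    byte[] payload = new byte[size];
    in.readFully(payload);
    long claimedCRC32 = in.readLong();
    cs.update(payload, 0, payload.length);
    return cs.getValue() == claimedCRC32 ? payload : null;
}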

From source file:org.apache.mnemonic.collections.DurableArrayNGTest.java

protected DurableChunk<NonVolatileMemAllocator> genuptChunk(NonVolatileMemAllocator act, Checksum cs,
        long size) {
    DurableChunk<NonVolatileMemAllocator> ret = null;
    ret = act.createChunk(size, false);
    if (null == ret) {
        throw new OutOfHybridMemory("Create Durable Chunk Failed.");
    }
    byte b;
    for (int i = 0; i < ret.getSize(); ++i) {
        b = (byte) rand.nextInt(255);
        unsafe.putByte(ret.get() + i, b);
        cs.update(b);
    }
    return ret;
}

From source file:org.apache.mnemonic.collections.DurableArrayNGTest.java

@Test(enabled = true)
public void testGetSetArrayChunk() {
    DurableType gtypes[] = { DurableType.CHUNK };
    int capacity = 10;
    DurableArray<DurableChunk> array = DurableArrayFactory.create(m_act, null, gtypes, capacity, false);

    Long handler = array.getHandler();

    long chunkVal;

    Checksum chunkCheckSum = new CRC32();
    chunkCheckSum.reset();

    for (int i = 0; i < capacity; i++) {
        array.set(i, genuptChunk(m_act, chunkCheckSum, genRandSize()));
    }
    chunkVal = chunkCheckSum.getValue();
    chunkCheckSum.reset();

    for (int i = 0; i < capacity; i++) {
        DurableChunk<NonVolatileMemAllocator> dc = array.get(i);
        Assert.assertNotNull(dc);
        for (int j = 0; j < dc.getSize(); ++j) {
            byte b = unsafe.getByte(dc.get() + j);
            chunkCheckSum.update(b);
        }
    }
    Assert.assertEquals(chunkCheckSum.getValue(), chunkVal);
    chunkCheckSum.reset();

    DurableArray<DurableChunk> restoredArray = DurableArrayFactory.restore(m_act, null, gtypes, handler, false);
    for (int i = 0; i < capacity; i++) {
        DurableChunk<NonVolatileMemAllocator> dc = restoredArray.get(i);
        Assert.assertNotNull(dc);
        for (int j = 0; j < dc.getSize(); ++j) {
            byte b = unsafe.getByte(dc.get() + j);
            chunkCheckSum.update(b);
        }
    }
    Assert.assertEquals(chunkCheckSum.getValue(), chunkVal);

    chunkCheckSum.reset();
    Iterator<DurableChunk> itr = restoredArray.iterator();
    int val = 0;
    while (itr.hasNext()) {
        DurableChunk<NonVolatileMemAllocator> dc = itr.next();
        Assert.assertNotNull(dc);
        for (int j = 0; j < dc.getSize(); ++j) {
            byte b = unsafe.getByte(dc.get() + j);
            chunkCheckSum.update(b);
        }
        val++;
    }
    Assert.assertEquals(val, capacity);
    Assert.assertEquals(chunkCheckSum.getValue(), chunkVal);

    restoredArray.destroy();
}
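
The tests above reuse a single CRC32 instance: reset() returns it to its initial state so the checksum recomputed on a later pass can be compared with the value captured earlier. A standalone sketch of that reset-and-recompute pattern, with an illustrative payload:

import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;
import java.util.zip.Checksum;

public static void main(String[] args) {
    byte[] data = "example payload".getBytes(StandardCharsets.UTF_8);

    Checksum cs = new CRC32();
    for (byte b : data) {
        cs.update(b);                          // byte-at-a-time, as in the tests above
    }
    long expected = cs.getValue();

    cs.reset();                                // same instance, fresh checksum state
    cs.update(data, 0, data.length);           // bulk update over the same bytes
    System.out.println(cs.getValue() == expected);   // prints true
}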

From source file:org.apache.mnemonic.collections.DurableHashMapNGTest.java

@Test(enabled = true)
public void testMapValueChunk() {
    DurableType gtypes[] = { DurableType.STRING, DurableType.CHUNK };
    DurableHashMap<String, DurableChunk> map = DurableHashMapFactory.create(m_act, null, gtypes, 1, false);
    long chunkVal;

    Checksum chunkCheckSum = new CRC32();
    chunkCheckSum.reset();

    Long handler = map.getHandler();
    for (int i = 0; i < 10; i++) {
        map.put("chunk" + i, genuptChunk(m_act, chunkCheckSum, genRandSize()));
    }

    chunkVal = chunkCheckSum.getValue();
    chunkCheckSum.reset();

    for (int i = 0; i < 10; i++) {
        DurableChunk<NonVolatileMemAllocator> dc = map.get("chunk" + i);
        for (int j = 0; j < dc.getSize(); ++j) {
            byte b = unsafe.getByte(dc.get() + j);
            chunkCheckSum.update(b);
        }
    }
    Assert.assertEquals(chunkCheckSum.getValue(), chunkVal);

    chunkCheckSum.reset();
    DurableHashMap<String, DurableChunk> restoredMap = DurableHashMapFactory.restore(m_act, null, gtypes,
            handler, false);

    for (int i = 0; i < 10; i++) {
        DurableChunk<NonVolatileMemAllocator> dc = restoredMap.get("chunk" + i);
        for (int j = 0; j < dc.getSize(); ++j) {
            byte b = unsafe.getByte(dc.get() + j);
            chunkCheckSum.update(b);
        }
    }
    Assert.assertEquals(chunkCheckSum.getValue(), chunkVal);

    restoredMap.destroy();
}