Example usage for java.util.zip Checksum getValue


Introduction

On this page you can find example usages of java.util.zip Checksum.getValue().

Prototype

public long getValue();

Document

Returns the current checksum value.
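Before the project-level examples below, here is a minimal, self-contained sketch of the typical call pattern: feed bytes to a Checksum implementation such as CRC32, then read the accumulated value with getValue(). The class name and sample input are illustrative only, not taken from the sources below.

import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;
import java.util.zip.Checksum;

public class GetValueDemo {
    public static void main(String[] args) {
        Checksum checksum = new CRC32();
        byte[] data = "hello".getBytes(StandardCharsets.UTF_8);
        checksum.update(data, 0, data.length);
        // getValue() returns the checksum computed so far; for CRC32 and
        // Adler32 the 32-bit result sits in the low 32 bits of the long,
        // so the returned value is always non-negative.
        long value = checksum.getValue();
        System.out.println(Long.toHexString(value)); // prints 3610a686 for "hello"
        // reset() clears the accumulated state; getValue() then reports the
        // initial value again (0 for CRC32, 1 for Adler32).
        checksum.reset();
    }
}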

Usage

From source file:com.threewks.thundr.util.Encoder.java
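
This snippet computes a CRC32 checksum over the encoder's byte buffer, replaces the buffer with the hexadecimal string of the checksum value, and delegates to unhex().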

public Encoder crc32() {
    Checksum checksum = new CRC32();
    checksum.update(data, 0, data.length);
    long checksumValue = checksum.getValue();
    data = Long.toHexString(checksumValue).getBytes();
    return unhex();
}

From source file:CRC32HashBuilder.java
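
This snippet streams the input in 1 KB blocks through a CRC32 checksum and returns the final value, read with getValue(), as a hexadecimal string.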

/**
 * {@inheritDoc}
 */
public String getHash(final InputStream input) throws IOException {
    if (input == null) {
        throw new IllegalArgumentException("Content cannot be null!");
    }
    final Checksum checksum = new CRC32();
    final byte[] bytes = new byte[1024];
    int len = 0;
    while ((len = input.read(bytes)) >= 0) {
        checksum.update(bytes, 0, len);
    }

    final String hash = new BigInteger(Long.toString(checksum.getValue())).toString(16);
    return hash;
}

From source file:org.spliffy.server.apps.calendar.CalendarManager.java
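
This snippet derives a change tag (ctag) for a calendar event by writing its fields through a CheckedOutputStream backed by an Adler32 checksum, then propagates the change to the parent calendar.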

private void updateCtag(CalEvent event) {
    OutputStream nulOut = new NullOutputStream();
    CheckedOutputStream cout = new CheckedOutputStream(nulOut, new Adler32());
    HashUtils.appendLine(event.getDescription(), cout);
    HashUtils.appendLine(event.getSummary(), cout);
    HashUtils.appendLine(event.getTimezone(), cout);
    HashUtils.appendLine(event.getStartDate(), cout);
    HashUtils.appendLine(event.getEndDate(), cout);
    Checksum check = cout.getChecksum();
    long crc = check.getValue();
    event.setCtag(crc);
    updateCtag(event.getCalendar());
}

From source file:org.spliffy.server.apps.calendar.CalendarManager.java
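
This snippet recomputes a calendar's ctag from its color and the per-event ctags, again reading the final Adler32 value with getValue().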

private void updateCtag(Calendar sourceCal) {
    OutputStream nulOut = new NullOutputStream();
    CheckedOutputStream cout = new CheckedOutputStream(nulOut, new Adler32());

    HashUtils.appendLine(sourceCal.getColor(), cout);
    if (sourceCal.getEvents() != null) {
        for (CalEvent r : sourceCal.getEvents()) {
            String name = r.getName();
            String line = HashUtils.toHashableText(name, r.getCtag(), "");
            HashUtils.appendLine(line, cout);
        }
    }
    Checksum check = cout.getChecksum();
    long crc = check.getValue();
    sourceCal.setCtag(crc);
}

From source file:com.jkoolcloud.tnt4j.streams.configure.state.FileStreamStateHandlerTest.java
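
This test records per-file and per-line CRC32 values while scanning sample log files, then verifies that the state handler can find the file and line that were last processed.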

@Test
public void findStreamingFile() throws Exception {
    FileStreamStateHandler rwd = new FileStreamStateHandler();

    File testFilesDir = new File(samplesDir, "/multiple-logs/");
    File[] testFiles = testFilesDir.listFiles((FilenameFilter) new WildcardFileFilter("orders*")); // NON-NLS
    FileAccessState newFAS = new FileAccessState();

    int count = 0;
    File fileToSearchFor = null;
    int lineLastRead = 0;
    File fileWritten = null;
    for (File testFile : testFiles) {
        count++;
        FileReader in;
        LineNumberReader reader;

        Long fileCRC = rwd.getFileCrc(testFile);
        if (count == 2) {
            newFAS.currentFileCrc = fileCRC;
            fileToSearchFor = testFile;
        }

        in = new FileReader(testFile);
        reader = new LineNumberReader(in);
        reader.setLineNumber(0);
        String line = reader.readLine();
        int count2 = 0;
        while (line != null) {
            count2++;
            Checksum crcLine = new CRC32();
            final byte[] bytes4Line = line.getBytes();
            crcLine.update(bytes4Line, 0, bytes4Line.length);
            final long lineCRC = crcLine.getValue();
            final int lineNumber = reader.getLineNumber();
            System.out.println("for " + lineNumber + " line CRC is " + lineCRC); // NON-NLS
            if (count2 == 3) {
                newFAS.currentLineCrc = lineCRC;
                newFAS.currentLineNumber = lineNumber;
                newFAS.lastReadTime = System.currentTimeMillis();
                lineLastRead = lineNumber;
            }
            line = reader.readLine();
        }
        fileWritten = AbstractFileStreamStateHandler.writeState(newFAS, testFilesDir, "TestStream"); // NON-NLS
        Utils.close(reader);
    }

    final File findLastProcessed = rwd.findStreamingFile(newFAS, testFiles);
    assertEquals(fileToSearchFor, findLastProcessed);
    final int lineLastReadRecorded = rwd.checkLine(findLastProcessed, newFAS);
    assertEquals(lineLastRead, lineLastReadRecorded);
    fileWritten.delete();
}

From source file:io.milton.cloud.server.apps.calendar.CalendarManager.java
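
This is the io.milton variant of the Spliffy event-ctag computation shown above.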

private void updateCtag(CalEvent event) {
    OutputStream nulOut = new NullOutputStream();
    CheckedOutputStream cout = new CheckedOutputStream(nulOut, new Adler32());
    appendLine(event.getDescription(), cout);
    appendLine(event.getSummary(), cout);
    appendLine(event.getTimezone(), cout);
    appendLine(event.getStartDate(), cout);
    appendLine(event.getEndDate(), cout);
    Checksum check = cout.getChecksum();
    long crc = check.getValue();
    event.setCtag(crc);
    updateCtag(event.getCalendar());
}

From source file:io.milton.cloud.server.apps.calendar.CalendarManager.java
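
This is the io.milton variant of the calendar-level ctag computation shown above.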

private void updateCtag(Calendar sourceCal) {
    OutputStream nulOut = new NullOutputStream();
    CheckedOutputStream cout = new CheckedOutputStream(nulOut, new Adler32());

    appendLine(sourceCal.getColor(), cout);
    if (sourceCal.getEvents() != null) {
        for (CalEvent r : sourceCal.getEvents()) {
            String name = r.getName();
            String line = HashCalc.getInstance().toHashableText(name, r.getCtag() + "", "");
            appendLine(line, cout);
        }
    }
    Checksum check = cout.getChecksum();
    long crc = check.getValue();
    sourceCal.setCtag(crc);
}

From source file:kr.ac.cau.mecs.cass.signal.payload.JSONObjectPayload.java
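
This snippet serializes a JSON payload: the UTF-8 bytes are CRC32-checksummed and Base64-encoded, and the checksum value, encoded length, and encoded data are stored on the payload.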

@Override
public void serializeRawData(Payload payload) {
    String data = this.data.toString();

    Checksum crc32 = new CRC32();
    Base64 base64 = new Base64();

    byte[] bytedata = data.getBytes(Charset.forName("UTF-8"));

    crc32.reset();
    crc32.update(bytedata, 0, bytedata.length);

    // NOTE: assumes a Base64 whose encode(byte[]) returns a String; with
    // commons-codec's Base64, encodeToString(bytedata) would be needed here.
    String encoded = base64.encode(bytedata);

    payload.setCrc(crc32.getValue());
    payload.setLength(encoded.length());
    payload.setRawdata(encoded);

    payload.setType(0x11);
}

From source file:org.anarres.lzo.LzopOutputStream.java
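
This snippet writes the low 32 bits of a checksum to an LZOP stream, after resetting it and recomputing it over the given byte range; a null checksum means checksums are disabled.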

private void writeChecksum(Checksum csum, byte[] data, int off, int len) throws IOException {
    if (csum == null)
        return;
    csum.reset();
    csum.update(data, off, len);
    long value = csum.getValue();
    // LOG.info("Writing checksum " + csum);
    // getValue() returns the 32-bit checksum in the low bits of a long;
    // mask with the long literal 0xFFFFFFFFL (the int literal 0xFFFFFFFF
    // would sign-extend to -1L and mask nothing).
    writeInt((int) (value & 0xFFFFFFFFL));
}

From source file:org.apache.cassandra.db.commitlog.CommitLog.java
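
This method replays Cassandra commit-log segments. Each log entry stores a CRC32 of its serialized size and of its body; getValue() is compared against both stored values to detect entries that were never completely synced to disk.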

public static int recover(File[] clogs) throws IOException {
    final Set<Table> tablesRecovered = new HashSet<Table>();
    List<Future<?>> futures = new ArrayList<Future<?>>();
    byte[] bytes = new byte[4096];
    Map<Integer, AtomicInteger> invalidMutations = new HashMap<Integer, AtomicInteger>();

    // count the number of replayed mutations. We don't really care about atomicity, but we need it to be a reference.
    final AtomicInteger replayedCount = new AtomicInteger();

    // compute per-CF and global replay positions
    final Map<Integer, ReplayPosition> cfPositions = new HashMap<Integer, ReplayPosition>();
    for (ColumnFamilyStore cfs : ColumnFamilyStore.all()) {
        // it's important to call RP.gRP per-cf, before aggregating all the positions w/ the Ordering.min call
        // below: gRP will return NONE if there are no flushed sstables, which is important to have in the
        // list (otherwise we'll just start replay from the first flush position that we do have, which is not correct).
        ReplayPosition rp = ReplayPosition.getReplayPosition(cfs.getSSTables());
        cfPositions.put(cfs.metadata.cfId, rp);
    }
    final ReplayPosition globalPosition = Ordering.from(ReplayPosition.comparator).min(cfPositions.values());

    for (final File file : clogs) {
        final long segment = CommitLogSegment.idFromFilename(file.getName());

        int bufferSize = (int) Math.min(Math.max(file.length(), 1), 32 * 1024 * 1024);
        BufferedRandomAccessFile reader = new BufferedRandomAccessFile(new File(file.getAbsolutePath()), "r",
                bufferSize, true);
        assert reader.length() <= Integer.MAX_VALUE;

        try {
            int replayPosition;
            if (globalPosition.segment < segment)
                replayPosition = 0;
            else if (globalPosition.segment == segment)
                replayPosition = globalPosition.position;
            else
                replayPosition = (int) reader.length();

            if (replayPosition < 0 || replayPosition >= reader.length()) {
                // replayPosition > reader.length() can happen if some data gets flushed before it is written to the commitlog
                // (see https://issues.apache.org/jira/browse/CASSANDRA-2285)
                logger.debug("skipping replay of fully-flushed {}", file);
                continue;
            }

            reader.seek(replayPosition);

            if (logger.isDebugEnabled())
                logger.debug("Replaying " + file + " starting at " + reader.getFilePointer());

            /* read the logs populate RowMutation and apply */
            while (!reader.isEOF()) {
                if (logger.isDebugEnabled())
                    logger.debug("Reading mutation at " + reader.getFilePointer());

                long claimedCRC32;
                Checksum checksum = new CRC32();
                int serializedSize;
                try {
                    // any of the reads may hit EOF
                    serializedSize = reader.readInt();
                    // RowMutation must be at LEAST 10 bytes:
                    // 3 each for a non-empty Table and Key (including the 2-byte length from
                    // writeUTF/writeWithShortLength) and 4 bytes for column count.
                    // This prevents the CRC from being fooled by special-case garbage in the file; see CASSANDRA-2128
                    if (serializedSize < 10)
                        break;
                    long claimedSizeChecksum = reader.readLong();
                    checksum.update(serializedSize);
                    if (checksum.getValue() != claimedSizeChecksum)
                        break; // entry wasn't synced correctly/fully.  that's ok.

                    if (serializedSize > bytes.length)
                        bytes = new byte[(int) (1.2 * serializedSize)];
                    reader.readFully(bytes, 0, serializedSize);
                    claimedCRC32 = reader.readLong();
                } catch (EOFException eof) {
                    break; // last CL entry didn't get completely written.  that's ok.
                }

                checksum.update(bytes, 0, serializedSize);
                if (claimedCRC32 != checksum.getValue()) {
                    // this entry must not have been fsynced.  probably the rest is bad too,
                    // but just in case there is no harm in trying them (since we still read on an entry boundary)
                    continue;
                }

                /* deserialize the commit log entry */
                ByteArrayInputStream bufIn = new ByteArrayInputStream(bytes, 0, serializedSize);
                RowMutation rm = null;
                try {
                    // assuming version here. We've gone to lengths to make sure what gets written to the CL is in
                    // the current version.  so do make sure the CL is drained prior to upgrading a node.
                    rm = RowMutation.serializer().deserialize(new DataInputStream(bufIn),
                            MessagingService.version_, false);
                } catch (UnserializableColumnFamilyException ex) {
                    AtomicInteger i = invalidMutations.get(ex.cfId);
                    if (i == null) {
                        i = new AtomicInteger(1);
                        invalidMutations.put(ex.cfId, i);
                    } else
                        i.incrementAndGet();
                    continue;
                }

                if (logger.isDebugEnabled())
                    logger.debug(String.format("replaying mutation for %s.%s: %s", rm.getTable(),
                            ByteBufferUtil.bytesToHex(rm.key()),
                            "{" + StringUtils.join(rm.getColumnFamilies(), ", ") + "}"));

                final long entryLocation = reader.getFilePointer();
                final RowMutation frm = rm;
                Runnable runnable = new WrappedRunnable() {
                    public void runMayThrow() throws IOException {
                        if (DatabaseDescriptor.getKSMetaData(frm.getTable()) == null)
                            return;
                        final Table table = Table.open(frm.getTable());
                        RowMutation newRm = new RowMutation(frm.getTable(), frm.key());

                        // Rebuild the row mutation, omitting column families that a) have already been flushed,
                        // b) are part of a cf that was dropped. Keep in mind that the cf.name() is suspect. do every
                        // thing based on the cfid instead.
                        for (ColumnFamily columnFamily : frm.getColumnFamilies()) {
                            if (CFMetaData.getCF(columnFamily.id()) == null)
                                // null means the cf has been dropped
                                continue;

                            ReplayPosition rp = cfPositions.get(columnFamily.id());

                            // replay if current segment is newer than last flushed one or, if it is the last known
                            // segment, if we are after the replay position
                            if (segment > rp.segment
                                    || (segment == rp.segment && entryLocation > rp.position)) {
                                newRm.add(columnFamily);
                                replayedCount.incrementAndGet();
                            }
                        }
                        if (!newRm.isEmpty()) {
                            Table.open(newRm.getTable()).apply(newRm, false);
                            tablesRecovered.add(table);
                        }
                    }
                };
                futures.add(StageManager.getStage(Stage.MUTATION).submit(runnable));
                if (futures.size() > MAX_OUTSTANDING_REPLAY_COUNT) {
                    FBUtilities.waitOnFutures(futures);
                    futures.clear();
                }
            }
        } finally {
            FileUtils.closeQuietly(reader);
            logger.info("Finished reading " + file);
        }
    }

    for (Map.Entry<Integer, AtomicInteger> entry : invalidMutations.entrySet())
        logger.info(String.format("Skipped %d mutations from unknown (probably removed) CF with id %d",
                entry.getValue().intValue(), entry.getKey()));

    // wait for all the writes to finish on the mutation stage
    FBUtilities.waitOnFutures(futures);
    logger.debug("Finished waiting on mutations from recovery");

    // flush replayed tables
    futures.clear();
    for (Table table : tablesRecovered)
        futures.addAll(table.flush());
    FBUtilities.waitOnFutures(futures);

    return replayedCount.get();
}