Example usage for java.nio.channels FileChannel position

List of usage examples for java.nio.channels FileChannel position

Introduction

On this page you can find example usage for java.nio.channels FileChannel position.

Prototype

public abstract FileChannel position(long newPosition) throws IOException;

Document

Sets this channel's file position.
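
Before the project examples below, here is a minimal, self-contained sketch of the basic pattern, assuming a hypothetical data.bin file that is at least 128 bytes long: open a channel, seek with position(long), then read from that offset.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class PositionExample {
    public static void main(String[] args) throws IOException {
        Path file = Paths.get("data.bin"); // hypothetical file path

        try (FileChannel channel = FileChannel.open(file, StandardOpenOption.READ)) {
            channel.position(128); // seek to absolute byte offset 128
            ByteBuffer buffer = ByteBuffer.allocate(64);
            int read = channel.read(buffer); // the read starts at the new position
            System.out.println("Read " + read + " bytes starting at offset 128");
        }
    }
}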

Usage

From source file:org.apache.nifi.processors.standard.TailFile.java

/**
 * Updates member variables to reflect the "expected recovery checksum" and
 * seek to the appropriate location in the tailed file, updating our
 * checksum, so that we are ready to proceed with the
 * {@link #onTrigger(ProcessContext, ProcessSession)} call.
 *
 * @param context the ProcessContext
 * @param stateValues the values that were recovered from state that was
 * previously stored. This Map should be populated with the keys defined in
 * {@link TailFileState.StateKeys}.
 * @param filePath the path of the file for which state must be recovered
 * @throws IOException if unable to seek to the appropriate location in the
 * tailed file.
 */
private void recoverState(final ProcessContext context, final Map<String, String> stateValues,
        final String filePath) throws IOException {

    final String prefix = MAP_PREFIX + states.get(filePath).getFilenameIndex() + '.';

    if (!stateValues.containsKey(prefix + TailFileState.StateKeys.FILENAME)) {
        resetState(filePath);
        return;
    }
    if (!stateValues.containsKey(prefix + TailFileState.StateKeys.POSITION)) {
        resetState(filePath);
        return;
    }
    if (!stateValues.containsKey(prefix + TailFileState.StateKeys.TIMESTAMP)) {
        resetState(filePath);
        return;
    }
    if (!stateValues.containsKey(prefix + TailFileState.StateKeys.LENGTH)) {
        resetState(filePath);
        return;
    }

    final String checksumValue = stateValues.get(prefix + TailFileState.StateKeys.CHECKSUM);
    final boolean checksumPresent = (checksumValue != null);
    final String storedStateFilename = stateValues.get(prefix + TailFileState.StateKeys.FILENAME);
    final long position = Long.parseLong(stateValues.get(prefix + TailFileState.StateKeys.POSITION));
    final long timestamp = Long.parseLong(stateValues.get(prefix + TailFileState.StateKeys.TIMESTAMP));
    final long length = Long.parseLong(stateValues.get(prefix + TailFileState.StateKeys.LENGTH));

    FileChannel reader = null;
    File tailFile = null;

    if (checksumPresent && filePath.equals(storedStateFilename)) {
        states.get(filePath).setExpectedRecoveryChecksum(Long.parseLong(checksumValue));

        // We have an expected checksum and the currently configured filename is the same as the state file.
        // We need to check if the existing file is the same as the one referred to in the state file based on
        // the checksum.
        final Checksum checksum = new CRC32();
        final File existingTailFile = new File(storedStateFilename);
        if (existingTailFile.length() >= position) {
            try (final InputStream tailFileIs = new FileInputStream(existingTailFile);
                    final CheckedInputStream in = new CheckedInputStream(tailFileIs, checksum)) {
                StreamUtils.copy(in, new NullOutputStream(), states.get(filePath).getState().getPosition());

                final long checksumResult = in.getChecksum().getValue();
                if (checksumResult == states.get(filePath).getExpectedRecoveryChecksum()) {
                    // Checksums match. This means that we want to resume reading from where we left off.
                    // So we will populate the reader object so that it will be used in onTrigger. If the
                    // checksums do not match, then we will leave the reader object null, so that the next
                    // call to onTrigger will result in a new Reader being created and starting at the
                    // beginning of the file.
                    getLogger().debug(
                            "When recovering state, checksum of tailed file matches the stored checksum. Will resume where left off.");
                    tailFile = existingTailFile;
                    reader = FileChannel.open(tailFile.toPath(), StandardOpenOption.READ);
                    getLogger().debug("Created FileChannel {} for {} in recoverState",
                            new Object[] { reader, tailFile });

                    reader.position(position);
                } else {
                    // we don't seek the reader to the position, so our reader will start at beginning of file.
                    getLogger().debug(
                            "When recovering state, checksum of tailed file does not match the stored checksum. Will begin tailing current file from beginning.");
                }
            }
        } else {
            // fewer bytes than our position, so we know we weren't already reading from this file. Keep reader at a position of 0.
            getLogger().debug(
                    "When recovering state, existing file to tail is only {} bytes but position flag is {}; "
                            + "this indicates that the file has rotated. Will begin tailing current file from beginning.",
                    new Object[] { existingTailFile.length(), position });
        }

        states.get(filePath).setState(new TailFileState(filePath, tailFile, reader, position, timestamp, length,
                checksum, ByteBuffer.allocate(65536)));
    } else {
        resetState(filePath);
    }

    getLogger().debug("Recovered state {}", new Object[] { states.get(filePath).getState() });
}
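
Stripped of the NiFi-specific state handling, the recovery logic above boils down to: re-checksum the first position bytes of the current file and only seek the channel when the checksum still matches the stored value. A hedged sketch of that pattern (the class and method names are hypothetical, not part of NiFi):

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.StandardOpenOption;
import java.util.zip.CRC32;
import java.util.zip.CheckedInputStream;
import java.util.zip.Checksum;

public class ResumeSketch {

    // Returns a channel positioned at savedPosition if the first savedPosition bytes
    // still produce savedChecksum, otherwise null (caller starts from the beginning).
    static FileChannel resumeIfUnchanged(File file, long savedPosition, long savedChecksum)
            throws IOException {
        if (file.length() < savedPosition) {
            return null; // file is shorter than the saved position: it has rotated
        }
        Checksum checksum = new CRC32();
        try (CheckedInputStream in = new CheckedInputStream(new FileInputStream(file), checksum)) {
            byte[] buf = new byte[8192];
            long remaining = savedPosition;
            while (remaining > 0) {
                int read = in.read(buf, 0, (int) Math.min(buf.length, remaining));
                if (read < 0) {
                    return null;
                }
                remaining -= read;
            }
        }
        if (checksum.getValue() != savedChecksum) {
            return null; // contents changed: tail from the beginning of the file
        }
        FileChannel channel = FileChannel.open(file.toPath(), StandardOpenOption.READ);
        channel.position(savedPosition); // resume exactly where the previous run stopped
        return channel;
    }
}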

From source file:org.alfresco.repo.search.impl.lucene.index.IndexInfo.java

private boolean checkVersion(FileChannel channel) throws IOException {
    if (channel.size() > 0) {
        channel.position(0);
        ByteBuffer buffer;

        if (useNIOMemoryMapping) {
            MappedByteBuffer mbb = channel.map(MapMode.READ_ONLY, 0, 8);
            mbb.load();
            buffer = mbb;
        } else {
            buffer = ByteBuffer.wrap(new byte[8]);
            channel.read(buffer);
            buffer.position(0);
        }

        buffer.position(0);
        long onDiskVersion = buffer.getLong();
        return (version == onDiskVersion);
    }
    return (version == 0);
}

From source file:org.alfresco.repo.search.impl.lucene.index.IndexInfo.java

private void setStatusFromFile(FileChannel channel) throws IOException {
    if (channel.size() > 0) {
        channel.position(0);
        ByteBuffer buffer;

        if (useNIOMemoryMapping) {
            MappedByteBuffer mbb = channel.map(MapMode.READ_ONLY, 0, channel.size());
            mbb.load();
            buffer = mbb;
        } else {
            buffer = ByteBuffer.wrap(new byte[(int) channel.size()]);
            channel.read(buffer);
            buffer.position(0);
        }

        buffer.position(0);
        long onDiskVersion = buffer.getLong();
        if (version != onDiskVersion) {
            CRC32 crc32 = new CRC32();
            crc32.update((int) (onDiskVersion >>> 32) & 0xFFFFFFFF);
            crc32.update((int) (onDiskVersion >>> 0) & 0xFFFFFFFF);
            int size = buffer.getInt();
            crc32.update(size);
            LinkedHashMap<String, IndexEntry> newIndexEntries = new LinkedHashMap<String, IndexEntry>();
            // Not all state is saved some is specific to this index so we
            // need to add the transient stuff.
            // Until things are committed they are not shared unless it is
            // prepared
            for (int i = 0; i < size; i++) {
                String indexTypeString = readString(buffer, crc32);
                IndexType indexType;
                try {
                    indexType = IndexType.valueOf(indexTypeString);
                } catch (IllegalArgumentException e) {
                    throw new IOException("Invalid type " + indexTypeString);
                }

                String name = readString(buffer, crc32);

                String parentName = readString(buffer, crc32);

                String txStatus = readString(buffer, crc32);
                TransactionStatus status;
                try {
                    status = TransactionStatus.valueOf(txStatus);
                } catch (IllegalArgumentException e) {
                    throw new IOException("Invalid status " + txStatus);
                }

                String mergeId = readString(buffer, crc32);

                long documentCount = buffer.getLong();
                crc32.update((int) (documentCount >>> 32) & 0xFFFFFFFF);
                crc32.update((int) (documentCount >>> 0) & 0xFFFFFFFF);

                long deletions = buffer.getLong();
                crc32.update((int) (deletions >>> 32) & 0xFFFFFFFF);
                crc32.update((int) (deletions >>> 0) & 0xFFFFFFFF);

                byte deleteOnlyNodesFlag = buffer.get();
                crc32.update(deleteOnlyNodesFlag);
                boolean isDeletOnlyNodes = deleteOnlyNodesFlag == 1;

                if (!status.isTransient()) {
                    newIndexEntries.put(name, new IndexEntry(indexType, name, parentName, status, mergeId,
                            documentCount, deletions, isDeletOnlyNodes));
                }
            }
            long onDiskCRC32 = buffer.getLong();
            if (crc32.getValue() == onDiskCRC32) {
                for (IndexEntry entry : indexEntries.values()) {
                    if (entry.getStatus().isTransient()) {
                        newIndexEntries.put(entry.getName(), entry);
                    }
                }
                version = onDiskVersion;
                indexEntries = newIndexEntries;
            } else {
                throw new IOException("Invalid file check sum");
            }
        }
    }

}

From source file:org.alfresco.repo.search.impl.lucene.index.IndexInfo.java

private void writeStatusToFile(FileChannel channel) throws IOException {
    long size = getBufferSize();

    ByteBuffer buffer;
    if (useNIOMemoryMapping) {
        MappedByteBuffer mbb = channel.map(MapMode.READ_WRITE, 0, size);
        mbb.load();
        buffer = mbb;
    } else {
        channel.truncate(size);
        buffer = ByteBuffer.wrap(new byte[(int) size]);
    }

    buffer.position(0);

    buffer.putLong(version);
    CRC32 crc32 = new CRC32();
    crc32.update((int) (version >>> 32) & 0xFFFFFFFF);
    crc32.update((int) (version >>> 0) & 0xFFFFFFFF);

    buffer.putInt(indexEntries.size());
    crc32.update(indexEntries.size());

    for (IndexEntry entry : indexEntries.values()) {
        String entryType = entry.getType().toString();
        writeString(buffer, crc32, entryType);

        writeString(buffer, crc32, entry.getName());

        writeString(buffer, crc32, entry.getParentName());

        String entryStatus = entry.getStatus().toString();
        writeString(buffer, crc32, entryStatus);

        writeString(buffer, crc32, entry.getMergeId());

        buffer.putLong(entry.getDocumentCount());
        crc32.update((int) (entry.getDocumentCount() >>> 32) & 0xFFFFFFFF);
        crc32.update((int) (entry.getDocumentCount() >>> 0) & 0xFFFFFFFF);

        buffer.putLong(entry.getDeletions());
        crc32.update((int) (entry.getDeletions() >>> 32) & 0xFFFFFFFF);
        crc32.update((int) (entry.getDeletions() >>> 0) & 0xFFFFFFFF);

        buffer.put(entry.isDeletOnlyNodes() ? (byte) 1 : (byte) 0);
        crc32.update(entry.isDeletOnlyNodes() ? new byte[] { (byte) 1 } : new byte[] { (byte) 0 });
    }
    buffer.putLong(crc32.getValue());

    if (useNIOMemoryMapping) {
        ((MappedByteBuffer) buffer).force();
    } else {
        buffer.rewind();
        channel.position(0);
        channel.write(buffer);
    }
}
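
In the non-memory-mapped branch above, the buffer is rewound and the channel is repositioned to 0 so that the new status record overwrites the file from its start. Note that FileChannel.write(ByteBuffer) may transfer fewer bytes than the buffer holds, so a defensive variant of that final step (a sketch, not the Alfresco code; the class name is hypothetical) loops until the buffer is drained:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

public class OverwriteSketch {
    // Overwrite the file from the start. A single write() call may transfer
    // fewer bytes than the buffer holds, so loop until the buffer is empty.
    static void overwriteFromStart(FileChannel channel, ByteBuffer buffer) throws IOException {
        buffer.rewind();     // read the buffer from its beginning
        channel.position(0); // start writing at the beginning of the file
        while (buffer.hasRemaining()) {
            channel.write(buffer);
        }
    }
}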