Example usage for java.nio.channels FileChannel position

Introduction

On this page you can find examples of how java.nio.channels.FileChannel.position is used.

Prototype

public abstract FileChannel position(long newPosition) throws IOException;

Document

Sets this channel's file position.
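
Before the usage examples, here is a minimal, self-contained sketch of the method in isolation (the file name and offsets are illustrative, not taken from any example below): position the channel past a fixed-size header, then read from that offset.

import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class PositionExample {
    public static void main(String[] args) throws Exception {
        // Skip a hypothetical 16-byte header, then read the next 64 bytes.
        try (FileChannel ch = FileChannel.open(Paths.get("data.bin"), StandardOpenOption.READ)) {
            ch.position(16); // set the file position
            ByteBuffer buf = ByteBuffer.allocate(64);
            int n = ch.read(buf); // reads starting at position 16 and advances the position
            System.out.println("read " + n + " bytes, position is now " + ch.position());
        }
    }
}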

Usage

From source file:org.cloudata.core.commitlog.CommitLogServer.java

public int readLastLogFromMarkedPosition(String dirName) {
    File logFile = getLogFile(dirName);
    File indexFile = getLogIndexFile(dirName);

    FileChannel ch = null;
    try {
        long index = 0;

        if (!logFile.exists()) {
            LOG.warn("A directory [" + dirName + "] or log file does not exist");
            return -1;
        } else if (indexFile.exists()) {
            FileChannel indexChannel = new FileInputStream(indexFile).getChannel();
            index = CommitLogFileChannel.readLastIndex(indexChannel);
            LOG.info("index of dir [" + dirName + "] : " + index);
            indexChannel.close();
        }

        FileInputStream in = new FileInputStream(logFile);
        ch = in.getChannel();
        ch.position(index);
    } catch (IOException e) {
        LOG.error("Fail to read logs", e);
        return -1;
    }

    return startFileTransferChannel(dirName, ch);
}

From source file:Main.java

public static long getCommentLength(final FileChannel fileChannel) throws IOException {
    // End of central directory record (EOCD)
    // Offset    Bytes     Description
    // 0           4       End of central directory signature = 0x06054b50
    // 4           2       Number of this disk
    // 6           2       Disk where central directory starts
    // 8           2       Number of central directory records on this disk
    // 10          2       Total number of central directory records
    // 12          4       Size of central directory (bytes)
    // 16          4       Offset of start of central directory, relative to start of archive
    // 20          2       Comment length (n)
    // 22          n       Comment
    // For a zip with no archive comment, the
    // end-of-central-directory record will be 22 bytes long, so
    // we expect to find the EOCD marker 22 bytes from the end.

    final long archiveSize = fileChannel.size();
    if (archiveSize < ZIP_EOCD_REC_MIN_SIZE) {
        throw new IOException("APK too small for ZIP End of Central Directory (EOCD) record");
    }
    // ZIP End of Central Directory (EOCD) record is located at the very end of the ZIP archive.
    // The record can be identified by its 4-byte signature/magic which is located at the very
    // beginning of the record. A complication is that the record is variable-length because of
    // the comment field.
    // The algorithm for locating the ZIP EOCD record is as follows. We search backwards from
    // end of the buffer for the EOCD record signature. Whenever we find a signature, we check
    // the candidate record's comment length is such that the remainder of the record takes up
    // exactly the remaining bytes in the buffer. The search is bounded because the maximum
    // size of the comment field is 65535 bytes because the field is an unsigned 16-bit number.
    final long maxCommentLength = Math.min(archiveSize - ZIP_EOCD_REC_MIN_SIZE, UINT16_MAX_VALUE);
    final long eocdWithEmptyCommentStartPosition = archiveSize - ZIP_EOCD_REC_MIN_SIZE;
    for (int expectedCommentLength = 0; expectedCommentLength <= maxCommentLength; expectedCommentLength++) {
        final long eocdStartPos = eocdWithEmptyCommentStartPosition - expectedCommentLength;

        final ByteBuffer byteBuffer = ByteBuffer.allocate(4);
        fileChannel.position(eocdStartPos);
        fileChannel.read(byteBuffer);
        byteBuffer.order(ByteOrder.LITTLE_ENDIAN);

        if (byteBuffer.getInt(0) == ZIP_EOCD_REC_SIG) {
            final ByteBuffer commentLengthByteBuffer = ByteBuffer.allocate(2);
            fileChannel.position(eocdStartPos + ZIP_EOCD_COMMENT_LENGTH_FIELD_OFFSET);
            fileChannel.read(commentLengthByteBuffer);
            commentLengthByteBuffer.order(ByteOrder.LITTLE_ENDIAN);

            final int actualCommentLength = commentLengthByteBuffer.getShort(0) & 0xffff; // unsigned 16-bit
            if (actualCommentLength == expectedCommentLength) {
                return actualCommentLength;
            }
        }
    }
    throw new IOException("ZIP End of Central Directory (EOCD) record not found");
}
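
The snippet above references several constants that it does not define. The values below are implied by the EOCD layout in its own comments (a 22-byte minimum record, the 4-byte signature 0x06054b50, the comment-length field at offset 20, and an unsigned 16-bit comment length):

private static final int ZIP_EOCD_REC_MIN_SIZE = 22; // EOCD record with an empty comment
private static final int ZIP_EOCD_REC_SIG = 0x06054b50; // EOCD signature
private static final int ZIP_EOCD_COMMENT_LENGTH_FIELD_OFFSET = 20; // comment-length field
private static final int UINT16_MAX_VALUE = 0xffff; // maximum unsigned 16-bit value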

From source file:com.clustercontrol.agent.job.PublicKeyThread.java

/**
 * Appends a public key to the authorized_keys file.<BR>
 * 
 * @param publicKey the public key to append
 * @return true if the key was appended successfully, false otherwise
 */
private synchronized boolean addKey(String publicKey) {
    m_log.debug("add key start");

    if (SKIP_KEYFILE_UPDATE) {
        m_log.info("skipped appending publicKey");
        return true;
    }

    // Get the path of the authorized_keys file
    String fileName = AgentProperties.getProperty(execUser.toLowerCase() + AUTHORIZED_KEY_PATH);

    m_log.debug("faileName" + fileName);
    if (fileName == null || fileName.length() == 0)
        return false;

    // Create a File object for the key file
    File fi = new File(fileName);

    RandomAccessFile randomAccessFile = null;
    FileChannel channel = null;
    FileLock lock = null;
    boolean add = false;
    try {
        // Create a RandomAccessFile in read-write mode
        randomAccessFile = new RandomAccessFile(fi, "rw");
        // Get the FileChannel
        channel = randomAccessFile.getChannel();

        // Try to acquire an exclusive lock, retrying until the timeout elapses
        for (int i = 0; i < (FILELOCK_TIMEOUT / FILELOCK_WAIT); i++) {
            if (null != (lock = channel.tryLock())) {
                break;
            }
            m_log.info("waiting for locked file... [" + (i + 1) + "/" + (FILELOCK_TIMEOUT / FILELOCK_WAIT)
                    + " : " + fileName + "]");
            Thread.sleep(FILELOCK_WAIT);
        }
        if (null == lock) {
            m_log.warn("file locking timeout.");
            return false;
        }

        // Append the public key (synchronized)
        synchronized (authKeyLock) {
            // Move the position to the end of the file
            channel.position(channel.size());

            // Data to append
            String writeData = "\n" + publicKey;
            // Log the key being appended
            m_log.debug("add key : " + writeData);

            // Allocate a buffer for the data
            ByteBuffer buffer = ByteBuffer.allocate(512);

            // Fill the buffer, flip it, and write it to the channel
            buffer.clear();
            buffer.put(writeData.getBytes());
            buffer.flip();
            channel.write(buffer);
        }

        add = true;
    } catch (Exception e) {
        m_log.error(e);
    } finally {
        try {
            // Release the lock before closing the channel; releasing it after
            // the close would throw ClosedChannelException.
            if (lock != null) {
                lock.release();
            }
            if (channel != null) {
                channel.close();
            }
            if (randomAccessFile != null) {
                randomAccessFile.close();
            }
        } catch (Exception e) {
            // ignore cleanup failures
        }
    }

    return add;
}
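
Positioning at channel.size() before each write, as above, is one way to append under an explicit lock. Where locking and truncation are not needed, a channel opened with StandardOpenOption.APPEND achieves the same effect; a minimal sketch follows (the path and key text are illustrative):

import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class AppendKey {
    public static void main(String[] args) throws Exception {
        try (FileChannel ch = FileChannel.open(Paths.get("authorized_keys"),
                StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.APPEND)) {
            // With APPEND, every write goes to the end of the file regardless
            // of the channel's current position.
            ch.write(ByteBuffer.wrap("\nssh-rsa AAAA... user@host".getBytes(StandardCharsets.UTF_8)));
        }
    }
}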

From source file:edu.harvard.iq.dataverse.dataaccess.TabularSubsetGenerator.java

public Object[] subsetObjectVector(File tabfile, int column, int varcount, int casecount, int columntype,
        boolean compatmode) throws IOException {

    Object[] retVector = null;

    boolean isString = false;
    boolean isDouble = false;
    boolean isLong = false;
    boolean isFloat = false;

    //Locale loc = new Locale("en", "US");

    if (columntype == COLUMN_TYPE_STRING) {
        isString = true;
        retVector = new String[casecount];
    } else if (columntype == COLUMN_TYPE_DOUBLE) {
        isDouble = true;
        retVector = new Double[casecount];
    } else if (columntype == COLUMN_TYPE_LONG) {
        isLong = true;
        retVector = new Long[casecount];
    } else if (columntype == COLUMN_TYPE_FLOAT) {
        isFloat = true;
        retVector = new Float[casecount];
    } else {
        throw new IOException("Unsupported column type: " + columntype);
    }

    File rotatedImageFile = getRotatedImage(tabfile, varcount, casecount);
    long[] columnEndOffsets = extractColumnOffsets(rotatedImageFile, varcount, casecount);
    long columnOffset = 0;
    long columnLength = 0;

    if (column > 0) {
        columnOffset = columnEndOffsets[column - 1];
        columnLength = columnEndOffsets[column] - columnEndOffsets[column - 1];
    } else {
        columnOffset = varcount * 8;
        columnLength = columnEndOffsets[0] - varcount * 8;
    }

    FileChannel fc = (FileChannel.open(Paths.get(rotatedImageFile.getAbsolutePath()), StandardOpenOption.READ));
    fc.position(columnOffset);
    int MAX_COLUMN_BUFFER = 8192;

    ByteBuffer in = ByteBuffer.allocate(MAX_COLUMN_BUFFER);

    if (columnLength < MAX_COLUMN_BUFFER) {
        in.limit((int) (columnLength));
    }

    long bytesRead = 0;
    long bytesReadTotal = 0;
    int caseindex = 0;
    int byteoffset = 0;
    byte[] leftover = null;

    while (bytesReadTotal < columnLength) {
        bytesRead = fc.read(in);
        byte[] columnBytes = in.array();
        int bytecount = 0;

        while (bytecount < bytesRead) {
            if (columnBytes[bytecount] == '\n') {
                /*
                String token = new String(columnBytes, byteoffset, bytecount-byteoffset, "UTF8");
                        
                if (leftover != null) {
                String leftoverString = new String (leftover, "UTF8");
                token = leftoverString + token;
                leftover = null;
                }
                */
                /* 
                 * Note that the way I was doing it at first - above - 
                 * was not quite the correct way - because I was creating UTF8
                 * strings from the leftover bytes, and the bytes in the 
                 * current buffer *separately*; which means, if a multi-byte
                 * UTF8 character got split in the middle between one buffer
                 * and the next, both chunks of it would become junk 
                 * characters, on each side!
                 * The correct way of doing it, of course, is to create a
                 * merged byte buffer, and then turn it into a UTF8 string. 
                 *      -- L.A. 4.0
                 */
                String token = null;

                if (leftover == null) {
                    token = new String(columnBytes, byteoffset, bytecount - byteoffset, "UTF8");
                } else {
                    byte[] merged = new byte[leftover.length + bytecount - byteoffset];

                    System.arraycopy(leftover, 0, merged, 0, leftover.length);
                    System.arraycopy(columnBytes, byteoffset, merged, leftover.length, bytecount - byteoffset);
                    token = new String(merged, "UTF8");
                    leftover = null;
                    merged = null;
                }

                if (isString) {
                    if ("".equals(token)) {
                        // An empty string is a string missing value!
                        // An empty string in quotes is an empty string!
                        retVector[caseindex] = null;
                    } else {
                        // Strip the outer quotes:
                        token = token.replaceFirst("^\\\"", "");
                        token = token.replaceFirst("\\\"$", "");

                        // We need to restore the special characters that 
                        // are stored in tab files escaped - quotes, new lines 
                        // and tabs. Before we do that however, we need to 
                        // take care of any escaped backslashes stored in 
                        // the tab file. I.e., "foo\t" should be transformed 
                        // to "foo<TAB>"; but "foo\\t" should be transformed 
                        // to "foo\t". This way new lines and tabs that were
                        // already escaped in the original data are not 
                        // going to be transformed to unescaped tab and 
                        // new line characters!

                        String[] splitTokens = token.split(Matcher.quoteReplacement("\\\\"), -2);

                        // (note that it's important to use the 2-argument version 
                        // of String.split(), and set the limit argument to a
                        // negative value; otherwise any trailing backslashes 
                        // are lost.)

                        for (int i = 0; i < splitTokens.length; i++) {
                            splitTokens[i] = splitTokens[i].replaceAll(Matcher.quoteReplacement("\\\""), "\"");
                            splitTokens[i] = splitTokens[i].replaceAll(Matcher.quoteReplacement("\\t"), "\t");
                            splitTokens[i] = splitTokens[i].replaceAll(Matcher.quoteReplacement("\\n"), "\n");
                            splitTokens[i] = splitTokens[i].replaceAll(Matcher.quoteReplacement("\\r"), "\r");
                        }
                        // TODO: 
                        // Make (some of?) the above optional; for ex., we 
                        // do need to restore the newlines when calculating UNFs;
                        // But if we are subsetting these vectors in order to 
                        // create a new tab-delimited file, they will 
                        // actually break things! -- L.A. Jul. 28 2014

                        token = StringUtils.join(splitTokens, '\\');

                        // "compatibility mode" - a hack, to be able to produce
                        // unfs identical to those produced by the "early" 
                        // unf5 jar; will be removed in production 4.0. 
                        // -- L.A. (TODO: ...)
                        if (compatmode && !"".equals(token)) {
                            if (token.length() > 128) {
                                if ("".equals(token.trim())) {
                                    // don't ask... 
                                    token = token.substring(0, 129);
                                } else {
                                    token = token.substring(0, 128);
                                    //token = String.format(loc, "%.128s", token);
                                    token = token.trim();
                                    //dbgLog.info("formatted and trimmed: "+token);
                                }
                            } else {
                                if ("".equals(token.trim())) {
                                    // again, don't ask; 
                                    // - this replicates some bugginness 
                                    // that happens inside unf5;
                                    token = "null";
                                } else {
                                    token = token.trim();
                                }
                            }
                        }

                        retVector[caseindex] = token;
                    }
                } else if (isDouble) {
                    try {
                        // TODO: verify that NaN and +-Inf are 
                        // handled correctly here! -- L.A.
                        // Verified: new Double("nan") works correctly, 
                        // resulting in Double.NaN;
                        // Double("[+-]Inf") doesn't work however; 
                        // (the constructor appears to be expecting it
                        // to be spelled as "Infinity", "-Infinity", etc. 
                        if ("inf".equalsIgnoreCase(token) || "+inf".equalsIgnoreCase(token)) {
                            retVector[caseindex] = java.lang.Double.POSITIVE_INFINITY;
                        } else if ("-inf".equalsIgnoreCase(token)) {
                            retVector[caseindex] = java.lang.Double.NEGATIVE_INFINITY;
                        } else if (token == null || token.equals("")) {
                            // missing value:
                            retVector[caseindex] = null;
                        } else {
                            retVector[caseindex] = new Double(token);
                        }
                    } catch (NumberFormatException ex) {
                        dbgLog.warning("NumberFormatException thrown for " + token + " as Double");

                        retVector[caseindex] = null; // missing value
                        // TODO: ?
                    }
                } else if (isLong) {
                    try {
                        retVector[caseindex] = new Long(token);
                    } catch (NumberFormatException ex) {
                        retVector[caseindex] = null; // assume missing value
                    }
                } else if (isFloat) {
                    try {
                        if ("inf".equalsIgnoreCase(token) || "+inf".equalsIgnoreCase(token)) {
                            retVector[caseindex] = java.lang.Float.POSITIVE_INFINITY;
                        } else if ("-inf".equalsIgnoreCase(token)) {
                            retVector[caseindex] = java.lang.Float.NEGATIVE_INFINITY;
                        } else if (token == null || token.equals("")) {
                            // missing value:
                            retVector[caseindex] = null;
                        } else {
                            retVector[caseindex] = new Float(token);
                        }
                    } catch (NumberFormatException ex) {
                        dbgLog.warning("NumberFormatException thrown for " + token + " as Float");
                        retVector[caseindex] = null; // assume missing value (TODO: ?)
                    }
                }
                caseindex++;

                if (bytecount == bytesRead - 1) {
                    byteoffset = 0;
                } else {
                    byteoffset = bytecount + 1;
                }
            } else {
                if (bytecount == bytesRead - 1) {
                    // We've reached the end of the buffer; 
                    // This means we'll save whatever unused bytes left in 
                    // it - i.e., the bytes between the last new line 
                    // encountered and the end - in the leftover buffer. 

                    // *EXCEPT*, there may be a case of a very long String
                    // that is actually longer than MAX_COLUMN_BUFFER, in 
                    // which case it is possible that we've read through
                    // an entire buffer of bytes without finding any 
                    // new lines... in this case we may need to add this
                    // entire byte buffer to an already existing leftover 
                    // buffer!
                    if (leftover == null) {
                        leftover = new byte[(int) bytesRead - byteoffset];
                        System.arraycopy(columnBytes, byteoffset, leftover, 0, (int) bytesRead - byteoffset);
                    } else {
                        if (byteoffset != 0) {
                            throw new IOException(
                                    "Reached the end of the byte buffer, with some leftover left from the last read; yet the offset is not zero!");
                        }
                        byte[] merged = new byte[leftover.length + (int) bytesRead];

                        System.arraycopy(leftover, 0, merged, 0, leftover.length);
                        System.arraycopy(columnBytes, byteoffset, merged, leftover.length, (int) bytesRead);
                        //leftover = null;
                        leftover = merged;
                        merged = null;
                    }
                    byteoffset = 0;

                }
            }
            bytecount++;
        }

        bytesReadTotal += bytesRead;
        in.clear();
        if (columnLength - bytesReadTotal < MAX_COLUMN_BUFFER) {
            in.limit((int) (columnLength - bytesReadTotal));
        }
    }

    fc.close();

    if (caseindex != casecount) {
        throw new IOException("Faile to read " + casecount + " tokens for column " + column);
        //System.out.println("read "+caseindex+" tokens instead of expected "+casecount+".");
    }

    return retVector;
}

From source file:org.apache.nifi.processors.standard.TailFile.java

private FileChannel createReader(final File file, final long position) {
    final FileChannel reader;

    try {
        reader = FileChannel.open(file.toPath(), StandardOpenOption.READ);
    } catch (final IOException ioe) {
        getLogger().warn(
                "Unable to open file {}; will attempt to access file again after the configured Yield Duration has elapsed: {}",
                new Object[] { file, ioe });
        return null;
    }

    getLogger().debug("Created FileChannel {} for {}", new Object[] { reader, file });

    try {
        reader.position(position);
    } catch (final IOException ioe) {
        getLogger().error("Failed to read from {} due to {}", new Object[] { file, ioe });

        try {
            reader.close();
            getLogger().debug("Closed FileChannel {}", new Object[] { reader });
        } catch (final IOException ioe2) {
            // ignore failures while closing
        }

        return null;
    }

    return reader;
}

From source file:com.clustercontrol.agent.log.LogfileMonitor.java

/**
 * Opens the monitored log file and restores the saved read position.
 */
private boolean openFile() {
    m_log.info("openFile : filename=" + status.rsFilePath.getName());

    closeFile();

    FileChannel fc = null;

    // Check for rotation, then open the file
    try {
        if (checkPrefix())
            status.rotate();

        fc = FileChannel.open(Paths.get(getFilePath()), StandardOpenOption.READ);

        long filesize = fc.size();
        if (filesize > LogfileMonitorConfig.fileMaxSize) {
            // Send a notification that the file size exceeded the upper bound
            // (message.log.agent.1, message.log.agent.3, message.log.agent.5).
            String[] args1 = { getFilePath() };
            String[] args2 = { String.valueOf(filesize) };
            sendMessage(PriorityConstant.TYPE_INFO, MessageConstant.AGENT.getMessage(),
                    MessageConstant.MESSAGE_LOG_FILE_SIZE_EXCEEDED_UPPER_BOUND.getMessage(),
                    MessageConstant.MESSAGE_LOG_FILE.getMessage(args1) + ", "
                            + MessageConstant.MESSAGE_LOG_FILE_SIZE_BYTE.getMessage(args2));
        }

        // Restore the saved read position; the behavior differs between the
        // first open (init=true) and subsequent opens (init=false).
        fc.position(status.position);

        fileChannel = fc;

        return true;
    } catch (FileNotFoundException e) {
        m_log.info("openFile : " + e.getMessage());
        if (m_initFlag) {
            // Send a notification that the log file was not found
            // (message.log.agent.1, message.log.agent.2).
            String[] args = { getFilePath() };
            sendMessage(PriorityConstant.TYPE_INFO, MessageConstant.AGENT.getMessage(),
                    MessageConstant.MESSAGE_LOG_FILE_NOT_FOUND.getMessage(),
                    MessageConstant.MESSAGE_LOG_FILE.getMessage(args));
        }

        return false;
    } catch (SecurityException e) {
        m_log.info("openFile : " + e.getMessage());
        if (m_initFlag) {
            // Send a warning that the log file could not be read
            // (message.log.agent.1, message.log.agent.4).
            String[] args = { getFilePath() };
            sendMessage(PriorityConstant.TYPE_WARNING, MessageConstant.AGENT.getMessage(),
                    MessageConstant.MESSAGE_LOG_FAILED_TO_READ_FILE.getMessage(),
                    MessageConstant.MESSAGE_LOG_FILE.getMessage(args) + "\n" + e.getMessage());
        }
        return false;
    } catch (IOException e) {
        m_log.info("openFile : " + e.getMessage());
        if (m_initFlag) {
            // Send a notification that the log file could not be read
            // (message.log.agent.1, message.log.agent.4).
            String[] args = { getFilePath() };
            sendMessage(PriorityConstant.TYPE_INFO, MessageConstant.AGENT.getMessage(),
                    MessageConstant.MESSAGE_LOG_FAILED_TO_READ_FILE.getMessage(),
                    MessageConstant.MESSAGE_LOG_FILE.getMessage(args));
        }
        return false;
    } finally {
        // If the channel was opened but never assigned to fileChannel, close it here to avoid a leak.
        if (fc != null && fileChannel == null) {
            try {
                fc.close();
            } catch (IOException e) {
                m_log.warn(e.getMessage(), e);
            }
        }
        m_initFlag = false;
    }
}

From source file:org.sakaiproject.search.index.impl.JDBCClusterIndexStore.java

/**
 * @param from the source channel
 * @param to the destination channel
 * @throws IOException
 */
private void doBlockedStream(FileChannel from, FileChannel to) throws IOException {
    to.position(0);
    long size = from.size();
    for (long pos = 0; pos < size;) {
        long count = size - pos;
        if (count > MAX_BLOCK_SIZE) {
            count = MAX_BLOCK_SIZE;
        }

        to.position(pos);
        long cpos = to.position();
        log.debug("NIOTransfering |" + count + "| bytes from |" + pos + "| to |" + cpos + "|");
        long t = to.transferFrom(from, pos, count);
        pos = pos + t;
    }
    log.debug("  Final Size Source        " + from.size());
    log.debug("  Final Size Destination   " + to.size());
}
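
The loop above works because transferFrom reads from the source channel's current position and advances it, while writing at the explicit position passed as an argument; the destination channel's own position is neither used nor updated by the transfer. A minimal, self-contained sketch of the same pattern (class name and paths are illustrative):

import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class ChannelCopy {
    // Copy src to dst; loop because transferFrom may move fewer bytes than requested.
    public static void copy(String srcPath, String dstPath) throws Exception {
        try (FileChannel src = FileChannel.open(Paths.get(srcPath), StandardOpenOption.READ);
             FileChannel dst = FileChannel.open(Paths.get(dstPath),
                     StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {
            long size = src.size();
            for (long pos = 0; pos < size; ) {
                pos += dst.transferFrom(src, pos, size - pos);
            }
        }
    }
}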

From source file:org.apache.nifi.processors.standard.TailFile.java

private void processTailFile(final ProcessContext context, final ProcessSession session,
        final String tailFile) {
    // If user changes the file that is being tailed, we need to consume the already-rolled-over data according
    // to the Initial Start Position property
    boolean rolloverOccurred;
    TailFileObject tfo = states.get(tailFile);

    if (tfo.isTailFileChanged()) {
        rolloverOccurred = false;
        final String recoverPosition = context.getProperty(START_POSITION).getValue();

        if (START_BEGINNING_OF_TIME.getValue().equals(recoverPosition)) {
            recoverRolledFiles(context, session, tailFile, tfo.getExpectedRecoveryChecksum(),
                    tfo.getState().getTimestamp(), tfo.getState().getPosition());
        } else if (START_CURRENT_FILE.getValue().equals(recoverPosition)) {
            cleanup();
            tfo.setState(new TailFileState(tailFile, null, null, 0L, 0L, 0L, null, tfo.getState().getBuffer()));
        } else {
            final String filename = tailFile;
            final File file = new File(filename);

            try {
                final FileChannel fileChannel = FileChannel.open(file.toPath(), StandardOpenOption.READ);
                getLogger().debug("Created FileChannel {} for {}", new Object[] { fileChannel, file });

                final Checksum checksum = new CRC32();
                final long position = file.length();
                final long timestamp = file.lastModified();

                try (final InputStream fis = new FileInputStream(file);
                        final CheckedInputStream in = new CheckedInputStream(fis, checksum)) {
                    StreamUtils.copy(in, new NullOutputStream(), position);
                }

                fileChannel.position(position);
                cleanup();
                tfo.setState(new TailFileState(filename, file, fileChannel, position, timestamp, file.length(),
                        checksum, tfo.getState().getBuffer()));
            } catch (final IOException ioe) {
                getLogger().error(
                        "Attempted to position Reader at current position in file {} but failed to do so due to {}",
                        new Object[] { file, ioe.toString() }, ioe);
                context.yield();
                return;
            }
        }

        tfo.setTailFileChanged(false);
    } else {
        // Recover any data that may have rolled over since the last time that this processor ran.
        // If expectedRecoveryChecksum != null, that indicates that this is the first iteration since processor was started, so use whatever checksum value
        // was present when the state was last persisted. In this case, we must then null out the value so that the next iteration won't keep using the "recovered"
        // value. If the value is null, then we know that either the processor has already recovered that data, or there was no state persisted. In either case,
        // use whatever checksum value is currently in the state.
        Long expectedChecksumValue = tfo.getExpectedRecoveryChecksum();
        if (expectedChecksumValue == null) {
            expectedChecksumValue = tfo.getState().getChecksum() == null ? null
                    : tfo.getState().getChecksum().getValue();
        }

        rolloverOccurred = recoverRolledFiles(context, session, tailFile, expectedChecksumValue,
                tfo.getState().getTimestamp(), tfo.getState().getPosition());
        tfo.setExpectedRecoveryChecksum(null);
    }

    // initialize local variables from state object; this is done so that we can easily change the values throughout
    // the onTrigger method and then create a new state object after we finish processing the files.
    TailFileState state = tfo.getState();
    File file = state.getFile();
    FileChannel reader = state.getReader();
    Checksum checksum = state.getChecksum();
    if (checksum == null) {
        checksum = new CRC32();
    }
    long position = state.getPosition();
    long timestamp = state.getTimestamp();
    long length = state.getLength();

    // Create a reader if necessary.
    if (file == null || reader == null) {
        file = new File(tailFile);
        reader = createReader(file, position);
        if (reader == null) {
            context.yield();
            return;
        }
    }

    final long startNanos = System.nanoTime();

    // Check if file has rotated
    if (rolloverOccurred || (timestamp <= file.lastModified() && length > file.length())
            || (timestamp < file.lastModified() && length >= file.length())) {

        // Since file has rotated, we close the reader, create a new one, and then reset our state.
        try {
            reader.close();
            getLogger().debug("Closed FileChannel {}", new Object[] { reader, reader });
        } catch (final IOException ioe) {
            getLogger().warn("Failed to close reader for {} due to {}", new Object[] { file, ioe });
        }

        reader = createReader(file, 0L);
        position = 0L;
        checksum.reset();
    }

    if (file.length() == position || !file.exists()) {
        // no data to consume so rather than continually running, yield to allow other processors to use the thread.
        getLogger().debug("No data to consume; created no FlowFiles");
        tfo.setState(new TailFileState(tailFile, file, reader, position, timestamp, length, checksum,
                state.getBuffer()));
        persistState(tfo, context);
        context.yield();
        return;
    }

    // If there is data to consume, read as much as we can.
    final TailFileState currentState = state;
    final Checksum chksum = checksum;
    // data has been written to file. Stream it to a new FlowFile.
    FlowFile flowFile = session.create();

    final FileChannel fileReader = reader;
    final AtomicLong positionHolder = new AtomicLong(position);
    flowFile = session.write(flowFile, new OutputStreamCallback() {
        @Override
        public void process(final OutputStream rawOut) throws IOException {
            try (final OutputStream out = new BufferedOutputStream(rawOut)) {
                positionHolder.set(readLines(fileReader, currentState.getBuffer(), out, chksum));
            }
        }
    });

    // If there ended up being no data, just remove the FlowFile
    if (flowFile.getSize() == 0) {
        session.remove(flowFile);
        getLogger().debug("No data to consume; removed created FlowFile");
    } else {
        // determine filename for FlowFile by using <base filename of log file>.<initial offset>-<final offset>.<extension>
        final String tailFilename = file.getName();
        final String baseName = StringUtils.substringBeforeLast(tailFilename, ".");
        final String flowFileName;
        if (baseName.length() < tailFilename.length()) {
            flowFileName = baseName + "." + position + "-" + positionHolder.get() + "."
                    + StringUtils.substringAfterLast(tailFilename, ".");
        } else {
            flowFileName = baseName + "." + position + "-" + positionHolder.get();
        }

        final Map<String, String> attributes = new HashMap<>(3);
        attributes.put(CoreAttributes.FILENAME.key(), flowFileName);
        attributes.put(CoreAttributes.MIME_TYPE.key(), "text/plain");
        attributes.put("tailfile.original.path", tailFile);
        flowFile = session.putAllAttributes(flowFile, attributes);

        session.getProvenanceReporter().receive(flowFile, file.toURI().toString(),
                "FlowFile contains bytes " + position + " through " + positionHolder.get() + " of source file",
                TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos));
        session.transfer(flowFile, REL_SUCCESS);
        position = positionHolder.get();

        // Set timestamp to the latest of when the file was modified and the current timestamp stored in the state.
        // We do this because when we read a file that has been rolled over, we set the state to 1 millisecond later than the last mod date
        // in order to avoid ingesting that file again. If we then read from this file during the same second (or millisecond, depending on the
        // operating system file last mod precision), then we could set the timestamp to a smaller value, which could result in reading in the
        // rotated file a second time.
        timestamp = Math.max(state.getTimestamp(), file.lastModified());
        length = file.length();
        getLogger().debug("Created {} and routed to success", new Object[] { flowFile });
    }

    // Create a new state object to represent our current position, timestamp, etc.
    tfo.setState(new TailFileState(tailFile, file, reader, position, timestamp, length, checksum,
            state.getBuffer()));

    // We must commit session before persisting state in order to avoid data loss on restart
    session.commit();
    persistState(tfo, context);
}

From source file:gephi.spade.panel.fcsFile.java

/**
 * readFile ---
 * <p>
 * A helper function to read all the fields in the TEXT segment of the FCS
 * file.
 * </p>
 *
 * <p>
 * This helper function should only be called once by the constructor as it
 * is quite expensive.
 * </p>
 *
 * @param extractEventsP
 *            boolean flag indicating whether to extract events in the
 *            underlying file.
 * @throws <code>java.io.FileNotFoundException</code> if the file is not
 *         found.
 * @throws <code>java.io.IOException</code> if an IO exception occurred.
 */
private void readFile(boolean extractEventsP) throws FileNotFoundException, IOException {
    // Open a file input stream to the file
    FileInputStream fis = new FileInputStream(file);

    // Create a byte array to hold the version
    byte[] versionArray = new byte[VERSION_SIZE];

    // Read the version into the byte array
    int numRead = fis.read(versionArray);

    if (numRead < VERSION_SIZE) {
        // If the number of bytes read is less than the number of bytes in
        // the version string, then the file is too small to be an FCS file.
        isFCSP = false;

        // Close the file input stream
        fis.close();

        // Quit
        return;
    }

    // Decode the version using the default encoding
    version = new String(versionArray);

    // Determine whether the file is an FCS file by whether the version
    // string starts with the FCS_PREFIX
    isFCSP = version.startsWith(FCS_PREFIX);

    if (!isFCSP) {
        // If the file is not an FCS file, then close the file and quit.
        // Close the file input stream
        fis.close();

        // Quit
        return;
    }

    /**
     * At this point, we are pretty sure that the file is an FCS file. So,
     * we parse it.
     */
    /**
     * Get the standard HEADER stuff
     */
    // Skip 4 bytes to get to byte 10
    fis.skip(4);

    // Create a byte array to hold the HEADER
    byte[] headerArray = new byte[48];

    // Read the header into the byte array
    numRead = fis.read(headerArray);

    if (numRead < 48) {
        // If the number of bytes read is less than 48, then the file is too
        // small to be an FCS file.
        isFCSP = false;

        // Close the file input stream
        fis.close();

        // Quit
        return;
    }

    try {
        // Try to parse the TEXT segment start and end and DATA segment
        // start and end
        textStart = Integer.parseInt((new String(headerArray, 0, 8)).trim());
        textEnd = Integer.parseInt((new String(headerArray, 8, 8)).trim());
        dataStart = Integer.parseInt((new String(headerArray, 16, 8)).trim());
        dataEnd = Integer.parseInt((new String(headerArray, 24, 8)).trim());
    } catch (NumberFormatException nfe) {
        // If a NumberFormatException occured, then quit because there's
        // nothing we can do without the TEXT or DATA segment.
        // Close the file input stream
        fis.close();

        return;
    }

    /**
     * Get the ANALYSIS segment limits
     */
    try {
        // Try to parse the analysisStart and analysisEnd
        analysisStart = Integer.parseInt((new String(headerArray, 32, 8)).trim());
        analysisEnd = Integer.parseInt((new String(headerArray, 40, 8)).trim());
    } catch (NumberFormatException nfe) {
        // If a NumberFormatException occured, then set the ANALYSIS start
        // and end to 0 since this segment is optional.
        analysisStart = 0;
        analysisEnd = 0;
    }

    /**
     * Use NIO to read the OTHER and TEXT segments
     */
    // Get the channel for the input file
    FileChannel fc = fis.getChannel();

    // Move the channel's position back to 0
    fc.position(0);

    // Map the TEXT segment to memory
    MappedByteBuffer mbb = fc.map(FileChannel.MapMode.READ_ONLY, 0, textEnd + 1);
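    // Note: map() takes an absolute offset and neither uses nor changes the
    // channel's position, so the position(0) call above is defensive only.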

    /**
     * Create the character decoder for parsing characters
     */
    decoder = charset.newDecoder();

    /**
     * Get the OTHER segment
     */
    mbb.limit(textStart);
    mbb.position(58);
    CharBuffer other = decoder.decode(mbb.slice());

    /**
     * Get the TEXT segment
     */
    mbb.limit(textEnd + 1);
    mbb.position(textStart);
    text = decoder.decode(mbb.slice()).toString();

    /**
     * Close the file since we have the string version of the TEXT segment
     */
    // Close the file channel
    fc.close();

    // Close the file input stream
    fis.close();

    /**
     * Decode the TEXT segment
     */
    // The first character of the primary TEXT segment contains the
    // delimiter character
    delimiter = text.charAt(0);

    /**
     * Key/Value Pairs
     */
    // Generate all the pairs
    String[] pairs;

    if (delimiter == '\\') {
        // If the delimiter character is a backslash, then we have to escape
        // it in the regular expression.
        pairs = text.split("[\\\\]");
    } else {
        // Otherwise, we can just split it normally by using the character
        // in the regular expression.
        pairs = text.split("[" + Character.toString(delimiter) + "]");
    }

    /**
     * Calculate the number of pairs --- The number of pairs is the length
     * of the pairs array minus 1 divided by 2. The one is due to the empty
     * first element from the Java split above.
     */
    int numPairs = (pairs.length - 1) / 2;

    // Create a mapping for each key and its value
    settings = new Properties();

    // Loop through the TEXT segment we just split to get the keys and
    // values
    // The key is in (i * 2) + 1 to account for the empty first element.
    // The value is in (i * 2) + 2 to account for the empty first element.
    for (int i = 0; i < numPairs; i++) {
        settings.setProperty(pairs[(i * 2) + 1].trim(), pairs[(i * 2) + 2].trim());
    }

    // Go through all the key/value pairs and parse them
    parseSettings();

    /**
     * Extract Events
     */
    if (extractEventsP) {
        // If we are extracting data, then do so.
        extractEvents();
    }
}

From source file:com.clustercontrol.agent.job.PublicKeyThread.java

/**
 * Deletes a public key from the authorized_keys file.<BR>
 * 
 * @param publicKey the public key to delete
 * @return true if the key was deleted successfully, false otherwise
 */
private synchronized boolean deleteKey(String publicKey) {
    m_log.debug("delete key start");

    if (SKIP_KEYFILE_UPDATE) {
        m_log.info("skipped deleting publicKey");
        return true;
    }

    Charset charset = Charset.forName("UTF-8");
    CharsetEncoder encoder = charset.newEncoder();
    CharsetDecoder decoder = charset.newDecoder();

    // Get the path of the authorized_keys file
    String fileName = AgentProperties.getProperty(execUser.toLowerCase() + AUTHORIZED_KEY_PATH);
    if (fileName == null || fileName.length() == 0)
        return false;

    // Create a File object for the key file
    File fi = new File(fileName);

    RandomAccessFile randomAccessFile = null;
    FileChannel channel = null;
    FileLock lock = null;
    boolean delete = false;
    try {
        // Create a RandomAccessFile in read-write mode
        randomAccessFile = new RandomAccessFile(fi, "rw");
        // Get the FileChannel
        channel = randomAccessFile.getChannel();

        // Try to acquire an exclusive lock, retrying until the timeout elapses
        for (int i = 0; i < (FILELOCK_TIMEOUT / FILELOCK_WAIT); i++) {
            if (null != (lock = channel.tryLock())) {
                break;
            }
            m_log.info("waiting for locked file... [" + (i + 1) + "/" + (FILELOCK_TIMEOUT / FILELOCK_WAIT)
                    + " : " + fileName + "]");
            Thread.sleep(FILELOCK_WAIT);
        }
        if (null == lock) {
            m_log.warn("file locking timeout.");
            return false;
        }

        // Rewrite the key file without the given key (synchronized)
        synchronized (authKeyLock) {
            // Allocate a buffer large enough for the whole file
            ByteBuffer buffer = ByteBuffer.allocate((int) channel.size());

            // Read the file contents
            channel.read(buffer);

            // Flip the buffer so it can be read from position 0
            buffer.flip();

            // Decode the contents into a string
            String contents = decoder.decode(buffer).toString();

            // Dump the contents for debugging
            m_log.debug("contents " + contents.length() + " : " + contents);

            // Split the contents into individual key lines
            List<String> keyCheck = new ArrayList<String>();
            StringTokenizer tokenizer = new StringTokenizer(contents, "\n");
            while (tokenizer.hasMoreTokens()) {
                keyCheck.add(tokenizer.nextToken());
            }

            // Remove the given key if it is present
            int s = keyCheck.lastIndexOf(publicKey);
            if (s != -1) {
                // Log and remove the key
                m_log.debug("remove key : " + keyCheck.get(s));
                keyCheck.remove(s);
            }

            // Re-encode the remaining keys
            encoder.reset();
            buffer.clear();

            int i;
            if (keyCheck.size() > 0) {
                for (i = 0; i < keyCheck.size() - 1; i++) {
                    encoder.encode(CharBuffer.wrap(keyCheck.get(i) + "\n"), buffer, false);
                }
                encoder.encode(CharBuffer.wrap(keyCheck.get(i)), buffer, true);
            }

            // Truncate the file and write the remaining keys back
            buffer.flip();
            channel.truncate(0);
            channel.position(0);
            channel.write(buffer);
        }

        delete = true;
    } catch (IOException e) {
        m_log.error(e.getMessage(), e);
    } catch (RuntimeException e) {
        m_log.error(e.getMessage(), e);
    } catch (InterruptedException e) {
        m_log.error(e.getMessage(), e);
    } finally {
        try {
            // Release the lock before closing the channel; releasing it after
            // the close would throw ClosedChannelException.
            if (lock != null) {
                lock.release();
            }
            if (channel != null) {
                channel.close();
            }
            if (randomAccessFile != null) {
                randomAccessFile.close();
            }
        } catch (Exception e) {
            // ignore cleanup failures
        }
    }

    return delete;
}