Example usage for java.nio.channels FileChannel read

Introduction

On this page you can find example usage for java.nio.channels FileChannel.read.

Prototype

public final long read(ByteBuffer[] dsts) throws IOException 

Document

Reads a sequence of bytes from this channel into the given buffers.
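
A minimal, self-contained sketch of a scattering read with this method (the file name "example.dat" and the buffer sizes are placeholders, for illustration only):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class ScatterReadExample {
    public static void main(String[] args) throws IOException {
        // read(ByteBuffer[]) fills the buffers in array order:
        // the first 4 bytes land in header, the next 8 in body.
        ByteBuffer header = ByteBuffer.allocate(4);
        ByteBuffer body = ByteBuffer.allocate(8);
        ByteBuffer[] dsts = { header, body };

        try (FileChannel channel = FileChannel.open(Paths.get("example.dat"), StandardOpenOption.READ)) {
            long read = channel.read(dsts); // returns the bytes read, or -1 at end-of-stream
            System.out.println("Bytes read: " + read);
        }
    }
}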

Usage

From source file:no.sesat.search.http.filters.SiteJspLoaderFilter.java

private void downloadJsp(final HttpServletRequest request, final String jsp) throws MalformedURLException {

    final StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    byte[] golden = new byte[0];

    // search skins for the jsp and write it out to "golden"
    for (Site site = (Site) request.getAttribute(Site.NAME_KEY); 0 == golden.length; site = site.getParent()) {

        if (null == site) {
            if (null == config.getServletContext().getResource(jsp)) {
                throw new ResourceLoadException("Unable to find " + jsp + " in any skin");
            }
            break;
        }

        final Site finalSite = site;
        final BytecodeLoader bcLoader = UrlResourceLoader.newBytecodeLoader(finalSite.getSiteContext(), jsp,
                null);
        bcLoader.abut();
        golden = bcLoader.getBytecode();
    }

    // if golden now contains data, save it to a local (i.e. local web application) file
    if (0 < golden.length) {
        try {
            final File file = new File(root + jsp);

            // create the directory structure
            file.getParentFile().mkdirs();

            // check existing file
            boolean needsUpdating = true;
            final boolean fileExisted = file.exists();
            if (!fileExisted) {
                file.createNewFile();
            }

            // channel.lock() only synchronises file access between programs, not between threads inside
            //  the current JVM. The latter results in the OverlappingFileLockException.
            //  At least this is my current understanding of java.nio.channels.
            //   It may be that no synchronisation or locking is required at all. A beer to whoever answers :-)
            // So we must provide synchronisation between our own threads.
            //  Synchronisation against the file's path (using the JVM's String.intern() functionality)
            //  should work. (I can't imagine this string being used for any other synchronisation purposes.)
            synchronized (file.toString().intern()) {

                RandomAccessFile fileAccess = null;
                FileChannel channel = null;

                try {

                    fileAccess = new RandomAccessFile(file, "rws");
                    channel = fileAccess.getChannel();

                    channel.lock();

                    if (fileExisted) {

                        final byte[] bytes = new byte[(int) channel.size()];
                        final ByteBuffer byteBuffer = ByteBuffer.wrap(bytes);
                        int reads;
                        do {
                            reads = channel.read(byteBuffer);
                        } while (0 < reads);

                        needsUpdating = !Arrays.equals(golden, bytes);
                    }

                    if (needsUpdating) {
                        // download file from skin
                        channel.write(ByteBuffer.wrap(golden), 0);
                        file.deleteOnExit();
                    }
                } finally {
                    if (null != channel) {
                        channel.close();
                    }
                    if (null != fileAccess) {
                        fileAccess.close();
                    }

                    LOG.debug("resource created as " + config.getServletContext().getResource(jsp));
                }
            }

        } catch (IOException ex) {
            LOG.error(ex.getMessage(), ex);
        }
    }

    stopWatch.stop();
    LOG.trace("SiteJspLoaderFilter.downloadJsp(..) took " + stopWatch);
}
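
The pattern above pairs a JVM-level monitor (the interned path string) with channel.lock(), which guards against other processes. The same lock-then-compare idea reads more compactly with try-with-resources; a minimal sketch under the same assumptions (the class name, method name, and update policy are illustrative, not part of the original filter):

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.util.Arrays;

public class LockedFileSync {

    /** Overwrites file with golden unless it already holds that content; returns true if updated. */
    public static boolean syncToGolden(File file, byte[] golden) throws IOException {
        // The monitor guards threads in this JVM; the FileLock guards other processes.
        synchronized (file.toString().intern()) {
            try (RandomAccessFile access = new RandomAccessFile(file, "rws");
                    FileChannel channel = access.getChannel();
                    FileLock lock = channel.lock()) {

                // Assumes the file is smaller than 2 GB, as in the example above.
                ByteBuffer buffer = ByteBuffer.allocate((int) channel.size());
                while (0 < channel.read(buffer)) {
                    // keep reading until the buffer is full or end-of-stream
                }

                if (Arrays.equals(golden, buffer.array())) {
                    return false;
                }
                channel.truncate(0);
                channel.write(ByteBuffer.wrap(golden), 0);
                return true;
            }
        }
    }
}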

From source file:org.neo4j.io.pagecache.impl.SingleFilePageSwapper.java

private long lockPositionReadVector(long filePageId, FileChannel channel, long fileOffset, ByteBuffer[] srcs)
        throws IOException {
    try {
        long toRead = filePageSize * (long) srcs.length;
        long read, readTotal = 0;
        synchronized (positionLock(channel)) {
            channel.position(fileOffset);
            do {
                read = channel.read(srcs);
            } while (read != -1 && (readTotal += read) < toRead);
            return readTotal;
        }
    } catch (ClosedChannelException e) {
        // AsynchronousCloseException is a subclass of
        // ClosedChannelException, and ClosedByInterruptException is in
        // turn a subclass of AsynchronousCloseException.
        tryReopen(filePageId, e);
        boolean interrupted = Thread.interrupted();
        // Recurse because this is hopefully a very rare occurrence.
        channel = unwrappedChannel(filePageId);
        long bytesRead = lockPositionReadVector(filePageId, channel, fileOffset, srcs);
        if (interrupted) {
            Thread.currentThread().interrupt();
        }
        return bytesRead;
    }
}

From source file:eu.stratosphere.nephele.taskmanager.runtime.EnvelopeConsumptionLog.java

private void loadNextOutstandingEnvelopes() {

    final int pos = this.outstandingEnvelopesAsIntBuffer.position();

    if (pos > 0) {

        final int rem = this.outstandingEnvelopesAsIntBuffer.remaining();

        for (int i = 0; i < rem; ++i) {
            this.outstandingEnvelopesAsIntBuffer.put(i, this.outstandingEnvelopesAsIntBuffer.get(i + pos));
        }

        this.outstandingEnvelopesAsIntBuffer.position(0);
        this.outstandingEnvelopesAsIntBuffer.limit(rem);
    }

    if (this.numberOfEntriesReadFromLog == this.numberOfInitialLogEntries) {
        return;
    }

    FileChannel fc = null;

    try {

        this.outstandingEnvelopesAsByteBuffer
                .position(this.outstandingEnvelopesAsIntBuffer.limit() * SIZE_OF_INTEGER);
        this.outstandingEnvelopesAsByteBuffer.limit(this.outstandingEnvelopesAsByteBuffer.capacity());

        fc = new FileInputStream(this.logFile).getChannel();
        fc.position(this.numberOfEntriesReadFromLog * SIZE_OF_INTEGER);

        int totalBytesRead = 0;

        while (this.outstandingEnvelopesAsByteBuffer.hasRemaining()) {

            final int bytesRead = fc.read(this.outstandingEnvelopesAsByteBuffer);
            if (bytesRead == -1) {
                break;
            }

            totalBytesRead += bytesRead;
        }

        if (totalBytesRead % SIZE_OF_INTEGER != 0) {
            LOG.error("Read " + totalBytesRead + " from " + this.logFile.getAbsolutePath()
                    + ", file may be corrupt");
        }

        final int numberOfNewEntries = totalBytesRead / SIZE_OF_INTEGER;

        this.outstandingEnvelopesAsIntBuffer
                .limit(this.outstandingEnvelopesAsIntBuffer.limit() + numberOfNewEntries);

        this.numberOfEntriesReadFromLog += numberOfNewEntries;

        fc.close();

    } catch (IOException ioe) {
        LOG.error(StringUtils.stringifyException(ioe));
    } finally {

        if (fc != null) {
            try {
                fc.close();
            } catch (IOException ioe) {
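                // Ignored: nothing useful can be done if close fails here.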
            }
        }
    }
}

From source file:dk.statsbiblioteket.util.LineReaderTest.java

public void testNIO() throws Exception {
    byte[] INITIAL = new byte[] { 1, 2, 3, 4 };
    byte[] EXTRA = new byte[] { 5, 6, 7, 8 };
    byte[] FULL = new byte[] { 1, 2, 3, 4, 5, 6, 7, 8 };
    byte[] FIFTH = new byte[] { 87 };
    byte[] FULL_WITH_FIFTH = new byte[] { 1, 2, 3, 4, 87, 6, 7, 8 };

    // Create temp-file with content
    File temp = createTempFile();
    FileOutputStream fileOut = new FileOutputStream(temp, true);
    fileOut.write(INITIAL);
    fileOut.close();

    checkContent("The plain test-file should be correct", temp, INITIAL);
    {
        // Read the 4 bytes
        RandomAccessFile input = new RandomAccessFile(temp, "r");
        FileChannel channelIn = input.getChannel();
        ByteBuffer buffer = ByteBuffer.allocate(4096);
        channelIn.position(0);
        assertEquals("Buffer read should read full length", INITIAL.length, channelIn.read(buffer));
        buffer.position(0);

        checkContent("Using buffer should produce the right bytes", INITIAL, buffer);
        channelIn.close();
        input.close();
    }
    {
        // Fill new buffer
        ByteBuffer outBuffer = ByteBuffer.allocate(4096);
        outBuffer.put(EXTRA);
        outBuffer.flip();
        assertEquals("The limit of the outBuffer should be correct", EXTRA.length, outBuffer.limit());

        // Append new buffer to end
        RandomAccessFile output = new RandomAccessFile(temp, "rw");
        FileChannel channelOut = output.getChannel();
        channelOut.position(INITIAL.length);
        assertEquals("All bytes should be written", EXTRA.length, channelOut.write(outBuffer));
        channelOut.close();
        output.close();
        checkContent("The resulting file should have the full output", temp, FULL);
    }

    {
        // Fill single byte buffer
        ByteBuffer outBuffer2 = ByteBuffer.allocate(4096);
        outBuffer2.put(FIFTH);
        outBuffer2.flip();
        assertEquals("The limit of the second outBuffer should be correct", FIFTH.length, outBuffer2.limit());

        // Insert byte in the middle
        RandomAccessFile output2 = new RandomAccessFile(temp, "rw");
        FileChannel channelOut2 = output2.getChannel();
        channelOut2.position(4);
        assertEquals("The FIFTH should be written", FIFTH.length, channelOut2.write(outBuffer2));
        channelOut2.close();
        output2.close();
        checkContent("The resulting file with fifth should be complete", temp, FULL_WITH_FIFTH);
    }
}

From source file:com.yobidrive.diskmap.buckets.BucketTableManager.java

private void initializeBucketTableFromLastCommittedBucketFile() throws BucketTableManagerException {
    FileInputStream tableStream = null;
    FileChannel fileChannel = null;
    try {
        File latestCommittedFile = getLatestCommitedFile();
        if (latestCommittedFile != null) {
            tableStream = new FileInputStream(latestCommittedFile);
            fileChannel = tableStream.getChannel();
            ByteBuffer buffer = ByteBuffer.allocate(HEADERSIZE);
            fileChannel.position(0L);
            int read = fileChannel.read(buffer);
            if (read < HEADERSIZE) {
                fileChannel.close();
                throw new BucketTableManagerException(
                        "Wrong bucket table header size: " + read + "/" + HEADERSIZE);
            }
            // Check content of header. Start with Big Endian (default for Java)
            buffer.rewind();
            byteOrder = ByteOrder.BIG_ENDIAN;
            buffer.order(byteOrder);
            int magic = buffer.getInt();
            if (magic == MAGICSTART_BADENDIAN) {
                byteOrder = ByteOrder.LITTLE_ENDIAN;
                buffer.order(byteOrder);
            } else if (magic != MAGICSTART) {
                fileChannel.close();
                throw new BucketTableManagerException("Bad header in bucket table file");
            }
            // Read number of buckets
            long headerMapSize = buffer.getLong();
            // Read checkPoint
            NeedlePointer includedCheckpoint = new NeedlePointer();
            includedCheckpoint.getNeedlePointerFromBuffer(buffer);
            // Read second magic number
            magic = buffer.getInt();
            if (magic != MAGICEND) {
                fileChannel.close();
                throw new BucketTableManagerException("Bad header in bucket table file");
            }
            // Check number of buckets against requested map size
            if (headerMapSize != mapSize) {
                // Map size does not match
                fileChannel.close();
                throw new BucketTableManagerException(
                        "Requested map size " + mapSize + " does not match header map size " + headerMapSize);
            }
            // Sets initial checkpoint
            bucketTable.setInitialCheckPoint(includedCheckpoint);
            // Now reads all entries
            logger.info("Hot start: loading buckets...");
            for (int i = 0; i < nbBuffers; i++) {
                bucketTable.prepareBufferForReading(i);
                read = fileChannel.read(bucketTable.getBuffer(i));
                if (read < bucketTable.getBuffer(i).limit())
                    throw new BucketTableManagerException("Incomplete bucket table file "
                            + latestCommittedFile.getName() + ", expected " + mapSize + HEADERSIZE);
                //else
                //   logger.info("Hot start: loaded "+(i+1)*entriesPerBuffer+" buckets"+((i<(nbBuffers-1))?"...":"")) ;
            }
            // Checks second magic marker
            buffer = ByteBuffer.allocate(NeedleLogInfo.INFOSIZE);
            buffer.rewind();
            buffer.limit(INTSIZE);
            if (fileChannel.read(buffer) < INTSIZE)
                throw new BucketTableManagerException(
                        "Incomplete bucket table file, missing second magic number "
                                + latestCommittedFile.getName());
            buffer.rewind();
            magic = buffer.getInt();
            if (magic != MAGICSTART) {
                fileChannel.close();
                throw new BucketTableManagerException("Bad header in bucket table file");
            }
            // Now reads clean counters
            while (true) {
                buffer.rewind();
                buffer.limit(NeedleLogInfo.INFOSIZE);
                read = fileChannel.read(buffer);
                if (read > 0 && read < NeedleLogInfo.INFOSIZE)
                    throw new BucketTableManagerException("Incomplete bucket table file, log info too short "
                            + latestCommittedFile.getName() + ", expected " + mapSize + HEADERSIZE);
                if (read <= 0)
                    break;
                else {
                    NeedleLogInfo nli = new NeedleLogInfo(useAverage);
                    buffer.rewind();
                    nli.getNeedleLogInfo(buffer);
                    logInfoPerLogNumber.put(Integer.valueOf(nli.getNeedleFileNumber()), nli);
                }
            }
            logger.info("Hot start: loaded " + (nbBuffers * entriesPerBuffer) + " buckets");

        } else {
            // Empty file
            bucketTable.setInitialCheckPoint(new NeedlePointer());
            bucketTable.format();
        }
    } catch (IOException ie) {
        throw new BucketTableManagerException("Failed initializing bucket table", ie);
    } catch (BufferUnderflowException bue) {
        throw new BucketTableManagerException("Bucket table too short", bue);
    } finally {
        if (fileChannel != null) {
            try {
                fileChannel.close();
            } catch (IOException ex) {
                throw new BucketTableManagerException("Error while closing file channel", ex);
            }
        }
    }
}

From source file:org.alfresco.repo.content.AbstractWritableContentStoreTest.java

/**
 * Tests random access reading.
 * <p>
 * Only executes if the reader implements {@link RandomAccessContent}.
 */
@Test
public void testRandomAccessRead() throws Exception {
    ContentWriter writer = getWriter();
    // put some content
    String content = "ABC";
    byte[] bytes = content.getBytes();
    writer.putContent(content);
    ContentReader reader = writer.getReader();

    FileChannel fileChannel = reader.getFileChannel();
    assertNotNull("No channel given", fileChannel);

    // check that no other content access is allowed
    try {
        reader.getReadableChannel();
        fail("Second channel access allowed");
    } catch (RuntimeException e) {
        // expected
    }

    // read the content
    ByteBuffer buffer = ByteBuffer.allocate(bytes.length);
    int count = fileChannel.read(buffer);
    assertEquals("Incorrect number of bytes read", bytes.length, count);
    // transfer back to array
    buffer.rewind();
    buffer.get(bytes);
    String checkContent = new String(bytes);
    assertEquals("Content read failure", content, checkContent);
    fileChannel.close();
}

From source file:com.liferay.portal.util.FileImpl.java

public boolean isSameContent(File file, byte[] bytes, int length) {
    FileChannel fileChannel = null;

    try {
        FileInputStream fileInputStream = new FileInputStream(file);

        fileChannel = fileInputStream.getChannel();

        if (fileChannel.size() != length) {
            return false;
        }

        byte[] buffer = new byte[1024];

        ByteBuffer byteBuffer = ByteBuffer.wrap(buffer);

        int bufferIndex = 0;
        int bufferLength = -1;

        while (((bufferLength = fileChannel.read(byteBuffer)) > 0) && (bufferIndex < length)) {

            for (int i = 0; i < bufferLength; i++) {
                if (buffer[i] != bytes[bufferIndex++]) {
                    return false;
                }
            }

            byteBuffer.clear();
        }

        return (bufferIndex == length) && (bufferLength == -1);
    } catch (Exception e) {
        return false;
    } finally {
        if (fileChannel != null) {
            try {
                fileChannel.close();
            } catch (IOException ioe) {
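                // Ignored: nothing useful can be done if close fails here.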
            }
        }
    }
}

From source file:com.colorchen.qbase.utils.FileUtil.java

/**
 * Merges the given source files into a single output file.
 *
 * @param context the caller's context (not used in the method body shown)
 * @param outFile the destination file
 * @param files   the source files to append, in order
 */
public static void mergeFiles(Context context, File outFile, List<File> files) {
    FileChannel outChannel = null;
    try {
        outChannel = new FileOutputStream(outFile).getChannel();
        for (File f : files) {
            FileChannel fc = new FileInputStream(f).getChannel();
            ByteBuffer bb = ByteBuffer.allocate(BUFSIZE);
            while (fc.read(bb) != -1) {
                bb.flip();
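                // Note: in general, write may drain bb only partially; a robust
                // variant would loop while bb.hasRemaining().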
                outChannel.write(bb);
                bb.clear();
            }
            fc.close();
        }
        Log.d(TAG, "?");
    } catch (IOException ioe) {
        ioe.printStackTrace();
    } finally {
        try {
            if (outChannel != null) {
                outChannel.close();
            }
        } catch (IOException ignore) {
        }
    }
}
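
For plain file-to-file copying, the explicit read/write loop above can also be replaced by FileChannel.transferTo, which lets the operating system move the bytes without staging them in a user-space buffer where it can. A minimal sketch of the same merge (the class and method names are illustrative):

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.util.List;

public class ChannelMerge {

    /** Appends each source file to outFile, in order. */
    public static void mergeFiles(File outFile, List<File> files) throws IOException {
        try (FileChannel out = new FileOutputStream(outFile).getChannel()) {
            for (File f : files) {
                try (FileChannel in = new FileInputStream(f).getChannel()) {
                    long position = 0;
                    long size = in.size();
                    // transferTo may move fewer bytes than requested, so loop until done.
                    while (position < size) {
                        position += in.transferTo(position, size - position, out);
                    }
                }
            }
        }
    }
}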

From source file:org.carbondata.query.aggregator.impl.CustomAggregatorHelper.java

/**
 * Reads the given level file and updates the surrogate key cache.
 *
 * @param memberFile the level file to read
 * @param fileName   the key under which the members are cached
 * @throws IOException
 */
private void readLevelFileAndUpdateCache(File memberFile, String fileName) throws IOException {
    FileInputStream fis = null;
    FileChannel fileChannel = null;
    try {
        // open an input stream on the member file
        fis = new FileInputStream(memberFile);

        fileChannel = fis.getChannel();
        Map<Integer, String> memberMap = surrogateKeyMap.get(fileName);

        if (null == memberMap) {
            memberMap = new HashMap<Integer, String>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
            surrogateKeyMap.put(fileName, memberMap);
        }

        long size = fileChannel.size();
        int maxKey = 0;
        ByteBuffer rowlengthToRead = null;
        int len = 0;
        ByteBuffer row = null;
        int toread = 0;
        byte[] bb = null;
        String value = null;
        int surrogateValue = 0;

        boolean enableEncoding = Boolean.valueOf(
                CarbonProperties.getInstance().getProperty(CarbonCommonConstants.ENABLE_BASE64_ENCODING,
                        CarbonCommonConstants.ENABLE_BASE64_ENCODING_DEFAULT));

        while (fileChannel.position() < size) {
            rowlengthToRead = ByteBuffer.allocate(4);
            fileChannel.read(rowlengthToRead);
            rowlengthToRead.rewind();
            len = rowlengthToRead.getInt();
            if (len == 0) {
                continue;
            }

            row = ByteBuffer.allocate(len);
            fileChannel.read(row);
            row.rewind();
            toread = row.getInt();
            bb = new byte[toread];
            row.get(bb);

            if (enableEncoding) {
                value = new String(Base64.decodeBase64(bb), Charset.defaultCharset());
            } else {
                value = new String(bb, Charset.defaultCharset());
            }

            surrogateValue = row.getInt();
            memberMap.put(surrogateValue, value);

            // check if max key is less than Surrogate key then update the max key
            if (maxKey < surrogateValue) {
                maxKey = surrogateValue;
            }
        }

    } finally {
        CarbonUtil.closeStreams(fileChannel, fis);
    }
}

From source file:org.apache.hadoop.yarn.server.nodemanager.containermanager.TestContainerManager.java

private ByteBuffer readFileToByteBuffer(File source) throws IOException {
    ByteBuffer buffer = ByteBuffer.allocate(128);
    FileChannel fileChannel = new FileInputStream(source).getChannel();
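    // Note: a single read assumes the file fits in the 128-byte buffer
    // and that the channel fills it in one call.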
    fileChannel.read(buffer);
    fileChannel.close();
    buffer.flip();
    return buffer;
}
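
When neither of those assumptions holds, the usual pattern is to size the buffer from the channel and loop until read returns -1; a minimal sketch (the class name is illustrative):

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

public class ReadFully {

    /** Reads the whole file into a buffer sized from the channel. */
    public static ByteBuffer readFileToByteBuffer(File source) throws IOException {
        try (FileChannel channel = new FileInputStream(source).getChannel()) {
            ByteBuffer buffer = ByteBuffer.allocate((int) channel.size());
            while (buffer.hasRemaining()) {
                if (channel.read(buffer) == -1) {
                    break; // end-of-stream before the expected size
                }
            }
            buffer.flip();
            return buffer;
        }
    }
}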