Example usage for java.nio.channels FileChannel position

Introduction

This page collects usage examples for java.nio.channels.FileChannel#position, drawn from open-source projects.

Prototype

public abstract long position() throws IOException;

public abstract FileChannel position(long newPosition) throws IOException;

Document

position() returns this channel's file position; position(long) sets the position and returns this channel.
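
For orientation before the project samples, a minimal self-contained sketch of both overloads (the file name is a placeholder; the file must already exist):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class PositionDemo {
    public static void main(String[] args) throws IOException {
        // "demo.bin" is a placeholder path.
        try (FileChannel channel = FileChannel.open(Paths.get("demo.bin"), StandardOpenOption.READ)) {
            ByteBuffer buf = ByteBuffer.allocate(16);
            channel.read(buf);                       // the read advances the file position
            System.out.println(channel.position());  // bytes consumed so far
            channel.position(0);                     // the one-argument overload rewinds
        }
    }
}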

Usage

From source file:org.apache.tajo.tuple.offheap.OffHeapRowBlock.java

public boolean copyFromChannel(FileChannel channel, TableStats stats) throws IOException {
    if (channel.position() < channel.size()) {
        clear();

        buffer.clear();
        channel.read(buffer);
        memorySize = buffer.position();

        while (position < memorySize) {
            long recordPtr = address + position;

            if (remain() < SizeOf.SIZE_OF_INT) {
                channel.position(channel.position() - remain());
                memorySize = (int) (memorySize - remain());
                return true;
            }

            int recordSize = UNSAFE.getInt(recordPtr);

            if (remain() < recordSize) {
                channel.position(channel.position() - remain());
                memorySize = (int) (memorySize - remain());
                return true;
            }

            position += recordSize;
            rowNum++;
        }

        return true;
    } else {
        return false;
    }
}
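
When the buffer ends mid-record, the snippet above moves the channel's position back by the unconsumed byte count so the next read restarts on a record boundary. Below is a standalone sketch of that rewind idiom, assuming length-prefixed records whose 4-byte prefix counts the whole record and fits the buffer; the format and names are illustrative, not Tajo's API.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class RecordScanner {
    // Counts length-prefixed records, rewinding the channel whenever a record
    // is split across buffer boundaries so the next read starts cleanly.
    public static int countRecords(String path) throws IOException {
        int records = 0;
        ByteBuffer buffer = ByteBuffer.allocate(64 * 1024);
        try (FileChannel channel = FileChannel.open(Paths.get(path), StandardOpenOption.READ)) {
            while (channel.read(buffer) > 0) {
                buffer.flip();
                while (buffer.remaining() >= Integer.BYTES) {
                    buffer.mark();
                    int recordSize = buffer.getInt(); // includes the prefix itself
                    if (buffer.remaining() < recordSize - Integer.BYTES) {
                        buffer.reset(); // partial record; reread it next pass
                        break;
                    }
                    buffer.position(buffer.position() + recordSize - Integer.BYTES);
                    records++;
                }
                // Push the channel back by the bytes we could not consume.
                channel.position(channel.position() - buffer.remaining());
                buffer.clear();
            }
        }
        return records;
    }
}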

From source file:com.bittorrent.mpetazzoni.common.Torrent.java

private static String hashFiles(List<File> files) throws InterruptedException, IOException {
    int threads = getHashingThreadsCount();
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    ByteBuffer buffer = ByteBuffer.allocate(Torrent.PIECE_LENGTH);
    List<Future<String>> results = new LinkedList<Future<String>>();
    StringBuilder hashes = new StringBuilder();

    long length = 0L;
    int pieces = 0;

    long start = System.nanoTime();
    for (File file : files) {
        logger.info("Hashing data from {} with {} threads ({} pieces)...", new Object[] { file.getName(),
                threads, (int) (Math.ceil((double) file.length() / Torrent.PIECE_LENGTH)) });

        length += file.length();

        FileInputStream fis = new FileInputStream(file);
        FileChannel channel = fis.getChannel();
        int step = 10;

        try {
            while (channel.read(buffer) > 0) {
                if (buffer.remaining() == 0) {
                    buffer.clear();
                    results.add(executor.submit(new CallableChunkHasher(buffer)));
                }

                if (results.size() >= threads) {
                    pieces += accumulateHashes(hashes, results);
                }

                if (channel.position() / (double) channel.size() * 100f > step) {
                    logger.info("  ... {}% complete", step);
                    step += 10;
                }
            }
        } finally {
            channel.close();
            fis.close();
        }
    }

    // Hash the last bit, if any
    if (buffer.position() > 0) {
        buffer.limit(buffer.position());
        buffer.position(0);
        results.add(executor.submit(new CallableChunkHasher(buffer)));
    }

    pieces += accumulateHashes(hashes, results);

    // Request orderly executor shutdown and wait for hashing tasks to
    // complete.
    executor.shutdown();
    while (!executor.isTerminated()) {
        Thread.sleep(10);
    }
    long elapsed = System.nanoTime() - start;

    int expectedPieces = (int) (Math.ceil((double) length / Torrent.PIECE_LENGTH));
    logger.info("Hashed {} file(s) ({} bytes) in {} pieces ({} expected) in {}ms.", new Object[] { files.size(),
            length, pieces, expectedPieces, String.format("%.1f", elapsed / 1e6), });

    return hashes.toString();
}
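
The read loop above derives a progress percentage from channel.position() relative to channel.size(). The same pattern in isolation, as a sketch (path and buffer size are placeholders):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class ReadProgress {
    public static void main(String[] args) throws IOException {
        ByteBuffer buffer = ByteBuffer.allocate(512 * 1024);
        try (FileChannel channel = FileChannel.open(Paths.get("data.bin"), StandardOpenOption.READ)) {
            int step = 10;
            while (channel.read(buffer) > 0) {
                buffer.clear(); // data is discarded; only progress matters here
                // position() over size() gives the fraction of the file consumed.
                if (channel.position() / (double) channel.size() * 100 > step) {
                    System.out.println(step + "% complete");
                    step += 10;
                }
            }
        }
    }
}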

From source file:com.turn.ttorrent.common.Torrent.java

private static String hashFiles(List<File> files, int pieceLenght)
        throws InterruptedException, IOException, NoSuchAlgorithmException {
    int threads = getHashingThreadsCount();
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    ByteBuffer buffer = ByteBuffer.allocate(pieceLenght);
    List<Future<String>> results = new LinkedList<Future<String>>();
    StringBuilder hashes = new StringBuilder();

    long length = 0L;
    int pieces = 0;

    long start = System.nanoTime();
    for (File file : files) {
        logger.info("Hashing data from {} with {} threads ({} pieces)...", new Object[] { file.getName(),
                threads, (int) (Math.ceil((double) file.length() / pieceLenght)) });

        length += file.length();

        FileInputStream fis = new FileInputStream(file);
        FileChannel channel = fis.getChannel();
        int step = 10;

        try {
            while (channel.read(buffer) > 0) {
                if (buffer.remaining() == 0) {
                    buffer.clear();
                    results.add(executor.submit(new CallableChunkHasher(buffer)));
                }

                if (results.size() >= threads) {
                    pieces += accumulateHashes(hashes, results);
                }

                if (channel.position() / (double) channel.size() * 100f > step) {
                    logger.info("  ... {}% complete", step);
                    step += 10;
                }
            }
        } finally {
            channel.close();
            fis.close();
        }
    }

    // Hash the last bit, if any
    if (buffer.position() > 0) {
        buffer.limit(buffer.position());
        buffer.position(0);
        results.add(executor.submit(new CallableChunkHasher(buffer)));
    }

    pieces += accumulateHashes(hashes, results);

    // Request orderly executor shutdown and wait for hashing tasks to
    // complete.
    executor.shutdown();
    while (!executor.isTerminated()) {
        Thread.sleep(10);
    }
    long elapsed = System.nanoTime() - start;

    int expectedPieces = (int) (Math.ceil((double) length / pieceLenght));
    logger.info("Hashed {} file(s) ({} bytes) in {} pieces ({} expected) in {}ms.", new Object[] { files.size(),
            length, pieces, expectedPieces, String.format("%.1f", elapsed / 1e6), });

    return hashes.toString();
}

From source file:ga.rugal.jpt.common.tracker.common.Torrent.java

private static String hashFiles(List<File> files, int pieceLenght) throws InterruptedException, IOException {
    int threads = getHashingThreadsCount();
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    ByteBuffer buffer = ByteBuffer.allocate(pieceLenght);
    List<Future<String>> results = new LinkedList<>();
    StringBuilder hashes = new StringBuilder();

    long length = 0L;
    int pieces = 0;

    long start = System.nanoTime();
    for (File file : files) {
        LOG.info("Hashing data from {} with {} threads ({} pieces)...", new Object[] { file.getName(), threads,
                (int) (Math.ceil((double) file.length() / pieceLenght)) });

        length += file.length();

        FileInputStream fis = new FileInputStream(file);
        FileChannel channel = fis.getChannel();
        int step = 10;

        try {
            while (channel.read(buffer) > 0) {
                if (buffer.remaining() == 0) {
                    buffer.clear();
                    results.add(executor.submit(new CallableChunkHasher(buffer)));
                }

                if (results.size() >= threads) {
                    pieces += accumulateHashes(hashes, results);
                }

                if (channel.position() / (double) channel.size() * 100f > step) {
                    LOG.info("  ... {}% complete", step);
                    step += 10;
                }
            }
        } finally {
            channel.close();
            fis.close();
        }
    }

    // Hash the last bit, if any
    if (buffer.position() > 0) {
        buffer.limit(buffer.position());
        buffer.position(0);
        results.add(executor.submit(new CallableChunkHasher(buffer)));
    }

    pieces += accumulateHashes(hashes, results);

    // Request orderly executor shutdown and wait for hashing tasks to
    // complete.
    executor.shutdown();
    while (!executor.isTerminated()) {
        Thread.sleep(10);
    }
    long elapsed = System.nanoTime() - start;

    int expectedPieces = (int) (Math.ceil((double) length / pieceLenght));
    LOG.info("Hashed {} file(s) ({} bytes) in {} pieces ({} expected) in {}ms.", new Object[] { files.size(),
            length, pieces, expectedPieces, String.format("%.1f", elapsed / 1e6), });

    return hashes.toString();
}

From source file:com.p2p.peercds.common.Torrent.java

private static String hashFiles(List<File> files) throws InterruptedException, IOException {
    int threads = getHashingThreadsCount();
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    ByteBuffer buffer = ByteBuffer.allocate(PIECE_LENGTH);
    List<Future<String>> results = new LinkedList<Future<String>>();
    StringBuilder hashes = new StringBuilder();

    long length = 0L;
    int pieces = 0;

    long start = System.nanoTime();
    for (File file : files) {
        logger.info("Hashing data from {} with {} threads ({} pieces)...", new Object[] { file.getName(),
                threads, (int) (Math.ceil((double) file.length() / PIECE_LENGTH)) });

        length += file.length();

        FileInputStream fis = new FileInputStream(file);
        FileChannel channel = fis.getChannel();
        int step = 10;

        try {
            while (channel.read(buffer) > 0) {
                if (buffer.remaining() == 0) {
                    buffer.clear();
                    results.add(executor.submit(new CallableChunkHasher(buffer)));
                }

                if (results.size() >= threads) {
                    pieces += accumulateHashes(hashes, results);
                }

                if (channel.position() / (double) channel.size() * 100f > step) {
                    logger.info("  ... {}% complete", step);
                    step += 10;
                }
            }
        } finally {
            channel.close();
            fis.close();
        }
    }

    // Hash the last bit, if any
    if (buffer.position() > 0) {
        buffer.limit(buffer.position());
        buffer.position(0);
        results.add(executor.submit(new CallableChunkHasher(buffer)));
    }

    pieces += accumulateHashes(hashes, results);

    // Request orderly executor shutdown and wait for hashing tasks to
    // complete.
    executor.shutdown();
    while (!executor.isTerminated()) {
        Thread.sleep(10);
    }
    long elapsed = System.nanoTime() - start;

    int expectedPieces = (int) (Math.ceil((double) length / PIECE_LENGTH));
    logger.info("Hashed {} file(s) ({} bytes) in {} pieces ({} expected) in {}ms.", new Object[] { files.size(),
            length, pieces, expectedPieces, String.format("%.1f", elapsed / 1e6), });

    return hashes.toString();
}

From source file:com.googlecode.onevre.utils.ServerClassLoader.java

/**
 * Creates a new ServerClassLoader.
 * @param parent The parent class loader
 * @param localCacheDirectory The directory to cache files to
 * @param remoteServer The URL of the remote server
 */
public ServerClassLoader(ClassLoader parent, File localCacheDirectory, URL remoteServer) {
    super(parent);
    this.localCacheDirectory = localCacheDirectory;
    this.localLibDirectory = new File(localCacheDirectory, LIB_DIR);
    File versionFile = new File(localCacheDirectory, "Version");
    boolean versionCorrect = false;
    if (!localCacheDirectory.exists()) {
        localCacheDirectory.mkdirs();
    } else {
        if (versionFile.exists()) {
            try {
                BufferedReader reader = new BufferedReader(new FileReader(versionFile));
                String version = reader.readLine();
                reader.close();
                versionCorrect = Defaults.PAG_VERSION.equals(version);
                log.info(version + " == " + Defaults.PAG_VERSION + " = " + versionCorrect);
            } catch (IOException e) {
                // Do Nothing
            }
        }
        try {
            FileInputStream input = new FileInputStream(new File(localCacheDirectory, INDEX));
            DataInputStream cacheFile = new DataInputStream(input);
            FileChannel channel = input.getChannel();
            while (channel.position() < channel.size()) {
                URL url = new URL(cacheFile.readUTF());
                String file = cacheFile.readUTF();
                if (versionCorrect && url.getHost().equals(remoteServer.getHost())
                        && (url.getPort() == remoteServer.getPort())) {
                    File jar = new File(localCacheDirectory, file);
                    if (jar.exists()) {
                        indexJar(url, jar);
                        CHECKED.put(url, true);
                    }
                }
            }
            input.close();
        } catch (FileNotFoundException e) {
            // Do Nothing - cache will be recreated later

        } catch (IOException e) {
            // Do Nothing - as above

        }
    }
    localLibDirectory.mkdirs();
    try {
        PrintWriter writer = new PrintWriter(versionFile);
        writer.println(Defaults.PAG_VERSION);
        writer.close();
    } catch (IOException e) {
        e.printStackTrace();
    }

    this.remoteServer = remoteServer;
}
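
In this constructor, position() and size() on the stream's channel serve as the end-of-file test while a DataInputStream decodes the records; the test is exact because DataInputStream adds no buffering of its own, so the channel's position tracks what the stream has consumed. A minimal sketch of that pattern, with an illustrative two-string record format:

import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.channels.FileChannel;

public class IndexReader {
    // Reads (url, file) string pairs until the channel is exhausted. Note
    // there is deliberately no BufferedInputStream: buffering would advance
    // the channel past what the DataInputStream has actually decoded.
    public static void dump(String path) throws IOException {
        try (FileInputStream input = new FileInputStream(path);
                DataInputStream in = new DataInputStream(input)) {
            FileChannel channel = input.getChannel();
            while (channel.position() < channel.size()) {
                String url = in.readUTF();
                String file = in.readUTF();
                System.out.println(url + " -> " + file);
            }
        }
    }
}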

From source file:org.apache.jxtadoop.hdfs.server.datanode.BlockSender.java

/**
 * sendBlock() is used to read a block and its metadata and stream the data to
 * either a client or another datanode.
 *
 * @param out  stream to which the block is written to
 * @param baseStream optional. if non-null, <code>out</code> is assumed to 
 *        be a wrapper over this stream. This enables optimizations for
 *        sending the data, e.g. 
 *        {@link SocketOutputStream#transferToFully(FileChannel, 
 *        long, int)}.
 * @param throttler for sending data.
 * @return total bytes read, including CRC.
 */
long sendBlock(DataOutputStream out, OutputStream baseStream, BlockTransferThrottler throttler)
        throws IOException {
    if (out == null) {
        throw new IOException("out stream is null");
    }
    this.throttler = throttler;

    long initialOffset = offset;
    long totalRead = 0;
    OutputStream streamForSendChunks = out;

    try {
        try {
            checksum.writeHeader(out);
            if (chunkOffsetOK) {
                out.writeLong(offset);
            }
            out.flush();
        } catch (IOException e) { //socket error
            throw ioeToSocketException(e);
        }

        int maxChunksPerPacket;
        int pktSize = DataNode.PKT_HEADER_LEN + SIZE_OF_INTEGER;

        if (transferToAllowed && !verifyChecksum && baseStream instanceof SocketOutputStream
                && blockIn instanceof FileInputStream) {

            FileChannel fileChannel = ((FileInputStream) blockIn).getChannel();

            // blockInPosition also indicates sendChunks() uses transferTo.
            blockInPosition = fileChannel.position();
            streamForSendChunks = baseStream;

            // ensure a minimum buffer size.
            maxChunksPerPacket = (Math.max(BUFFER_SIZE, MIN_BUFFER_WITH_TRANSFERTO) + bytesPerChecksum - 1)
                    / bytesPerChecksum;

            // allocate smaller buffer while using transferTo(). 
            pktSize += checksumSize * maxChunksPerPacket;
        } else {
            maxChunksPerPacket = Math.max(1, (BUFFER_SIZE + bytesPerChecksum - 1) / bytesPerChecksum);
            pktSize += (bytesPerChecksum + checksumSize) * maxChunksPerPacket;
        }

        ByteBuffer pktBuf = ByteBuffer.allocate(pktSize);

        while (endOffset > offset) {
            long len = sendChunks(pktBuf, maxChunksPerPacket, streamForSendChunks);
            offset += len;
            totalRead += len + ((len + bytesPerChecksum - 1) / bytesPerChecksum * checksumSize);
            seqno++;
        }
        try {
            out.writeInt(0); // mark the end of block        
            out.flush();
        } catch (IOException e) { //socket error
            throw ioeToSocketException(e);
        }
    } finally {
        if (clientTraceFmt != null) {
            ClientTraceLog.info(String.format(clientTraceFmt, totalRead));
        }
        close();
    }

    blockReadFully = (initialOffset == 0 && offset >= blockLength);

    return totalRead;
}
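
Before taking the zero-copy path, the sender snapshots fileChannel.position() so the transfer can proceed from an explicit offset. A reduced sketch of that idea, substituting plain FileChannel.transferTo for the SocketOutputStream.transferToFully helper the Hadoop code uses; the class and method names are illustrative:

import java.io.FileInputStream;
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.channels.WritableByteChannel;

public class ZeroCopySender {
    // Streams the remainder of a file from its current read offset with
    // transferTo, tracking the offset explicitly as the block senders do.
    public static long sendRemaining(FileInputStream in, WritableByteChannel out) throws IOException {
        FileChannel fileChannel = in.getChannel();
        long offset = fileChannel.position(); // snapshot the current offset
        long remaining = fileChannel.size() - offset;
        long sent = 0;
        while (sent < remaining) {
            long n = fileChannel.transferTo(offset + sent, remaining - sent, out);
            if (n <= 0) {
                break; // defensive: avoid spinning if the target stalls
            }
            sent += n;
        }
        return sent;
    }
}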

From source file:org.apache.hadoop.hdfs.server.datanode.RaidBlockSender.java

/**
 * sendBlock() is used to read a block and its metadata and stream the data to
 * either a client or another datanode.
 *
 * @param out  stream to which the block is written to
 * @param baseStream optional. if non-null, <code>out</code> is assumed to 
 *        be a wrapper over this stream. This enables optimizations for
 *        sending the data, e.g. 
 *        {@link SocketOutputStream#transferToFully(FileChannel, 
 *        long, int)}.
 * @return total bytes read, including CRC.
 */
public long sendBlock(DataOutputStream out, OutputStream baseStream) throws IOException {
    if (out == null) {
        throw new IOException("out stream is null");
    }

    long initialOffset = offset;
    long totalRead = 0;
    OutputStream streamForSendChunks = out;

    final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
    try {
        try {
            checksum.writeHeader(out);
            if (chunkOffsetOK) {
                out.writeLong(offset);
            }
            out.flush();
        } catch (IOException e) { //socket error
            throw ioeToSocketException(e);
        }

        int maxChunksPerPacket;
        int pktSize = PacketHeader.PKT_HEADER_LEN;

        if (transferToAllowed && !verifyChecksum && baseStream instanceof SocketOutputStream
                && blockIn instanceof FileInputStream) {

            FileChannel fileChannel = ((FileInputStream) blockIn).getChannel();

            // blockInPosition also indicates sendChunks() uses transferTo.
            blockInPosition = fileChannel.position();
            streamForSendChunks = baseStream;

            // ensure a minimum buffer size.
            maxChunksPerPacket = (Math.max(BUFFER_SIZE, MIN_BUFFER_WITH_TRANSFERTO) + bytesPerChecksum - 1)
                    / bytesPerChecksum;

            // allocate smaller buffer while using transferTo(). 
            pktSize += checksumSize * maxChunksPerPacket;
        } else {
            maxChunksPerPacket = Math.max(1, (BUFFER_SIZE + bytesPerChecksum - 1) / bytesPerChecksum);
            pktSize += (bytesPerChecksum + checksumSize) * maxChunksPerPacket;
        }

        ByteBuffer pktBuf = ByteBuffer.allocate(pktSize);

        while (endOffset > offset) {
            long len = sendChunks(pktBuf, maxChunksPerPacket, streamForSendChunks);
            offset += len;
            totalRead += len + ((len + bytesPerChecksum - 1) / bytesPerChecksum * checksumSize);
            seqno++;
        }
        try {
            // send an empty packet to mark the end of the block
            sendChunks(pktBuf, maxChunksPerPacket, streamForSendChunks);
            out.flush();
        } catch (IOException e) { //socket error
            throw ioeToSocketException(e);
        }
    } finally {
        if (clientTraceFmt != null) {
            final long endTime = System.nanoTime();
            ClientTraceLog.info(String.format(clientTraceFmt, totalRead, initialOffset, endTime - startTime));
        }
        close();
    }

    blockReadFully = initialOffset == 0 && offset >= replicaVisibleLength;

    return totalRead;
}

From source file:org.apache.hadoop.hdfs.server.datanode.BlockSender.java

/**
 * sendBlock() is used to read a block and its metadata and stream the data to
 * either a client or another datanode.
 *
 * @param out  stream to which the block is written to
 * @param baseStream optional. if non-null, <code>out</code> is assumed to 
 *        be a wrapper over this stream. This enables optimizations for
 *        sending the data, e.g. 
 *        {@link SocketOutputStream#transferToFully(FileChannel, 
 *        long, int)}.
 * @param throttler for sending data.
 * @return total bytes read, including CRC.
 */
long sendBlock(DataOutputStream out, OutputStream baseStream, BlockTransferThrottler throttler)
        throws IOException {
    if (out == null) {
        throw new IOException("out stream is null");
    }
    this.throttler = throttler;

    long initialOffset = offset;
    long totalRead = 0;
    OutputStream streamForSendChunks = out;

    final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
    try {
        try {
            checksum.writeHeader(out);
            if (chunkOffsetOK) {
                out.writeLong(offset);
            }
            out.flush();
        } catch (IOException e) { //socket error
            throw ioeToSocketException(e);
        }

        int maxChunksPerPacket;
        int pktSize = DataNode.PKT_HEADER_LEN + SIZE_OF_INTEGER;

        if (transferToAllowed && !verifyChecksum && baseStream instanceof SocketOutputStream
                && blockIn instanceof FileInputStream) {

            FileChannel fileChannel = ((FileInputStream) blockIn).getChannel();

            // blockInPosition also indicates sendChunks() uses transferTo.
            blockInPosition = fileChannel.position();
            streamForSendChunks = baseStream;

            // ensure a minimum buffer size.
            maxChunksPerPacket = (Math.max(BUFFER_SIZE, MIN_BUFFER_WITH_TRANSFERTO) + bytesPerChecksum - 1)
                    / bytesPerChecksum;

            // packet buffer has to be able to do a normal transfer in the case
            // of recomputing checksum
            pktSize += (bytesPerChecksum + checksumSize) * maxChunksPerPacket;
        } else {
            maxChunksPerPacket = Math.max(1, (BUFFER_SIZE + bytesPerChecksum - 1) / bytesPerChecksum);
            pktSize += (bytesPerChecksum + checksumSize) * maxChunksPerPacket;
        }

        ByteBuffer pktBuf = ByteBuffer.allocate(pktSize);

        while (endOffset > offset) {
            long len = sendChunks(pktBuf, maxChunksPerPacket, streamForSendChunks);
            offset += len;
            totalRead += len + ((len + bytesPerChecksum - 1) / bytesPerChecksum * checksumSize);
            seqno++;
        }
        try {
            out.writeInt(0); // mark the end of block        
            out.flush();
        } catch (IOException e) { //socket error
            throw ioeToSocketException(e);
        }
    } catch (RuntimeException e) {
        LOG.error("unexpected exception sending block", e);

        throw new IOException("unexpected runtime exception", e);
    } finally {
        if (clientTraceFmt != null) {
            final long endTime = System.nanoTime();
            ClientTraceLog.info(String.format(clientTraceFmt, totalRead, initialOffset, endTime - startTime));
        }
        close();
    }

    blockReadFully = (initialOffset == 0 && offset >= blockLength);

    return totalRead;
}

From source file:org.apache.hadoop.hdfs.server.datanode.BlockSender.java

/**
 * sendBlock() is used to read a block and its metadata and stream the data to
 * either a client or another datanode.
 *
 * @param out  stream to which the block is written to
 * @param baseStream optional. if non-null, <code>out</code> is assumed to 
 *        be a wrapper over this stream. This enables optimizations for
 *        sending the data, e.g. 
 *        {@link SocketOutputStream#transferToFully(FileChannel, 
 *        long, int)}.
 * @param throttler for sending data.
 * @return total bytes read, including CRC.
 */
long sendBlock(DataOutputStream out, OutputStream baseStream, DataTransferThrottler throttler)
        throws IOException {
    if (out == null) {
        throw new IOException("out stream is null");
    }
    this.throttler = throttler;

    long initialOffset = offset;
    long totalRead = 0;
    OutputStream streamForSendChunks = out;

    final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
    try {
        try {
            checksum.writeHeader(out);
            if (chunkOffsetOK) {
                out.writeLong(offset);
            }
            out.flush();
        } catch (IOException e) { //socket error
            throw ioeToSocketException(e);
        }

        int maxChunksPerPacket;
        int pktSize = PacketHeader.PKT_HEADER_LEN;

        if (transferToAllowed && !verifyChecksum && baseStream instanceof SocketOutputStream
                && blockIn instanceof FileInputStream) {

            FileChannel fileChannel = ((FileInputStream) blockIn).getChannel();

            // blockInPosition also indicates sendChunks() uses transferTo.
            blockInPosition = fileChannel.position();
            streamForSendChunks = baseStream;

            // ensure a minimum buffer size.
            maxChunksPerPacket = (Math.max(HdfsConstants.IO_FILE_BUFFER_SIZE, MIN_BUFFER_WITH_TRANSFERTO)
                    + bytesPerChecksum - 1) / bytesPerChecksum;

            // allocate smaller buffer while using transferTo(). 
            pktSize += checksumSize * maxChunksPerPacket;
        } else {
            maxChunksPerPacket = Math.max(1,
                    (HdfsConstants.IO_FILE_BUFFER_SIZE + bytesPerChecksum - 1) / bytesPerChecksum);
            pktSize += (bytesPerChecksum + checksumSize) * maxChunksPerPacket;
        }

        ByteBuffer pktBuf = ByteBuffer.allocate(pktSize);

        while (endOffset > offset) {
            long len = sendChunks(pktBuf, maxChunksPerPacket, streamForSendChunks);
            offset += len;
            totalRead += len + ((len + bytesPerChecksum - 1) / bytesPerChecksum * checksumSize);
            seqno++;
        }
        try {
            // send an empty packet to mark the end of the block
            sendChunks(pktBuf, maxChunksPerPacket, streamForSendChunks);
            out.flush();
        } catch (IOException e) { //socket error
            throw ioeToSocketException(e);
        }

        sentEntireByteRange = true;
    } finally {
        if (clientTraceFmt != null) {
            final long endTime = System.nanoTime();
            ClientTraceLog.info(String.format(clientTraceFmt, totalRead, initialOffset, endTime - startTime));
        }
        close();
    }

    return totalRead;
}