Example usage for java.io RandomAccessFile getChannel

Introduction

This page collects usage examples for java.io.RandomAccessFile.getChannel().

Prototype

public final FileChannel getChannel() 

Document

Returns the unique java.nio.channels.FileChannel object associated with this file.
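
A minimal sketch of the call, assuming an illustrative file name "example.dat" (any path works): open a RandomAccessFile, obtain its FileChannel, and perform channel I/O through it. The channel's position and the file's file pointer are shared, and closing either object closes the underlying file.

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

public class GetChannelExample {
    public static void main(String[] args) throws IOException {
        // try-with-resources closes both the channel and the file.
        try (RandomAccessFile raf = new RandomAccessFile("example.dat", "rw");
             FileChannel channel = raf.getChannel()) {
            // Writing through the channel advances the shared file pointer.
            channel.write(ByteBuffer.wrap("hello".getBytes()));
            System.out.println("size after write: " + channel.size());
        }
    }
}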

Usage

From source file:com.facebook.infrastructure.net.TcpConnection.java
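
Streams a byte range of a file to a socket with FileChannel.transferTo(), waiting for the connection to come up and for the socket to become writable whenever a transfer falls short.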

public void stream(File file, long startPosition, long endPosition) throws IOException {
    if (!bStream_)
        throw new IllegalStateException("Cannot stream since we are not set up to stream data.");

    lock_.lock();
    try {
        /* transfer 64MB in each attempt */
        int limit = 64 * 1024 * 1024;
        long total = endPosition - startPosition;
        /* keeps track of total number of bytes transferred */
        long bytesWritten = 0L;
        RandomAccessFile raf = new RandomAccessFile(file, "r");
        FileChannel fc = raf.getChannel();

        /* 
         * If the connection is not yet established then wait for
         * the timeout period of 2 seconds. Attempt to reconnect 3 times and then 
         * bail with an IOException.
        */
        long waitTime = 2;
        int retry = 0;
        while (!connected_.get()) {
            if (retry == 3)
                throw new IOException("Unable to connect to " + remoteEp_ + " after " + retry + " attempts.");
            waitToContinueStreaming(waitTime, TimeUnit.SECONDS);
            ++retry;
        }

        while (bytesWritten < total) {
            if (startPosition == 0) {
                ByteBuffer buffer = MessagingService.constructStreamHeader(false, true);
                socketChannel_.write(buffer);
                handleIncompleteWrite(buffer);
            }

            /* returns the number of bytes transferred from file to the socket */
            long bytesTransferred = fc.transferTo(startPosition, limit, socketChannel_);
            logger_.trace("Bytes transferred " + bytesTransferred);
            bytesWritten += bytesTransferred;
            startPosition += bytesTransferred;
            /*
             * If the number of bytes transferred is less than intended 
             * then we need to wait till socket becomes writeable again. 
            */
            if (bytesTransferred < limit && bytesWritten != total) {
                if ((key_.interestOps() & SelectionKey.OP_WRITE) == 0) {
                    SelectorManager.getSelectorManager().modifyKeyForWrite(key_);
                }
                waitToContinueStreaming();
            }
        }
    } finally {
        lock_.unlock();
    }
}

From source file:com.emc.vipr.sync.source.FilesystemSource.java
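
Acquires an exclusive FileLock through the channel before deleting a file, so that files still open for writing elsewhere are skipped rather than removed.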

protected void delete(File file) {
    // Try to lock the file first.  If this fails, the file is
    // probably open for write somewhere.
    // Note that on a mac, you can apparently delete files that
    // someone else has open for writing, and can lock files
    // too.
    // Must make sure to throw exceptions when necessary to flag actual failures as opposed to skipped files.
    if (file.isDirectory()) {
        File metaDir = getMetaFile(file).getParentFile();
        if (metaDir.exists())
            metaDir.delete();
        // Just try and delete dir
        if (!file.delete()) {
            LogMF.warn(l4j, "Failed to delete directory {0}", file);
        }
    } else {
        boolean tryDelete = true;
        if (deleteOlderThan > 0) {
            if (System.currentTimeMillis() - file.lastModified() < deleteOlderThan) {
                LogMF.info(l4j, "not deleting {0}; it is not at least {1} ms old", file, deleteOlderThan);
                tryDelete = false;
            }
        }
        if (deleteCheckScript != null) {
            String[] args = new String[] { deleteCheckScript.getAbsolutePath(), file.getAbsolutePath() };
            try {
                l4j.debug("delete check: " + Arrays.asList(args));
                Process p = Runtime.getRuntime().exec(args);
                while (true) {
                    try {
                        int exitCode = p.exitValue();

                        if (exitCode == 0) {
                            LogMF.debug(l4j, "delete check OK, exit code {0}", exitCode);
                        } else {
                            LogMF.info(l4j, "delete check failed, exit code {0}.  Not deleting file.",
                                    exitCode);
                            tryDelete = false;
                        }
                        break;
                    } catch (IllegalThreadStateException e) {
                        // Ignore.
                    }
                }
            } catch (IOException e) {
                LogMF.info(l4j, "error executing delete check script: {0}.  Not deleting file.", e.toString());
                tryDelete = false;
            }
        }
        RandomAccessFile raf = null;
        if (tryDelete) {
            try {
                raf = new RandomAccessFile(file, "rw");
                FileChannel fc = raf.getChannel();
                FileLock flock = fc.lock();
                // If we got here, we should be good.
                flock.release();
                if (!file.delete()) {
                    throw new RuntimeException(MessageFormat.format("Failed to delete {0}", file));
                }
            } catch (IOException e) {
                throw new RuntimeException(MessageFormat
                        .format("File {0} not deleted, it appears to be open: {1}", file, e.getMessage()));
            } finally {
                if (raf != null) {
                    try {
                        raf.close();
                    } catch (IOException e) {
                        // Ignore.
                    }
                }
            }
        }
    }
}

From source file:com.thoughtworks.go.config.GoConfigDataSource.java
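
Locks the configuration file's channel while truncating and rewriting it, preventing concurrent writers from corrupting the file.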

public synchronized GoConfigSaveResult writeWithLock(UpdateConfigCommand updatingCommand,
        GoConfigHolder configHolder) {
    FileChannel channel = null;
    FileOutputStream outputStream = null;
    FileLock lock = null;
    try {
        RandomAccessFile randomAccessFile = new RandomAccessFile(fileLocation(), "rw");
        channel = randomAccessFile.getChannel();
        lock = channel.lock();

        // Need to convert to xml before we try to write it to the config file.
        // If our cruiseConfig fails XSD validation, we don't want to write it incorrectly.
        String configAsXml = getModifiedConfig(updatingCommand, configHolder);

        randomAccessFile.seek(0);
        randomAccessFile.setLength(0);
        outputStream = new FileOutputStream(randomAccessFile.getFD());
        LOGGER.info("[Configuration Changed] Saving updated configuration.");
        IOUtils.write(configAsXml, outputStream);
        ConfigSaveState configSaveState = shouldMergeConfig(updatingCommand, configHolder)
                ? ConfigSaveState.MERGED
                : ConfigSaveState.UPDATED;
        return new GoConfigSaveResult(internalLoad(configAsXml, getConfigUpdatingUser(updatingCommand)),
                configSaveState);
    } catch (ConfigFileHasChangedException e) {
        LOGGER.warn("Configuration file could not be merged successfully after a concurrent edit: "
                + e.getMessage(), e);
        throw e;
    } catch (GoConfigInvalidException e) {
        LOGGER.warn("Configuration file is invalid: " + e.getMessage(), e);
        throw bomb(e.getMessage(), e);
    } catch (Exception e) {
        LOGGER.error("Configuration file is not valid: " + e.getMessage(), e);
        throw bomb(e.getMessage(), e);
    } finally {
        if (channel != null && lock != null) {
            try {
                lock.release();
                channel.close();
                IOUtils.closeQuietly(outputStream);
            } catch (IOException e) {
                LOGGER.error("Error occured when releasing file lock and closing file.", e);
            }
        }
        LOGGER.debug("[Config Save] Done writing with lock");
    }
}

From source file:org.apache.hadoop.hdfs.server.namenode.TestListCorruptFileBlocks.java
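
Corrupts the last two bytes of a block metadata file through a positional channel write, then verifies that the namenode reports exactly one corrupt file.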

/** check if nn.getCorruptFiles() returns a file that has corrupted blocks */
@Test(timeout = 300000)
public void testListCorruptFilesCorruptedBlock() throws Exception {
    MiniDFSCluster cluster = null;
    Random random = new Random();

    try {
        Configuration conf = new HdfsConfiguration();
        conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1); // datanode scans directories
        conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000); // datanode sends block reports
        // Set short retry timeouts so this test runs faster
        conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
        cluster = new MiniDFSCluster.Builder(conf).build();
        FileSystem fs = cluster.getFileSystem();

        // create two files with one block each
        DFSTestUtil util = new DFSTestUtil.Builder().setName("testCorruptFilesCorruptedBlock").setNumFiles(2)
                .setMaxLevels(1).setMaxSize(512).build();
        util.createFiles(fs, "/srcdat10");

        // fetch bad file list from namenode. There should be none.
        final NameNode namenode = cluster.getNameNode();
        Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = namenode.getNamesystem()
                .listCorruptFileBlocks("/", null);
        assertTrue("Namenode has " + badFiles.size() + " corrupt files. Expecting None.", badFiles.size() == 0);

        // Now deliberately corrupt one block
        String bpid = cluster.getNamesystem().getBlockPoolId();
        File storageDir = cluster.getInstanceStorageDir(0, 1);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        assertTrue("data directory does not exist", data_dir.exists());
        List<File> metaFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
        assertTrue("Data directory does not contain any blocks or there was an " + "IO error",
                metaFiles != null && !metaFiles.isEmpty());
        File metaFile = metaFiles.get(0);
        RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
        FileChannel channel = file.getChannel();
        long position = channel.size() - 2;
        int length = 2;
        byte[] buffer = new byte[length];
        random.nextBytes(buffer);
        channel.write(ByteBuffer.wrap(buffer), position);
        file.close();
        LOG.info("Deliberately corrupting file " + metaFile.getName() + " at offset " + position + " length "
                + length);

        // read all files to trigger detection of corrupted replica
        try {
            util.checkFiles(fs, "/srcdat10");
        } catch (BlockMissingException e) {
            System.out.println("Received BlockMissingException as expected.");
        } catch (IOException e) {
            assertTrue("Corrupted replicas not handled properly. Expecting BlockMissingException "
                    + " but received IOException " + e, false);
        }

        // fetch bad file list from namenode. There should be one file.
        badFiles = namenode.getNamesystem().listCorruptFileBlocks("/", null);
        LOG.info("Namenode has bad files. " + badFiles.size());
        assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting 1.", badFiles.size() == 1);
        util.cleanup(fs, "/srcdat10");
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

From source file:org.apache.hadoop.hdfs.server.namenode.TestFsck.java
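
Overwrites part of each block replica on disk via RandomAccessFile, then checks that fsck reports the corruption.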

public void testCorruptBlock() throws Exception {
    Configuration conf = new Configuration();
    conf.setLong("dfs.blockreport.intervalMsec", 1000);
    FileSystem fs = null;
    DFSClient dfsClient = null;
    LocatedBlocks blocks = null;
    int replicaCount = 0;
    Random random = new Random();
    String outStr = null;

    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster(conf, 3, true, null);
        cluster.waitActive();
        fs = cluster.getFileSystem();
        Path file1 = new Path("/testCorruptBlock");
        DFSTestUtil.createFile(fs, file1, 1024, (short) 3, 0);
        // Wait until file replication has completed
        DFSTestUtil.waitReplication(fs, file1, (short) 3);
        String block = DFSTestUtil.getFirstBlock(fs, file1).getBlockName();

        // Make sure filesystem is in healthy state
        outStr = runFsck(conf, 0, true, "/");
        System.out.println(outStr);
        assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));

        // corrupt replicas 
        File baseDir = new File(System.getProperty("test.build.data", "build/test/data"), "dfs/data");
        for (int i = 0; i < 6; i++) {
            File blockFile = new File(baseDir, "data" + (i + 1) + "/current/" + block);
            if (blockFile.exists()) {
                RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
                FileChannel channel = raFile.getChannel();
                String badString = "BADBAD";
                int rand = random.nextInt((int) channel.size() / 2);
                raFile.seek(rand);
                raFile.write(badString.getBytes());
                raFile.close();
            }
        }
        // Read the file to trigger reportBadBlocks
        try {
            IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), conf, true);
        } catch (IOException ie) {
            // Ignore exception
        }

        dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
        blocks = dfsClient.namenode.getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
        replicaCount = blocks.get(0).getLocations().length;
        while (replicaCount != 3) {
            try {
                Thread.sleep(100);
            } catch (InterruptedException ignore) {
            }
            blocks = dfsClient.namenode.getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
            replicaCount = blocks.get(0).getLocations().length;
        }
        assertTrue(blocks.get(0).isCorrupt());

        // Check if fsck reports the same
        outStr = runFsck(conf, 1, true, "/");
        System.out.println(outStr);
        assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
        assertTrue(outStr.contains("testCorruptBlock"));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

From source file:interfazGrafica.frmMoverRFC.java
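
Memory-maps a PDF file through the channel and renders one of its pages into a Swing panel.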

public void mostrarPDF() {
    String curp = "";
    curp = txtCapturaCurp.getText();
    ArrayList<DocumentoRFC> Docs = new ArrayList<>();
    DocumentoRFC sigExp;
    DocumentoRFC temporal;
    RFCescaneado tempo = new RFCescaneado();

    //tempo.borrartemporal();
    sigExp = expe.obtenerArchivosExp();
    Nombre_Archivo = sigExp.getNombre();
    nombreArchivo.setText(Nombre_Archivo);

    if (Nombre_Archivo != "") {
        doc = sigExp;
        System.out.println("Obtuvo el nombre del archivo.");
        System.out.println(doc.ruta + doc.nombre);
        String file = "C:\\escaneos\\Local\\Temporal\\" + doc.nombre;
        File arch = new File(file);
        System.out.println("Encontr el siguiente archivo:");
        System.out.println(file);
        System.out.println("");
        if (arch.exists()) {
            System.out.println("El archivo existe");
        }
        try {
            System.out.println("Entr al try");
            RandomAccessFile raf = new RandomAccessFile(file, "r");
            System.out.println("Reconoc el archivo" + file);
            FileChannel channel = raf.getChannel();
            System.out.println("Se abrio el canal");
            ByteBuffer buf = channel.map(FileChannel.MapMode.READ_ONLY, 0, channel.size());
            System.out.println("Channel map");
            PDFFile pdffile = new PDFFile(buf);
            System.out.println("Creando un pdf file");
            PDFPage page = pdffile.getPage(0);
            System.out.println("Obteniendo la pagina con " + 0);

            panelpdf2.showPage(page);
            System.out.println("mostrando el panel pdf2");
            repaint();
            System.gc();

            buf.clear();
            raf.close();

            System.gc();

        } catch (Exception ioe) {
            JOptionPane.showMessageDialog(null, "Error opening the file");
            ioe.printStackTrace();
        }

    }
    // tempo.borrartemporal();
}

From source file:com.remobile.file.LocalFilesystem.java
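
Uses the channel to truncate a file to the requested size, returning the resulting length.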

@Override
public long truncateFileAtURL(LocalFilesystemURL inputURL, long size) throws IOException {
    File file = new File(filesystemPathForURL(inputURL));

    if (!file.exists()) {
        throw new FileNotFoundException("File at " + inputURL.uri + " does not exist.");
    }

    RandomAccessFile raf = new RandomAccessFile(filesystemPathForURL(inputURL), "rw");
    try {
        if (raf.length() >= size) {
            FileChannel channel = raf.getChannel();
            channel.truncate(size);
            return size;
        }

        return raf.length();
    } finally {
        raf.close();
    }

}

From source file:org.apache.hadoop.hdfs.server.namenode.TestListCorruptFileBlocks.java
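
Corrupts a block metadata file with a positional channel write and verifies that listCorruptFileBlocks() keeps working while the namenode stays in safe mode, including across a namenode restart.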

/**
 * Check that listCorruptFileBlocks works while the namenode is still in safemode.
 */
@Test(timeout = 300000)
public void testListCorruptFileBlocksInSafeMode() throws Exception {
    MiniDFSCluster cluster = null;
    Random random = new Random();

    try {
        Configuration conf = new HdfsConfiguration();
        // datanode scans directories
        conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
        // datanode sends block reports
        conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000);
        // never leave safemode automatically
        conf.setFloat(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1.5f);
        // start populating repl queues immediately 
        conf.setFloat(DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY, 0f);
        // Set short retry timeouts so this test runs faster
        conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
        cluster = new MiniDFSCluster.Builder(conf).waitSafeMode(false).build();
        cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
        FileSystem fs = cluster.getFileSystem();

        // create two files with one block each
        DFSTestUtil util = new DFSTestUtil.Builder().setName("testListCorruptFileBlocksInSafeMode")
                .setNumFiles(2).setMaxLevels(1).setMaxSize(512).build();
        util.createFiles(fs, "/srcdat10");

        // fetch bad file list from namenode. There should be none.
        Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = cluster.getNameNode().getNamesystem()
                .listCorruptFileBlocks("/", null);
        assertTrue("Namenode has " + badFiles.size() + " corrupt files. Expecting None.", badFiles.size() == 0);

        // Now deliberately corrupt one block
        File storageDir = cluster.getInstanceStorageDir(0, 0);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, cluster.getNamesystem().getBlockPoolId());
        assertTrue("data directory does not exist", data_dir.exists());
        List<File> metaFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
        assertTrue("Data directory does not contain any blocks or there was an " + "IO error",
                metaFiles != null && !metaFiles.isEmpty());
        File metaFile = metaFiles.get(0);
        RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
        FileChannel channel = file.getChannel();
        long position = channel.size() - 2;
        int length = 2;
        byte[] buffer = new byte[length];
        random.nextBytes(buffer);
        channel.write(ByteBuffer.wrap(buffer), position);
        file.close();
        LOG.info("Deliberately corrupting file " + metaFile.getName() + " at offset " + position + " length "
                + length);

        // read all files to trigger detection of corrupted replica
        try {
            util.checkFiles(fs, "/srcdat10");
        } catch (BlockMissingException e) {
            System.out.println("Received BlockMissingException as expected.");
        } catch (IOException e) {
            assertTrue("Corrupted replicas not handled properly. " + "Expecting BlockMissingException "
                    + " but received IOException " + e, false);
        }

        // fetch bad file list from namenode. There should be one file.
        badFiles = cluster.getNameNode().getNamesystem().listCorruptFileBlocks("/", null);
        LOG.info("Namenode has bad files. " + badFiles.size());
        assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting 1.", badFiles.size() == 1);

        // restart namenode
        cluster.restartNameNode(0);
        fs = cluster.getFileSystem();

        // wait until replication queues have been initialized
        while (!cluster.getNameNode().namesystem.isPopulatingReplQueues()) {
            try {
                LOG.info("waiting for replication queues");
                Thread.sleep(1000);
            } catch (InterruptedException ignore) {
            }
        }

        // read all files to trigger detection of corrupted replica
        try {
            util.checkFiles(fs, "/srcdat10");
        } catch (BlockMissingException e) {
            System.out.println("Received BlockMissingException as expected.");
        } catch (IOException e) {
            assertTrue("Corrupted replicas not handled properly. " + "Expecting BlockMissingException "
                    + " but received IOException " + e, false);
        }

        // fetch bad file list from namenode. There should be one file.
        badFiles = cluster.getNameNode().getNamesystem().listCorruptFileBlocks("/", null);
        LOG.info("Namenode has bad files. " + badFiles.size());
        assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting 1.", badFiles.size() == 1);

        // check that we are still in safe mode
        assertTrue("Namenode is not in safe mode", cluster.getNameNode().isInSafeMode());

        // now leave safe mode so that we can clean up
        cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);

        util.cleanup(fs, "/srcdat10");
    } catch (Exception e) {
        LOG.error(StringUtils.stringifyException(e));
        throw e;
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

From source file:com.koda.integ.hbase.storage.LRUStorageRecycler.java
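
Memory-maps a storage file and scans its records, copying still-referenced blocks back to storage and purging the rest.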

/**
 * Format of a block in a file:
 * 0..3  - total record size (-4)
 * 4..7  - size of a key in bytes (16 if use hash128)
 * 8 .. x - key data
 * x+1 ..x+1- IN_MEMORY flag ( 1- in memory, 0 - not)
 * x+2 ... block, serialized and compressed
 *
 * @param file the file
 * @throws IOException Signals that an I/O exception has occurred.
 * @throws NativeMemoryException the native memory exception
 */
private void processFile(RandomAccessFile file) throws IOException, NativeMemoryException {

    FileChannel fc = file.getChannel();
    // make sure that file size < 2G
    LOG.info("File length=" + file.length());
    MappedByteBuffer buffer = fc.map(MapMode.READ_ONLY, 0, file.length());

    long fileLength = file.length();
    long saved = 0;
    long startTime = System.currentTimeMillis();

    while (buffer.position() < fileLength) {
        int oldOffset = buffer.position();
        //LOG.info(oldOffset);
        // check IO throttle
        ioThrottle(startTime, oldOffset);

        NumericHistogram histogram = refCache.getObjectHistogram();

        int blockSize = buffer.getInt();
        int keySize = buffer.getInt();

        //LOG.info("block size="+blockSize+" key size="+keySize);

        byte[] key = new byte[keySize];
        // STATISTICS
        totalScannedBytes.addAndGet(blockSize + 4);

        // read the key bytes from the buffer (we have to use byte[] keys)
        buffer.get(key);
        long data = refCache.getEvictionData(key);
        if (data < 0) {
            // not found in in_memory cache
            buffer.position(oldOffset + blockSize + 4);
            continue;
        }

        double quantValue = histogram.quantile(evictionThreshold);
        if (data > quantValue) {
            // keep this block: account for it and copy it back to storage
            saved += blockSize + 4;
            buffer.position(oldOffset);
            StorageHandle handle = storage.storeData(buffer);
            refCache.put(key, handle);

        } else {
            // STATISTICS
            totalPurgedBytes.addAndGet(blockSize + 4);
        }

        if (oldOffset + blockSize + 4 < fileLength) {
            // Advance pointer
            buffer.position(oldOffset + blockSize + 4);

        } else {
            break;
        }
        // Check panic. W/o adaptive processing support - killing file entirely 
        // is the only option to keep up with the load.
        if (storage.getCurrentStorageSize() >= panicLevelWatermark * storage.getMaxStorageSize()) {
            LOG.warn("[PANIC DELETE]. Storage size exceeded " + panicLevelWatermark + " mark.");
            // STATISTICS
            totalPanicEvents.incrementAndGet();
        }
    }

    // Unmap mapped ByteBuffer
    fc.close();
    FileUtils.unmapMmaped(buffer);
    LOG.info("Stats: total length=" + fileLength + "; purged data=" + (fileLength - saved)
            + " with eviction threshold=" + evictionThreshold + "; purged ratio=["
            + (((double) (fileLength - saved)) / fileLength) + "]");

}

From source file:org.apache.tajo.worker.dataserver.HttpDataServerHandler.java
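
Wraps the file's channel in a Netty FileRegion for zero-copy transfer, falling back to ChunkedFile when SSL is in the pipeline.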

private ChannelFuture sendFile(ChannelHandlerContext ctx, Channel ch, FileChunk file) throws IOException {
    RandomAccessFile raf;
    try {
        raf = new RandomAccessFile(file.getFile(), "r");
    } catch (FileNotFoundException fnfe) {
        return null;
    }

    ChannelFuture writeFuture;
    if (ch.getPipeline().get(SslHandler.class) != null) {
        // Cannot use zero-copy with HTTPS.
        writeFuture = ch.write(new ChunkedFile(raf, file.startOffset(), file.length(), 8192));
    } else {
        // No encryption - use zero-copy.
        final FileRegion region = new DefaultFileRegion(raf.getChannel(), file.startOffset(), file.length());
        writeFuture = ch.write(region);
        writeFuture.addListener(new ChannelFutureListener() {
            public void operationComplete(ChannelFuture future) {
                region.releaseExternalResources();
            }
        });
    }

    return writeFuture;
}