Example usage for java.nio.channels FileChannel write

Introduction

On this page you can find example usage for the java.nio.channels FileChannel write method.

Prototype

public abstract int write(ByteBuffer src, long position) throws IOException;

Document

Writes a sequence of bytes to this channel from the given buffer, starting at the given file position.
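
For orientation, here is a minimal, self-contained sketch of a positional write before the real-world examples below. It is not taken from any of the projects that follow; the file name and payload are illustrative. Note that write(ByteBuffer, long) does not move the channel's own position, and writing past the current end of file leaves an unwritten gap.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class PositionalWriteExample {
    public static void main(String[] args) throws IOException {
        try (FileChannel channel = FileChannel.open(Paths.get("example.dat"),
                StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {
            ByteBuffer src = ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8));
            // Write at byte offset 100; the channel's position is unchanged,
            // and bytes 0-99 remain unwritten if the file was empty.
            int written = channel.write(src, 100);
            System.out.println("Wrote " + written + " bytes at position 100");
        }
    }
}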

Usage

From source file: ome.io.nio.RomioPixelBuffer.java

/**
 * Implemented as specified by {@link PixelBuffer} I/F.
 * @see PixelBuffer#setRegion(Integer, Long, ByteBuffer)
 */
public void setRegion(Integer size, Long offset, ByteBuffer buffer) throws IOException {
    throwIfReadOnly();
    FileChannel fileChannel = getFileChannel();

    /*
     * fileChannel should never be null: getFileChannel() throws an
     * exception if an error occurs rather than returning null.
     */
    fileChannel.write(buffer, offset);
}

From source file: no.sesat.search.http.filters.SiteJspLoaderFilter.java

private void downloadJsp(final HttpServletRequest request, final String jsp) throws MalformedURLException {

    final StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    byte[] golden = new byte[0];

    // search skins for the jsp and write it out to "golden"
    for (Site site = (Site) request.getAttribute(Site.NAME_KEY); 0 == golden.length; site = site.getParent()) {

        if (null == site) {
            if (null == config.getServletContext().getResource(jsp)) {
                throw new ResourceLoadException("Unable to find " + jsp + " in any skin");
            }
            break;
        }

        final Site finalSite = site;
        final BytecodeLoader bcLoader = UrlResourceLoader.newBytecodeLoader(finalSite.getSiteContext(), jsp,
                null);
        bcLoader.abut();
        golden = bcLoader.getBytecode();
    }

    // if golden now contains data, save it to a local (i.e. local web application) file
    if (0 < golden.length) {
        try {
            final File file = new File(root + jsp);

            // create the directory structure
            file.getParentFile().mkdirs();

            // check existing file
            boolean needsUpdating = true;
            final boolean fileExisted = file.exists();
            if (!fileExisted) {
                file.createNewFile();
            }

            // channel.lock() only synchronises file access between programs, not between threads
            //  inside the current JVM; overlapping lock requests from the same JVM result in an
            //  OverlappingFileLockException. At least this is my current understanding of
            //  java.nio.channels. It may be that no synchronisation or locking is required
            //  at all. A beer to whoever answers :-)
            // So we must provide synchronisation between our own threads.
            //  Synchronising on the file's path (using the JVM's String.intern() functionality)
            //  should work. (I can't imagine this string being used for any other synchronisation purposes.)
            synchronized (file.toString().intern()) {

                RandomAccessFile fileAccess = null;
                FileChannel channel = null;

                try {

                    fileAccess = new RandomAccessFile(file, "rws");
                    channel = fileAccess.getChannel();

                    channel.lock();

                    if (fileExisted) {

                        final byte[] bytes = new byte[(int) channel.size()];
                        final ByteBuffer byteBuffer = ByteBuffer.wrap(bytes);
                        int reads;
                        do {
                            reads = channel.read(byteBuffer);
                        } while (0 < reads);

                        needsUpdating = !Arrays.equals(golden, bytes);
                    }

                    if (needsUpdating) {
                        // download file from skin
                        channel.write(ByteBuffer.wrap(golden), 0);
                        file.deleteOnExit();
                    }
                } finally {
                    if (null != channel) {
                        channel.close();
                    }
                    if (null != fileAccess) {
                        fileAccess.close();
                    }

                    LOG.debug("resource created as " + config.getServletContext().getResource(jsp));
                }
            }

        } catch (IOException ex) {
            LOG.error(ex.getMessage(), ex);
        }
    }

    stopWatch.stop();
    LOG.trace("SiteJspLoaderFilter.downloadJsp(..) took " + stopWatch);
}
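
The locking caveat in the comment above is accurate: a FileLock coordinates access between processes, while a second overlapping lock request from within the same JVM throws OverlappingFileLockException, so threads in the same JVM must synchronise among themselves. Below is a minimal sketch of the same guard pattern; it is not part of the original filter, and the method and parameter names are illustrative.

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;

public class LockedWriteExample {
    // Serialise threads within this JVM on the interned path string,
    // then take a FileLock to serialise against other processes.
    static void writeLocked(String path, byte[] data) throws IOException {
        synchronized (path.intern()) {
            try (RandomAccessFile raf = new RandomAccessFile(path, "rws");
                 FileChannel channel = raf.getChannel();
                 FileLock lock = channel.lock()) {
                channel.write(ByteBuffer.wrap(data), 0);
            }
        }
    }
}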

From source file: org.alfresco.repo.content.AbstractWritableContentStoreTest.java

/**
 * Tests random access writing
 * <p>
 * Only executes if the writer implements {@link RandomAccessContent}.
 */
@Test
public void testRandomAccessWrite() throws Exception {
    ContentWriter writer = getWriter();

    FileChannel fileChannel = writer.getFileChannel(true);
    assertNotNull("No channel given", fileChannel);

    // check that no other content access is allowed
    try {
        writer.getWritableChannel();
        fail("Second channel access allowed");
    } catch (RuntimeException e) {
        // expected
    }

    // write some content in a random fashion (reverse order)
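    // (positional writes do not depend on the channel's position, so the order does not matter)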
    byte[] content = new byte[] { 1, 2, 3 };
    for (int i = content.length - 1; i >= 0; i--) {
        ByteBuffer buffer = ByteBuffer.wrap(content, i, 1);
        fileChannel.write(buffer, i);
    }

    // close the channel
    fileChannel.close();
    assertTrue("Writer not closed", writer.isClosed());

    // check the content
    ContentReader reader = writer.getReader();
    ReadableByteChannel channelReader = reader.getReadableChannel();
    ByteBuffer buffer = ByteBuffer.allocateDirect(3);
    int count = channelReader.read(buffer);
    assertEquals("Incorrect number of bytes read", 3, count);
    for (int i = 0; i < content.length; i++) {
        assertEquals("Content doesn't match", content[i], buffer.get(i));
    }

    // get a new writer from the store, using the existing content and perform a truncation check
    ContentContext writerTruncateCtx = new ContentContext(writer.getReader(), null);
    ContentWriter writerTruncate = getStore().getWriter(writerTruncateCtx);
    assertEquals("Content size incorrect", 0, writerTruncate.getSize());
    // get the channel with truncation
    FileChannel fcTruncate = writerTruncate.getFileChannel(true);
    fcTruncate.close();
    assertEquals("Content not truncated", 0, writerTruncate.getSize());

    // get a new writer from the store, using the existing content and perform a non-truncation check
    ContentContext writerNoTruncateCtx = new ContentContext(writer.getReader(), null);
    ContentWriter writerNoTruncate = getStore().getWriter(writerNoTruncateCtx);
    assertEquals("Content size incorrect", 0, writerNoTruncate.getSize());
    // get the channel without truncation
    FileChannel fcNoTruncate = writerNoTruncate.getFileChannel(false);
    fcNoTruncate.close();
    assertEquals("Content was truncated", writer.getSize(), writerNoTruncate.getSize());
}

From source file: org.apache.hadoop.hdfs.TestCrcCorruption.java

/** 
  * check if DFS can handle corrupted CRC blocks
  */
private void thistest(Configuration conf, DFSTestUtil util) throws Exception {
    MiniDFSCluster cluster = null;
    int numDataNodes = 2;
    short replFactor = 2;
    Random random = new Random();

    try {
        cluster = new MiniDFSCluster.Builder(conf).numNameNodes(1).numDataNodes(numDataNodes).build();
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        util.createFiles(fs, "/srcdat", replFactor);
        util.waitReplication(fs, "/srcdat", (short) 2);

        // Now deliberately remove/truncate meta blocks from the first
        // directory of the first datanode. The complete absence of a meta
        // file prevents this datanode from sending data to another datanode.
        // However, a client is still allowed access to this block.
        //
        File storageDir = MiniDFSCluster.getStorageDir(0, 1);
        String bpid = cluster.getNamesystem().getBlockPoolId();
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        assertTrue("data directory does not exist", data_dir.exists());
        File[] blocks = data_dir.listFiles();
        assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
        int num = 0;
        for (int idx = 0; idx < blocks.length; idx++) {
            if (blocks[idx].getName().startsWith("blk_") && blocks[idx].getName().endsWith(".meta")) {
                num++;
                if (num % 3 == 0) {
                    //
                    // remove .meta file
                    //
                    LOG.info("Deliberately removing file " + blocks[idx].getName());
                    assertTrue("Cannot remove file.", blocks[idx].delete());
                } else if (num % 3 == 1) {
                    //
                    // shorten .meta file
                    //
                    RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw");
                    FileChannel channel = file.getChannel();
                    int newsize = random.nextInt((int) channel.size() / 2);
                    LOG.info("Deliberately truncating file " + blocks[idx].getName() + " to size " + newsize
                            + " bytes.");
                    channel.truncate(newsize);
                    file.close();
                } else {
                    //
                    // corrupt a few bytes of the metafile
                    //
                    RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw");
                    FileChannel channel = file.getChannel();
                    long position = 0;
                    //
                    // The very first time, corrupt the meta header at offset 0
                    //
                    if (num != 2) {
                        position = (long) random.nextInt((int) channel.size());
                    }
                    int length = random.nextInt((int) (channel.size() - position + 1));
                    byte[] buffer = new byte[length];
                    random.nextBytes(buffer);
                    channel.write(ByteBuffer.wrap(buffer), position);
                    LOG.info("Deliberately corrupting file " + blocks[idx].getName() + " at offset " + position
                            + " length " + length);
                    file.close();
                }
            }
        }

        //
        // Now deliberately corrupt all meta blocks from the second
        // directory of the first datanode
        //
        storageDir = MiniDFSCluster.getStorageDir(0, 1);
        data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        assertTrue("data directory does not exist", data_dir.exists());
        blocks = data_dir.listFiles();
        assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));

        int count = 0;
        File previous = null;
        for (int idx = 0; idx < blocks.length; idx++) {
            if (blocks[idx].getName().startsWith("blk_") && blocks[idx].getName().endsWith(".meta")) {
                //
                // Move the previous metafile into the current one.
                //
                count++;
                if (count % 2 == 0) {
                    LOG.info("Deliberately insertimg bad crc into files " + blocks[idx].getName() + " "
                            + previous.getName());
                    assertTrue("Cannot remove file.", blocks[idx].delete());
                    assertTrue("Cannot corrupt meta file.", previous.renameTo(blocks[idx]));
                    assertTrue("Cannot recreate empty meta file.", previous.createNewFile());
                    previous = null;
                } else {
                    previous = blocks[idx];
                }
            }
        }

        //
        // Only one replica is possibly corrupted. The other replica should still
        // be good. Verify.
        //
        assertTrue("Corrupted replicas not handled properly.", util.checkFiles(fs, "/srcdat"));
        LOG.info("All File still have a valid replica");

        //
        // set replication factor back to 1. This causes only one replica
        // of each block to remain in HDFS. The check is to make sure that
        // the corrupted replica generated above is the one that gets deleted.
        // This test is currently disabled until HADOOP-1557 is solved.
        //
        util.setReplication(fs, "/srcdat", (short) 1);
        //util.waitReplication(fs, "/srcdat", (short)1);
        //LOG.info("All Files done with removing replicas");
        //assertTrue("Excess replicas deleted. Corrupted replicas found.",
        //           util.checkFiles(fs, "/srcdat"));
        LOG.info("The excess-corrupted-replica test is disabled " + " pending HADOOP-1557");

        util.cleanup(fs, "/srcdat");
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

From source file: com.edgenius.wiki.service.impl.SitemapServiceImpl.java

private void appendSitemapIndex(String sitemap) throws IOException {
    File sitemapIndexFile = new File(mapResourcesRoot.getFile(), SITEMAP_INDEX_NAME);
    if (!sitemapIndexFile.exists()) {
        //if a new sitemap file
        List<String> lines = new ArrayList<String>();
        lines.add("<?xml version=\"1.0\" encoding=\"utf-8\"?>");
        lines.add("<sitemapindex xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\">");
        lines.add("</sitemapindex>");
        FileUtils.writeLines(sitemapIndexFile, lines);
    }

    RandomAccessFile rfile = new RandomAccessFile(sitemapIndexFile, "rw");
    FileChannel channel = rfile.getChannel();

    //this new content will be appended to the end of the file, before the XML end tag
    StringBuilder lines = new StringBuilder();
    lines.append("   <sitemap>\n");
    lines.append("     <loc>" + WebUtil.getHostAppURL() + SITEMAP_URL_CONTEXT + sitemap + "</loc>\n");
    lines.append("     <lastmod>" + TIME_FORMAT.format(new Date()) + " </lastmod>\n");
    lines.append("   </sitemap>\n");
    //the last tag will be overwritten, so append it again to the new content.
    lines.append(SITEMAP_INDEX_TAIL_FLAG);
    byte[] content = lines.toString().getBytes();

    ByteBuffer byteBuf = ByteBuffer.allocate(512);
    // seek first
    int len = 0, headIdx = 0;
    long tailIdx = channel.size() - 512;
    tailIdx = tailIdx < 0 ? 0 : tailIdx;

    long headPos = -1;
    StringBuilder header = new StringBuilder();
    while ((len = channel.read(byteBuf, tailIdx)) > 0) {
        byteBuf.rewind();
        byte[] dst = new byte[len];
        byteBuf.get(dst, 0, len);
        header.append(new String(dst, "UTF8"));
        headIdx = header.indexOf(SITEMAP_INDEX_TAIL_FLAG);
        if (headIdx != -1) {
            headPos = channel.size() - header.substring(headIdx).getBytes().length;
            break;
        }
    }
    FileLock lock = channel.tryLock(headPos, content.length, false);
    try {
        channel.write(ByteBuffer.wrap(content), headPos);
    } finally {
        lock.release();
    }

    channel.force(false);
    rfile.close();

}
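
The technique above, overwriting the file's closing XML tag and re-appending it after the new entry, can be condensed into a few lines. Here is a sketch of the same idea, not part of the original service; the method and parameter names are illustrative, and it reads the whole file into memory, which is reasonable for a small sitemap index.

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;

public class XmlTailAppendExample {
    // Overwrite the closing tag with the new entry, then write the tag back.
    static void appendEntry(String path, String entry, String closingTag) throws IOException {
        try (RandomAccessFile raf = new RandomAccessFile(path, "rw");
             FileChannel channel = raf.getChannel()) {
            ByteBuffer buf = ByteBuffer.allocate((int) channel.size());
            while (buf.hasRemaining() && channel.read(buf, buf.position()) > 0) {
                // loop until the buffer holds the whole file
            }
            String text = new String(buf.array(), StandardCharsets.UTF_8);
            int idx = text.lastIndexOf(closingTag);
            if (idx < 0) {
                throw new IOException("closing tag not found in " + path);
            }
            // Convert the character index back into a byte offset for the positional write.
            long pos = text.substring(0, idx).getBytes(StandardCharsets.UTF_8).length;
            channel.write(ByteBuffer.wrap((entry + closingTag).getBytes(StandardCharsets.UTF_8)), pos);
            channel.force(false);
        }
    }
}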

From source file: org.apache.hadoop.hdfs.TestFileAppend4.java

/**
 * Corrupt all of the blocks in the blocksBeingWritten dir
 * for the specified datanode number. The corruption is
 * specifically the last checksum chunk of the file being
 * modified by writing random data into it.
 */
private void corruptDataNode(int dnNumber, CorruptionType type) throws Exception {
    // get the FS data of the specified datanode
    File data_dir = new File(System.getProperty("test.build.data"),
            "dfs/data/data" + Integer.toString(dnNumber * 2 + 1) + "/blocksBeingWritten");
    int corrupted = 0;
    for (File block : data_dir.listFiles()) {
        // only touch the actual data, not the metadata (with CRC)
        if (block.getName().startsWith("blk_") && !block.getName().endsWith("meta")) {
            if (type == CorruptionType.CORRUPT_LAST_CHUNK) {
                RandomAccessFile file = new RandomAccessFile(block, "rw");
                FileChannel channel = file.getChannel();
                Random r = new Random();
                long lastBlockSize = channel.size() % 512;
                long position = channel.size() - lastBlockSize;
                int length = r.nextInt((int) (channel.size() - position + 1));
                byte[] buffer = new byte[length];
                r.nextBytes(buffer);

                channel.write(ByteBuffer.wrap(buffer), position);
                System.out.println("Deliberately corrupting file " + block.getName() + " at offset " + position
                        + " length " + length);
                file.close();

            } else if (type == CorruptionType.TRUNCATE_BLOCK_TO_ZERO) {
                LOG.info("Truncating block file at " + block);
                RandomAccessFile blockFile = new RandomAccessFile(block, "rw");
                blockFile.setLength(0);
                blockFile.close();

                RandomAccessFile metaFile = new RandomAccessFile(FSDataset.findMetaFile(block), "rw");
                metaFile.setLength(0);
                metaFile.close();
            } else if (type == CorruptionType.TRUNCATE_BLOCK_HALF) {
                FSDatasetTestUtil.truncateBlockFile(block, block.length() / 2);
            } else {
                assert false;
            }
            ++corrupted;
        }
    }
    assertTrue("Should have some data in bbw to corrupt", corrupted > 0);
}

From source file: com.twinsoft.convertigo.beans.steps.WriteXMLStep.java

protected void writeFile(String filePath, NodeList nodeList) throws EngineException {
    if (nodeList == null) {
        throw new EngineException("Unable to write to xml file: element is Null");
    }

    String fullPathName = getAbsoluteFilePath(filePath);
    synchronized (Engine.theApp.filePropertyManager.getMutex(fullPathName)) {
        try {
            String encoding = getEncoding();
            encoding = encoding.length() > 0 && Charset.isSupported(encoding) ? encoding : "UTF-8";
            if (!isReallyAppend(fullPathName)) {
                String tTag = defaultRootTagname.length() > 0 ? StringUtils.normalize(defaultRootTagname)
                        : "document";
                FileUtils.write(new File(fullPathName),
                        "<?xml version=\"1.0\" encoding=\"" + encoding + "\"?>\n<" + tTag + "/>", encoding);
            }

            StringBuffer content = new StringBuffer();

            /* build the content; only append child elements */
            for (int i = 0; i < nodeList.getLength(); i++) {
                if (nodeList.item(i).getNodeType() == Node.ELEMENT_NODE) {
                    content.append(XMLUtils.prettyPrintElement((Element) nodeList.item(i), true, true));
                }
            }

            /* detect current xml encoding */
            RandomAccessFile randomAccessFile = null;
            try {
                randomAccessFile = new RandomAccessFile(fullPathName, "rw");
                FileChannel fc = randomAccessFile.getChannel();
                ByteBuffer buf = ByteBuffer.allocate(60);
                int nb = fc.read(buf);
                String sbuf = new String(buf.array(), 0, nb, "ASCII");
                String enc = sbuf.replaceFirst("^.*encoding=\"", "").replaceFirst("\"[\\d\\D]*$", "");

                if (!Charset.isSupported(enc)) {
                    enc = encoding;
                }

                buf.clear();

                /* retrieve the last closing tag at the end of the file */
                long pos = fc.size() - buf.capacity();
                if (pos < 0) {
                    pos = 0;
                }

                nb = fc.read(buf, pos);

                boolean isUTF8 = Charset.forName(enc) == Charset.forName("UTF-8");

                if (isUTF8) {
                    for (int i = 0; i < buf.capacity(); i++) {
                        sbuf = new String(buf.array(), i, nb - i, enc);
                        if (!sbuf.startsWith("\uFFFD")) { // a leading U+FFFD means decoding started mid-character
                            pos += i;
                            break;
                        }
                    }
                } else {
                    sbuf = new String(buf.array(), 0, nb, enc);
                }

                int lastTagIndex = sbuf.lastIndexOf("</");
                if (lastTagIndex == -1) {
                    int iend = sbuf.lastIndexOf("/>");
                    if (iend != -1) {
                        lastTagIndex = sbuf.lastIndexOf("<", iend);
                        String tagname = sbuf.substring(lastTagIndex + 1, iend);
                        content = new StringBuffer(
                                "<" + tagname + ">\n" + content.toString() + "</" + tagname + ">");
                    } else {
                        throw new EngineException("Malformed XML file");
                    }
                } else {
                    content.append(sbuf.substring(lastTagIndex));

                    if (isUTF8) {
                        String before = sbuf.substring(0, lastTagIndex);
                        lastTagIndex = before.getBytes(enc).length;
                    }
                }
                fc.write(ByteBuffer.wrap(content.toString().getBytes(enc)), pos + lastTagIndex);
            } finally {
                if (randomAccessFile != null) {
                    randomAccessFile.close();
                }
            }
        } catch (IOException e) {
            throw new EngineException("Unable to write to xml file", e);
        } finally {
            Engine.theApp.filePropertyManager.releaseMutex(fullPathName);
        }
    }
}