Example usage for java.io RandomAccessFile setLength

Introduction

On this page you can find usage examples for java.io.RandomAccessFile.setLength, collected from open-source projects.

Prototype

public native void setLength(long newLength) throws IOException;

Document

Sets the length of this file. If the new length is smaller than the current length, the file is truncated; if it is larger, the file is extended, and the contents of the extended portion are not defined.
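
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing both directions of the call: shrinking discards the trailing bytes, growing leaves the contents of the new region unspecified, and a file pointer past the new end is moved to the new end.

import java.io.IOException;
import java.io.RandomAccessFile;

public class SetLengthDemo {
    public static void main(String[] args) throws IOException {
        try (RandomAccessFile raf = new RandomAccessFile("demo.bin", "rw")) {
            raf.write(new byte[1024]); // file length is now 1024, file pointer at 1024
            raf.setLength(512);        // shrink: bytes past offset 512 are discarded,
                                       // and the file pointer is clamped to 512
            raf.setLength(4096);       // grow: contents of the new region are not defined
            System.out.println("length=" + raf.length()); // prints length=4096
        }
    }
}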

Usage

From source file: org.apache.hadoop.hdfs.TestFileAppend4.java

/**
 * Corrupt all of the blocks in the blocksBeingWritten dir
 * for the specified datanode number. The corruption is
 * specifically the last checksum chunk of the file being
 * modified by writing random data into it.
 */
private void corruptDataNode(int dnNumber, CorruptionType type) throws Exception {
    // get the FS data of the specified datanode
    File data_dir = new File(System.getProperty("test.build.data"),
            "dfs/data/data" + Integer.toString(dnNumber * 2 + 1) + "/blocksBeingWritten");
    int corrupted = 0;
    for (File block : data_dir.listFiles()) {
        // only touch the actual data, not the metadata (with CRC)
        if (block.getName().startsWith("blk_") && !block.getName().endsWith("meta")) {
            if (type == CorruptionType.CORRUPT_LAST_CHUNK) {
                RandomAccessFile file = new RandomAccessFile(block, "rw");
                FileChannel channel = file.getChannel();
                Random r = new Random();
                long lastBlockSize = channel.size() % 512;
                long position = channel.size() - lastBlockSize;
                int length = r.nextInt((int) (channel.size() - position + 1));
                byte[] buffer = new byte[length];
                r.nextBytes(buffer);

                channel.write(ByteBuffer.wrap(buffer), position);
                System.out.println("Deliberately corrupting file " + block.getName() + " at offset " + position
                        + " length " + length);
                file.close();

            } else if (type == CorruptionType.TRUNCATE_BLOCK_TO_ZERO) {
                LOG.info("Truncating block file at " + block);
                RandomAccessFile blockFile = new RandomAccessFile(block, "rw");
                blockFile.setLength(0);
                blockFile.close();

                RandomAccessFile metaFile = new RandomAccessFile(FSDataset.findMetaFile(block), "rw");
                metaFile.setLength(0);
                metaFile.close();
            } else if (type == CorruptionType.TRUNCATE_BLOCK_HALF) {
                FSDatasetTestUtil.truncateBlockFile(block, block.length() / 2);
            } else {
                assert false;
            }
            ++corrupted;
        }
    }
    assertTrue("Should have some data in bbw to corrupt", corrupted > 0);
}

From source file: org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.java

static private void truncateBlock(File blockFile, File metaFile, long oldlen, long newlen) throws IOException {
    LOG.info("truncateBlock: blockFile=" + blockFile + ", metaFile=" + metaFile + ", oldlen=" + oldlen
            + ", newlen=" + newlen);

    if (newlen == oldlen) {
        return;
    }
    if (newlen > oldlen) {
        throw new IOException(
                "Cannot truncate block from oldlen (=" + oldlen + ") to newlen (=" + newlen + ")");
    }

    DataChecksum dcs = BlockMetadataHeader.readHeader(metaFile).getChecksum();
    int checksumsize = dcs.getChecksumSize();
    int bpc = dcs.getBytesPerChecksum();
    long n = (newlen - 1) / bpc + 1;
    long newmetalen = BlockMetadataHeader.getHeaderSize() + n * checksumsize;
    long lastchunkoffset = (n - 1) * bpc;
    int lastchunksize = (int) (newlen - lastchunkoffset);
    byte[] b = new byte[Math.max(lastchunksize, checksumsize)];

    RandomAccessFile blockRAF = new RandomAccessFile(blockFile, "rw");
    try {
        //truncate blockFile 
        blockRAF.setLength(newlen);

        //read last chunk
        blockRAF.seek(lastchunkoffset);
        blockRAF.readFully(b, 0, lastchunksize);
    } finally {
        blockRAF.close();
    }

    //compute checksum
    dcs.update(b, 0, lastchunksize);
    dcs.writeValue(b, 0, false);

    //update metaFile 
    RandomAccessFile metaRAF = new RandomAccessFile(metaFile, "rw");
    try {
        metaRAF.setLength(newmetalen);
        metaRAF.seek(newmetalen - checksumsize);
        metaRAF.write(b, 0, checksumsize);
    } finally {
        metaRAF.close();
    }
}
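
The sequence above is the usual pattern for keeping a checksummed file pair consistent: truncate the data file with setLength, re-read the now-partial last chunk, recompute its checksum, then setLength the metadata file and overwrite its final checksum entry.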

From source file: jag.sftp.VirtualFileSystem.java

/**
*
*
* @param path
* @param flags
* @param attrs
*
* @return
*
* @throws PermissionDeniedException
* @throws FileNotFoundException
* @throws IOException
*/
public byte[] openFile(String path, UnsignedInteger32 flags, FileAttributes attrs)
        throws PermissionDeniedException, FileNotFoundException, IOException {
    System.out.println(path);
    path = VirtualFileSystem.translateVFSPath(path);
    System.out.println(path);
    File f = new File(path);
    verifyPermissions(SshThread.getCurrentThreadUser(), path, "r");

    // Check if the file does not exist and process according to flags
    if (!f.exists()) {
        if ((flags.intValue() & NativeFileSystemProvider.OPEN_CREATE) == NativeFileSystemProvider.OPEN_CREATE) {
            // The file does not exist and the create flag is present so let's create it
            if (!f.createNewFile()) {
                throw new IOException(translateNFSPath(path) + " could not be created");
            }
        } else {
            // The file does not exist and no create flag present
            throw new FileNotFoundException(translateNFSPath(path) + " does not exist");
        }
    } else {
        if (((flags.intValue() & NativeFileSystemProvider.OPEN_CREATE) == NativeFileSystemProvider.OPEN_CREATE)
                && ((flags.intValue()
                        & NativeFileSystemProvider.OPEN_EXCLUSIVE) == NativeFileSystemProvider.OPEN_EXCLUSIVE)) {
            // The file exists but the EXCL flag is set which requires that the
            // file should not exist prior to creation, so throw a status exception
            throw new IOException(translateNFSPath(path) + " already exists");
        }
    }

    // The file now exists so open the file according to the flags by building the relevant
    // flags for the RandomAccessFile class
    String mode = "r"
            + (((flags.intValue() & NativeFileSystemProvider.OPEN_WRITE) == NativeFileSystemProvider.OPEN_WRITE)
                    ? "ws"
                    : "");
    RandomAccessFile raf = new RandomAccessFile(f, mode);

    // Determine whether we need to truncate the file
    if (((flags.intValue() & NativeFileSystemProvider.OPEN_CREATE) == NativeFileSystemProvider.OPEN_CREATE)
            && ((flags.intValue()
                    & NativeFileSystemProvider.OPEN_TRUNCATE) == NativeFileSystemProvider.OPEN_TRUNCATE)) {
        // Set the length to zero
        raf.setLength(0);
    }

    // Record the open file
    openFiles.put(raf.toString(), new OpenFile(f, raf, flags));

    // Return the handle
    return raf.toString().getBytes("US-ASCII");
}

From source file: com.sshtools.daemon.vfs.VirtualFileSystem.java

/**
 *
 *
 * @param path
 * @param flags
 * @param attrs
 *
 * @return
 *
 * @throws PermissionDeniedException
 * @throws FileNotFoundException
 * @throws IOException
 */
public byte[] openFile(String path, UnsignedInteger32 flags, FileAttributes attrs)
        throws PermissionDeniedException, FileNotFoundException, IOException {
    path = VirtualFileSystem.translateVFSPath(path);

    File f = new File(path);

    verifyPermissions(SshThread.getCurrentThreadUser(), path, "r");

    // Check if the file does not exist and process according to flags
    if (!f.exists()) {
        if ((flags.intValue() & NativeFileSystemProvider.OPEN_CREATE) == NativeFileSystemProvider.OPEN_CREATE) {
            // The file does not exist and the create flag is present so let's create it
            if (!f.createNewFile()) {
                throw new IOException(translateNFSPath(path) + " could not be created");
            }
        } else {
            // The file does not exist and no create flag present
            throw new FileNotFoundException(translateNFSPath(path) + " does not exist");
        }
    } else {
        if (((flags.intValue() & NativeFileSystemProvider.OPEN_CREATE) == NativeFileSystemProvider.OPEN_CREATE)
                && ((flags.intValue()
                        & NativeFileSystemProvider.OPEN_EXCLUSIVE) == NativeFileSystemProvider.OPEN_EXCLUSIVE)) {
            // The file exists but the EXCL flag is set which requires that the
            // file should not exist prior to creation, so throw a status exception
            throw new IOException(translateNFSPath(path) + " already exists");
        }
    }

    // The file now exists so open the file according to the flags by building the relevant
    // flags for the RandomAccessFile class
    String mode = "r"
            + (((flags.intValue() & NativeFileSystemProvider.OPEN_WRITE) == NativeFileSystemProvider.OPEN_WRITE)
                    ? "ws"
                    : "");

    RandomAccessFile raf = new RandomAccessFile(f, mode);

    // Determine whether we need to truncate the file
    if (((flags.intValue() & NativeFileSystemProvider.OPEN_CREATE) == NativeFileSystemProvider.OPEN_CREATE)
            && ((flags.intValue()
                    & NativeFileSystemProvider.OPEN_TRUNCATE) == NativeFileSystemProvider.OPEN_TRUNCATE)) {
        // Set the length to zero
        raf.setLength(0);
    }

    // Record the open file
    openFiles.put(raf.toString(), new OpenFile(f, raf, flags));

    // Return the handle
    return raf.toString().getBytes("US-ASCII");
}
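
Both SFTP examples use the same idiom: the mode string becomes "rws" when write access is requested, and the protocol's OPEN_TRUNCATE flag is implemented by calling setLength(0) right after opening, since RandomAccessFile has no truncate-on-open mode of its own.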

From source file: org.apache.hadoop.hive.llap.cache.BuddyAllocator.java

private ByteBuffer preallocateArenaBuffer(int arenaSize) {
    if (isMapped) {
        RandomAccessFile rwf = null;
        File rf = null;
        Preconditions.checkArgument(isDirect, "All memory mapped allocations have to be direct buffers");
        try {
            rf = File.createTempFile("arena-", ".cache", cacheDir.toFile());
            rwf = new RandomAccessFile(rf, "rw");
            rwf.setLength(arenaSize); // truncate (TODO: posix_fallocate?)
            // Use RW, not PRIVATE because the copy-on-write is irrelevant for a deleted file
            // see discussion in YARN-5551 for the memory accounting discussion
            ByteBuffer rwbuf = rwf.getChannel().map(MapMode.READ_WRITE, 0, arenaSize);
            return rwbuf;
        } catch (IOException ioe) {
            LlapIoImpl.LOG.warn("Failed trying to allocate memory mapped arena", ioe);
            // fail similarly when memory allocations fail
            throw new OutOfMemoryError("Failed trying to allocate memory mapped arena: " + ioe.getMessage());
        } finally {
            // A mapping, once established, is not dependent upon the file channel that was used to
            // create it. delete file and hold onto the map
            IOUtils.closeQuietly(rwf);
            if (rf != null) {
                rf.delete();
            }
        }
    }
    return isDirect ? ByteBuffer.allocateDirect(arenaSize) : ByteBuffer.allocate(arenaSize);
}

From source file: org.apache.hadoop.hdfs.server.namenode.TestCheckpoint.java

/**
 * Test that a fault while downloading edits does not prevent future
 * checkpointing.
 */
@Test(timeout = 30000)
public void testEditFailureBeforeRename() throws IOException {
    Configuration conf = new HdfsConfiguration();
    SecondaryNameNode secondary = null;
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        secondary = startSecondaryNameNode(conf);
        DFSTestUtil.createFile(fs, new Path("tmpfile0"), 1024, (short) 1, 0l);
        secondary.doCheckpoint();

        // Cause edit rename to fail during next checkpoint
        Mockito.doThrow(new IOException("Injecting failure before edit rename")).when(faultInjector)
                .beforeEditsRename();
        DFSTestUtil.createFile(fs, new Path("tmpfile1"), 1024, (short) 1, 0l);

        try {
            secondary.doCheckpoint();
            fail("Fault injection failed.");
        } catch (IOException ioe) {
            GenericTestUtils.assertExceptionContains("Injecting failure before edit rename", ioe);
        }
        Mockito.reset(faultInjector);
        // truncate the tmp edits file to simulate a partial download
        for (StorageDirectory sd : secondary.getFSImage().getStorage().dirIterable(NameNodeDirType.EDITS)) {
            File[] tmpEdits = sd.getCurrentDir().listFiles(tmpEditsFilter);
            assertTrue("Expected a single tmp edits file in directory " + sd.toString(), tmpEdits.length == 1);
            RandomAccessFile randFile = new RandomAccessFile(tmpEdits[0], "rw");
            randFile.setLength(0);
            randFile.close();
        }
        // Next checkpoint should succeed
        secondary.doCheckpoint();
    } finally {
        if (secondary != null) {
            secondary.shutdown();
        }
        if (fs != null) {
            fs.close();
        }
        cleanup(secondary);
        secondary = null;
        cleanup(cluster);
        cluster = null;
        Mockito.reset(faultInjector);
    }
}

From source file: org.apache.hadoop.hive.service.HSSessionItem.java

public boolean uploadProto(String user, String fileName, byte[] array) throws HiveServerException {
    boolean res = true;
    String fname;

    String dname = getHome() + "/protobuf/upload/" + user;
    File d = new File(dname);
    if (!d.exists()) {
        if (!d.mkdirs()) {
            l4j.error(getSessionName() + " try to mkdir " + dname + " failed.");
            throw new HiveServerException("Create user proto directory failed.");
        }
    }
    if (!fileName.trim().toLowerCase().endsWith(".proto")) {
        throw new HiveServerException(
                "Upload proto command can only handle .proto files; check your file suffix.");
    }

    fname = dname + "/" + fileName;

    RandomAccessFile raf;
    File f;
    try {
        f = new File(fname);
        if (!f.exists()) {
            if (!f.createNewFile()) {
                l4j.error("Try to create file " + fname + " failed.");
                throw new HiveServerException("Create user upload file " + fname + " failed.");
            }
        }
    } catch (java.io.IOException ex) {
        l4j.error(getSessionName() + " try to create file " + fname + " failed w/ " + ex);
        return false;
    }
    if (array.length == 0) {
        if (!f.delete()) {
            l4j.error("Try to delete file " + fname + " failed.");
            throw new HiveServerException("Delete user proto file " + fname + " failed.");
        } else {
            return true;
        }
    }
    try {
        raf = new RandomAccessFile(f, "rw");
    } catch (java.io.FileNotFoundException ex) {
        l4j.error(getSessionName() + " try to open file " + fname + " failed, not found.");
        return false;
    }
    try {
        raf.setLength(0);
        raf.seek(0);
        raf.write(array);
    } catch (java.io.IOException ex) {
        l4j.error(getSessionName() + " try to truncate/write file " + fname + "failed w/ " + ex);
        return false;
    }
    return res;
}

From source file: org.apache.hadoop.hive.service.HSSessionItem.java

public boolean upload(String rtype, String user, String fileName, String data) throws HiveServerException {
    boolean res = true;
    String fname;
    if (rtype.equalsIgnoreCase("jar")) {
        fname = getHome() + "/auxlib/" + fileName;
    } else if (rtype.equalsIgnoreCase("proto")) {
        String dname = getHome() + "/protobuf/upload/" + user;
        File d = new File(dname);
        if (!d.exists()) {
            if (!d.mkdirs()) {
                l4j.error(getSessionName() + " try to mkdir " + dname + " failed.");
                throw new HiveServerException("Create user proto directory failed.");
            }
        }
        if (!fileName.trim().toLowerCase().endsWith(".proto")) {
            throw new HiveServerException(
                    "Upload proto command can only handle .proto files; check your file suffix.");
        }

        fname = dname + "/" + fileName;
    } else {
        String errorMsg = "Can't upload filetype: " + rtype;
        l4j.error(getSessionName() + " upload failed: " + errorMsg);
        throw new HiveServerException(errorMsg);
    }

    RandomAccessFile raf;
    File f;
    try {
        f = new File(fname);
        if (!f.exists()) {
            if (!f.createNewFile()) {
                l4j.error("Try to create file " + fname + " failed.");
                throw new HiveServerException("Create user upload file " + fname + " failed.");
            }
        }
    } catch (java.io.IOException ex) {
        l4j.error(getSessionName() + " try to create file " + fname + " failed w/ " + ex);
        return false;
    }
    if (data.equalsIgnoreCase("")) {
        if (!f.delete()) {
            l4j.error("Try to delete file " + fname + " failed.");
            throw new HiveServerException("Delete user file " + fname + " failed.");
        } else {
            return true;
        }
    }

    try {
        raf = new RandomAccessFile(f, "rw");
    } catch (java.io.FileNotFoundException ex) {
        l4j.error(getSessionName() + " try to open file " + fname + " failed, not found.");
        return false;
    }
    try {
        raf.setLength(0);
        raf.seek(0);
        raf.write(data.getBytes());
    } catch (java.io.IOException ex) {
        l4j.error(getSessionName() + " try to truncate/write file " + fname + "failed w/ " + ex);
        return false;
    }
    return res;
}