Example usage for java.io RandomAccessFile close

Introduction

On this page you can find example usages of java.io.RandomAccessFile.close().

Prototype

public void close() throws IOException 

Document

Closes this random access file stream and releases any system resources associated with the stream.
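
For orientation, here is a minimal, self-contained sketch (not taken from any of the projects listed below; the file name example.dat is arbitrary) showing the two common ways close() is invoked: implicitly through try-with-resources, since RandomAccessFile implements Closeable, and explicitly in a finally block.

import java.io.IOException;
import java.io.RandomAccessFile;

public class CloseExample {
    public static void main(String[] args) throws IOException {
        // Implicit close: try-with-resources calls close() automatically,
        // even if an exception is thrown inside the block.
        try (RandomAccessFile raf = new RandomAccessFile("example.dat", "rw")) {
            raf.writeUTF("hello");
        }

        // Explicit close: call close() in a finally block so the underlying
        // file descriptor is released even when an exception occurs.
        RandomAccessFile raf = new RandomAccessFile("example.dat", "r");
        try {
            System.out.println("first value: " + raf.readUTF());
        } finally {
            raf.close();
        }
    }
}

Once close() has been called, the file cannot be reopened through the same instance, and further read or write calls on it throw IOException.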

Usage

From source file:org.apache.hadoop.hdfs.TestFileAppend3.java

/**
 * TC7: Corrupted replicas are present.
 * @throws IOException an exception might be thrown
 */
@Test
public void testTC7() throws Exception {
    final short repl = 2;
    final Path p = new Path("/TC7/foo");
    System.out.println("p=" + p);

    //a. Create file with replication factor of 2. Write half block of data. Close file.
    final int len1 = (int) (BLOCK_SIZE / 2);
    {
        FSDataOutputStream out = fs.create(p, false, buffersize, repl, BLOCK_SIZE);
        AppendTestUtil.write(out, 0, len1);
        out.close();
    }
    DFSTestUtil.waitReplication(fs, p, repl);

    //b. Log into one datanode that has one replica of this block.
    //   Find the block file on this datanode and truncate it to zero size.
    final LocatedBlocks locatedblocks = fs.dfs.getNamenode().getBlockLocations(p.toString(), 0L, len1);
    assertEquals(1, locatedblocks.locatedBlockCount());
    final LocatedBlock lb = locatedblocks.get(0);
    final ExtendedBlock blk = lb.getBlock();
    assertEquals(len1, lb.getBlockSize());

    DatanodeInfo[] datanodeinfos = lb.getLocations();
    assertEquals(repl, datanodeinfos.length);
    final DataNode dn = cluster.getDataNode(datanodeinfos[0].getIpcPort());
    final File f = DataNodeTestUtils.getBlockFile(dn, blk.getBlockPoolId(), blk.getLocalBlock());
    final RandomAccessFile raf = new RandomAccessFile(f, "rw");
    AppendTestUtil.LOG.info("dn=" + dn + ", blk=" + blk + " (length=" + blk.getNumBytes() + ")");
    assertEquals(len1, raf.length());
    raf.setLength(0);
    raf.close();

    //c. Open file in "append mode".  Append a new block worth of data. Close file.
    final int len2 = (int) BLOCK_SIZE;
    {
        FSDataOutputStream out = fs.append(p);
        AppendTestUtil.write(out, len1, len2);
        out.close();
    }

    //d. Reopen file and read two blocks worth of data.
    AppendTestUtil.check(fs, p, len1 + len2);
}

From source file:com.jk.framework.util.FakeRunnable.java

/**
 * Write on file.
 *
 * @author Mohamde Kiswani
 * @param file
 *            the file
 * @param string
 *            the string
 * @param lineIndex
 *            the line index
 * @throws IOException
 *             Signals that an I/O exception has occurred.
 * @since 28-1-2010
 * @description : to write to a file by using the random access mechanism
 */
public static void writeOnFile(final File file, final String string, final long lineIndex) throws IOException {
    final RandomAccessFile rand = new RandomAccessFile(file, "rw");
    rand.seek(lineIndex); // Seek to the given offset in the file
    rand.writeBytes(string); // Write the string at that position
    rand.close();
}

From source file:org.apache.hadoop.hdfs.server.namenode.TestFileJournalManager.java

/** 
 * Corrupt an edit log file after the start segment transaction
 */
private void corruptAfterStartSegment(File f) throws IOException {
    RandomAccessFile raf = new RandomAccessFile(f, "rw");
    raf.seek(0x20); // skip version and first transaction and a bit of the next transaction
    for (int i = 0; i < 1000; i++) {
        raf.writeInt(0xdeadbeef);
    }
    raf.close();
}

From source file:edu.tsinghua.lumaqq.qq.Util.java

/**
 * Computes the MD5 digest of the given file.
 * @param filename the name of the file to digest
 * @return the MD5 digest bytes, or null if the file cannot be read
 */
public static byte[] getFileMD5(String filename) {
    try {
        RandomAccessFile file = new RandomAccessFile(filename, "r");
        byte[] md5 = getFileMD5(file);
        file.close();
        return md5;
    } catch (Exception e) {
        return null;
    }
}

From source file:com.netease.qa.emmagee.utils.CpuInfo.java

/**
 * get CPU name.
 * 
 * @return CPU name
 */
public String getCpuName() {
    try {
        RandomAccessFile cpuStat = new RandomAccessFile(CPU_INFO_PATH, "r");
        // check cpu type
        String line;
        while (null != (line = cpuStat.readLine())) {
            String[] values = line.split(":");
            if (values[0].contains(INTEL_CPU_NAME) || values[0].contains("Processor")) {
                cpuStat.close();
                Log.d(LOG_TAG, "CPU name=" + values[1]);
                return values[1];
            }
        }
    } catch (IOException e) {
        Log.e(LOG_TAG, "IOException: " + e.getMessage());
    }
    return "";
}

From source file:org.apache.hadoop.dfs.TestFsck.java

public void testCorruptBlock() throws Exception {
    Configuration conf = new Configuration();
    conf.setLong("dfs.blockreport.intervalMsec", 1000);
    FileSystem fs = null;
    DFSClient dfsClient = null;
    LocatedBlocks blocks = null;
    int replicaCount = 0;
    Random random = new Random();
    String outStr = null;

    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
    cluster.waitActive();
    fs = cluster.getFileSystem();
    Path file1 = new Path("/testCorruptBlock");
    DFSTestUtil.createFile(fs, file1, 1024, (short) 3, 0);
    // Wait until file replication has completed
    DFSTestUtil.waitReplication(fs, file1, (short) 3);
    String block = DFSTestUtil.getFirstBlock(fs, file1).getBlockName();

    // Make sure filesystem is in healthy state
    outStr = runFsck(conf, 0, true, "/");
    System.out.println(outStr);
    assertTrue(outStr.contains("HEALTHY"));

    // corrupt replicas 
    File baseDir = new File(System.getProperty("test.build.data"), "dfs/data");
    for (int i = 0; i < 6; i++) {
        File blockFile = new File(baseDir, "data" + (i + 1) + "/current/" + block);
        if (blockFile.exists()) {
            RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
            FileChannel channel = raFile.getChannel();
            String badString = "BADBAD";
            int rand = random.nextInt((int) channel.size() / 2);
            raFile.seek(rand);
            raFile.write(badString.getBytes());
            raFile.close();
        }
    }
    // Read the file to trigger reportBadBlocks
    try {
        IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), conf, true);
    } catch (IOException ie) {
        // Ignore exception
    }

    dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
    blocks = dfsClient.namenode.getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
    replicaCount = blocks.get(0).getLocations().length;
    while (replicaCount != 3) {
        try {
            Thread.sleep(100);
        } catch (InterruptedException ignore) {
        }
        blocks = dfsClient.namenode.getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
        replicaCount = blocks.get(0).getLocations().length;
    }
    assertTrue(blocks.get(0).isCorrupt());

    // Check if fsck reports the same
    outStr = runFsck(conf, 1, true, "/");
    System.out.println(outStr);
    assertTrue(outStr.contains("CORRUPT"));
    assertTrue(outStr.contains("testCorruptBlock"));

    cluster.shutdown();
}

From source file:fr.gael.dhus.server.ftp.DHuSFtpProduct.java

@Override
public InputStream createInputStream(long offset) throws IOException {
    File file = new File(product.getDownloadablePath());
    logger.debug("Retrieving File stream from " + file.getPath());
    /*
    return new FileInputStream(file);
    */
    // permission check
    if (!doesExist()) {
        throw new IOException("No read permission : " + file.getName());
    }

    // move to the appropriate offset and create input stream
    final RandomAccessFile raf = new RandomAccessFile(file, "r");
    try {
        raf.seek(offset);
        // The IBM jre needs to have both the stream and the random access file
        // objects closed to actually close the file
        return new RegulatedInputStream.Builder(new FileInputStream(raf.getFD()) {
            public void close() throws IOException {
                super.close();
                raf.close();
            }
        }, TrafficDirection.OUTBOUND).userName(user.getName())
                .copyStreamListener(new DownloadActionRecordListener(product.getUuid(), product.getIdentifier(),
                        vfsService.getDhusUserFromFtpUser(user)))
                .build();
    } catch (IOException e) {
        raf.close();
        throw e;
    }
}

From source file:org.apache.flume.channel.file.TestEventQueueBackingStoreFactory.java

@Test(expected = BadCheckpointException.class)
public void testCheckpointVersionNotEqualToMeta() throws Exception {
    RandomAccessFile writer = new RandomAccessFile(checkpoint, "rw");
    try {
        EventQueueBackingStore backingStore = EventQueueBackingStoreFactory.get(checkpoint, 10, "test");
        backingStore.close();
        writer.seek(EventQueueBackingStoreFile.INDEX_VERSION * Serialization.SIZE_OF_LONG);
        writer.writeLong(2L);
        writer.getFD().sync();
        backingStore = EventQueueBackingStoreFactory.get(checkpoint, 10, "test");
    } finally {
        writer.close();
    }
}

From source file:org.apache.flume.channel.file.TestEventQueueBackingStoreFactory.java

@Test(expected = BadCheckpointException.class)
public void testCheckpointBadVersion() throws Exception {
    RandomAccessFile writer = new RandomAccessFile(checkpoint, "rw");
    try {
        EventQueueBackingStore backingStore = EventQueueBackingStoreFactory.get(checkpoint, 10, "test");
        backingStore.close();
        writer.seek(EventQueueBackingStoreFile.INDEX_VERSION * Serialization.SIZE_OF_LONG);
        writer.writeLong(94L);
        writer.getFD().sync();

        backingStore = EventQueueBackingStoreFactory.get(checkpoint, 10, "test");
    } finally {
        writer.close();
    }
}

From source file:TestFuseDFS.java

/**
 * Test random access to a file
 */
@Test
public void testRandomAccess() throws IOException {
    final String contents = "hello world";
    File f = new File(mountPoint, "file1");

    createFile(f, contents);

    RandomAccessFile raf = new RandomAccessFile(f, "rw");
    raf.seek(f.length());
    try {
        raf.write('b');
    } catch (IOException e) {
        // Expected: fuse-dfs not yet support append
        assertEquals("Operation not supported", e.getMessage());
    } finally {
        raf.close();
    }

    raf = new RandomAccessFile(f, "rw");
    raf.seek(0);
    try {
        raf.write('b');
        fail("Over-wrote existing bytes");
    } catch (IOException e) {
        // Expected: can-not overwrite a file
        assertEquals("Invalid argument", e.getMessage());
    } finally {
        raf.close();
    }
    execAssertSucceeds("rm " + f.getAbsolutePath());
}