Example usage for java.io RandomAccessFile readInt

List of usage examples for java.io RandomAccessFile readInt

Introduction

On this page you can find example usages of java.io.RandomAccessFile.readInt().

Prototype

public final int readInt() throws IOException 

Source Link

Document

Reads a signed 32-bit integer from this file.

Usage

From source file:org.apache.hadoop.hdfs.server.namenode.FSImageUtil.java

/**
 * Loads the {@code FileSummary} section stored at the tail of an FSImage file.
 *
 * Layout: the final 4 bytes of the file hold the length of the summary
 * section, and the summary section itself immediately precedes that length
 * field.
 *
 * @param file the image file, opened for reading
 * @return the parsed and version-checked summary
 * @throws IOException if the recorded summary length is invalid, the
 *         on-disk version differs from {@code FILE_VERSION}, or the layout
 *         version lacks protobuf-format support
 */
public static FileSummary loadSummary(RandomAccessFile file) throws IOException {
    final int FILE_LENGTH_FIELD_SIZE = 4;
    long fileLength = file.length();
    file.seek(fileLength - FILE_LENGTH_FIELD_SIZE);
    int summaryLength = file.readInt();

    if (summaryLength <= 0) {
        // was "Negative length of the file": inaccurate — zero is rejected
        // too, and the value is the summary length, not the file length
        throw new IOException("Non-positive summary section length: " + summaryLength);
    }
    if (summaryLength > fileLength - FILE_LENGTH_FIELD_SIZE) {
        // guard: a bogus length would otherwise produce a negative seek below
        throw new IOException(
                "Summary section length " + summaryLength + " exceeds file length " + fileLength);
    }
    file.seek(fileLength - FILE_LENGTH_FIELD_SIZE - summaryLength);

    byte[] summaryBytes = new byte[summaryLength];
    file.readFully(summaryBytes);

    FileSummary summary = FileSummary.parseDelimitedFrom(new ByteArrayInputStream(summaryBytes));
    if (summary.getOndiskVersion() != FILE_VERSION) {
        throw new IOException("Unsupported file version " + summary.getOndiskVersion());
    }

    if (!NameNodeLayoutVersion.supports(Feature.PROTOBUF_FORMAT, summary.getLayoutVersion())) {
        throw new IOException("Unsupported layout version " + summary.getLayoutVersion());
    }
    return summary;
}

From source file:com.example.android.vault.EncryptedDocument.java

/**
 * Reads the leading 32-bit magic value from the file and fails fast if it
 * does not match the expected {@code MAGIC_NUMBER}.
 *
 * @param f file positioned at the magic field
 * @throws ProtocolException if the value read is not the expected magic
 * @throws IOException if reading from the file fails
 */
private static void assertMagic(RandomAccessFile f) throws IOException {
    final int observed = f.readInt();
    if (observed == MAGIC_NUMBER) {
        return;
    }
    throw new ProtocolException("Bad magic number: " + Integer.toHexString(observed));
}

From source file:org.ut.biolab.medsavant.shared.util.IOUtils.java

/**
 * Checks if a file is zipped./*w w w.j ava  2s  .c  om*/
 *
 * @param f
 * @return
 */
public static boolean isZipped(File f) {

    try {
        RandomAccessFile raf = new RandomAccessFile(f, "r");
        long n = raf.readInt();
        raf.close();
        if (n == 0x504B0304) {
            return true;
        } else {
            return false;
        }
    } catch (Throwable e) {
        e.printStackTrace(System.err);
        return false;
    }

}

From source file:com.sangupta.snowpack.SnowpackRecover.java

/**
 * Attempts to rebuild the metadata for a single chunk file by scanning it
 * sequentially, record by record, until end-of-file.
 *
 * Each on-disk record is laid out as: name length (int), name bytes, data
 * length (int), an 8-byte field (skipped here), the data bytes, and a
 * single zero terminator byte.
 *
 * @param chunkID    identifier of the chunk being recovered
 * @param chunkFile  the chunk file on disk to scan
 * @param metadataDB destination DB that receives the recovered metadata
 * @return the rebuilt chunk info, or {@code null} if any record's
 *         terminator byte is non-zero (scan aborted, nothing saved)
 * @throws IOException if reading the chunk file fails
 */
private static ChunkInfo recoverChunkInfo(final int chunkID, final File chunkFile,
        SnowpackMetadataDB metadataDB) throws IOException {
    // open the file for reading
    RandomAccessFile raf = new RandomAccessFile(chunkFile, "r");

    // per-record fields; numFiles counts successfully scanned records
    int nameLength, length, terminator, headerLength, numFiles = 0;
    long offset;

    List<FlakeMetadata> metas = new ArrayList<FlakeMetadata>();

    try {
        while (raf.getFilePointer() < raf.length()) {
            // remember where this record starts; recorded in the metadata
            offset = raf.getFilePointer();

            nameLength = raf.readInt();
            byte[] name = new byte[nameLength];
            raf.readFully(name);

            length = raf.readInt();
            raf.readLong(); // 8-byte field not needed for recovery; skip it
            raf.skipBytes((int) length); // skip the record's data payload

            // every record must end with a single zero byte; anything else
            // means the chunk is corrupt and recovery is abandoned
            terminator = raf.readByte();
            if (terminator != 0) {
                System.out.print(" invalid descriptor found...");
                return null;
            }

            // header = name-length int + name bytes + data-length int + long field
            headerLength = 4 + name.length + 4 + 8;

            numFiles++;

            metas.add(new FlakeMetadata(new String(name), nameLength, chunkID, offset, headerLength));
        }
    } finally {
        raf.close();
    }

    // all records scanned cleanly — safe to persist the recovered metadata

    // save all metadata in new DB
    for (FlakeMetadata meta : metas) {
        metadataDB.save(meta);
    }

    // return chunk info
    ChunkInfo info = new ChunkInfo();
    info.chunkID = chunkID;
    info.numFiles = numFiles;
    info.writePointer = -1; // NOTE(review): presumably marks "unknown/recovered" — confirm consumer semantics

    return info;
}

From source file:de.tuebingen.uni.sfs.germanet.api.GermaNet.java

/**
 * Checks whether the <code>File</code> is a <code>ZipFile</code>.
 * @param file the <code>File</code> to check
 * @return true if this <code>File</code> is a <code>ZipFile</code>
 * @throws javax.xml.stream.IOException/* w  w  w.j  av a 2  s .  co  m*/
 */
protected static boolean isZipFile(File file) throws IOException {
    RandomAccessFile raf = new RandomAccessFile(file, "r");
    long n = raf.readInt();
    raf.close();
    if (n == 0x504B0304) {
        return true;
    } else {
        return false;
    }
}

From source file:org.apache.hadoop.hdfs.TestRaidDfs.java

/**
 * Deliberately corrupts every on-disk replica of the given block by reading
 * the int stored at {@code offset} and writing back that value plus one.
 *
 * @param file         path of the file owning the block (not read here;
 *                     kept for signature compatibility)
 * @param blockNum     the block whose replicas are corrupted
 * @param numDataNodes number of data nodes whose storage dirs are scanned
 * @param offset       byte offset within each replica file to corrupt
 * @param cluster      the mini cluster hosting the data nodes
 * @throws IOException if reading or writing a replica file fails
 */
public static void corruptBlock(Path file, Block blockNum, int numDataNodes, long offset,
        MiniDFSCluster cluster) throws IOException {
    long id = blockNum.getBlockId();

    // Scan every data-dir of every data node for this block's replica files.
    for (int i = 0; i < numDataNodes; i++) {
        File[] dirs = getDataNodeDirs(i, cluster);

        for (int j = 0; j < dirs.length; j++) {
            File[] blocks = dirs[j].listFiles();
            assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length >= 0));
            for (int idx = 0; idx < blocks.length; idx++) {
                // match "blk_<id>*" data files, skipping the ".meta" companions
                if (blocks[idx].getName().startsWith("blk_" + id) && !blocks[idx].getName().endsWith(".meta")) {
                    // Corrupt: bump the int stored at 'offset' by one.
                    File f = blocks[idx];
                    // try-with-resources: the original never closed this handle
                    try (RandomAccessFile raf = new RandomAccessFile(f, "rw")) {
                        raf.seek(offset);
                        int data = raf.readInt();
                        raf.seek(offset);
                        raf.writeInt(data + 1);
                    }
                    LOG.info("Corrupted block " + blocks[idx]);
                }
            }
        }
    }
}

From source file:org.apache.hadoop.hdfs.TestRaidDfs.java

/**
 * Deliberately deletes or corrupts every on-disk replica of the given
 * block. In corrupt mode the int at each replica file's midpoint is
 * incremented by one; in delete mode the replica file is removed. Fails the
 * test if no replica was touched at all.
 *
 * @param file         path of the file owning the block (not read here;
 *                     kept for signature compatibility)
 * @param blockNum     the block whose replicas are targeted
 * @param numDataNodes number of data nodes whose storage dirs are scanned
 * @param delete       true to delete replicas, false to corrupt them in place
 * @param cluster      the mini cluster hosting the data nodes
 * @throws IOException if reading or writing a replica file fails
 */
public static void corruptBlock(Path file, Block blockNum, int numDataNodes, boolean delete,
        MiniDFSCluster cluster) throws IOException {
    long id = blockNum.getBlockId();

    // Scan every data-dir of every data node for this block's replica files.
    int numDeleted = 0;
    int numCorrupted = 0;
    for (int i = 0; i < numDataNodes; i++) {
        File[] dirs = getDataNodeDirs(i, cluster);

        for (int j = 0; j < dirs.length; j++) {
            File[] blocks = dirs[j].listFiles();
            assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length >= 0));
            for (int idx = 0; idx < blocks.length; idx++) {
                LOG.info("block file: " + blocks[idx]);
                // match "blk_<id>*" data files, skipping the ".meta" companions
                if (blocks[idx].getName().startsWith("blk_" + id) && !blocks[idx].getName().endsWith(".meta")) {
                    if (delete) {
                        blocks[idx].delete();
                        LOG.info("Deleted block " + blocks[idx]);
                        numDeleted++;
                    } else {
                        // Corrupt: bump the int stored at the file's midpoint.
                        File f = blocks[idx];
                        long seekPos = f.length() / 2;
                        // try-with-resources: the original never closed this handle
                        try (RandomAccessFile raf = new RandomAccessFile(f, "rw")) {
                            raf.seek(seekPos);
                            int data = raf.readInt();
                            raf.seek(seekPos);
                            raf.writeInt(data + 1);
                        }
                        LOG.info("Corrupted block " + blocks[idx]);
                        numCorrupted++;
                    }
                }
            }
        }
    }
    assertTrue("Nothing corrupted or deleted", (numCorrupted + numDeleted) > 0);
}

From source file:org.apache.hadoop.hdfs.server.namenode.TestEditLogFileInputStream.java

/**
 * Regression test for HDFS-8965 which verifies that
 * FSEditLogFileInputStream#scanOp verifies Op checksums.
 *
 * Writes two mkdir ops to a fresh edit log, flips the int in the last four
 * bytes of the file (corrupting the second op's trailing checksum), then
 * checks that scanning op 1 succeeds while scanning op 2 fails with a
 * "Transaction is corrupt." error.
 */
@Test(timeout = 60000)
public void testScanCorruptEditLog() throws Exception {
    Configuration conf = new Configuration();
    File editLog = new File(System.getProperty("test.build.data", "/tmp"), "testCorruptEditLog");

    LOG.debug("Creating test edit log file: " + editLog);
    EditLogFileOutputStream elos = new EditLogFileOutputStream(conf, editLog.getAbsoluteFile(), 8192);
    elos.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    // First op: txid 1, mkdir /mydir.
    FSEditLogOp.OpInstanceCache cache = new FSEditLogOp.OpInstanceCache();
    FSEditLogOp.MkdirOp mkdirOp = FSEditLogOp.MkdirOp.getInstance(cache);
    mkdirOp.reset();
    mkdirOp.setRpcCallId(123);
    mkdirOp.setTransactionId(1);
    mkdirOp.setInodeId(789L);
    mkdirOp.setPath("/mydir");
    PermissionStatus perms = PermissionStatus.createImmutable("myuser", "mygroup",
            FsPermission.createImmutable((short) 0777));
    mkdirOp.setPermissionStatus(perms);
    elos.write(mkdirOp);
    // Second op: txid 2, mkdir /mydir2 — this is the op that gets corrupted.
    mkdirOp.reset();
    mkdirOp.setRpcCallId(456);
    mkdirOp.setTransactionId(2);
    mkdirOp.setInodeId(123L);
    mkdirOp.setPath("/mydir2");
    perms = PermissionStatus.createImmutable("myuser", "mygroup", FsPermission.createImmutable((short) 0666));
    mkdirOp.setPermissionStatus(perms);
    elos.write(mkdirOp);
    elos.setReadyToFlush();
    elos.flushAndSync(false);
    elos.close();
    long fileLen = editLog.length();

    // Corrupt the last 4 bytes (part of op 2's checksum) by bumping the int.
    LOG.debug("Corrupting last 4 bytes of edit log file " + editLog + ", whose length is " + fileLen);
    RandomAccessFile rwf = new RandomAccessFile(editLog, "rw");
    rwf.seek(fileLen - 4);
    int b = rwf.readInt();
    rwf.seek(fileLen - 4);
    rwf.writeInt(b + 1);
    rwf.close();

    // Op 1 must still scan cleanly; op 2 must fail checksum verification.
    EditLogFileInputStream elis = new EditLogFileInputStream(editLog);
    Assert.assertEquals(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION, elis.getVersion(true));
    Assert.assertEquals(1, elis.scanNextOp());
    LOG.debug("Read transaction 1 from " + editLog);
    try {
        elis.scanNextOp();
        Assert.fail("Expected scanNextOp to fail when op checksum was corrupt.");
    } catch (IOException e) {
        LOG.debug("Caught expected checksum error when reading corrupt " + "transaction 2", e);
        GenericTestUtils.assertExceptionContains("Transaction is corrupt.", e);
    }
    elis.close();
}

From source file:com.btoddb.fastpersitentqueue.JournalFileTest.java

/**
 * Initializes a journal file for writing, closes it, then re-opens the raw
 * file and verifies the persisted header: the version int, the UUID id,
 * and a trailing 8-byte field expected to be zero.
 */
@Test
public void testInitForWritingThenClose() throws IOException {
    JournalFile jf1 = new JournalFile(theFile);
    jf1.initForWriting(new UUID());
    assertThat(jf1.isWriteMode(), is(true));
    assertThat(jf1.isOpen(), is(true));
    // after init, the position sits just past the fixed-size header
    assertThat(jf1.getFilePosition(), is((long) JournalFile.HEADER_SIZE));
    jf1.close();

    assertThat(jf1.isOpen(), is(false));

    // re-read the header directly from disk to check what was persisted
    RandomAccessFile raFile = new RandomAccessFile(theFile, "rw");
    assertThat(raFile.readInt(), is(JournalFile.VERSION));
    assertThat(Utils.readUuidFromFile(raFile), is(jf1.getId()));
    // NOTE(review): presumably the initial write pointer/entry count — confirm against JournalFile
    assertThat(raFile.readLong(), is(0L));
    raFile.close();
}

From source file:name.martingeisse.stackd.server.section.storage.FolderBasedSectionStorage.java

/**
 * /*from w w w.  jav  a  2  s  .c om*/
 */
private boolean loadSectionFromFile(final OutputStream out, final RandomAccessFile access, final int tocIndex)
        throws IOException {

    // read the ToC entry
    access.seek(tocIndex * 12);
    final int dataStartAddress = access.readInt();
    final int dataSize = access.readInt();
    /* int dataFlags = */access.readInt();

    // handle missing sections
    if (dataStartAddress < 1) {
        return false;
    }

    // read the data
    final byte[] compressedCubeData = new byte[dataSize];
    access.seek(dataStartAddress);
    access.readFully(compressedCubeData);

    // write data to the stream
    out.write(compressedCubeData);

    return true;

}