Example usage for java.io RandomAccessFile setLength

List of usage examples for java.io RandomAccessFile setLength

Introduction

On this page you can find example usages of java.io.RandomAccessFile.setLength.

Prototype

public native void setLength(long newLength) throws IOException;

Source Link

Document

Sets the length of this file.

Usage

From source file: org.duracloud.mill.audit.generator.LogManagerImplTest.java

private void createFileOfLength(File file, long length) {
    // Test helper: creates (or truncates/extends) the given file so that it is
    // exactly `length` bytes long; setLength fills any extension with
    // unspecified content, which is all this helper needs.
    // try-with-resources fixes the original's handle leak: the file was only
    // closed on the success path, so an exception in setLength leaked it.
    try (RandomAccessFile f = new RandomAccessFile(file, "rw")) {
        f.setLength(length);
    } catch (Exception e) {
        // Deliberately best-effort (matches original behavior): report the
        // failure but let the caller proceed.
        System.err.println(e);
    }
}

From source file: org.finra.dm.core.AbstractCoreTest.java

/**
 * Creates a file of the specified size relative to the base directory.
 *
 * @param baseDirectory the local parent directory path, relative to which we want our file to be created
 * @param file the file path (including file name) relative to the base directory for the file to be created
 * @param size the file size in bytes
 *
 * @return the created file
 *
 * @throws IOException if the file could not be opened or sized
 */
protected File createLocalFile(String baseDirectory, String file, long size) throws IOException {
    Path filePath = Paths.get(baseDirectory, file);
    // We don't check the "mkdirs" response because the directory may already exist, which would return false.
    // But we want to create sub-directories if they don't yet exist, which is why we call "mkdirs" at all.
    // If an actual directory couldn't be created, opening the file below will throw an exception anyway.
    filePath.toFile().getParentFile().mkdirs();
    // try-with-resources guarantees the handle is closed even when setLength throws
    // (the original leaked the RandomAccessFile on exception).
    try (RandomAccessFile randomAccessFile = new RandomAccessFile(filePath.toString(), "rw")) {
        // setLength pads the file to the requested size with unspecified content.
        randomAccessFile.setLength(size);
    }
    return filePath.toFile();
}

From source file: ch.cyberduck.core.io.FileBuffer.java

@Override
public void truncate(final Long length) {
    // Record the logical length unconditionally; the on-disk file is only
    // adjusted below when it already exists.
    this.length = length;
    if (temporary.exists()) {
        try {
            final RandomAccessFile file = random();
            if (length < file.length()) {
                // Shrink the on-disk file. Growing is deliberately skipped:
                // setLength would pad with unspecified bytes.
                file.setLength(length);
            }
        } catch (IOException e) {
            // Pass the exception to the logger — the original dropped it,
            // losing the cause of the truncation failure.
            log.warn(String.format("Failure truncating file %s to %d", temporary, length), e);
        }
    }
}

From source file: com.thoughtworks.go.config.GoConfigFileWriter.java

public synchronized void writeToConfigXmlFile(String content) {
    // Truncate-and-rewrite the config file under an exclusive OS-level file
    // lock so concurrent processes cannot observe a partially written file.
    //
    // try-with-resources fixes two leaks in the previous version: the
    // RandomAccessFile was never closed at all, and the channel leaked
    // whenever lock acquisition failed (the finally block required both
    // channel AND lock to be non-null before closing anything).
    FileOutputStream outputStream = null;
    try (RandomAccessFile randomAccessFile = new RandomAccessFile(fileLocation(), "rw");
            FileChannel channel = randomAccessFile.getChannel();
            FileLock lock = channel.lock()) {
        randomAccessFile.seek(0);
        randomAccessFile.setLength(0);
        // Write through the already-open (and locked) file descriptor.
        outputStream = new FileOutputStream(randomAccessFile.getFD());
        IOUtils.write(content, outputStream, UTF_8);
        // Resources are released in reverse order: lock, channel, file.
        // Any exception — including one thrown by the implicit close() calls —
        // is caught below and rethrown, as in the original.
    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        // The descriptor is shared with randomAccessFile (already closed by
        // try-with-resources); this quiet close just releases the stream object.
        IOUtils.closeQuietly(outputStream);
    }
}

From source file: org.apache.flume.channel.file.TestEventQueueBackingStoreFactory.java

@Test(expected = BadCheckpointException.class)
public void testTruncateMeta() throws Exception {
    // Open and cleanly close a backing store so a checkpoint and its metadata
    // file exist on disk.
    EventQueueBackingStore backingStore = EventQueueBackingStoreFactory.get(checkpoint, 10, "test");
    backingStore.close();
    Assert.assertTrue(checkpoint.exists());
    File metaFile = Serialization.getMetaDataFile(checkpoint);
    Assert.assertTrue(metaFile.length() != 0);
    // Corrupt the metadata by truncating it to zero bytes. try-with-resources
    // releases the handle even if setLength/sync throws (the original leaked
    // the writer on that path).
    try (RandomAccessFile writer = new RandomAccessFile(metaFile, "rw")) {
        writer.setLength(0);
        // Force the truncation to disk before reopening the store.
        writer.getFD().sync();
    }
    // Reopening must now detect the empty metadata file and throw
    // BadCheckpointException (asserted via the @Test "expected" attribute).
    backingStore = EventQueueBackingStoreFactory.get(checkpoint, 10, "test");
}

From source file: org.apache.hadoop.hdfs.server.datanode.TestTransferBlock.java

/**
 * Verifies that a block whose data and checksum files have both been
 * truncated to zero bytes can still be transferred to another datanode, with
 * the receiver ending up with a finalized zero-length block.
 *
 * Uses the external-checksum layout (separate ".meta" file); relies on test
 * fixtures (cluster, fileSystem) initialized elsewhere in this class.
 */
public void testTransferZeroChecksumFile() throws IOException {
    // Force the external (separate .meta file) checksum layout on every node.
    for (DataNode dn : cluster.getDataNodes()) {
        dn.useInlineChecksum = false;
    }

    // Create a new single-replica file in the root.
    String filestr = "/testTransferZeroChecksumFile";
    DistributedFileSystem dfs = (DistributedFileSystem) fileSystem;

    DFSTestUtil.createFile(dfs, new Path(filestr), 9L, (short) 1, 0L);

    BlockPathInfo blockPathInfo = DFSTestUtil.getBlockPathInfo(filestr, cluster, dfs.getClient());

    // Truncate the checksum (.meta) file to zero bytes.
    RandomAccessFile meta = new RandomAccessFile(blockPathInfo.getMetaPath(), "rw");
    meta.setLength(0);
    meta.close();

    // Truncate the block data file to zero bytes as well.
    RandomAccessFile block = new RandomAccessFile(blockPathInfo.getBlockPath(), "rw");
    block.setLength(0);
    block.close();

    // Find which datanode holds the (now empty) block and which does not;
    // syncInMemorySize reconciles the holder's in-memory block size with the
    // truncated on-disk file.
    int ns = cluster.getNameNode().getNamespaceID();
    DataNode dnWithBlk = null, dnWithoutBlk = null;
    for (DataNode dn : cluster.getDataNodes()) {
        FSDataset fds = (FSDataset) dn.data;
        DatanodeBlockInfo dbi = fds.getDatanodeBlockInfo(ns, blockPathInfo);
        if (dbi != null) {
            dbi.syncInMemorySize();
            dnWithBlk = dn;
        } else {
            dnWithoutBlk = dn;
        }
    }
    if (dnWithoutBlk == null || dnWithBlk == null) {
        TestCase.fail();
    }
    // Build the transfer target list: the live datanode that lacks the block.
    DatanodeInfo[] list = new DatanodeInfo[1];
    for (DatanodeInfo di : dfs.getClient().datanodeReport(DatanodeReportType.LIVE)) {
        if (dnWithoutBlk.getPort() == di.getPort()) {
            list[0] = di;
            break;
        }
    }
    blockPathInfo.setNumBytes(0);
    dnWithBlk.transferBlocks(ns, new Block[] { blockPathInfo }, new DatanodeInfo[][] { list });

    // The transfer is asynchronous: poll up to 3 times, one second apart,
    // until the receiving node reports a finalized block length.
    long size = -1;
    for (int i = 0; i < 3; i++) {
        try {
            size = ((FSDataset) dnWithoutBlk.data).getFinalizedBlockLength(ns, blockPathInfo);
            if (size == 0) {
                break;
            }
        } catch (IOException ioe) {
            // Block not available on the target yet; retry below.
        }

        if (i != 2) {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        } else {
            // Out of retries without ever reading a length: fail the test.
            TestCase.fail();
        }
    }
    TestCase.assertEquals(0, size);
}

From source file: sernet.verinice.service.test.AttachmentTest.java

private Attachment createAttachment(Organization org) throws Exception {
    // Create a temporary file and grow it to a random length of up to
    // maxFileSizeInMb megabytes; setLength fills the file with unspecified
    // (throwaway) content, which is all this test needs.
    File f = File.createTempFile("veriniceAttachment", "test");
    f.deleteOnExit();
    long length = Math.round(Math.random() * (1024 * 1024.0 * maxFileSizeInMb));
    // try-with-resources closes the handle; the original never closed it.
    try (RandomAccessFile raf = new RandomAccessFile(f, "rw")) {
        raf.setLength(length);
    }
    assertNotNull(f);

    // create AttachmentObject
    Attachment a = createAttachment(org, f);

    // save attachment
    a = saveAttachment(a);
    assertNotNull(a);

    // create and save file to attachment
    attachFileData(f, a);

    // Remember the checksum so later verification can compare file content.
    String hashSum = FileUtil.getMD5Checksum(f.getAbsolutePath());
    dbIdHashSumMap.put(a.getDbId(), hashSum);

    if (LOG.isDebugEnabled()) {
        LOG.debug("File created, length: " + length + ", path: " + f.getAbsolutePath() + ", hash sum: "
                + hashSum);
    }

    return a;
}

From source file: org.apache.bookkeeper.bookie.EntryLogTest.java

@Test(timeout = 60000)
public void testCorruptEntryLog() throws Exception {
    // Build a minimal bookie directory structure backed by a temp dir.
    File tmpDir = createTempDir("bkTest", ".dir");
    File curDir = Bookie.getCurrentDirectory(tmpDir);
    Bookie.checkDirectoryStructure(curDir);

    int gcWaitTime = 1000;
    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
    conf.setGcWaitTime(gcWaitTime);
    conf.setLedgerDirNames(new String[] { tmpDir.toString() });
    Bookie bookie = new Bookie(conf);
    // Write one entry each for ledgers 1, 3 and 2 (in that order) into entry
    // log 0, then flush them to disk.
    EntryLogger logger = ((InterleavedLedgerStorage) bookie.ledgerStorage).entryLogger;
    logger.addEntry(1, generateEntry(1, 1));
    logger.addEntry(3, generateEntry(3, 1));
    logger.addEntry(2, generateEntry(2, 1));
    logger.flush();
    // Chop 10 bytes off the end of the log to corrupt the last entry written,
    // simulating a partial write. try-with-resources releases the handle even
    // if setLength throws (the original leaked it on that path).
    File f = new File(curDir, "0.log");
    try (RandomAccessFile raf = new RandomAccessFile(f, "rw")) {
        raf.setLength(raf.length() - 10);
    }
    // Re-scan the truncated log: ledgers 1 and 3 must survive, while the
    // corrupted final entry (ledger 2) must be dropped.
    logger = new EntryLogger(conf, bookie.getLedgerDirsManager());

    EntryLogMetadata meta = logger.getEntryLogMetadata(0L);
    LOG.info("Extracted Meta From Entry Log {}", meta);
    assertNotNull(meta.getLedgersMap().get(1L));
    assertNull(meta.getLedgersMap().get(2L));
    assertNotNull(meta.getLedgersMap().get(3L));
}

From source file: org.apache.hadoop.hdfs.server.datanode.TestTransferBlock.java

/**
 * Verifies that a block whose data file has been truncated to zero bytes can
 * still be transferred to another datanode when the inline-checksum layout is
 * in use, with the receiver finalizing a zero-length block.
 *
 * Inline-checksum counterpart of testTransferZeroChecksumFile; relies on test
 * fixtures (cluster, fileSystem) initialized elsewhere in this class.
 */
public void testTransferZeroChecksumFileInlineChecksum() throws IOException {
    // Force the inline-checksum layout (checksums stored in the block file).
    for (DataNode dn : cluster.getDataNodes()) {
        dn.useInlineChecksum = true;
    }

    // Create a new single-replica file in the root.
    String filestr = "/testTransferZeroChecksumFile";
    DistributedFileSystem dfs = (DistributedFileSystem) fileSystem;

    DFSTestUtil.createFile(dfs, new Path(filestr), 9L, (short) 1, 0L);

    LocatedBlocks locations = cluster.getNameNode().getBlockLocations(filestr, 0, Long.MAX_VALUE);
    LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);

    // Find which datanode holds the block and which does not. On the holder,
    // truncate the block data file to zero bytes (no separate .meta file in
    // the inline layout) and reconcile the in-memory size with the file.
    int ns = cluster.getNameNode().getNamespaceID();
    DataNode dnWithBlk = null, dnWithoutBlk = null;
    for (DataNode dn : cluster.getDataNodes()) {
        FSDataset fds = (FSDataset) dn.data;
        DatanodeBlockInfo dbi = fds.getDatanodeBlockInfo(ns, locatedblock.getBlock());

        if (dbi != null) {
            RandomAccessFile block = new RandomAccessFile(dbi.getBlockDataFile().file.toString(), "rw");
            block.setLength(0);
            block.close();

            dbi.syncInMemorySize();
            dnWithBlk = dn;
        } else {
            dnWithoutBlk = dn;
        }
    }
    if (dnWithoutBlk == null || dnWithBlk == null) {
        TestCase.fail();
    }
    // Build the transfer target list: the live datanode that lacks the block.
    DatanodeInfo[] list = new DatanodeInfo[1];
    for (DatanodeInfo di : dfs.getClient().datanodeReport(DatanodeReportType.LIVE)) {
        if (dnWithoutBlk.getPort() == di.getPort()) {
            list[0] = di;
            break;
        }
    }
    locatedblock.getBlock().setNumBytes(0);
    dnWithBlk.transferBlocks(ns, new Block[] { locatedblock.getBlock() }, new DatanodeInfo[][] { list });

    // The transfer is asynchronous: poll up to 3 times, one second apart,
    // until the receiving node reports a finalized block length.
    long size = -1;
    for (int i = 0; i < 3; i++) {
        try {
            size = ((FSDataset) dnWithoutBlk.data).getFinalizedBlockLength(ns, locatedblock.getBlock());
            if (size == 0) {
                break;
            }
        } catch (IOException ioe) {
            // Block not available on the target yet; retry below.
        }

        if (i != 2) {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        } else {
            // Out of retries without ever reading a length: fail the test.
            TestCase.fail();
        }
    }
    TestCase.assertEquals(0, size);
}

From source file: tayler.TailerTest.java

/**
 * Truncates the given file to zero bytes in place and forces the change to disk.
 *
 * @param file the file to erase (the file itself is not deleted)
 * @throws IOException if the file cannot be opened, truncated, synced, or closed
 */
protected void eraseFile(File file) throws IOException {
    // "rws" mode already writes content and metadata synchronously; the
    // explicit getFD().sync() keeps the original belt-and-braces flush.
    // try-with-resources replaces IOUtils.closeQuietly so a failed close is
    // no longer silently swallowed — the method already declares IOException,
    // so propagating it is backward compatible.
    try (RandomAccessFile raf = new RandomAccessFile(file, "rws")) {
        raf.setLength(0);
        raf.getFD().sync();
    }
}