Example usage for java.io RandomAccessFile close

List of usage examples for java.io RandomAccessFile close

Introduction

On this page you can find example usage for java.io.RandomAccessFile.close().

Prototype

public void close() throws IOException 

Document

Closes this random access file stream and releases any system resources associated with the stream.
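
Before the real-world examples below, here is a minimal, self-contained sketch of the two common close() patterns (the file name "example.dat" is hypothetical): an explicit close() in a finally block, and try-with-resources, which works since Java 7 because RandomAccessFile implements Closeable.

import java.io.IOException;
import java.io.RandomAccessFile;

public class CloseExample {
    public static void main(String[] args) throws IOException {
        // Explicit close() in a finally block guarantees the file handle is
        // released even if the write fails.
        RandomAccessFile raf = new RandomAccessFile("example.dat", "rw");
        try {
            raf.writeInt(42);
        } finally {
            raf.close();
        }

        // try-with-resources calls close() automatically at the end of the block.
        try (RandomAccessFile in = new RandomAccessFile("example.dat", "r")) {
            System.out.println(in.readInt());
        }
    }
}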

Usage

From source file:org.apache.sshd.server.filesystem.NativeSshFile.java

/**
 * Create output stream for writing.
 */
public OutputStream createOutputStream(final long offset) throws IOException {

    // permission check
    if (!isWritable()) {
        throw new IOException("No write permission : " + file.getName());
    }

    // create output stream
    final RandomAccessFile raf = new RandomAccessFile(file, "rw");
    raf.setLength(offset);
    raf.seek(offset);

    // The IBM jre needs to have both the stream and the random access file
    // objects closed to actually close the file
    return new FileOutputStream(raf.getFD()) {
        public void close() throws IOException {
            super.close();
            raf.close();
        }
    };
}

From source file:org.commoncrawl.service.listcrawler.CacheWriterThread.java

@Override
public void run() {

    boolean shutdown = false;

    while (!shutdown) {
        try {
            final CacheWriteRequest request = _writeRequestQueue.take();

            switch (request._requestType) {

            case ExitThreadRequest: {
                // shutdown condition ... 
                CacheManager.LOG.info("Disk Writer Thread Received Shutdown. Exiting!");
                shutdown = true;
            }
                break;

            case WriteRequest: {

                long timeStart = System.currentTimeMillis();

                try {
                    // reset crc calculator (single thread so no worries on synchronization)
                    _crc32Out.reset();

                    // figure out if we need to compress the item ... 
                    if ((request._item.getFlags() & CacheItem.Flags.Flag_IsCompressed) == 0
                            && request._item.getContent().getCount() != 0) {
                        LOG.info("Incoming Cache Request Content for:" + request._item.getUrl()
                                + " is not compressed. Compressing...");
                        ByteStream compressedBytesOut = new ByteStream(request._item.getContent().getCount());
                        ThriftyGZIPOutputStream gzipOutputStream = new ThriftyGZIPOutputStream(
                                compressedBytesOut);
                        gzipOutputStream.write(request._item.getContent().getReadOnlyBytes(), 0,
                                request._item.getContent().getCount());
                        gzipOutputStream.finish();
                        LOG.info("Finished Compressing Incoming Content for:" + request._item.getUrl()
                                + " BytesIn:" + request._item.getContent().getCount() + " BytesOut:"
                                + compressedBytesOut.size());
                        // replace buffer

                        request._item.setContent(
                                new FlexBuffer(compressedBytesOut.getBuffer(), 0, compressedBytesOut.size()));
                        request._item.setFlags((request._item.getFlags() | CacheItem.Flags.Flag_IsCompressed));
                    }

                    // create streams ...
                    ByteStream bufferOutputStream = new ByteStream(8192);

                    CheckedOutputStream checkedStream = new CheckedOutputStream(bufferOutputStream, _crc32Out);
                    DataOutputStream dataOutputStream = new DataOutputStream(checkedStream);

                    // remember if this item has content ... 
                    boolean hasContent = request._item.isFieldDirty(CacheItem.Field_CONTENT);
                    // now mark the content field as clean, so that it will not be serialized in our current serialization attempt ... 
                    request._item.setFieldClean(CacheItem.Field_CONTENT);
                    // and go ahead and write out the data to the intermediate buffer while also computing partial checksum 
                    request._item.write(dataOutputStream);

                    request._item.setFieldDirty(CacheItem.Field_CONTENT);

                    // ok, now ... write out file header ... 
                    CacheItemHeader itemHeader = new CacheItemHeader(_manager.getLocalLogSyncBytes());

                    itemHeader._status = CacheItemHeader.STATUS_ALIVE;
                    itemHeader._lastAccessTime = System.currentTimeMillis();
                    itemHeader._fingerprint = request._itemFingerprint;
                    // compute total length ... 

                    // first the header bytes in the cacheItem 
                    itemHeader._dataLength = bufferOutputStream.size();
                    // next the content length (encoded - as in size + bytes) ... 
                    itemHeader._dataLength += 4 + request._item.getContent().getCount();
                    // lastly the crc value itself ... 
                    itemHeader._dataLength += 8;
                    // open the log file ... 
                    DataOutputBuffer logStream = new DataOutputBuffer();

                    // ok, go ahead and write the header 
                    itemHeader.writeHeader(logStream);
                    // ok now write out the item data minus content... 
                    logStream.write(bufferOutputStream.getBuffer(), 0, bufferOutputStream.size());
                    // now create a checked stream for the content ... 
                    CheckedOutputStream checkedStream2 = new CheckedOutputStream(logStream,
                            checkedStream.getChecksum());

                    dataOutputStream = new DataOutputStream(checkedStream2);

                    // content size 
                    dataOutputStream.writeInt(request._item.getContent().getCount());
                    // now write out the content (via checked stream so that we can calc checksum on content)
                    dataOutputStream.write(request._item.getContent().getReadOnlyBytes(), 0,
                            request._item.getContent().getCount());
                    // ok ... lastly write out the checksum bytes ... 
                    dataOutputStream.writeLong(checkedStream2.getChecksum().getValue());
                    // and FINALLY, write out the total item bytes (so that we can seek in reverse to read the last request log)
                    logStream.writeInt(CacheItemHeader.SIZE + itemHeader._dataLength);

                    // ok, flush everything to the memory stream 
                    dataOutputStream.flush();

                    //ok - time to acquire the log semaphore 
                    //LOG.info("Acquiring Local Log Semaphore");
                    _manager.getLocalLogAccessSemaphore().acquireUninterruptibly();

                    try {

                        // now time to acquire the write semaphore ... 
                        _manager.getLocalLogWriteAccessSemaphore().acquireUninterruptibly();

                        // get the current file position 
                        long recordOffset = _manager.getLocalLogFilePos();

                        try {

                            long ioTimeStart = System.currentTimeMillis();

                            RandomAccessFile logFile = new RandomAccessFile(_manager.getActiveLogFilePath(),
                                    "rw");

                            try {
                                // seek to our known record offset 
                                logFile.seek(recordOffset);
                                // write out the data
                                logFile.write(logStream.getData(), 0, logStream.getLength());
                            } finally {
                                logFile.close();
                            }
                            // now we need to update the file header 
                            _manager.updateLogFileHeader(_manager.getActiveLogFilePath(), 1,
                                    CacheItemHeader.SIZE + itemHeader._dataLength + 4 /*trailing bytes*/);

                            CacheManager.LOG
                                    .info("#### Wrote Cache Item in:" + (System.currentTimeMillis() - timeStart)
                                            + " iotime:" + (System.currentTimeMillis() - ioTimeStart)
                                            + " QueueSize:" + _writeRequestQueue.size());

                        } finally {
                            // release write semaphore quickly 
                            _manager.getLocalLogWriteAccessSemaphore().release();
                        }

                        // now inform the manager of the completed request ... 
                        _manager.writeRequestComplete(request, recordOffset);
                    } finally {
                        //LOG.info("Releasing Local Log Semaphore");
                        _manager.getLocalLogAccessSemaphore().release();
                    }
                } catch (IOException e) {
                    CacheManager.LOG.error("### FUC# BATMAN! - GONNA LOSE THIS REQUEST!!!!:"
                            + CCStringUtils.stringifyException(e));
                    _manager.writeRequestFailed(request, e);
                }
            }
                break;
            }
        } catch (InterruptedException e) {
            // interrupted while waiting on the queue; loop back and re-check the shutdown flag
        }
    }
}

From source file:org.apache.cassandra.db.ScrubTest.java

private void overrideWithGarbage(SSTableReader sstable, long startPosition, long endPosition)
        throws IOException {
    RandomAccessFile file = new RandomAccessFile(sstable.getFilename(), "rw");
    file.seek(startPosition);
    file.writeBytes(StringUtils.repeat('z', (int) (endPosition - startPosition)));
    file.close();
}

From source file:org.apache.lucene.store.FSDirectory.java

protected void fsync(File fullFile) throws IOException {
    boolean success = false;
    int retryCount = 0;
    IOException exc = null;
    while (!success && retryCount < 5) {
        retryCount++;
        RandomAccessFile file = null;
        try {
            try {
                file = new RandomAccessFile(fullFile, "rw");
                file.getFD().sync();
                success = true;
            } finally {
                if (file != null)
                    file.close();
            }
        } catch (IOException ioe) {
            if (exc == null)
                exc = ioe;
            try {
                // Pause 5 msec
                Thread.sleep(5);
            } catch (InterruptedException ie) {
                throw new ThreadInterruptedException(ie);
            }
        }
    }
    if (!success)
        // Throw original exception
        throw exc;
}

From source file:org.apache.hadoop.hdfs.server.namenode.TestListCorruptFileBlocks.java

/**
 * Check that listCorruptFileBlocks works while the namenode is still in safemode.
 */
@Test(timeout = 300000)
public void testListCorruptFileBlocksInSafeMode() throws Exception {
    MiniDFSCluster cluster = null;
    Random random = new Random();

    try {
        Configuration conf = new HdfsConfiguration();
        // datanode scans directories
        conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
        // datanode sends block reports
        conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000);
        // never leave safemode automatically
        conf.setFloat(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1.5f);
        // start populating repl queues immediately 
        conf.setFloat(DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY, 0f);
        // Set short retry timeouts so this test runs faster
        conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
        cluster = new MiniDFSCluster.Builder(conf).waitSafeMode(false).build();
        cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
        FileSystem fs = cluster.getFileSystem();

        // create two files with one block each
        DFSTestUtil util = new DFSTestUtil.Builder().setName("testListCorruptFileBlocksInSafeMode")
                .setNumFiles(2).setMaxLevels(1).setMaxSize(512).build();
        util.createFiles(fs, "/srcdat10");

        // fetch bad file list from namenode. There should be none.
        Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = cluster.getNameNode().getNamesystem()
                .listCorruptFileBlocks("/", null);
        assertTrue("Namenode has " + badFiles.size() + " corrupt files. Expecting None.", badFiles.size() == 0);

        // Now deliberately corrupt one block
        File storageDir = cluster.getInstanceStorageDir(0, 0);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, cluster.getNamesystem().getBlockPoolId());
        assertTrue("data directory does not exist", data_dir.exists());
        List<File> metaFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
        assertTrue("Data directory does not contain any blocks or there was an " + "IO error",
                metaFiles != null && !metaFiles.isEmpty());
        File metaFile = metaFiles.get(0);
        RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
        FileChannel channel = file.getChannel();
        long position = channel.size() - 2;
        int length = 2;
        byte[] buffer = new byte[length];
        random.nextBytes(buffer);
        channel.write(ByteBuffer.wrap(buffer), position);
        file.close();
        LOG.info("Deliberately corrupting file " + metaFile.getName() + " at offset " + position + " length "
                + length);

        // read all files to trigger detection of corrupted replica
        try {
            util.checkFiles(fs, "/srcdat10");
        } catch (BlockMissingException e) {
            System.out.println("Received BlockMissingException as expected.");
        } catch (IOException e) {
            assertTrue("Corrupted replicas not handled properly. " + "Expecting BlockMissingException "
                    + " but received IOException " + e, false);
        }

        // fetch bad file list from namenode. There should be one file.
        badFiles = cluster.getNameNode().getNamesystem().listCorruptFileBlocks("/", null);
        LOG.info("Namenode has bad files. " + badFiles.size());
        assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting 1.", badFiles.size() == 1);

        // restart namenode
        cluster.restartNameNode(0);
        fs = cluster.getFileSystem();

        // wait until replication queues have been initialized
        while (!cluster.getNameNode().namesystem.isPopulatingReplQueues()) {
            try {
                LOG.info("waiting for replication queues");
                Thread.sleep(1000);
            } catch (InterruptedException ignore) {
            }
        }

        // read all files to trigger detection of corrupted replica
        try {
            util.checkFiles(fs, "/srcdat10");
        } catch (BlockMissingException e) {
            System.out.println("Received BlockMissingException as expected.");
        } catch (IOException e) {
            assertTrue("Corrupted replicas not handled properly. " + "Expecting BlockMissingException "
                    + " but received IOException " + e, false);
        }

        // fetch bad file list from namenode. There should be one file.
        badFiles = cluster.getNameNode().getNamesystem().listCorruptFileBlocks("/", null);
        LOG.info("Namenode has bad files. " + badFiles.size());
        assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting 1.", badFiles.size() == 1);

        // check that we are still in safe mode
        assertTrue("Namenode is not in safe mode", cluster.getNameNode().isInSafeMode());

        // now leave safe mode so that we can clean up
        cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);

        util.cleanup(fs, "/srcdat10");
    } catch (Exception e) {
        LOG.error(StringUtils.stringifyException(e));
        throw e;
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

From source file:org.caboclo.clients.GoogleDriveClient.java

@Override
public void sendNextPart(MultiPartUpload mpu) {
    byte[] chunk = new byte[Constants.CHUNK_UPLOAD_SIZE];

    try {
        RandomAccessFile raf = new RandomAccessFile(mpu.getFile(), "r");

        raf.seek(mpu.getOffset());

        int chunkLen = raf.read(chunk);

        // the file handle is only needed for this read, so close it on every path
        raf.close();

        if (chunkLen < 0) {
            mpu.setFinished();
            return;
        }

        if (chunkLen < Constants.CHUNK_UPLOAD_SIZE) {
            chunk = Arrays.copyOfRange(chunk, 0, chunkLen);
        }

        String sessionURL = (String) mpu.getObject("location");
        String mimeType = (String) mpu.getObject("mimeType");

        long start = mpu.getOffset();
        long end = start + chunkLen - 1;
        long total = mpu.getFile().length();
        String contentRange = "bytes " + start + "-" + end + "/" + total;

        Client client = Client.create();

        WebResource webResource = client.resource(sessionURL);

        ClientResponse response = webResource.header("Host", "www.googleapis.com")
                .header("Authorization", "Bearer " + token).header("Content-Length", "" + chunkLen)
                .header("Content-Type", mimeType).header("Content-Range", contentRange)
                .put(ClientResponse.class, chunk);

        mpu.incrOffset(chunkLen);

    } catch (IOException ex) {
        ex.printStackTrace();
    }
}

From source file:com.polyvi.xface.extension.advancedfiletransfer.FileUploader.java

/**
 * Reads file data into the given buffer, starting at the saved upload position.
 *
 * @param buffer
 *            the buffer to fill with file data
 * @return the number of bytes read, or READ_FILE_END on failure
 */
private int readFileData(byte[] buffer) {
    int len = READ_FILE_END;
    RandomAccessFile accessFile = null;
    try {
        accessFile = new RandomAccessFile(mUploadFile, "r");
        accessFile.seek(mStartedPosition);
        len = accessFile.read(buffer);
        if (mStartedPosition != mAlreadyUploadLength) {
            mAlreadyUploadLength = mStartedPosition;
        }
        accessFile.close();
    } catch (FileNotFoundException e) {
        len = READ_FILE_END;
        onError(FILE_NOT_FOUND_ERR);
    } catch (IOException e) {
        len = READ_FILE_END;
        onError(FILE_NOT_FOUND_ERR);
    }
    return len;
}

From source file:com.freesundance.contacts.google.ContactsExample.java

public void listContacts() throws IOException, ServiceException, GeneralSecurityException {

    service = authenticate();

    ContactFeed resultFeed = service.getFeed(feedUrl, ContactFeed.class);
    // Print the results
    LOG.debug(resultFeed.getTitle().getPlainText());
    for (ContactEntry entry : resultFeed.getEntries()) {
        printContact(entry);
        // Since 2.0, the photo link is always there, the presence of an actual
        // photo is indicated by the presence of an ETag.
        Link photoLink = entry.getLink("http://schemas.google.com/contacts/2008/rel#photo", "image/*");
        if (photoLink.getEtag() != null) {
            Service.GDataRequest request = service.createLinkQueryRequest(photoLink);
            request.execute();
            InputStream in = request.getResponseStream();
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            RandomAccessFile file = new RandomAccessFile("/tmp/" + entry.getSelfLink().getHref()
                    .substring(entry.getSelfLink().getHref().lastIndexOf('/') + 1), "rw");
            byte[] buffer = new byte[4096];
            int read;
            while ((read = in.read(buffer)) != -1) {
                out.write(buffer, 0, read);
            }
            file.write(out.toByteArray());
            file.close();
            in.close();
            request.end();
        }
        LOG.debug("Total: " + resultFeed.getEntries().size() + " entries found");
    }
}

From source file:com.netease.qa.emmagee.utils.CpuInfo.java

/**
 * Read the stat of each CPU core.
 */
private void readTotalCpuStat() {
    try {
        // monitor total and idle cpu stat of certain process
        RandomAccessFile cpuInfo = new RandomAccessFile(CPU_STAT, "r");
        String line = "";
        while ((null != (line = cpuInfo.readLine())) && line.startsWith("cpu")) {
            String[] toks = line.split("\\s+");
            idleCpu.add(Long.parseLong(toks[4]));
            totalCpu.add(Long.parseLong(toks[1]) + Long.parseLong(toks[2]) + Long.parseLong(toks[3])
                    + Long.parseLong(toks[4]) + Long.parseLong(toks[6]) + Long.parseLong(toks[5])
                    + Long.parseLong(toks[7]));
        }
        cpuInfo.close();
    } catch (FileNotFoundException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    }
}

From source file:org.andrewberman.sync.InheritMe.java

void downloadURLToFile(String url, File destFile) throws Exception {
    String origUrl = url;
    File origFile = destFile;

    Thread.sleep(200);
    try {
        destFile.getParentFile().mkdirs();
        destFile.createNewFile();
    } catch (Exception e) {
        errS.println("Error creating new file: " + destFile + " . Skipping this PDF...");
        throw e;
    }
    out.print("   Downloading: ");

    url = StringEscapeUtils.escapeHtml(url);
    System.out.println(url);
    url = url.replaceAll(" ", "%20");
    GetMethod get = new GetMethod(url);
    ByteArrayOutputStream outS = new ByteArrayOutputStream();
    try {
        System.out.println("     Executing get...");
        httpclient.executeMethod(get);
        System.out.println("     Done!");

        BufferedInputStream in = new BufferedInputStream(get.getResponseBodyAsStream());
        int i = 0;
        int ind = 0;
        long length = get.getResponseContentLength();
        // guard against a zero divisor when the content length is small or unknown
        int starRatio = (int) Math.max(1, length / 20);
        int numStars = 0;
        while ((i = in.read()) != -1) {
            if (length != -1 && ind % starRatio == 0) {
                status(" Downloading..." + repeat(".", ++numStars));
                out.print("*");
            }
            if (ind % 512 == 0) {
                waitOrExit();
            }
            outS.write(i);
            ind++;
        }

        in.close();
        outS.flush();

        RandomAccessFile raf = new RandomAccessFile(destFile, "rw");
        raf.write(outS.toByteArray());
        raf.close();
    } catch (java.net.SocketTimeoutException ste) {
        ste.printStackTrace();
        if (this.retriesLeft > 0) {
            this.retriesLeft--;
            System.out.println("Retries left: " + this.retriesLeft);
            this.downloadURLToFile(origUrl, origFile);
        } else {
            throw ste;
        }
    } finally {
        outS.close();
        get.releaseConnection();
        outS = null;
        out.print("\n");
    }
}