Example usage for java.io RandomAccessFile close

Introduction

On this page you can find example usage of java.io RandomAccessFile close.

Prototype

public void close() throws IOException 

Document

Closes this random access file stream and releases any system resources associated with the stream.
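
Before the project examples, here is a minimal sketch of the two usual patterns for making sure close() runs; the file name is hypothetical.

import java.io.IOException;
import java.io.RandomAccessFile;

public class CloseExample {
    public static void main(String[] args) throws IOException {
        // Pattern 1: explicit close() in a finally block.
        RandomAccessFile raf = new RandomAccessFile("data.bin", "rw"); // hypothetical file
        try {
            raf.writeInt(42);
        } finally {
            raf.close(); // releases the file descriptor even if writeInt throws
        }

        // Pattern 2: try-with-resources. RandomAccessFile implements Closeable,
        // so close() is invoked automatically at the end of the block.
        try (RandomAccessFile raf2 = new RandomAccessFile("data.bin", "r")) {
            System.out.println(raf2.readInt());
        }
    }
}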

Usage

From source file:org.apache.hadoop.hdfs.TestCrcCorruption.java

/** 
  * check if DFS can handle corrupted CRC blocks
  */
private void thistest(Configuration conf, DFSTestUtil util) throws Exception {
    MiniDFSCluster cluster = null;
    int numDataNodes = 2;
    short replFactor = 2;
    Random random = new Random();

    try {
        cluster = new MiniDFSCluster.Builder(conf).numNameNodes(1).numDataNodes(numDataNodes).build();
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        util.createFiles(fs, "/srcdat", replFactor);
        util.waitReplication(fs, "/srcdat", (short) 2);

        // Now deliberately remove/truncate meta blocks from the first
        // directory of the first datanode. The complete absence of a meta
        // file prevents this datanode from sending the block to another
        // datanode. However, a client is still allowed to read this block.
        //
        File storageDir = MiniDFSCluster.getStorageDir(0, 1);
        String bpid = cluster.getNamesystem().getBlockPoolId();
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        assertTrue("data directory does not exist", data_dir.exists());
        File[] blocks = data_dir.listFiles();
        assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
        int num = 0;
        for (int idx = 0; idx < blocks.length; idx++) {
            if (blocks[idx].getName().startsWith("blk_") && blocks[idx].getName().endsWith(".meta")) {
                num++;
                if (num % 3 == 0) {
                    //
                    // remove .meta file
                    //
                    LOG.info("Deliberately removing file " + blocks[idx].getName());
                    assertTrue("Cannot remove file.", blocks[idx].delete());
                } else if (num % 3 == 1) {
                    //
                    // shorten .meta file
                    //
                    RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw");
                    FileChannel channel = file.getChannel();
                    int newsize = random.nextInt((int) channel.size() / 2);
                    LOG.info("Deliberately truncating file " + blocks[idx].getName() + " to size " + newsize
                            + " bytes.");
                    channel.truncate(newsize);
                    file.close();
                } else {
                    //
                    // corrupt a few bytes of the metafile
                    //
                    RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw");
                    FileChannel channel = file.getChannel();
                    long position = 0;
                    //
                    // The very first time, corrupt the meta header at offset 0
                    //
                    if (num != 2) {
                        position = (long) random.nextInt((int) channel.size());
                    }
                    int length = random.nextInt((int) (channel.size() - position + 1));
                    byte[] buffer = new byte[length];
                    random.nextBytes(buffer);
                    channel.write(ByteBuffer.wrap(buffer), position);
                    LOG.info("Deliberately corrupting file " + blocks[idx].getName() + " at offset " + position
                            + " length " + length);
                    file.close();
                }
            }
        }

        //
        // Now deliberately corrupt all meta blocks from the second
        // directory of the first datanode
        //
        storageDir = MiniDFSCluster.getStorageDir(0, 1);
        data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        assertTrue("data directory does not exist", data_dir.exists());
        blocks = data_dir.listFiles();
        assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));

        int count = 0;
        File previous = null;
        for (int idx = 0; idx < blocks.length; idx++) {
            if (blocks[idx].getName().startsWith("blk_") && blocks[idx].getName().endsWith(".meta")) {
                //
                // Move the previous metafile into the current one.
                //
                count++;
                if (count % 2 == 0) {
                    LOG.info("Deliberately insertimg bad crc into files " + blocks[idx].getName() + " "
                            + previous.getName());
                    assertTrue("Cannot remove file.", blocks[idx].delete());
                    assertTrue("Cannot corrupt meta file.", previous.renameTo(blocks[idx]));
                    assertTrue("Cannot recreate empty meta file.", previous.createNewFile());
                    previous = null;
                } else {
                    previous = blocks[idx];
                }
            }
        }

        //
        // Only one replica is possibly corrupted. The other replica should still
        // be good. Verify.
        //
        assertTrue("Corrupted replicas not handled properly.", util.checkFiles(fs, "/srcdat"));
        LOG.info("All File still have a valid replica");

        //
        // Set the replication factor back to 1. This causes only one replica
        // of each block to remain in HDFS. The check is to make sure that
        // the corrupted replica generated above is the one that gets deleted.
        // This test is currently disabled until HADOOP-1557 is solved.
        //
        util.setReplication(fs, "/srcdat", (short) 1);
        //util.waitReplication(fs, "/srcdat", (short)1);
        //LOG.info("All Files done with removing replicas");
        //assertTrue("Excess replicas deleted. Corrupted replicas found.",
        //           util.checkFiles(fs, "/srcdat"));
        LOG.info("The excess-corrupted-replica test is disabled " + " pending HADOOP-1557");

        util.cleanup(fs, "/srcdat");
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

From source file:org.apache.hive.hcatalog.pig.TestHCatLoader.java

@Test
public void testGetInputBytes() throws Exception {
    assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS));
    File file = new File(TEST_WAREHOUSE_DIR + "/" + SPECIFIC_SIZE_TABLE + "/part-m-00000");
    file.deleteOnExit();
    RandomAccessFile randomAccessFile = new RandomAccessFile(file, "rw");
    randomAccessFile.setLength(2L * 1024 * 1024 * 1024);
    randomAccessFile.close();
    Job job = new Job();
    HCatLoader hCatLoader = new HCatLoader();
    hCatLoader.setUDFContextSignature("testGetInputBytes");
    hCatLoader.setLocation(SPECIFIC_SIZE_TABLE, job);
    ResourceStatistics statistics = hCatLoader.getStatistics(file.getAbsolutePath(), job);
    assertEquals(2048, (long) statistics.getmBytes());
}
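
A side note on the example above: setLength() extends (or truncates) the file to the requested size without writing any data, typically producing a sparse file, which is why the test can create a 2 GB part file instantly before closing it. A minimal sketch of the same pre-sizing pattern, with a hypothetical path:

RandomAccessFile raf = new RandomAccessFile("placeholder.dat", "rw"); // hypothetical path
try {
    raf.setLength(2L * 1024 * 1024 * 1024); // pre-size to 2 GiB without writing bytes
} finally {
    raf.close();
}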

From source file:Interface.FramePrincipal.java

private void bt_uplActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_bt_uplActionPerformed

    File diretorio = new File(dir_arq.getText());
    File file = new File(dir_arq.getText() + "/" + nom_arq.getText());

    if (!diretorio.exists()) {
        JOptionPane.showMessageDialog(null, "Informe um diretrio vlido!");
    } else if (!file.exists() || "".equals(nom_arq.getText())) {
        JOptionPane.showMessageDialog(null, "Informe um arquivo vlido!");
    } else {
        try {
            //////////////////////////////////////// Validate the file size /////////////////////////////////////////////////
            RandomAccessFile arquivo = new RandomAccessFile(dir_arq.getText() + "/" + nom_arq.getText(), "r");
            long tamanho = arquivo.length();
            arquivo.close(); // the file was opened only to check its size

            if (tamanho >= 104857600) {
                JOptionPane.showMessageDialog(null, "Arquivo excedeu o tamanho máximo de 100 MB!");
                return;
            }

            //////////////////////////////////////////// Upload the file to the bucket /////////////////////////////////////////
            HttpClient client = new HttpClient();
            client.getParams().setParameter("http.useragent", "Test Client");

            BufferedReader br = null;
            String apikey = "AIzaSyAuKiAdUluAz4IEaOUoXldA8XuwEbty5V8";

            File input = new File(dir_arq.getText() + "/" + nom_arq.getText());

            PostMethod method = new PostMethod("https://www.googleapis.com/upload/storage/v1/b/" + bac.getNome()
                    + "/o?uploadType=media&name=" + nom_arq.getText());
            method.addParameter("uploadType", "media");
            method.addParameter("name", nom_arq.getText());
            method.setRequestEntity(new InputStreamRequestEntity(new FileInputStream(input), input.length()));
            //       method.setRequestHeader("Content-type", "image/png; charset=ISO-8859-1");
            method.setRequestHeader("Content-type", "application/octet-stream");

            //       try{
            int returnCode = client.executeMethod(method);

            if (returnCode == HttpStatus.SC_NOT_IMPLEMENTED) {
                System.err.println("The Post method is not implemented by this URI");
                method.getResponseBodyAsString();
            } else {
                br = new BufferedReader(new InputStreamReader(method.getResponseBodyAsStream()));
                String readLine;
                while (((readLine = br.readLine()) != null)) {
                    System.err.println(readLine);
                }
                br.close();
            }

        } catch (Exception e) {
            System.err.println(e);
            return; // do not report success when the upload failed
        }
        JOptionPane.showMessageDialog(null, "Arquivo carregado com sucesso!");
    }
}

From source file:com.ibm.watson.developer_cloud.android.text_to_speech.v1.TTSUtility.java

private byte[] analyzeOpusData(InputStream is) {
    String inFilePath = getBaseDir() + "Watson.opus";
    String outFilePath = getBaseDir() + "Watson.pcm";
    File inFile = new File(inFilePath);
    File outFile = new File(outFilePath);
    outFile.deleteOnExit();
    inFile.deleteOnExit();

    try {
        RandomAccessFile inRaf = new RandomAccessFile(inFile, "rw");
        byte[] opus = IOUtils.toByteArray(is);
        inRaf.write(opus);

        sampleRate = OggOpus.decode(inFilePath, outFilePath, sampleRate); // zero means the decoder detects the sample rate

        RandomAccessFile outRaf = new RandomAccessFile(outFile, "r");

        byte[] data = new byte[(int) outRaf.length()];

        int outLength = outRaf.read(data);

        inRaf.close();
        outRaf.close();
        if (outLength == 0) {
            throw new IOException("Data reading failed");
        }
        return data;
    } catch (FileNotFoundException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    }
    return new byte[0];
}

From source file:com.remobile.file.LocalFilesystem.java

@Override
public long truncateFileAtURL(LocalFilesystemURL inputURL, long size) throws IOException {
    File file = new File(filesystemPathForURL(inputURL));

    if (!file.exists()) {
        throw new FileNotFoundException("File at " + inputURL.uri + " does not exist.");
    }

    RandomAccessFile raf = new RandomAccessFile(filesystemPathForURL(inputURL), "rw");
    try {
        if (raf.length() >= size) {
            FileChannel channel = raf.getChannel();
            channel.truncate(size);
            return size;
        }

        return raf.length();
    } finally {
        raf.close();
    }

}
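
Note that closing the RandomAccessFile in the finally block also closes the FileChannel obtained from getChannel(), so the truncated channel needs no separate close() call.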

From source file:com.kkbox.toolkit.image.KKImageRequest.java

private void cryptToFile(String sourceFilePath, String targetFilePath) throws Exception {
    // FIXME: should have two functions: decryptToFile and encryptToFile
    RandomAccessFile sourceFile = new RandomAccessFile(sourceFilePath, "r");
    RandomAccessFile targetFile = new RandomAccessFile(targetFilePath, "rw");
    int readLength;
    do {
        readLength = sourceFile.read(buffer, 0, BUFFER_SIZE);
        if (readLength != -1) {
            if (cipher != null) {
                // transform only the bytes actually read, and write the whole transformed output
                byte[] transformed = cipher.doFinal(buffer, 0, readLength);
                targetFile.write(transformed, 0, transformed.length);
            } else {
                targetFile.write(buffer, 0, readLength);
            }
        }
    } while (readLength != -1);
    sourceFile.close();
    targetFile.close();
}
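
As the FIXME suggests, this logic could be split into dedicated helpers. Below is a minimal sketch of one such helper (the method name transformToFile is hypothetical), assuming a javax.crypto.Cipher already initialized for either encryption or decryption; it uses update() per chunk and a single doFinal() at the end instead of calling doFinal() on every chunk:

private void transformToFile(Cipher cipher, String sourceFilePath, String targetFilePath) throws Exception {
    RandomAccessFile sourceFile = new RandomAccessFile(sourceFilePath, "r");
    RandomAccessFile targetFile = new RandomAccessFile(targetFilePath, "rw");
    try {
        byte[] chunk = new byte[8192];
        int readLength;
        while ((readLength = sourceFile.read(chunk, 0, chunk.length)) != -1) {
            byte[] transformed = cipher.update(chunk, 0, readLength);
            if (transformed != null) { // update() may buffer input and return null
                targetFile.write(transformed);
            }
        }
        targetFile.write(cipher.doFinal()); // flush any buffered or padded final block
    } finally {
        sourceFile.close();
        targetFile.close();
    }
}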

From source file:com.baidu.terminator.manager.service.LogServiceImpl.java

@Override
public Log readLog(int linkId, long offset) throws IOException {
    String logFileLocation = LinkLogger.getLogFileLocation(linkId);
    FileUtils.createFile(logFileLocation);

    RandomAccessFile raf = null;
    List<String> lines = new ArrayList<String>();
    long length = 0;

    try {
        raf = new RandomAccessFile(logFileLocation, "r");
        raf.seek(offset);
        length = raf.length();

        long point = raf.getFilePointer();
        while (point < length) {
            String line = raf.readLine();
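            // readLine() decodes bytes as ISO-8859-1, so re-encode to recover the UTF-8 text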
            String utf8Line = new String(line.getBytes("8859_1"), "utf-8");
            lines.add(utf8Line);

            if (point - offset >= MAX_READ_BYTES) {
                length = point;
                break;
            }
            point = raf.getFilePointer();
        }
    } finally {
        if (raf != null) {
            raf.close();
        }
    }

    Log log = new Log();
    log.setLogLocation(logFileLocation);
    log.setOffset(length);
    log.setContent(lines);
    return log;
}

From source file:com.netease.qa.emmagee.utils.CpuInfo.java

/**
 * Read the CPU status of the process.
 * 
 * @throws FileNotFoundException
 */
public void readCpuStat() {
    String processPid = Integer.toString(pid);
    String cpuStatPath = "/proc/" + processPid + "/stat";
    try {
        // monitor cpu stat of certain process
        RandomAccessFile processCpuInfo = new RandomAccessFile(cpuStatPath, "r");
        String line = "";
        StringBuffer stringBuffer = new StringBuffer();
        stringBuffer.setLength(0);
        while ((line = processCpuInfo.readLine()) != null) {
            stringBuffer.append(line + "\n");
        }
        String[] tok = stringBuffer.toString().split(" ");
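        // /proc/<pid>/stat fields 14 (utime) and 15 (stime) are tok[13] and tok[14] here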
        processCpu = Long.parseLong(tok[13]) + Long.parseLong(tok[14]);
        processCpuInfo.close();
    } catch (FileNotFoundException e) {
        Log.w(LOG_TAG, "FileNotFoundException: " + e.getMessage());
    } catch (IOException e) {
        e.printStackTrace();
    }
    readTotalCpuStat();
}

From source file:org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.BlockPoolSlice.java

/**
 * Find out the number of bytes in the block that match its crc.
 * <p/>
 * This algorithm assumes that data corruption caused by unexpected
 * datanode shutdown occurs only in the last crc chunk. So it checks
 * only the last chunk.
 *
 * @param blockFile
 *     the block file
 * @param genStamp
 *     generation stamp of the block
 * @return the number of valid bytes
 */
private long validateIntegrityAndSetLength(File blockFile, long genStamp) {
    DataInputStream checksumIn = null;
    InputStream blockIn = null;
    try {
        final File metaFile = FsDatasetUtil.getMetaFile(blockFile, genStamp);
        long blockFileLen = blockFile.length();
        long metaFileLen = metaFile.length();
        int crcHeaderLen = DataChecksum.getChecksumHeaderSize();
        if (!blockFile.exists() || blockFileLen == 0 || !metaFile.exists() || metaFileLen < crcHeaderLen) {
            return 0;
        }
        checksumIn = new DataInputStream(
                new BufferedInputStream(new FileInputStream(metaFile), HdfsConstants.IO_FILE_BUFFER_SIZE));

        // read and handle the common header here. For now just a version
        BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
        short version = header.getVersion();
        if (version != BlockMetadataHeader.VERSION) {
            FsDatasetImpl.LOG
                    .warn("Wrong version (" + version + ") for metadata file " + metaFile + " ignoring ...");
        }
        DataChecksum checksum = header.getChecksum();
        int bytesPerChecksum = checksum.getBytesPerChecksum();
        int checksumSize = checksum.getChecksumSize();
        long numChunks = Math.min((blockFileLen + bytesPerChecksum - 1) / bytesPerChecksum,
                (metaFileLen - crcHeaderLen) / checksumSize);
        if (numChunks == 0) {
            return 0;
        }
        IOUtils.skipFully(checksumIn, (numChunks - 1) * checksumSize);
        blockIn = new FileInputStream(blockFile);
        long lastChunkStartPos = (numChunks - 1) * bytesPerChecksum;
        IOUtils.skipFully(blockIn, lastChunkStartPos);
        int lastChunkSize = (int) Math.min(bytesPerChecksum, blockFileLen - lastChunkStartPos);
        byte[] buf = new byte[lastChunkSize + checksumSize];
        checksumIn.readFully(buf, lastChunkSize, checksumSize);
        IOUtils.readFully(blockIn, buf, 0, lastChunkSize);

        checksum.update(buf, 0, lastChunkSize);
        long validFileLength;
        if (checksum.compare(buf, lastChunkSize)) { // last chunk matches crc
            validFileLength = lastChunkStartPos + lastChunkSize;
        } else { // last chunk is corrupt
            validFileLength = lastChunkStartPos;
        }

        // truncate if extra bytes are present without CRC
        if (blockFile.length() > validFileLength) {
            RandomAccessFile blockRAF = new RandomAccessFile(blockFile, "rw");
            try {
                // truncate blockFile
                blockRAF.setLength(validFileLength);
            } finally {
                blockRAF.close();
            }
        }

        return validFileLength;
    } catch (IOException e) {
        FsDatasetImpl.LOG.warn(e);
        return 0;
    } finally {
        IOUtils.closeStream(checksumIn);
        IOUtils.closeStream(blockIn);
    }
}

From source file:com.piaoyou.util.FileUtil.java

public static String[] getLastLines(File file, int linesToReturn) throws IOException, FileNotFoundException {

    final int AVERAGE_CHARS_PER_LINE = 250;
    final int BYTES_PER_CHAR = 2;

    RandomAccessFile randomAccessFile = null;
    StringBuffer buffer = new StringBuffer(linesToReturn * AVERAGE_CHARS_PER_LINE);
    int lineTotal = 0;
    try {
        randomAccessFile = new RandomAccessFile(file, "r");
        long byteTotal = randomAccessFile.length();
        long byteEstimateToRead = linesToReturn * AVERAGE_CHARS_PER_LINE * BYTES_PER_CHAR;

        long offset = byteTotal - byteEstimateToRead;
        if (offset < 0) {
            offset = 0;
        }

        randomAccessFile.seek(offset);
        //log.debug("SKIP IS ::" + offset);

        String line = null;
        String lineUTF8 = null;
        while ((line = randomAccessFile.readLine()) != null) {
            lineUTF8 = new String(line.getBytes("ISO8859_1"), "UTF-8");
            lineTotal++;
            buffer.append(lineUTF8).append('\n');
        }
    } finally {
        if (randomAccessFile != null) {
            try {
                randomAccessFile.close();
            } catch (IOException ex) {
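                // ignore failures while closing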
            }
        }
    }

    String[] resultLines = new String[linesToReturn];
    BufferedReader in = null;
    try {
        in = new BufferedReader(new StringReader(buffer.toString()));

        int start = lineTotal /* + 2 */ - linesToReturn; // e.g. 55 total - 10 requested = skip 45
        if (start < 0)
            start = 0; // fewer lines than requested; start from the beginning
        for (int i = 0; i < start; i++) {
            in.readLine(); // skip lines before the requested tail
        }

        int i = 0;
        String line = null;
        while ((line = in.readLine()) != null) {
            resultLines[i] = line;
            i++;
        }
    } catch (IOException ie) {
        log.error("Error" + ie);
        throw ie;
    } finally {
        if (in != null) {
            try {
                in.close();
            } catch (IOException ex) {
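                // ignore close failures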
            }
        }
    }
    return resultLines;
}