Example usage for org.apache.hadoop.fs FSDataInputStream releaseBuffer

Introduction

On this page you can find example usage of the org.apache.hadoop.fs FSDataInputStream releaseBuffer method.

Prototype

@Override
public void releaseBuffer(ByteBuffer buffer)
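
A buffer obtained from the pool-backed read(ByteBufferPool, int, EnumSet&lt;ReadOption&gt;) call must be handed back through releaseBuffer once its contents have been consumed, as the usage examples below illustrate. Here is a minimal, self-contained sketch of that read/release pattern; the class name ReleaseBufferSketch, the 64 KB read size, and the command-line path argument are illustrative choices, not taken from the examples on this page.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.io.ByteBufferPool;
import org.apache.hadoop.io.ElasticByteBufferPool;

public class ReleaseBufferSketch {

    // Counts the bytes of a file using pool-backed reads; every non-null buffer
    // returned by read() is handed back to the stream with releaseBuffer().
    public static long countBytes(FileSystem fs, Path path) throws IOException {
        ByteBufferPool pool = new ElasticByteBufferPool();
        long total = 0;
        try (FSDataInputStream in = fs.open(path)) {
            ByteBuffer buf;
            // read() returns null at end of stream.
            while ((buf = in.read(pool, 65536, EnumSet.of(ReadOption.SKIP_CHECKSUMS))) != null) {
                total += buf.remaining();
                in.releaseBuffer(buf);
            }
        }
        return total;
    }

    public static void main(String[] args) throws IOException {
        // The path argument is illustrative; pass any readable HDFS file.
        FileSystem fs = FileSystem.get(new Configuration());
        System.out.println(countBytes(fs, new Path(args[0])));
    }
}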

Usage

From source file: HdfsCacheReader.java

License: Apache License

public int run(String[] args) throws Exception {
    if (args.length < 1) {
        System.err.println("HdfsReader [FileSize i.e. 1g/10g/100g/200g]");
        return 1;
    }

    double fileSize;
    double fileSizeInMB;
    if (args[0].equals("1g")) {
        fileSize = 1073741824.0;
        fileSizeInMB = 1024.0;
    } else if (args[0].equals("10g")) {
        fileSize = 10737418240.0;
        fileSizeInMB = 10240.0;
    } else if (args[0].equals("100g")) {
        fileSize = 107374182400.0;
        fileSizeInMB = 102400.0;
    } else if (args[0].equals("200g")) {
        fileSize = 214748364800.0;
        fileSizeInMB = 204800.0;
    } else {
        throw new IllegalArgumentException("Invalid arg: " + args[0]);
    }

    String fileName = "cacheRead-" + args[0] + "-avg.txt";
    File avgFile = new File(fileName);
    PrintWriter avgPW = new PrintWriter(avgFile);
    fileName = "cacheRead-" + args[0] + "-min.txt";
    File minFile = new File(fileName);
    PrintWriter minPW = new PrintWriter(minFile);
    fileName = "cacheRead-" + args[0] + "-max.txt";
    File maxFile = new File(fileName);
    PrintWriter maxPW = new PrintWriter(maxFile);

    int numIters = 10;
    int bufferSize = 65536;
    long[] blockSize = new long[] { 67108864, 134217728, 268435456, 536870912, 1073741824 };
    short[] replication = new short[] { 1, 4 };
    String hdfsFile = "/hdfs_test/" + args[0] + "/1.in";
    Configuration conf = getConf();
    FileSystem fs = FileSystem.get(conf);
    Path hdfsFilePath = new Path(hdfsFile);

    for (int i = 0; i < 5; i++) { // blockSize
        for (int j = 0; j < 2; j++) { // replication
            OutputStream os = fs.create(hdfsFilePath, true, bufferSize, replication[j], blockSize[i]);
            byte[] buf = new byte[bufferSize];
            for (int m = 0; m < bufferSize; m += 4) {
                buf[m] = (byte) m;
            }
            double numBufPerFile = fileSize / (double) bufferSize;

            for (double m = 0.0; m < numBufPerFile; m++) {
                os.write(buf);
            }
            os.close();
            String cmdStr = "/usr/local/hadoop/bin/hdfs cacheadmin -addDirective -path " + hdfsFile
                    + " -pool hdfs_test";
            Process p = Runtime.getRuntime().exec(cmdStr);
            p.waitFor();
            String cmdOutLine = "";
            StringBuffer cmdOut = new StringBuffer();
            BufferedReader cmdOutReader = new BufferedReader(new InputStreamReader(p.getInputStream()));
            while ((cmdOutLine = cmdOutReader.readLine()) != null) {
                cmdOut.append(cmdOutLine + "\n");
            }
            // System.out.println (cmdOut.toString());

            long avg = 0, min = Long.MAX_VALUE, max = Long.MIN_VALUE;
            for (int k = 0; k < numIters; k++) {
                FSDataInputStream in = fs.open(hdfsFilePath);
                ByteBuffer bbuf = null;
                ElasticByteBufferPool ebbp = new ElasticByteBufferPool();
                long startTime = System.currentTimeMillis();
                // Zero-copy read loop: every buffer handed out by read() is returned via releaseBuffer().
                while ((bbuf = in.read(ebbp, bufferSize, EnumSet.of(ReadOption.SKIP_CHECKSUMS))) != null) {
                    in.releaseBuffer(bbuf);
                }
                long endTime = System.currentTimeMillis();
                in.close();
                long duration = (endTime - startTime);
                avg += duration;
                if (duration < min) {
                    min = duration;
                }
                if (duration > max) {
                    max = duration;
                }
            }
            // write result to output
            double avgBW = fileSizeInMB * 1000.0 * (double) numIters / (double) avg;
            avgPW.print(avgBW);
            avgPW.print("\t");
            double minBW = fileSizeInMB * 1000.0 / (double) max;
            minPW.print(minBW);
            minPW.print("\t");
            double maxBW = fileSizeInMB * 1000.0 / (double) min;
            maxPW.print(maxBW);
            maxPW.print("\t");
            cmdStr = "/usr/local/hadoop/bin/hdfs cacheadmin -removeDirectives -path " + hdfsFile;
            p = Runtime.getRuntime().exec(cmdStr);
            p.waitFor();
            cmdOutLine = "";
            cmdOut.setLength(0);
            cmdOutReader = new BufferedReader(new InputStreamReader(p.getInputStream()));
            while ((cmdOutLine = cmdOutReader.readLine()) != null) {
                cmdOut.append(cmdOutLine + "\n");
            }
            // System.out.println (cmdOut.toString());
            fs.delete(hdfsFilePath, true);
        }
        avgPW.println();
        minPW.println();
        maxPW.println();
    }
    avgPW.close();
    minPW.close();
    maxPW.close();
    return 0;
}

From source file: com.cloudera.ByteBufferRecordReader.java

License: Apache License

@Override
public synchronized boolean nextKeyValue() throws IOException {
    if (key == null) {
        key = new LongWritable();
    }
    if (value == null) {
        value = new ByteBufferWritable();
    }
    if (pos >= end) {
        return false;
    }

    int numBytesRead = 0;
    // Use zero-copy ByteBuffer reads if available
    if (inputStream instanceof FSDataInputStream) {
        FSDataInputStream fsIn = (FSDataInputStream) inputStream;
        ByteBuffer buf = fsIn.read(bufferPool, (int) (end - start), readOption);
        numBytesRead += buf.limit();
        pos += buf.limit();
        // Update stats
        InputStream wrappedStream = fsIn.getWrappedStream();
        if (wrappedStream instanceof DFSInputStream) {
            DFSInputStream dfsIn = (DFSInputStream) wrappedStream;
            updateStats(dfsIn.getReadStatistics());
        }
        // Switch out the buffers
        if (value.getBuffer() != null) {
            fsIn.releaseBuffer(value.getBuffer());
        }
        value.setByteBuffer(buf);
    }
    // Fallback to normal byte[] based reads with a copy to the ByteBuffer
    else {
        byte[] b = new byte[(int) (end - start)];
        IOUtils.readFully(inputStream, b);
        numBytesRead += b.length;
        pos += b.length;
        value.setByteBuffer(ByteBuffer.wrap(b));
    }

    return numBytesRead > 0;
}

From source file: parquet.hadoop.Zcopy.java

License: Apache License

public static int getInt(FSDataInputStream f) throws IOException {
    ByteBuffer int32Buf = getBuf(f, 4).order(ByteOrder.LITTLE_ENDIAN);
    // Fast path: the zero-copy read returned all four bytes in a single buffer.
    if (int32Buf.remaining() == 4) {
        final int res = int32Buf.getInt();
        f.releaseBuffer(int32Buf);
        return res;
    }
    // Slow path: copy short reads into a heap buffer, releasing each pooled buffer after copying.
    ByteBuffer tmpBuf = int32Buf;
    int32Buf = ByteBuffer.allocate(4).order(ByteOrder.LITTLE_ENDIAN);
    int32Buf.put(tmpBuf);
    f.releaseBuffer(tmpBuf);
    while (int32Buf.hasRemaining()) {
        tmpBuf = getBuf(f, int32Buf.remaining());
        int32Buf.put(tmpBuf);
        f.releaseBuffer(tmpBuf);
    }
    int32Buf.flip(); // rewind before reading; getInt() would otherwise underflow
    return int32Buf.getInt();
}