Example usage for org.apache.hadoop.fs FSDataOutputStream getWrappedStream

Introduction

On this page you can find example usage for org.apache.hadoop.fs FSDataOutputStream getWrappedStream.

Prototype

@InterfaceAudience.LimitedPrivate({ "HDFS" })
public OutputStream getWrappedStream() 

Document

Get a reference to the wrapped output stream.
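
Because the method is annotated LimitedPrivate for HDFS, callers in the examples below typically cast the returned stream to DFSOutputStream to reach HDFS-specific operations such as hsync with UPDATE_LENGTH. A minimal sketch of that pattern (the helper method is illustrative and not taken from any of the source files below):

import java.io.IOException;
import java.io.OutputStream;
import java.util.EnumSet;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

// Illustrative helper: hsync and update the file length on the NameNode when
// the wrapped stream is an HDFS DFSOutputStream, otherwise fall back to hsync().
static void syncAndUpdateLength(FSDataOutputStream out) throws IOException {
    OutputStream wrapped = out.getWrappedStream();
    if (wrapped instanceof DFSOutputStream) {
        ((DFSOutputStream) wrapped).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    } else {
        out.hsync();
    }
}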

Usage

From source file:com.huayu.metis.flume.sink.hdfs.AbstractHDFSWriter.java

License:Apache License

/**
 * Find the 'getNumCurrentReplicas' on the passed <code>os</code> stream.
 * @return Method or null.
 */
private Method reflectGetNumCurrentReplicas(FSDataOutputStream os) {
    Method m = null;
    if (os != null) {
        Class<? extends OutputStream> wrappedStreamClass = os.getWrappedStream().getClass();
        try {
            m = wrappedStreamClass.getDeclaredMethod("getNumCurrentReplicas", new Class<?>[] {});
            m.setAccessible(true);
        } catch (NoSuchMethodException e) {
            logger.info("FileSystem's output stream doesn't support"
                    + " getNumCurrentReplicas; --HDFS-826 not available; fsOut=" + wrappedStreamClass.getName()
                    + "; err=" + e);
        } catch (SecurityException e) {
            logger.info("Doesn't have access to getNumCurrentReplicas on "
                    + "FileSystems's output stream --HDFS-826 not available; fsOut="
                    + wrappedStreamClass.getName(), e);
            m = null; // could happen on setAccessible()
        }
    }
    if (m != null) {
        logger.debug("Using getNumCurrentReplicas--HDFS-826");
    }
    return m;
}
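
Once the Method has been looked up this way, it is usually invoked reflectively against the same wrapped stream to read the current pipeline replica count. A hedged sketch of that companion call (the helper below is illustrative, not part of the source file above, and assumes the same logger field as that class):

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Illustrative companion: invoke the cached getNumCurrentReplicas Method on the
// wrapped stream, returning -1 if the call is unavailable or fails.
private int getNumCurrentReplicas(FSDataOutputStream os, Method refGetNumCurrentReplicas) {
    if (os != null && refGetNumCurrentReplicas != null) {
        try {
            Object repl = refGetNumCurrentReplicas.invoke(os.getWrappedStream());
            if (repl instanceof Number) {
                return ((Number) repl).intValue();
            }
        } catch (IllegalAccessException | InvocationTargetException e) {
            logger.warn("Error invoking getNumCurrentReplicas on the wrapped stream", e);
        }
    }
    return -1;
}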

From source file:com.mellanox.r4h.TestHFlush.java

License:Apache License

/**
 * Test hsync (with updating block length in NameNode) while no data is
 * actually written yet
 */
@Test
public void hSyncUpdateLength_00() throws IOException {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    DistributedFileSystem fileSystem = (DistributedFileSystem) cluster.getFileSystem();

    try {
        Path path = new Path(fName);
        FSDataOutputStream stm = fileSystem.create(path, true, 4096, (short) 2,
                MiniDFSClusterBridge.getAppendTestUtil_BLOCK_SIZE());
        System.out.println("Created file " + path.toString());
        ((DFSOutputStream) stm.getWrappedStream()).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
        long currentFileLength = fileSystem.getFileStatus(path).getLen();
        assertEquals(0L, currentFileLength);
        stm.close();
    } finally {
        fileSystem.close();
        cluster.shutdown();
    }
}

From source file:com.mellanox.r4h.TestHFlush.java

License:Apache License

/**
 * The method starts new cluster with defined Configuration; creates a file
 * with specified block_size and writes 10 equal sections in it; it also calls
 * hflush/hsync after each write and throws an IOException in case of an error.
 *
 * @param conf cluster configuration
 * @param fileName of the file to be created and processed as required
 * @param block_size value to be used for the file's creation
 * @param replicas is the number of replicas
 * @param isSync true to call hsync, false to call hflush
 * @param syncFlags specify the semantic of the sync/flush
 * @throws IOException in case of any errors
 */
public static void doTheJob(Configuration conf, final String fileName, long block_size, short replicas,
        boolean isSync, EnumSet<SyncFlag> syncFlags) throws IOException {
    byte[] fileContent;
    final int SECTIONS = 10;

    fileContent = AppendTestUtil.initBuffer(MiniDFSClusterBridge.getAppendTestUtils_FILE_SIZE());
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(replicas).build();
    // Make sure we work with DFS in order to utilize all its functionality
    DistributedFileSystem fileSystem = (DistributedFileSystem) cluster.getFileSystem();

    FSDataInputStream is;
    try {
        Path path = new Path(fileName);
        FSDataOutputStream stm = fileSystem.create(path, false, 4096, replicas, block_size);
        System.out.println("Created file " + fileName);

        int tenth = MiniDFSClusterBridge.getAppendTestUtils_FILE_SIZE() / SECTIONS;
        int rounding = MiniDFSClusterBridge.getAppendTestUtils_FILE_SIZE() - tenth * SECTIONS;
        for (int i = 0; i < SECTIONS; i++) {
            System.out.println(
                    "Writing " + (tenth * i) + " to " + (tenth * (i + 1)) + " section to file " + fileName);
            // write to the file
            stm.write(fileContent, tenth * i, tenth);

            // Wait while hflush/hsync pushes all packets through built pipeline
            if (isSync) {
                ((DFSOutputStream) stm.getWrappedStream()).hsync(syncFlags);
            } else {
                ((DFSOutputStream) stm.getWrappedStream()).hflush();
            }

            // Check file length if updatelength is required
            if (isSync && syncFlags.contains(SyncFlag.UPDATE_LENGTH)) {
                long currentFileLength = fileSystem.getFileStatus(path).getLen();
                assertEquals("File size doesn't match for hsync/hflush with updating the length",
                        tenth * (i + 1), currentFileLength);
            }
            byte[] toRead = new byte[tenth];
            byte[] expected = new byte[tenth];
            System.arraycopy(fileContent, tenth * i, expected, 0, tenth);
            // Open the same file for read. Need to create new reader after every write operation(!)
            is = fileSystem.open(path);
            is.seek(tenth * i);
            int readBytes = is.read(toRead, 0, tenth);
            System.out.println("Has read " + readBytes);
            assertTrue("Should've get more bytes", (readBytes > 0) && (readBytes <= tenth));
            is.close();
            checkData(toRead, 0, readBytes, expected, "Partial verification");
        }
        System.out.println("Writing " + (tenth * SECTIONS) + " to " + (tenth * SECTIONS + rounding)
                + " section to file " + fileName);
        stm.write(fileContent, tenth * SECTIONS, rounding);
        stm.close();

        assertEquals("File size doesn't match ", MiniDFSClusterBridge.getAppendTestUtils_FILE_SIZE(),
                fileSystem.getFileStatus(path).getLen());
        AppendTestUtil.checkFullFile(fileSystem, path, fileContent.length, fileContent, "hflush()");
    } finally {
        fileSystem.close();
        cluster.shutdown();
    }
}

From source file:com.mellanox.r4h.TestReadWhileWriting.java

License:Apache License

/** Test reading while writing. */
@Test
public void pipeline_02_03() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);

    // create cluster
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    try {
        //change the lease limits.
        cluster.setLeasePeriod(SOFT_LEASE_LIMIT, HARD_LEASE_LIMIT);

        //wait for the cluster
        cluster.waitActive();
        final FileSystem fs = cluster.getFileSystem();
        final Path p = new Path(DIR, "file1");
        final int half = BLOCK_SIZE / 2;

        //a. On Machine M1, Create file. Write half block of data.
        //   Invoke DFSOutputStream.hflush() on the dfs file handle.
        //   Do not close file yet.
        {
            final FSDataOutputStream out = fs.create(p, true,
                    fs.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), (short) 3,
                    BLOCK_SIZE);
            write(out, 0, half);

            //hflush
            ((DFSOutputStream) out.getWrappedStream()).hflush();
        }

        //b. On another machine M2, open file and verify that the half-block
        //   of data can be read successfully.
        checkFile(p, half, conf);
        MiniDFSClusterBridge.getAppendTestUtilLOG().info("leasechecker.interruptAndJoin()");
        ((DistributedFileSystem) fs).dfs.getLeaseRenewer().interruptAndJoin();

        //c. On M1, append another half block of data.  Close file on M1.
        {
            //sleep to let the lease expire.
            Thread.sleep(2 * SOFT_LEASE_LIMIT);

            final UserGroupInformation current = UserGroupInformation.getCurrentUser();
            final UserGroupInformation ugi = UserGroupInformation
                    .createUserForTesting(current.getShortUserName() + "x", new String[] { "supergroup" });
            final DistributedFileSystem dfs = ugi.doAs(new PrivilegedExceptionAction<DistributedFileSystem>() {
                @Override
                public DistributedFileSystem run() throws Exception {
                    return (DistributedFileSystem) FileSystem.newInstance(conf);
                }
            });
            final FSDataOutputStream out = append(dfs, p);
            write(out, 0, half);
            out.close();
        }

        //d. On M2, open file and read 1 block of data from it. Close file.
        checkFile(p, 2 * half, conf);
    } finally {
        cluster.shutdown();
    }
}

From source file:io.hops.erasure_coding.Encoder.java

License:Apache License

/**
 * The interface to use to generate a parity file.
 * This method can be called multiple times with the same Encoder object,
 * thus allowing reuse of the buffers allocated by the Encoder object.
 *
 * @param fs
 *     The filesystem containing the source file.
 * @param srcFile
 *     The source file.
 * @param parityFile
 *     The parity file to be generated.
 */
public void encodeFile(Configuration jobConf, FileSystem fs, Path srcFile, FileSystem parityFs, Path parityFile,
        short parityRepl, long numStripes, long blockSize, Progressable reporter, StripeReader sReader)
        throws IOException {
    long expectedParityBlocks = numStripes * codec.parityLength;
    long expectedParityFileSize = numStripes * blockSize * codec.parityLength;

    if (!parityFs.mkdirs(parityFile.getParent())) {
        throw new IOException("Could not create parent dir " + parityFile.getParent());
    }
    // delete destination if exists
    if (parityFs.exists(parityFile)) {
        parityFs.delete(parityFile, false);
    }

    // Writing out a large parity file at replication 1 is difficult since
    // some datanode could die and we would not be able to close() the file.
    // So write at replication 2 and then reduce it after close() succeeds.
    short tmpRepl = parityRepl;
    if (expectedParityBlocks >= conf.getInt("raid.encoder.largeparity.blocks", 20)) {
        if (parityRepl == 1) {
            tmpRepl = 2;
        }
    }
    FSDataOutputStream out = parityFs.create(parityFile, true, conf.getInt("io.file.buffer.size", 64 * 1024),
            tmpRepl, blockSize);

    DFSOutputStream dfsOut = (DFSOutputStream) out.getWrappedStream();
    dfsOut.enableParityStream(codec.getStripeLength(), codec.getParityLength(), srcFile.toUri().getPath());

    try {
        encodeFileToStream(fs, srcFile, parityFile, sReader, blockSize, out, reporter);
        out.close();
        out = null;
        LOG.info("Wrote parity file " + parityFile);
        FileStatus tmpStat = parityFs.getFileStatus(parityFile);
        if (tmpStat.getLen() != expectedParityFileSize) {
            throw new IOException("Expected parity size " + expectedParityFileSize + " does not match actual "
                    + tmpStat.getLen());
        }
        if (tmpRepl > parityRepl) {
            parityFs.setReplication(parityFile, parityRepl);
        }
        LOG.info("Wrote parity file " + parityFile);
    } finally {
        if (out != null) {
            out.close();
        }
    }
}

From source file:org.apache.flume.sink.customhdfs.MockFsDataOutputStream.java

License:Apache License

public MockFsDataOutputStream(FSDataOutputStream wrapMe, boolean closeSucceed) throws IOException {
    super(wrapMe.getWrappedStream(), null);
    this.closeSucceed = closeSucceed;
}

From source file:org.apache.flume.sink.hdfs.MockFsDataOutputStream.java

License:Apache License

public MockFsDataOutputStream(FSDataOutputStream wrapMe, int numberOfClosesRequired) throws IOException {
    super(wrapMe.getWrappedStream(), null);

    this.numberOfClosesRequired = numberOfClosesRequired;

}

From source file:org.apache.flume.sink.hdfs.MockFsDataOutputStreamCloseRetryWrapper.java

License:Apache License

public MockFsDataOutputStreamCloseRetryWrapper(FSDataOutputStream wrapMe, int numberOfClosesRequired,
        boolean throwExceptionsOfFailedClose) throws IOException {
    super(wrapMe.getWrappedStream(), null);

    this.numberOfClosesRequired = numberOfClosesRequired;
    this.throwExceptionsOfFailedClose = throwExceptionsOfFailedClose;

}
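
In each of these Flume mocks the real stream's getWrappedStream() result is re-wrapped so that close() behaviour can be controlled from a test. A hedged sketch of how such a wrapper might be used (the file system, path, and byte array are assumed to come from the surrounding test):

// Illustrative test usage: require several close() calls before the wrapper
// reports success, exercising the close-retry logic of the code under test.
FSDataOutputStream realOut = fileSystem.create(new Path("/tmp/close-retry-test"));
MockFsDataOutputStream mockOut = new MockFsDataOutputStream(realOut, 3);
mockOut.write(payload);
mockOut.close(); // the mock decides whether this close succeeds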