Example usage for org.apache.hadoop.fs FSDataOutputStream hflush

List of usage examples for org.apache.hadoop.fs FSDataOutputStream hflush

Introduction

On this page you can find example usage for org.apache.hadoop.fs FSDataOutputStream hflush.

Prototype

@Override
public void hflush() throws IOException

Usage
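
Before the per-project examples, here is a minimal sketch of the typical call pattern. The path and configuration below are illustrative and not taken from any of the projects: hflush() pushes the client-side buffers out to the datanodes so that new readers can see the data without the stream being closed.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public static void hflushSketch() throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    Path path = new Path("/tmp/hflush-example"); // hypothetical path
    try (FSDataOutputStream out = fs.create(path, true)) {
        out.writeBytes("partial record");
        // Push buffered data out to the datanodes so new readers can see it,
        // without closing the stream. Note that hflush() does not update the
        // file length reported by the NameNode.
        out.hflush();
    }
}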

From source file: alluxio.client.hadoop.FileSystemRenameIntegrationTest.java

License: Apache License

@Test
@Ignore
// TODO(jiri): The test logic below does not work in the presence of transparent naming.
// The current implementation renames files on UFS if they are marked as persisted. They are
// marked as persisted when they are closed. Thus, if the Alluxio path of the file being
// written to changes before it is closed, renaming the temporary underlying file to its final
// destination fails.
public void basicRenameTest7() throws Exception {
    // Rename /dirA to /dirB, /dirA/fileA should become /dirB/fileA even if it was not closed

    Path dirA = new Path("/dirA");
    Path dirB = new Path("/dirB");
    Path fileA = new Path("/dirA/fileA");
    Path finalDst = new Path("/dirB/fileA");

    sTFS.mkdirs(dirA);
    FSDataOutputStream o = sTFS.create(fileA);
    o.writeBytes("Test Bytes");
    // Due to Hadoop 1 support we stick with the deprecated sync(). If we drop that support,
    // FSDataOutputStream.hflush() will be the only call needed.
    //#ifdef HADOOP1
    o.sync();
    //#else
    o.hflush();
    //#endif

    Assert.assertTrue(sTFS.rename(dirA, dirB));

    Assert.assertFalse(sTFS.exists(dirA));
    Assert.assertFalse(sTFS.exists(fileA));
    Assert.assertTrue(sTFS.exists(dirB));
    Assert.assertTrue(sTFS.exists(finalDst));

    o.close();

    Assert.assertFalse(sTFS.exists(dirA));
    Assert.assertFalse(sTFS.exists(fileA));
    Assert.assertTrue(sTFS.exists(dirB));
    Assert.assertTrue(sTFS.exists(finalDst));
    cleanup(sTFS);
}
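
The #ifdef blocks above pick between the deprecated sync() (Hadoop 1) and hflush() (Hadoop 2+) at build time. As a hedged aside, and not something the Alluxio test itself does, a runtime fallback is also possible when code is compiled against a Hadoop 2.x API (where both methods exist) but may run against older Hadoop 1 jars:

// Sketch only: prefer hflush(), fall back to the deprecated sync() when the
// runtime class files predate hflush(). Assumes compilation against Hadoop 2.x.
static void portableFlush(FSDataOutputStream out) throws IOException {
    try {
        out.hflush();
    } catch (NoSuchMethodError e) {
        // Running against a Hadoop 1 FSDataOutputStream that lacks hflush().
        out.sync();
    }
}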

From source file: co.cask.cdap.common.logging.SyncTest.java

License: Apache License

@Test
@Ignore
public void testSync() throws IOException {
    FileSystem fs = FileSystem.get(config);
    // create a file and write n bytes, then sync
    Path path = new Path("/myfile");
    FSDataOutputStream out = fs.create(path, false, 4096, (short) 2, 4096L);
    int numBytes = 5000;
    for (int i = 0; i < numBytes; i++) {
        out.write((byte) i);
    }
    out.hflush();
    // verify the file is there
    Assert.assertTrue(fs.exists(path));
    // do not verify the length of the file, hflush() does not update that
    //Assert.assertEquals(numBytes, fs.getFileStatus(path).getLen());
    // read back and verify all bytes
    FSDataInputStream in = fs.open(path);
    byte[] buffer = new byte[numBytes];
    in.readFully(buffer);
    for (int i = 0; i < numBytes; i++) {
        Assert.assertEquals((byte) i, buffer[i]);
    }
    in.close();
    // now close the writer
    out.close();
}

From source file: com.curiousby.baoyou.cn.hadoop.HDFSUtils.java

License: Open Source License

/**
 * Appends content to a file on HDFS and flushes it.
 * @param dst     path of the file to append to
 * @param content text to append, written as UTF-8 bytes
 * @throws IOException
 */
public void writeAppendFile(String dst, String content) throws IOException {
    FSDataOutputStream out = fileSystem.append(new Path(dst));
    out.write(content.getBytes("UTF-8"));
    out.hflush();
    out.close();

}
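
A hypothetical call to this helper (the HDFSUtils construction below is an assumption, since the class's setup is not shown in this snippet):

// Assumed: an HDFSUtils instance whose fileSystem field is already initialized.
HDFSUtils hdfsUtils = new HDFSUtils();
hdfsUtils.writeAppendFile("/logs/app.log", "one more line\n");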

From source file: com.datatorrent.lib.io.fs.AbstractFileOutputOperator.java

License: Open Source License

/**
 * This method is used to force buffers to be flushed at the end of the window.
 * A plain flush() must be used on a local file system, so an if statement checks
 * the file system type and calls hflush() only for non-local file systems.
 * @param fsOutput
 * @throws IOException
 */
protected void flush(FSDataOutputStream fsOutput) throws IOException {
    if (fs instanceof LocalFileSystem || fs instanceof RawLocalFileSystem) {
        fsOutput.flush();
    } else {
        fsOutput.hflush();
    }
}

From source file: com.datatorrent.lib.io.fs.AbstractFileOutputOperator.java

License: Open Source License

@Override
public void endWindow() {
    for (String fileName : streamsCache.asMap().keySet()) {
        try {
            FSDataOutputStream fsOutput = streamsCache.get(fileName);
            fsOutput.hflush();
        } catch (ExecutionException e) {
            throw new RuntimeException(e);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    long currentTimeStamp = System.currentTimeMillis();
    totalTime += currentTimeStamp - lastTimeStamp;
    lastTimeStamp = currentTimeStamp;

    fileCounters.getCounter(Counters.TOTAL_TIME_ELAPSED).setValue(totalTime);
    fileCounters.getCounter(Counters.TOTAL_BYTES_WRITTEN).setValue(totalBytesWritten);
    context.setCounters(fileCounters);
}

From source file: com.datatorrent.stram.FSRecoveryHandler.java

License: Apache License

@Override
public DataOutputStream rotateLog() throws IOException {

    if (fs.exists(logBackupPath)) {
        // log backup is purged on snapshot/restore
        throw new AssertionError("Snapshot state prior to log rotation: " + logBackupPath);
    }

    if (fs.exists(logPath)) {
        LOG.debug("Creating log backup {}", logBackupPath);
        if (!fs.rename(logPath, logBackupPath)) {
            throw new IOException("Failed to rotate log: " + logPath);
        }
    }

    LOG.info("Creating {}", logPath);
    final FSDataOutputStream fsOutputStream;
    String scheme = null;
    try {
        scheme = fs.getScheme();
    } catch (UnsupportedOperationException e) {
        LOG.warn("{} doesn't implement getScheme() method", fs.getClass().getName());
    }
    if ("file".equals(scheme)) {
        // local FS does not support hflush and does not flush native stream
        FSUtil.mkdirs(fs, logPath.getParent());
        fsOutputStream = new FSDataOutputStream(
                new FileOutputStream(Path.getPathWithoutSchemeAndAuthority(logPath).toString()), null);
    } else {
        fsOutputStream = fs.create(logPath);
    }

    DataOutputStream osWrapper = new DataOutputStream(fsOutputStream) {
        @Override
        public void flush() throws IOException {
            super.flush();
            fsOutputStream.hflush();
        }

        @Override
        public void close() throws IOException {
            LOG.debug("Closing {}", logPath);
            super.close();
        }
    };
    return osWrapper;
}

From source file: com.mellanox.r4h.TestHFlush.java

License: Apache License

/** This creates a slow writer and checks to see
 * if pipeline heartbeats work fine.
 */
@Test
public void testPipelineHeartbeat() throws Exception {
    final int DATANODE_NUM = 2;
    final int fileLen = 6;
    Configuration conf = new HdfsConfiguration();
    final int timeout = 2000;
    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout);

    final Path p = new Path("/pipelineHeartbeat/foo");
    System.out.println("p=" + p);

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
    try {
        DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();

        byte[] fileContents = AppendTestUtil.initBuffer(fileLen);

        // create a new file.
        FSDataOutputStream stm = AppendTestUtil.createFile(fs, p, DATANODE_NUM);

        stm.write(fileContents, 0, 1);
        Thread.sleep(timeout);
        stm.hflush();
        System.out.println("Wrote 1 byte and hflush " + p);

        // write another byte
        Thread.sleep(timeout);
        stm.write(fileContents, 1, 1);
        stm.hflush();

        stm.write(fileContents, 2, 1);
        Thread.sleep(timeout);
        stm.hflush();

        stm.write(fileContents, 3, 1);
        Thread.sleep(timeout);
        stm.write(fileContents, 4, 1);
        stm.hflush();

        stm.write(fileContents, 5, 1);
        Thread.sleep(timeout);
        stm.close();

        // verify that entire file is good
        AppendTestUtil.checkFullFile(fs, p, fileLen, fileContents, "Failed to slowly write to a file");
    } finally {
        cluster.shutdown();
    }
}

From source file: com.mellanox.r4h.TestHFlush.java

License: Apache License

@Test
public void testHFlushInterrupted() throws Exception {
    final int DATANODE_NUM = 2;
    final int fileLen = 6;
    byte[] fileContents = AppendTestUtil.initBuffer(fileLen);
    Configuration conf = new HdfsConfiguration();
    final Path p = new Path("/hflush-interrupted");

    System.out.println("p=" + p);

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
    try {
        DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();

        // create a new file.
        FSDataOutputStream stm = AppendTestUtil.createFile(fs, p, DATANODE_NUM);

        stm.write(fileContents, 0, 2);
        Thread.currentThread().interrupt();
        try {
            stm.hflush();
            // If we made it past the hflush(), then that means that the ack made it back
            // from the pipeline before we got to the wait() call. In that case we should
            // still have interrupted status.
            assertTrue(Thread.currentThread().interrupted());
        } catch (InterruptedIOException ie) {
            System.out.println("Got expected exception during flush");
        }
        assertFalse(Thread.currentThread().interrupted());

        // Try again to flush should succeed since we no longer have interrupt status
        stm.hflush();

        // Write some more data and flush
        stm.write(fileContents, 2, 2);
        stm.hflush();

        // Write some data and close while interrupted

        stm.write(fileContents, 4, 2);
        Thread.currentThread().interrupt();
        try {
            stm.close();
            // If we made it past the close(), then that means that the ack made it back
            // from the pipeline before we got to the wait() call. In that case we should
            // still have interrupted status.
            assertTrue(Thread.currentThread().interrupted());
        } catch (InterruptedIOException ioe) {
            System.out.println("Got expected exception during close");
            // If we got the exception, we shouldn't have interrupted status anymore.
            assertFalse(Thread.currentThread().interrupted());

            // Now do a successful close.
            stm.close();
        }

        // verify that entire file is good
        AppendTestUtil.checkFullFile(fs, p, fileLen, fileContents, "Failed to deal with thread interruptions");
    } finally {
        cluster.shutdown();
    }
}

From source file: com.mellanox.r4h.TestWriteRead.java

License: Apache License

/**
 * Common routine to do a positioned read while the file is open for write.
 * After each iteration of writing, read the file from beginning to end.
 * Returns 0 on success, else the number of failures.
 */
private int testWriteAndRead(String fname, int loopN, int chunkSize, long readBeginPosition)
        throws IOException {

    int countOfFailures = 0;
    long byteVisibleToRead = 0;
    FSDataOutputStream out = null;

    byte[] outBuffer = new byte[BUFFER_SIZE];
    byte[] inBuffer = new byte[BUFFER_SIZE];

    for (int i = 0; i < BUFFER_SIZE; i++) {
        outBuffer[i] = (byte) (i & 0x00ff);
    }

    try {
        Path path = getFullyQualifiedPath(fname);
        long fileLengthBeforeOpen = 0;

        if (ifExists(path)) {
            if (truncateOption) {
                out = useFCOption ? mfc.create(path, EnumSet.of(CreateFlag.OVERWRITE))
                        : mfs.create(path, truncateOption);
                LOG.info("File already exists. File open with Truncate mode: " + path);
            } else {
                out = useFCOption ? mfc.create(path, EnumSet.of(CreateFlag.APPEND)) : mfs.append(path);
                fileLengthBeforeOpen = getFileLengthFromNN(path);
                LOG.info("File already exists of size " + fileLengthBeforeOpen + " File open for Append mode: "
                        + path);
            }
        } else {
            out = useFCOption ? mfc.create(path, EnumSet.of(CreateFlag.CREATE)) : mfs.create(path);
        }

        long totalByteWritten = fileLengthBeforeOpen;
        long totalByteVisible = fileLengthBeforeOpen;
        long totalByteWrittenButNotVisible = 0;

        boolean toFlush;
        for (int i = 0; i < loopN; i++) {
            toFlush = (i % 2) == 0;

            writeData(out, outBuffer, chunkSize);

            totalByteWritten += chunkSize;

            if (toFlush) {
                out.hflush();
                totalByteVisible += chunkSize + totalByteWrittenButNotVisible;
                totalByteWrittenButNotVisible = 0;
            } else {
                totalByteWrittenButNotVisible += chunkSize;
            }

            if (verboseOption) {
                LOG.info("TestReadWrite - Written " + chunkSize + ". Total written = " + totalByteWritten
                        + ". TotalByteVisible = " + totalByteVisible + " to file " + fname);
            }
            byteVisibleToRead = readData(fname, inBuffer, totalByteVisible, readBeginPosition);

            String readmsg = "Written=" + totalByteWritten + " ; Expected Visible=" + totalByteVisible
                    + " ; Got Visible=" + byteVisibleToRead + " of file " + fname;

            if (byteVisibleToRead >= totalByteVisible && byteVisibleToRead <= totalByteWritten) {
                readmsg = "pass: reader sees expected number of visible byte. " + readmsg + " [pass]";
            } else {
                countOfFailures++;
                readmsg = "fail: reader see different number of visible byte. " + readmsg + " [fail]";
                if (abortTestOnFailure) {
                    throw new IOException(readmsg);
                }
            }
            LOG.info(readmsg);
        }

        // test the automatic flush after close
        writeData(out, outBuffer, chunkSize);
        totalByteWritten += chunkSize;
        totalByteVisible += chunkSize + totalByteWrittenButNotVisible;
        totalByteWrittenButNotVisible += 0;

        out.close();

        byteVisibleToRead = readData(fname, inBuffer, totalByteVisible, readBeginPosition);

        String readmsg2 = "Written=" + totalByteWritten + " ; Expected Visible=" + totalByteVisible
                + " ; Got Visible=" + byteVisibleToRead + " of file " + fname;
        String readmsg;

        if (byteVisibleToRead >= totalByteVisible && byteVisibleToRead <= totalByteWritten) {
            readmsg = "pass: reader sees expected number of visible byte on close. " + readmsg2 + " [pass]";
        } else {
            countOfFailures++;
            readmsg = "fail: reader sees different number of visible byte on close. " + readmsg2 + " [fail]";
            LOG.info(readmsg);
            if (abortTestOnFailure)
                throw new IOException(readmsg);
        }

        // now check if NN got the same length 
        long lenFromFc = getFileLengthFromNN(path);
        if (lenFromFc != byteVisibleToRead) {
            readmsg = "fail: reader sees different number of visible byte from NN " + readmsg2 + " [fail]";
            throw new IOException(readmsg);
        }
    } catch (IOException e) {
        throw new IOException("##### Caught Exception in testAppendWriteAndRead. Close file. "
                + "Total Byte Read so far = " + byteVisibleToRead, e);
    } finally {
        if (out != null)
            out.close();
    }
    return -countOfFailures;
}

From source file: com.uber.hoodie.common.model.HoodiePartitionMetadata.java

License: Apache License

/**
 * Writes the metadata safely and atomically into the partition.
 */
public void trySave(int taskPartitionId) {
    Path tmpMetaPath = new Path(partitionPath,
            HoodiePartitionMetadata.HOODIE_PARTITION_METAFILE + "_" + taskPartitionId);
    Path metaPath = new Path(partitionPath, HoodiePartitionMetadata.HOODIE_PARTITION_METAFILE);
    boolean metafileExists = false;

    try {
        metafileExists = fs.exists(metaPath);
        if (!metafileExists) {
            // write to temporary file
            FSDataOutputStream os = fs.create(tmpMetaPath, true);
            props.store(os, "partition metadata");
            os.hsync();
            os.hflush();
            os.close();

            // move to actual path
            fs.rename(tmpMetaPath, metaPath);
        }
    } catch (IOException ioe) {
        log.warn("Error trying to save partition metadata (this is okay, as long as "
                + "atleast 1 of these succced), " + partitionPath, ioe);
    } finally {
        if (!metafileExists) {
            try {
                // clean up tmp file, if still lying around
                if (fs.exists(tmpMetaPath)) {
                    fs.delete(tmpMetaPath, false);
                }
            } catch (IOException ioe) {
                log.warn("Error trying to clean up temporary files for " + partitionPath, ioe);
            }
        }
    }
}