Example usage for org.apache.hadoop.fs FSDataOutputStream hflush

List of usage examples for org.apache.hadoop.fs FSDataOutputStream hflush

Introduction

On this page you can find example usages of org.apache.hadoop.fs.FSDataOutputStream.hflush(), collected from open-source projects.

Prototype

@Override
public void hflush() throws IOException
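
For orientation, a minimal sketch of the typical pattern is shown below: write some bytes, call hflush() so the buffered data reaches the DataNodes and becomes visible to new readers while the file is still open, and close the stream only when the file is complete. The file path and default configuration here are illustrative assumptions, not taken from the examples on this page.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HflushSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Resolves fs.defaultFS from the configuration, e.g. an HDFS cluster.
        FileSystem fs = FileSystem.get(conf);
        Path path = new Path("/tmp/hflush-demo.txt"); // illustrative path

        try (FSDataOutputStream out = fs.create(path, true)) {
            out.writeBytes("partial record\n");
            // hflush() pushes client-side buffers to the DataNodes so new readers
            // can see the data; it does not guarantee the data is on disk (hsync() does).
            out.hflush();
            out.writeBytes("rest of the record\n");
        } // close() flushes any remaining data and finalizes the file
    }
}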

Usage

From source file:org.apache.solr.cloud.hdfs.HdfsThreadLeakTest.java

License:Apache License

@Test
public void testBasic() throws IOException {
    String uri = HdfsTestUtil.getURI(dfsCluster);
    Path path = new Path(uri);
    Configuration conf = new Configuration();
    conf.setBoolean("fs.hdfs.impl.disable.cache", true);
    FileSystem fs = FileSystem.get(path.toUri(), conf);
    Path testFile = new Path(uri.toString() + "/testfile");
    FSDataOutputStream out = fs.create(testFile);

    out.write(5);
    out.hflush();
    out.close();

    ((DistributedFileSystem) fs).recoverLease(testFile);

    fs.close();
}

From source file:org.apache.tajo.master.ha.HAServiceHDFSImpl.java

License:Apache License

private void createMasterFile(boolean isActive) throws IOException {
    String fileName = masterName.replaceAll(":", "_");
    Path path = null;

    if (isActive) {
        path = new Path(activePath, fileName);
    } else {
        path = new Path(backupPath, fileName);
    }

    StringBuilder sb = new StringBuilder();
    InetSocketAddress address = getHostAddress(HAConstants.MASTER_CLIENT_RPC_ADDRESS);
    sb.append(address.getAddress().getHostAddress()).append(":").append(address.getPort()).append("_");

    address = getHostAddress(HAConstants.RESOURCE_TRACKER_RPC_ADDRESS);
    sb.append(address.getAddress().getHostAddress()).append(":").append(address.getPort()).append("_");

    address = getHostAddress(HAConstants.CATALOG_ADDRESS);
    sb.append(address.getAddress().getHostAddress()).append(":").append(address.getPort()).append("_");

    address = getHostAddress(HAConstants.MASTER_INFO_ADDRESS);
    sb.append(address.getAddress().getHostAddress()).append(":").append(address.getPort());

    FSDataOutputStream out = fs.create(path);

    try {
        out.writeUTF(sb.toString());
        out.hflush();
        out.close();
    } catch (FileAlreadyExistsException e) {
        createMasterFile(false);
    }

    if (isActive) {
        isActiveStatus = true;
    } else {
        isActiveStatus = false;
    }

    startPingChecker();
}

From source file:org.apache.tez.dag.history.recovery.RecoveryService.java

License:Apache License

private void doFlush(FSDataOutputStream outputStream, long currentTime) throws IOException {
    outputStream.hflush();

    if (LOG.isDebugEnabled()) {
        LOG.debug("Flushing output stream" + ", lastTimeSinceFLush=" + lastFlushTime + ", timeSinceLastFlush="
                + (currentTime - lastFlushTime) + ", unflushedEventsCount=" + unflushedEventsCount
                + ", maxUnflushedEvents=" + maxUnflushedEvents);
    }

    unflushedEventsCount = 0;
    lastFlushTime = currentTime;
}

From source file:org.kitesdk.data.spi.filesystem.AvroAppender.java

License:Apache License

@Override
public void flush() throws IOException {
    // Avro sync forces the end of the current block so the data is recoverable
    dataFileWriter.flush();
    Hadoop.FSDataOutputStream.hflush.invoke(out);
}

From source file:org.kitesdk.data.spi.filesystem.CSVAppender.java

License:Apache License

@Override
public void flush() throws IOException {
    writer.flush();
    Hadoop.FSDataOutputStream.hflush.invoke(outgoing);
}