Example usage for org.apache.hadoop.fs FSDataOutputStream hflush

Introduction

This page collects example usages of org.apache.hadoop.fs.FSDataOutputStream.hflush() from open-source projects.

Prototype

public void hflush() throws IOException
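
Before the project examples, here is a minimal sketch of the call in isolation, assuming a default Configuration and an illustrative path (neither is taken from the examples below): after a write, hflush() flushes the client's buffers out to the datanodes so the data becomes visible to new readers; unlike hsync(), it does not force the data to disk.

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HflushSketch {
    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        Path file = new Path("/tmp/hflush-example"); // illustrative path
        try (FSDataOutputStream out = fs.create(file, true)) {
            out.write("hello".getBytes(StandardCharsets.UTF_8));
            // flush buffered bytes to the datanodes: visible to new
            // readers, but not yet guaranteed to be on disk
            out.hflush();
            // more writes may follow; close() completes the file
        }
    }
}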

Usage

From source file:edu.iu.wdamds.DataGen.java

License:Apache License

/**
 * Partition file generation is very important.
 * We need to verify it later. The format in
 * each partition file is: #ROWS Row ID ...
 * #FILES Distance File Weight File V File
 *
 * @param inputFolder folder holding the input files
 * @param inputPrefix file name prefix of the distance files
 * @param weightPrefix file name prefix of the weight files
 * @param vPrefix file name prefix of the v files
 * @param dataDirPath work directory where the partition files are created
 * @param fs file system used to create the partition files
 * @param numMapTasks number of map tasks (partitions)
 * @return true if all partition files were generated successfully
 */
public static boolean generatePartitionFiles(String inputFolder, String inputPrefix, String weightPrefix,
        String vPrefix, Path dataDirPath, FileSystem fs, int numMapTasks) {
    // Assume inputFolder is a place where all
    // processes can access
    // (on some shared file system such as NFS)
    File inputDir = new File(inputFolder);
    if (!inputDir.exists() || !inputDir.isDirectory()) {
        return false;
    }
    String inputDirPath = inputDir.getAbsolutePath();
    File[] files = inputDir.listFiles();
    // The number of files should be a multiple
    // of three: one data file matches with one
    // weight file and one v file
    if (files.length % 3 != 0) {
        return false;
    }
    int numFileSets = files.length / 3;
    System.out.println("Number of file sets. " + numFileSets);
    int numSetPerPartition = numFileSets / numMapTasks;
    System.out.println("Number of file set per partition. " + numSetPerPartition);
    int restPartitions = numFileSets % numMapTasks;
    int partitionID = 0;
    int count = 0;
    int countLimit = numSetPerPartition;
    LinkedList<Integer> rowIDs = new LinkedList<>();
    LinkedList<String> filePaths = new LinkedList<>();
    for (int i = 0; i < files.length; i++) {
        String fileName = files[i].getName();
        System.out.println("File_" + i + " " + fileName);
        if (fileName.startsWith(inputPrefix)) {
            // Find a set of files and create rowID
            int rowID = Integer.parseInt(fileName.replaceFirst(inputPrefix, ""));
            // String inputFileName = fileName;
            String weightFileName = fileName.replaceFirst(inputPrefix, weightPrefix);
            String vFileName = fileName.replaceFirst(inputPrefix, vPrefix);
            rowIDs.add(rowID);
            filePaths.add(inputDirPath + "/" + fileName);
            filePaths.add(inputDirPath + "/" + weightFileName);
            filePaths.add(inputDirPath + "/" + vFileName);
            count++;
            // The base count limit is
            // numSetPerPartition; it is raised
            // by one while there are remainder
            // partitions left
            if (count == numSetPerPartition) {
                if (restPartitions > 0) {
                    countLimit = numSetPerPartition + 1;
                    restPartitions--;
                } else {
                    countLimit = numSetPerPartition;
                }
            }
            if (count == countLimit) {
                // Create a partition file
                Path partitionFilePath = new Path(dataDirPath, "partition_" + partitionID);
                try {
                    FSDataOutputStream out = fs.create(partitionFilePath, true);
                    BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(out));
                    bw.write(MDSConstants.ROWS_TAG);
                    bw.newLine();
                    for (int rid : rowIDs) {
                        bw.write(Integer.toString(rid));
                        bw.newLine();
                    }
                    bw.write(MDSConstants.FILES_TAG);
                    bw.newLine();
                    for (String path : filePaths) {
                        bw.write(path);
                        bw.newLine();
                    }
                    bw.flush();
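                    // hflush pushes the buffered bytes to the datanodes so new
                    // readers can see them; hsync additionally forces them to
                    // disk on each datanode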
                    out.hflush();
                    out.hsync();
                    bw.close();
                } catch (IOException e) {
                    e.printStackTrace();
                    return false;
                }
                // Reset row ID holder
                rowIDs.clear();
                // Reset file list holder
                filePaths.clear();
                // Reset count
                count = 0;
                // Reset count limit
                countLimit = numSetPerPartition;
                // Increase partition ID
                partitionID++;
            }
        }
    }
    return true;
}

From source file:io.hops.hopsworks.common.security.CertificateMaterializer.java

License:Open Source License

private void writeToHDFS(DistributedFileSystemOps dfso, Path path, byte[] data) throws IOException {
    if (dfso == null) {
        throw new IOException("DistributedFilesystemOps is null");
    }
    FSDataOutputStream fsStream = dfso.getFilesystem().create(path);
    try {
        fsStream.write(data);
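        // hflush so the material reaches the datanodes before the stream closes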
        fsStream.hflush();
    } finally {
        if (fsStream != null) {
            fsStream.close();
        }
    }
}

From source file:net.arp7.HdfsPerfTest.WriteFile.java

License:Apache License

/**
 * Write a single file to HDFS.
 *
 * @param file the path of the file to create
 * @param fs the target FileSystem
 * @param data the buffer written on each iteration
 * @param stats object to accumulate write stats.
 * @throws IOException
 * @throws InterruptedException
 */
private static void writeOneFile(final Path file, final FileSystem fs, final byte[] data,
        final FileIoStats stats) throws IOException, InterruptedException {

    final long startTime = System.nanoTime();
    final EnumSet<CreateFlag> createFlags = EnumSet.of(CREATE, OVERWRITE);
    if (params.isLazyPersist()) {
        createFlags.add(LAZY_PERSIST);
    }

    LOG.info("Writing file " + file.toString());
    final FSDataOutputStream os = fs.create(file, FsPermission.getFileDefault(), createFlags,
            Constants.BUFFER_SIZE, params.getReplication(), params.getBlockSize(), null);
    final long createEndTime = System.nanoTime();
    stats.addCreateTime(createEndTime - startTime);
    final boolean isThrottled = params.maxWriteBps() > 0;
    final long expectedIoTimeNs = (isThrottled ? (((long) data.length * 1_000_000_000) / params.maxWriteBps())
            : 0);

    try {
        long lastLoggedPercent = 0;
        long writeStartTime = System.nanoTime();
        for (long j = 0; j < params.getFileSize() / params.getIoSize(); ++j) {
            final long ioStartTimeNs = (isThrottled ? System.nanoTime() : 0);
            os.write(data, 0, data.length);

            if (params.isHsync()) {
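                // hsync forces the data to disk on each datanode; the
                // hflush branch below only guarantees delivery to them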
                os.hsync();
            } else if (params.isHflush()) {
                os.hflush();
            }

            final long ioEndTimeNs = (isThrottled ? System.nanoTime() : 0);
            Utils.enforceThrottle(ioEndTimeNs - ioStartTimeNs, expectedIoTimeNs);

            if (LOG.isDebugEnabled()) {
                long percentWritten = (j * params.getIoSize() * 100) / params.getFileSize();
                if (percentWritten > lastLoggedPercent) {
                    LOG.debug("  >> Wrote " + j * params.getIoSize() + "/" + params.getFileSize() + " ["
                            + percentWritten + "%]");
                    lastLoggedPercent = percentWritten;
                }
            }
        }

        final long writeEndTime = System.nanoTime();
        stats.addWriteTime(writeEndTime - writeStartTime);
        stats.incrFilesWritten();
        stats.incrBytesWritten(params.getFileSize());
    } finally {
        final long closeStartTime = System.nanoTime();
        os.close();
        final long closeEndTime = System.nanoTime();
        stats.addCloseTime(closeEndTime - closeStartTime);
    }
}

From source file:org.apache.giraffa.TestLeaseManagement.java

License:Apache License

@Test
public void testLeaseRecovery() throws IOException {
    String src = "/testLeaseRecovery";
    Path path = new Path(src);

    HRegionServer server = UTIL.getHBaseCluster().getRegionServer(0);
    LeaseManager leaseManager = LeaseManager
            .originateSharedLeaseManager(server.getRpcServer().getListenerAddress().toString());

    FSDataOutputStream outputStream = grfs.create(path);
    String clientName = grfs.grfaClient.getClientName();
    outputStream.write(1);
    outputStream.write(2);
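    // hflush so the two bytes reach the datanodes while the client still
    // holds the lease, which the forced recovery below must reclaim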
    outputStream.hflush();
    try {
        leaseManager.setHardLimit(10L);
        INodeFile iNode = null;
        for (int i = 0; i < 100; i++) {
            leaseManager.triggerLeaseRecovery();
            try {
                Thread.sleep(100L);
            } catch (InterruptedException ignored) {
            }
            iNode = INodeFile.valueOf(nodeManager.getINode(src));
            if (iNode.getFileState() == FileState.CLOSED)
                break;
        }
        assertThat(iNode.getFileState(), is(FileState.CLOSED));
        assertThat(iNode.getLen(), is(2L));
        assertThat(iNode.getLease(), is(nullValue()));
        assertThat(leaseManager.getLeases(clientName), is(nullValue()));
    } finally {
        leaseManager.setHardLimit(HdfsConstants.LEASE_HARDLIMIT_PERIOD);
        IOUtils.closeStream(outputStream);
    }
}

From source file:org.apache.giraffa.TestLeaseManagement.java

License:Apache License

@Test
public void testClientLeaseRecovery() throws IOException {
    String src = "/testLeaseRecovery";
    Path path = new Path(src);

    HRegionServer server = UTIL.getHBaseCluster().getRegionServer(0);
    LeaseManager leaseManager = LeaseManager
            .originateSharedLeaseManager(server.getRpcServer().getListenerAddress().toString());

    FSDataOutputStream outputStream = grfs.create(path);
    String clientName = grfs.grfaClient.getClientName();
    outputStream.write(1);
    outputStream.write(2);
    outputStream.hflush();
    try {
        boolean recovered = grfs.grfaClient.getNamespaceService().recoverLease(src,
                grfs.grfaClient.getClientName());
        assertThat(recovered, is(true));
        INodeFile iNode = INodeFile.valueOf(nodeManager.getINode(src));
        assertThat(iNode.getFileState(), is(FileState.CLOSED));
        assertThat(iNode.getLen(), is(2L));
        assertThat(iNode.getLease(), is(nullValue()));
        assertThat(leaseManager.getLeases(clientName), is(nullValue()));
    } finally {
        IOUtils.closeStream(outputStream);
    }
}

From source file:org.apache.ranger.audit.provider.hdfs.HdfsLogDestination.java

License:Apache License

@Override
public boolean flush() {
    mLogger.debug("==> HdfsLogDestination.flush()");

    boolean ret = false;

    OutputStreamWriter writer = mWriter;

    if (writer != null) {
        try {
            writer.flush();

            ret = true;
        } catch (IOException excp) {
            logException("HdfsLogDestination: flush() failed", excp);
        }
    }

    FSDataOutputStream ostream = mFsDataOutStream;

    if (ostream != null) {
        try {
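            // hflush the HDFS stream so the records flushed into the
            // writer above become visible to readers of the log file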
            ostream.hflush();

            ret = true;
        } catch (IOException excp) {
            logException("HdfsLogDestination: hflush() failed", excp);
        }
    }

    if (ret) {
        mNextFlushTime = System.currentTimeMillis() + (mFlushIntervalSeconds * 1000L);
    }

    mLogger.debug("<== HdfsLogDestination.flush()");

    return ret;
}

From source file:org.apache.sentry.core.common.utils.PolicyFiles.java

License:Apache License

public static void copyToDir(FileSystem fs, Path dest, String... resources)
        throws FileNotFoundException, IOException {
    for (String resource : resources) {
        InputStream in = Resources.getResource(resource).openStream();
        FSDataOutputStream out = fs.create(new Path(dest, resource));
        long bytes = ByteStreams.copy(in, out);
        in.close();
        out.hflush();
        out.close();
        LOGGER.debug("Copying " + resource + " to " + dest + ", bytes " + bytes);
    }
}

From source file:org.apache.sentry.provider.file.PolicyFiles.java

License:Apache License

public static void copyToDir(FileSystem fs, Path dest, String... resources)
        throws FileNotFoundException, IOException {
    for (String resource : resources) {
        InputStream in = Resources.getResource(resource).openStream();
        FSDataOutputStream out = fs.create(new Path(dest, resource));
        long bytes = ByteStreams.copy(in, out);
        in.close();
        out.hflush();
        out.close();
        LOGGER.info("Copying " + resource + " to " + dest + ", bytes " + bytes);
    }
}

From source file:org.apache.solr.cloud.hdfs.HdfsRecoverLeaseTest.java

License:Apache License

@Test
public void testBasic() throws IOException {
    long startRecoverLeaseSuccessCount = FSHDFSUtils.RECOVER_LEASE_SUCCESS_COUNT.get();

    URI uri = dfsCluster.getURI();
    Path path = new Path(uri);
    Configuration conf = new Configuration();
    conf.setBoolean("fs.hdfs.impl.disable.cache", true);
    FileSystem fs1 = FileSystem.get(path.toUri(), conf);
    Path testFile = new Path(uri.toString() + "/testfile");
    FSDataOutputStream out = fs1.create(testFile);

    out.write(5);
    out.hflush();
    out.close();
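    // close() completes the file, so the lease recovery below should be a
    // no-op (the recover-lease success count must stay unchanged)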

    FSHDFSUtils.recoverFileLease(fs1, testFile, conf, new CallerInfo() {

        @Override
        public boolean isCallerClosed() {
            return false;
        }
    });
    assertEquals(0, FSHDFSUtils.RECOVER_LEASE_SUCCESS_COUNT.get() - startRecoverLeaseSuccessCount);

    fs1.close();

    FileSystem fs2 = FileSystem.get(path.toUri(), conf);
    Path testFile2 = new Path(uri.toString() + "/testfile2");
    FSDataOutputStream out2 = fs2.create(testFile2);

    if (random().nextBoolean()) {
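        // write and hflush some data but never close the stream: fs2 keeps
        // the lease on testfile2, so the recovery below has real work to do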
        int cnt = random().nextInt(100);
        for (int i = 0; i < cnt; i++) {
            out2.write(random().nextInt(20000));
        }
        out2.hflush();
    }

    // closing the fs will close the file it seems
    // fs2.close();

    FileSystem fs3 = FileSystem.get(path.toUri(), conf);

    FSHDFSUtils.recoverFileLease(fs3, testFile2, conf, new CallerInfo() {

        @Override
        public boolean isCallerClosed() {
            return false;
        }
    });
    assertEquals(1, FSHDFSUtils.RECOVER_LEASE_SUCCESS_COUNT.get() - startRecoverLeaseSuccessCount);

    fs3.close();
    fs2.close();
}

From source file:org.apache.solr.cloud.hdfs.HdfsRecoverLeaseTest.java

License:Apache License

@Test
public void testMultiThreaded() throws Exception {
    long startRecoverLeaseSuccessCount = FSHDFSUtils.RECOVER_LEASE_SUCCESS_COUNT.get();

    final URI uri = dfsCluster.getURI();
    final Path path = new Path(uri);
    final Configuration conf = new Configuration();
    conf.setBoolean("fs.hdfs.impl.disable.cache", true);

    // n threads create files
    class WriterThread extends Thread {
        private FileSystem fs;
        private int id;

        public WriterThread(int id) {
            this.id = id;
            try {
                fs = FileSystem.get(path.toUri(), conf);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }

        @Override
        public void run() {
            Path testFile = new Path(uri.toString() + "/file-" + id);
            FSDataOutputStream out;
            try {
                out = fs.create(testFile);

                if (random().nextBoolean()) {
                    int cnt = random().nextInt(100);
                    for (int i = 0; i < cnt; i++) {
                        out.write(random().nextInt(20000));
                    }
                    out.hflush();
                }
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }

        public void close() throws IOException {
            fs.close();
        }

        public int getFileId() {
            return id;
        }
    }

    class RecoverThread extends Thread {
        private FileSystem fs;
        private int id;

        public RecoverThread(int id) {
            this.id = id;
            try {
                fs = FileSystem.get(path.toUri(), conf);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }

        @Override
        public void run() {
            Path testFile = new Path(uri.toString() + "/file-" + id);
            try {
                FSHDFSUtils.recoverFileLease(fs, testFile, conf, new CallerInfo() {

                    @Override
                    public boolean isCallerClosed() {
                        return false;
                    }
                });
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }

        public void close() throws IOException {
            fs.close();
        }
    }

    Set<WriterThread> writerThreads = new HashSet<WriterThread>();
    Set<RecoverThread> recoverThreads = new HashSet<RecoverThread>();

    int threadCount = 3;
    for (int i = 0; i < threadCount; i++) {
        WriterThread wt = new WriterThread(i);
        writerThreads.add(wt);
        wt.start(); // start(), not run(): run() would execute on the calling thread
    }

    for (WriterThread wt : writerThreads) {
        wt.join();
    }

    Thread.sleep(2000);

    for (WriterThread wt : writerThreads) {
        RecoverThread rt = new RecoverThread(wt.getFileId());
        recoverThreads.add(rt);
        rt.start(); // start(), not run(): run() would execute on the calling thread
    }

    for (RecoverThread rt : recoverThreads) {
        rt.join();
    }

    for (WriterThread wt : writerThreads) {
        wt.close();
    }

    for (RecoverThread rt : recoverThreads) {
        rt.close();
    }

    assertEquals(threadCount, FSHDFSUtils.RECOVER_LEASE_SUCCESS_COUNT.get() - startRecoverLeaseSuccessCount);

}