Example usage for org.apache.hadoop.fs CreateFlag APPEND

List of usage examples for org.apache.hadoop.fs CreateFlag APPEND

Introduction

On this page you can find usage examples for org.apache.hadoop.fs CreateFlag APPEND.

Prototype

CreateFlag APPEND

Document

Append to a file.
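
Before the examples, a minimal runnable sketch of opening a file for append through the FileContext API. The class name, path, and payload are placeholders, and the file must already exist: APPEND alone fails with FileNotFoundException for a missing file.

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class AppendSketch {
    public static void main(String[] args) throws Exception {
        // Assumes /tmp/existing.txt already exists on the default file system.
        FileContext fc = FileContext.getFileContext(new Configuration());
        FSDataOutputStream out = fc.create(new Path("/tmp/existing.txt"),
                EnumSet.of(CreateFlag.APPEND),
                Options.CreateOpts.perms(FsPermission.getDefault()));
        try {
            out.writeBytes("appended data\n");
        } finally {
            out.close();
        }
    }
}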

Usage

From source file:com.mellanox.r4h.DFSClient.java

License:Apache License

/**
 * Append to an existing file if {@link CreateFlag#APPEND} is present
 */
private DFSOutputStream primitiveAppend(String src, EnumSet<CreateFlag> flag, int buffersize,
        Progressable progress) throws IOException {
    if (flag.contains(CreateFlag.APPEND)) {
        HdfsFileStatus stat = getFileInfo(src);
        if (stat == null) { // No file to append to
            // New file needs to be created if create option is present
            if (!flag.contains(CreateFlag.CREATE)) {
                throw new FileNotFoundException(
                        "failed to append to non-existent file " + src + " on client " + clientName);
            }
            return null;
        }
        return callAppend(src, buffersize, flag, progress, null);
    }
    return null;
}
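
The flag test above is what gives the CREATE | APPEND combination its create-or-append semantics: an existing file is appended to, while a missing file returns null so the caller can fall through to the create path. An illustrative caller sketch (the flag set and arguments are assumptions, not taken from the source):

// Illustrative only: append when the file exists; a null return with
// CREATE present means the regular create path should run instead.
EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE, CreateFlag.APPEND);
DFSOutputStream out = primitiveAppend(src, flags, 4096, null);
if (out == null) {
    // No existing file: proceed with create, which is legal since CREATE is set.
}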

From source file:com.mellanox.r4h.DistributedFileSystem.java

License:Apache License

@Override
public FSDataOutputStream append(Path f, final int bufferSize, final Progressable progress) throws IOException {
    return append(f, EnumSet.of(CreateFlag.APPEND), bufferSize, progress);
}
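
This override shows how the classic append(Path, int, Progressable) entry point reduces to the flag-based overload. A minimal caller-side sketch (a snippet, not a complete class; the path is a placeholder and conf is a default Configuration):

Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
FSDataOutputStream out = fs.append(new Path("/data/events.log"), 4096, null);
try {
    out.writeBytes("one more record\n");
} finally {
    out.close();
}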

From source file:com.mellanox.r4h.TestWriteRead.java

License:Apache License

/**
 * Common routine to do a positioned read while the file is open for write.
 * After each write iteration, read the file from beginning to end.
 * Returns 0 on success, otherwise the negated number of failures.
 */
private int testWriteAndRead(String fname, int loopN, int chunkSize, long readBeginPosition)
        throws IOException {

    int countOfFailures = 0;
    long byteVisibleToRead = 0;
    FSDataOutputStream out = null;

    byte[] outBuffer = new byte[BUFFER_SIZE];
    byte[] inBuffer = new byte[BUFFER_SIZE];

    for (int i = 0; i < BUFFER_SIZE; i++) {
        outBuffer[i] = (byte) (i & 0x00ff);
    }

    try {
        Path path = getFullyQualifiedPath(fname);
        long fileLengthBeforeOpen = 0;

        if (ifExists(path)) {
            if (truncateOption) {
                out = useFCOption ? mfc.create(path, EnumSet.of(CreateFlag.OVERWRITE))
                        : mfs.create(path, truncateOption);
                LOG.info("File already exists. File open with Truncate mode: " + path);
            } else {
                out = useFCOption ? mfc.create(path, EnumSet.of(CreateFlag.APPEND)) : mfs.append(path);
                fileLengthBeforeOpen = getFileLengthFromNN(path);
                LOG.info("File already exists of size " + fileLengthBeforeOpen + " File open for Append mode: "
                        + path);
            }
        } else {
            out = useFCOption ? mfc.create(path, EnumSet.of(CreateFlag.CREATE)) : mfs.create(path);
        }

        long totalByteWritten = fileLengthBeforeOpen;
        long totalByteVisible = fileLengthBeforeOpen;
        long totalByteWrittenButNotVisible = 0;

        boolean toFlush;
        for (int i = 0; i < loopN; i++) {
            toFlush = (i % 2) == 0;

            writeData(out, outBuffer, chunkSize);

            totalByteWritten += chunkSize;

            if (toFlush) {
                out.hflush();
                totalByteVisible += chunkSize + totalByteWrittenButNotVisible;
                totalByteWrittenButNotVisible = 0;
            } else {
                totalByteWrittenButNotVisible += chunkSize;
            }

            if (verboseOption) {
                LOG.info("TestReadWrite - Written " + chunkSize + ". Total written = " + totalByteWritten
                        + ". TotalByteVisible = " + totalByteVisible + " to file " + fname);
            }
            byteVisibleToRead = readData(fname, inBuffer, totalByteVisible, readBeginPosition);

            String readmsg = "Written=" + totalByteWritten + " ; Expected Visible=" + totalByteVisible
                    + " ; Got Visible=" + byteVisibleToRead + " of file " + fname;

            if (byteVisibleToRead >= totalByteVisible && byteVisibleToRead <= totalByteWritten) {
                readmsg = "pass: reader sees expected number of visible byte. " + readmsg + " [pass]";
            } else {
                countOfFailures++;
                readmsg = "fail: reader see different number of visible byte. " + readmsg + " [fail]";
                if (abortTestOnFailure) {
                    throw new IOException(readmsg);
                }
            }
            LOG.info(readmsg);
        }

        // test the automatic flush after close
        writeData(out, outBuffer, chunkSize);
        totalByteWritten += chunkSize;
        totalByteVisible += chunkSize + totalByteWrittenButNotVisible;
        totalByteWrittenButNotVisible = 0; // everything written becomes visible once the file is closed

        out.close();

        byteVisibleToRead = readData(fname, inBuffer, totalByteVisible, readBeginPosition);

        String readmsg2 = "Written=" + totalByteWritten + " ; Expected Visible=" + totalByteVisible
                + " ; Got Visible=" + byteVisibleToRead + " of file " + fname;
        String readmsg;

        if (byteVisibleToRead >= totalByteVisible && byteVisibleToRead <= totalByteWritten) {
            readmsg = "pass: reader sees expected number of visible byte on close. " + readmsg2 + " [pass]";
        } else {
            countOfFailures++;
            readmsg = "fail: reader sees different number of visible byte on close. " + readmsg2 + " [fail]";
            LOG.info(readmsg);
            if (abortTestOnFailure)
                throw new IOException(readmsg);
        }

        // now check if NN got the same length 
        long lenFromFc = getFileLengthFromNN(path);
        if (lenFromFc != byteVisibleToRead) {
            readmsg = "fail: reader sees different number of visible byte from NN " + readmsg2 + " [fail]";
            throw new IOException(readmsg);
        }
    } catch (IOException e) {
        throw new IOException("##### Caught Exception in testAppendWriteAndRead. Close file. "
                + "Total Byte Read so far = " + byteVisibleToRead, e);
    } finally {
        if (out != null)
            out.close();
    }
    return -countOfFailures;
}

From source file:com.quantcast.qfs.hadoop.Qfs.java

License:Apache License

@Override
public FSDataOutputStream createInternal(Path path, EnumSet<CreateFlag> createFlag,
        FsPermission absolutePermission, int bufferSize, short replication, long blockSize,
        Progressable progress, ChecksumOpt checksumOpt, boolean createParent) throws IOException {
    CreateFlag.validate(createFlag);
    checkPath(path);
    if (createParent) {
        mkdir(path.getParent(), absolutePermission, createParent);
    }
    return qfsImpl.create(getUriPath(path), replication, bufferSize, createFlag.contains(CreateFlag.OVERWRITE),
            absolutePermission.toShort(), createFlag.contains(CreateFlag.APPEND));
}
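
CreateFlag.validate is the standard Hadoop guard that rejects empty or contradictory flag sets before they are translated into QFS booleans. A small sketch of its contract (behavior as documented for org.apache.hadoop.fs.CreateFlag):

// Valid: create the file, or append to it if it already exists.
CreateFlag.validate(EnumSet.of(CreateFlag.CREATE, CreateFlag.APPEND));

// Invalid: APPEND contradicts OVERWRITE, so this throws
// HadoopIllegalArgumentException.
CreateFlag.validate(EnumSet.of(CreateFlag.APPEND, CreateFlag.OVERWRITE));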

From source file:org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem.java

License:Apache License

/** {@inheritDoc} */
@SuppressWarnings("deprecation")
@Override
public FSDataOutputStream createInternal(Path f, EnumSet<CreateFlag> flag, FsPermission perm, int bufSize,
        short replication, long blockSize, Progressable progress, Options.ChecksumOpt checksumOpt,
        boolean createParent) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
    boolean append = flag.contains(CreateFlag.APPEND);
    boolean create = flag.contains(CreateFlag.CREATE);

    OutputStream out = null;

    try {
        IgfsPath path = convert(f);
        IgfsMode mode = modeRslvr.resolveMode(path);

        if (LOG.isDebugEnabled())
            LOG.debug("Opening output stream in create [thread=" + Thread.currentThread().getName() + "path="
                    + path + ", overwrite=" + overwrite + ", bufSize=" + bufSize + ']');

        if (mode == PROXY) {
            FSDataOutputStream os = secondaryFs.createInternal(toSecondary(f), flag, perm, bufSize, replication,
                    blockSize, progress, checksumOpt, createParent);

            if (clientLog.isLogEnabled()) {
                long logId = IgfsLogger.nextId();

                if (append)
                    clientLog.logAppend(logId, path, PROXY, bufSize); // Don't have stream ID.
                else
                    clientLog.logCreate(logId, path, PROXY, overwrite, bufSize, replication, blockSize);

                return new FSDataOutputStream(new HadoopIgfsProxyOutputStream(os, clientLog, logId));
            } else
                return os;
        } else {
            Map<String, String> permMap = F.asMap(PROP_PERMISSION, toString(perm), PROP_PREFER_LOCAL_WRITES,
                    Boolean.toString(preferLocFileWrites));

            // Create stream and close it in the 'finally' section if any sequential operation failed.
            HadoopIgfsStreamDelegate stream;

            long logId = -1;

            if (append) {
                stream = rmtClient.append(path, create, permMap);

                if (clientLog.isLogEnabled()) {
                    logId = IgfsLogger.nextId();

                    clientLog.logAppend(logId, path, mode, bufSize);
                }

                if (LOG.isDebugEnabled())
                    LOG.debug("Opened output stream in append [path=" + path + ", delegate=" + stream + ']');
            } else {
                stream = rmtClient.create(path, overwrite, colocateFileWrites, replication, blockSize, permMap);

                if (clientLog.isLogEnabled()) {
                    logId = IgfsLogger.nextId();

                    clientLog.logCreate(logId, path, mode, overwrite, bufSize, replication, blockSize);
                }

                if (LOG.isDebugEnabled())
                    LOG.debug("Opened output stream in create [path=" + path + ", delegate=" + stream + ']');
            }

            assert stream != null;

            HadoopIgfsOutputStream igfsOut = new HadoopIgfsOutputStream(stream, LOG, clientLog, logId);

            bufSize = Math.max(64 * 1024, bufSize);

            out = new BufferedOutputStream(igfsOut, bufSize);

            FSDataOutputStream res = new FSDataOutputStream(out, null, 0);

            // Mark stream created successfully.
            out = null;

            return res;
        }
    } finally {
        // Close if failed during stream creation.
        if (out != null)
            U.closeQuiet(out);

        leaveBusy();
    }
}
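
Note the out = null assignment just before the return: once the FSDataOutputStream wrapper is built, ownership of the buffered stream transfers to it, so the finally block closes the stream only when construction failed partway through. The same idiom in isolation (openStream and wrap are hypothetical helpers, not Ignite APIs):

OutputStream out = null;
try {
    out = openStream();           // hypothetical: acquire the raw stream
    OutputStream res = wrap(out); // hypothetical: may throw
    out = null;                   // success: res now owns the stream
    return res;
} finally {
    if (out != null)
        out.close();              // reached only on the failure path
}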

From source file:org.apache.ignite.igfs.hadoop.v2.IgfsHadoopFileSystem.java

License:Apache License

/** {@inheritDoc} */
@SuppressWarnings("deprecation")
@Override
public FSDataOutputStream createInternal(Path f, EnumSet<CreateFlag> flag, FsPermission perm, int bufSize,
        short replication, long blockSize, Progressable progress, Options.ChecksumOpt checksumOpt,
        boolean createParent) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
    boolean append = flag.contains(CreateFlag.APPEND);
    boolean create = flag.contains(CreateFlag.CREATE);

    OutputStream out = null;

    try {
        IgfsPath path = convert(f);
        IgfsMode mode = modeRslvr.resolveMode(path);

        if (LOG.isDebugEnabled())
            LOG.debug("Opening output stream in create [thread=" + Thread.currentThread().getName() + "path="
                    + path + ", overwrite=" + overwrite + ", bufSize=" + bufSize + ']');

        if (mode == PROXY) {
            FSDataOutputStream os = secondaryFs.createInternal(toSecondary(f), flag, perm, bufSize, replication,
                    blockSize, progress, checksumOpt, createParent);

            if (clientLog.isLogEnabled()) {
                long logId = IgfsLogger.nextId();

                if (append)
                    clientLog.logAppend(logId, path, PROXY, bufSize); // Don't have stream ID.
                else
                    clientLog.logCreate(logId, path, PROXY, overwrite, bufSize, replication, blockSize);

                return new FSDataOutputStream(new IgfsHadoopProxyOutputStream(os, clientLog, logId));
            } else
                return os;
        } else {
            Map<String, String> permMap = F.asMap(PROP_PERMISSION, toString(perm), PROP_PREFER_LOCAL_WRITES,
                    Boolean.toString(preferLocFileWrites));

            // Create stream and close it in the 'finally' section if any sequential operation failed.
            IgfsHadoopStreamDelegate stream;

            long logId = -1;

            if (append) {
                stream = rmtClient.append(path, create, permMap);

                if (clientLog.isLogEnabled()) {
                    logId = IgfsLogger.nextId();

                    clientLog.logAppend(logId, path, mode, bufSize);
                }

                if (LOG.isDebugEnabled())
                    LOG.debug("Opened output stream in append [path=" + path + ", delegate=" + stream + ']');
            } else {
                stream = rmtClient.create(path, overwrite, colocateFileWrites, replication, blockSize, permMap);

                if (clientLog.isLogEnabled()) {
                    logId = IgfsLogger.nextId();

                    clientLog.logCreate(logId, path, mode, overwrite, bufSize, replication, blockSize);
                }

                if (LOG.isDebugEnabled())
                    LOG.debug("Opened output stream in create [path=" + path + ", delegate=" + stream + ']');
            }

            assert stream != null;

            IgfsHadoopOutputStream igfsOut = new IgfsHadoopOutputStream(stream, LOG, clientLog, logId);

            bufSize = Math.max(64 * 1024, bufSize);

            out = new BufferedOutputStream(igfsOut, bufSize);

            FSDataOutputStream res = new FSDataOutputStream(out, null, 0);

            // Mark stream created successfully.
            out = null;

            return res;
        }
    } finally {
        // Close if failed during stream creation.
        if (out != null)
            U.closeQuiet(out);

        leaveBusy();
    }
}

From source file:org.apache.ignite.igfs.HadoopIgfs20FileSystemAbstractSelfTest.java

License:Apache License

/** @throws Exception If failed. */
public void testAppendIfPathPointsToDirectory() throws Exception {
    final Path fsHome = new Path(primaryFsUri);
    final Path dir = new Path(fsHome, "/tmp");
    Path file = new Path(dir, "my");

    FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
            Options.CreateOpts.perms(FsPermission.getDefault()));

    os.close();

    GridTestUtils.assertThrows(log, new Callable<Object>() {
        @Override
        public Object call() throws Exception {
            return fs.create(new Path(fsHome, dir), EnumSet.of(CreateFlag.APPEND),
                    Options.CreateOpts.perms(FsPermission.getDefault()));
        }
    }, IOException.class, null);
}

From source file:org.apache.ignite.igfs.HadoopIgfs20FileSystemAbstractSelfTest.java

License:Apache License

/** @throws Exception If failed. */
public void testAppendIfFileIsAlreadyBeingOpenedToWrite() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    final Path file = new Path(fsHome, "someFile");

    FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
            Options.CreateOpts.perms(FsPermission.getDefault()));

    os.close();

    FSDataOutputStream appendOs = fs.create(file, EnumSet.of(CreateFlag.APPEND),
            Options.CreateOpts.perms(FsPermission.getDefault()));

    GridTestUtils.assertThrows(log, new Callable<Object>() {
        @Override
        public Object call() throws Exception {
            return fs.create(file, EnumSet.of(CreateFlag.APPEND),
                    Options.CreateOpts.perms(FsPermission.getDefault()));
        }
    }, IOException.class, null);

    appendOs.close();
}

From source file:org.apache.ignite.igfs.HadoopIgfs20FileSystemAbstractSelfTest.java

License:Apache License

/** @throws Exception If failed. */
public void testAppend() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path file = new Path(fsHome, "someFile");

    int cnt = 1024;

    FSDataOutputStream out = fs.create(file, EnumSet.noneOf(CreateFlag.class),
            Options.CreateOpts.perms(FsPermission.getDefault()));

    for (int i = 0; i < cnt; i++)
        out.writeLong(i);

    out.close();

    out = fs.create(file, EnumSet.of(CreateFlag.APPEND), Options.CreateOpts.perms(FsPermission.getDefault()));

    for (int i = cnt; i < cnt * 2; i++)
        out.writeLong(i);

    out.close();

    FSDataInputStream in = fs.open(file, 1024);

    for (int i = 0; i < cnt * 2; i++)
        assertEquals(i, in.readLong());

    in.close();
}
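
As a follow-up check, the appended length could also be verified against the file status reported by the file system (a sketch, assuming the test's fs field exposes getFileStatus as AbstractFileSystem does):

long len = fs.getFileStatus(file).getLen();
assertEquals(2L * cnt * 8, len); // 2048 longs of 8 bytes each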

From source file:org.apache.ignite.igfs.HadoopIgfs20FileSystemAbstractSelfTest.java

License:Apache License

/** @throws Exception If failed. */
public void testRenameIfSrcPathIsAlreadyBeingOpenedToWrite() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path srcFile = new Path(fsHome, "srcFile");
    Path dstFile = new Path(fsHome, "dstFile");

    FSDataOutputStream os = fs.create(srcFile, EnumSet.noneOf(CreateFlag.class),
            Options.CreateOpts.perms(FsPermission.getDefault()));

    os.close();

    os = fs.create(srcFile, EnumSet.of(CreateFlag.APPEND), Options.CreateOpts.perms(FsPermission.getDefault()));

    fs.rename(srcFile, dstFile);

    assertPathExists(fs, dstFile);

    String testStr = "Test";

    try {
        os.writeBytes(testStr);
    } finally {
        os.close();
    }

    try (FSDataInputStream is = fs.open(dstFile)) {
        byte[] buf = new byte[testStr.getBytes().length];

        is.readFully(buf);

        assertEquals(testStr, new String(buf));
    }
}