Example usage for org.apache.hadoop.fs CreateFlag OVERWRITE

Introduction

This page collects example usages of org.apache.hadoop.fs.CreateFlag.OVERWRITE.

Prototype

CreateFlag OVERWRITE

Document

Truncate/overwrite a file. Same as POSIX O_TRUNC.
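
For orientation, here is a minimal sketch of how the flag is typically passed (not taken from the examples below; the class name and output path are illustrative). Creation flags travel as an EnumSet, and combining CREATE with OVERWRITE means "create the file if it does not exist, truncate it if it does":

import java.util.EnumSet;

import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Path;

public class OverwriteExample {
    public static void main(String[] args) throws Exception {
        FileContext fc = FileContext.getLocalFSFileContext();

        // CREATE | OVERWRITE: create the file if absent, truncate it if present.
        FSDataOutputStream out = fc.create(new Path("/tmp/overwrite-example.txt"),
                EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
                CreateOpts.createParent());
        try {
            out.writeUTF("replaced contents");
        } finally {
            out.close();
        }
    }
}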

Usage

From source file: org.gridgain.grid.ggfs.hadoop.v2.GridGgfsHadoopFileSystem.java

License: Open Source License

/** {@inheritDoc} */
@SuppressWarnings("deprecation")
@Override
public FSDataOutputStream createInternal(Path f, EnumSet<CreateFlag> flag, FsPermission perm, int bufSize,
        short replication, long blockSize, Progressable progress, Options.ChecksumOpt checksumOpt,
        boolean createParent) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
    boolean append = flag.contains(CreateFlag.APPEND);
    boolean create = flag.contains(CreateFlag.CREATE);

    OutputStream out = null;

    try {
        GridGgfsPath path = convert(f);
        GridGgfsMode mode = modeRslvr.resolveMode(path);

        if (LOG.isDebugEnabled())
            LOG.debug("Opening output stream in create [thread=" + Thread.currentThread().getName() + "path="
                    + path + ", overwrite=" + overwrite + ", bufSize=" + bufSize + ']');

        if (mode == PROXY) {
            FSDataOutputStream os = secondaryFs.createInternal(toSecondary(f), flag, perm, bufSize, replication,
                    blockSize, progress, checksumOpt, createParent);

            if (clientLog.isLogEnabled()) {
                long logId = GridGgfsLogger.nextId();

                if (append)
                    clientLog.logAppend(logId, path, PROXY, bufSize); // Don't have stream ID.
                else
                    clientLog.logCreate(logId, path, PROXY, overwrite, bufSize, replication, blockSize);

                return new FSDataOutputStream(new GridGgfsHadoopProxyOutputStream(os, clientLog, logId));
            } else
                return os;
        } else {
            Map<String, String> permMap = F.asMap(PROP_PERMISSION, toString(perm), PROP_PREFER_LOCAL_WRITES,
                    Boolean.toString(preferLocFileWrites));

            // Create stream and close it in the 'finally' section if any sequential operation failed.
            GridGgfsHadoopStreamDelegate stream;

            long logId = -1;

            if (append) {
                stream = rmtClient.append(path, create, permMap);

                if (clientLog.isLogEnabled()) {
                    logId = GridGgfsLogger.nextId();

                    clientLog.logAppend(logId, path, mode, bufSize);
                }

                if (LOG.isDebugEnabled())
                    LOG.debug("Opened output stream in append [path=" + path + ", delegate=" + stream + ']');
            } else {
                stream = rmtClient.create(path, overwrite, colocateFileWrites, replication, blockSize, permMap);

                if (clientLog.isLogEnabled()) {
                    logId = GridGgfsLogger.nextId();

                    clientLog.logCreate(logId, path, mode, overwrite, bufSize, replication, blockSize);
                }

                if (LOG.isDebugEnabled())
                    LOG.debug("Opened output stream in create [path=" + path + ", delegate=" + stream + ']');
            }

            assert stream != null;

            GridGgfsHadoopOutputStream ggfsOut = new GridGgfsHadoopOutputStream(stream, LOG, clientLog, logId);

            bufSize = Math.max(64 * 1024, bufSize);

            out = new BufferedOutputStream(ggfsOut, bufSize);

            FSDataOutputStream res = new FSDataOutputStream(out, null, 0);

            // Mark stream created successfully.
            out = null;

            return res;
        }
    } finally {
        // Close if failed during stream creation.
        if (out != null)
            U.closeQuiet(out);

        leaveBusy();
    }
}
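
Note how this example unpacks the requested behavior from the flag set with flag.contains(CreateFlag.OVERWRITE) (and likewise for APPEND and CREATE), then either forwards the full EnumSet to the secondary file system in PROXY mode or passes the extracted overwrite boolean to rmtClient.create(), which applies the truncate-on-create semantics on the GGFS side.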

From source file: org.kitesdk.examples.Main.java

License: Apache License

public static void main(String[] args) throws Exception {
    if (args.length != 2) {
        System.err.println("Usage: java -jar zips-1.jar <zips.json> <out.sequence>");
        System.exit(1);
    }

    File file = new File(args[0]);
    if (!file.exists() || !file.canRead()) {
        System.err.println("Cannot read " + file);
        System.exit(1);
    }

    Schema schema = ReflectData.get().getSchema(ZipCode.class);
    JSONFileReader<ZipCode> reader = new JSONFileReader<ZipCode>(new FileInputStream(file), schema,
            ZipCode.class);
    reader.initialize();

    FileContext context = FileContext.getLocalFSFileContext();
    SequenceFile.Writer writer = SequenceFile.createWriter(context, new Configuration(), new Path(args[1]),
            NullWritable.class, ZipCode.class, SequenceFile.CompressionType.NONE, null,
            new SequenceFile.Metadata(), EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE));

    for (ZipCode zip : reader) {
        writer.append(NullWritable.get(), zip);
    }

    writer.close();
}
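
Here EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE) tells SequenceFile.createWriter to create the output sequence file if it is missing and to truncate it if it already exists, so rerunning the conversion replaces the previous output instead of failing.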