Example usage for org.apache.hadoop.fs CreateFlag CREATE

List of usage examples for org.apache.hadoop.fs CreateFlag CREATE

Introduction

On this page you can find example usages of org.apache.hadoop.fs.CreateFlag.CREATE collected from open-source projects.

Prototype

CreateFlag CREATE

Document

Create a file.
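
Below is a minimal sketch of the pattern most of the usage examples on this page share: pass an EnumSet containing CreateFlag.CREATE, usually together with CreateFlag.OVERWRITE, to FileContext.create. It is not taken from the projects below; the target path and file contents are hypothetical.

import java.io.IOException;
import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;

public class CreateFlagCreateExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileContext fc = FileContext.getFileContext(conf);

        // CREATE alone fails with FileAlreadyExistsException if the file already exists;
        // adding OVERWRITE replaces an existing file instead.
        Path path = new Path("/tmp/createflag-example.txt"); // hypothetical path
        try (FSDataOutputStream out = fc.create(path,
                EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
                Options.CreateOpts.createParent())) {
            out.writeUTF("hello");
        }
    }
}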

Usage

From source file:com.uber.hoodie.common.file.HoodieAppendLog.java

License:Apache License

/**
 * Construct the preferred type of SequenceFile Writer.
 * @param fs The configured filesystem.
 * @param conf The configuration.
 * @param name The name of the file.
 * @param keyClass The 'key' type.
 * @param valClass The 'value' type.
 * @param bufferSize buffer size for the underlying output stream.
 * @param replication replication factor for the file.
 * @param blockSize block size for the file.
 * @param createParent create parent directory if non-existent
 * @param compressionType The compression type.
 * @param codec The compression codec.
 * @param metadata The metadata of the file.
 * @return Returns the handle to the constructed SequenceFile Writer.
 * @throws IOException
 */
@Deprecated
public static Writer createWriter(FileSystem fs, Configuration conf, Path name, Class keyClass, Class valClass,
        int bufferSize, short replication, long blockSize, boolean createParent,
        CompressionType compressionType, CompressionCodec codec, Metadata metadata) throws IOException {
    return createWriter(FileContext.getFileContext(fs.getUri(), conf), conf, name, keyClass, valClass,
            compressionType, codec, metadata, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
            CreateOpts.bufferSize(bufferSize),
            createParent ? CreateOpts.createParent() : CreateOpts.donotCreateParent(),
            CreateOpts.repFac(replication), CreateOpts.blockSize(blockSize));
}

From source file:ldbc.snb.datagen.serializer.UpdateEventSerializer.java

License:Open Source License

public UpdateEventSerializer(Configuration conf, String fileNamePrefix, int reducerId, int numPartitions)
        throws IOException {
    conf_ = conf;
    reducerId_ = reducerId;
    stringBuffer_ = new StringBuffer(512);
    data_ = new ArrayList<String>();
    list_ = new ArrayList<String>();
    currentEvent_ = new UpdateEvent(-1, -1, UpdateEvent.UpdateEventType.NO_EVENT, new String(""));
    numPartitions_ = numPartitions;
    stats_ = new UpdateStreamStats();
    fileNamePrefix_ = fileNamePrefix;
    try {
        streamWriter_ = new SequenceFile.Writer[numPartitions_];
        FileContext fc = FileContext.getFileContext(conf);
        for (int i = 0; i < numPartitions_; ++i) {
            Path outFile = new Path(fileNamePrefix_ + "_" + i);
            streamWriter_[i] = SequenceFile.createWriter(fc, conf, outFile, UpdateEventKey.class, Text.class,
                    CompressionType.NONE, new DefaultCodec(), new SequenceFile.Metadata(),
                    EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
                    Options.CreateOpts.checksumParam(Options.ChecksumOpt.createDisabled()));
            FileSystem fs = FileSystem.get(conf);
            Path propertiesFile = new Path(fileNamePrefix_ + ".properties");
            if (fs.exists(propertiesFile)) {
                FSDataInputStream file = fs.open(propertiesFile);
                Properties properties = new Properties();
                properties.load(file);
                stats_.minDate_ = Long
                        .parseLong(properties.getProperty("ldbc.snb.interactive.min_write_event_start_time"));
                stats_.maxDate_ = Long
                        .parseLong(properties.getProperty("ldbc.snb.interactive.max_write_event_start_time"));
                stats_.count_ = Long.parseLong(properties.getProperty("ldbc.snb.interactive.num_events"));
                file.close();
                fs.delete(propertiesFile, true);
            }
        }
    } catch (IOException e) {
        throw e;
    }
}

From source file:org.apache.accumulo.server.fs.VolumeManagerImpl.java

License:Apache License

@Override
public FSDataOutputStream createSyncable(Path logPath, int bufferSize, short replication, long blockSize)
        throws IOException {
    Volume v = getVolumeByPath(logPath);
    FileSystem fs = v.getFileSystem();
    blockSize = correctBlockSize(fs.getConf(), blockSize);
    bufferSize = correctBufferSize(fs.getConf(), bufferSize);
    EnumSet<CreateFlag> set = EnumSet.of(CreateFlag.SYNC_BLOCK, CreateFlag.CREATE);
    log.debug("creating " + logPath + " with CreateFlag set: " + set);
    try {
        return fs.create(logPath, FsPermission.getDefault(), set, bufferSize, replication, blockSize, null);
    } catch (Exception ex) {
        log.debug("Exception", ex);
        return fs.create(logPath, true, bufferSize, replication, blockSize);
    }
}

From source file:org.apache.apex.malhar.lib.utils.IOUtilsTest.java

License:Apache License

private void testCopyPartialHelper(int dataSize, int offset, long size) throws IOException {
    FileUtils.deleteQuietly(new File("target/IOUtilsTest"));
    File file = new File("target/IOUtilsTest/testCopyPartial/input");
    createDataFile(file, dataSize);

    FileContext fileContext = FileContext.getFileContext();
    DataInputStream inputStream = fileContext.open(new Path(file.getAbsolutePath()));

    Path output = new Path("target/IOUtilsTest/testCopyPartial/output");
    DataOutputStream outputStream = fileContext.create(output,
            EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
            Options.CreateOpts.CreateParent.createParent());

    if (offset == 0) {
        IOUtils.copyPartial(inputStream, size, outputStream);
    } else {
        IOUtils.copyPartial(inputStream, offset, size, outputStream);
    }

    outputStream.close();

    Assert.assertTrue("output exists", fileContext.util().exists(output));
    Assert.assertEquals("output size", size, fileContext.getFileStatus(output).getLen());
    //    FileUtils.deleteQuietly(new File("target/IOUtilsTest"));
}

From source file:org.apache.hawq.pxf.plugins.hdfs.SequenceFileAccessor.java

License:Apache License

@Override
public boolean writeNextObject(OneRow onerow) throws IOException {
    Writable value = (Writable) onerow.getData();
    Writable key = (Writable) onerow.getKey();

    // init writer on first approach here, based on onerow.getData type
    // TODO: verify data is serializable.
    if (writer == null) {
        Class<? extends Writable> valueClass = value.getClass();
        Class<? extends Writable> keyClass = (key == null) ? LongWritable.class : key.getClass();
        // create writer - do not allow overwriting existing file
        writer = SequenceFile.createWriter(fc, conf, file, keyClass, valueClass, compressionType, codec,
                new SequenceFile.Metadata(), EnumSet.of(CreateFlag.CREATE));
    }

    try {
        writer.append((key == null) ? defaultKey : key, value);
    } catch (IOException e) {
        LOG.error("Failed to write data to file: " + e.getMessage());
        return false;
    }

    return true;
}

From source file:org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem.java

License:Apache License

/** {@inheritDoc} */
@SuppressWarnings("deprecation")
@Override
public FSDataOutputStream createInternal(Path f, EnumSet<CreateFlag> flag, FsPermission perm, int bufSize,
        short replication, long blockSize, Progressable progress, Options.ChecksumOpt checksumOpt,
        boolean createParent) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
    boolean append = flag.contains(CreateFlag.APPEND);
    boolean create = flag.contains(CreateFlag.CREATE);

    OutputStream out = null;

    try {
        IgfsPath path = convert(f);
        IgfsMode mode = modeRslvr.resolveMode(path);

        if (LOG.isDebugEnabled())
            LOG.debug("Opening output stream in create [thread=" + Thread.currentThread().getName() + "path="
                    + path + ", overwrite=" + overwrite + ", bufSize=" + bufSize + ']');

        if (mode == PROXY) {
            FSDataOutputStream os = secondaryFs.createInternal(toSecondary(f), flag, perm, bufSize, replication,
                    blockSize, progress, checksumOpt, createParent);

            if (clientLog.isLogEnabled()) {
                long logId = IgfsLogger.nextId();

                if (append)
                    clientLog.logAppend(logId, path, PROXY, bufSize); // Don't have stream ID.
                else
                    clientLog.logCreate(logId, path, PROXY, overwrite, bufSize, replication, blockSize);

                return new FSDataOutputStream(new HadoopIgfsProxyOutputStream(os, clientLog, logId));
            } else
                return os;
        } else {
            Map<String, String> permMap = F.asMap(PROP_PERMISSION, toString(perm), PROP_PREFER_LOCAL_WRITES,
                    Boolean.toString(preferLocFileWrites));

            // Create stream and close it in the 'finally' section if any sequential operation failed.
            HadoopIgfsStreamDelegate stream;

            long logId = -1;

            if (append) {
                stream = rmtClient.append(path, create, permMap);

                if (clientLog.isLogEnabled()) {
                    logId = IgfsLogger.nextId();

                    clientLog.logAppend(logId, path, mode, bufSize);
                }

                if (LOG.isDebugEnabled())
                    LOG.debug("Opened output stream in append [path=" + path + ", delegate=" + stream + ']');
            } else {
                stream = rmtClient.create(path, overwrite, colocateFileWrites, replication, blockSize, permMap);

                if (clientLog.isLogEnabled()) {
                    logId = IgfsLogger.nextId();

                    clientLog.logCreate(logId, path, mode, overwrite, bufSize, replication, blockSize);
                }

                if (LOG.isDebugEnabled())
                    LOG.debug("Opened output stream in create [path=" + path + ", delegate=" + stream + ']');
            }

            assert stream != null;

            HadoopIgfsOutputStream igfsOut = new HadoopIgfsOutputStream(stream, LOG, clientLog, logId);

            bufSize = Math.max(64 * 1024, bufSize);

            out = new BufferedOutputStream(igfsOut, bufSize);

            FSDataOutputStream res = new FSDataOutputStream(out, null, 0);

            // Mark stream created successfully.
            out = null;

            return res;
        }
    } finally {
        // Close if failed during stream creation.
        if (out != null)
            U.closeQuiet(out);

        leaveBusy();
    }
}

From source file:org.apache.ignite.igfs.hadoop.v2.IgfsHadoopFileSystem.java

License:Apache License

/** {@inheritDoc} */
@SuppressWarnings("deprecation")
@Override
public FSDataOutputStream createInternal(Path f, EnumSet<CreateFlag> flag, FsPermission perm, int bufSize,
        short replication, long blockSize, Progressable progress, Options.ChecksumOpt checksumOpt,
        boolean createParent) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
    boolean append = flag.contains(CreateFlag.APPEND);
    boolean create = flag.contains(CreateFlag.CREATE);

    OutputStream out = null;

    try {
        IgfsPath path = convert(f);
        IgfsMode mode = modeRslvr.resolveMode(path);

        if (LOG.isDebugEnabled())
            LOG.debug("Opening output stream in create [thread=" + Thread.currentThread().getName() + "path="
                    + path + ", overwrite=" + overwrite + ", bufSize=" + bufSize + ']');

        if (mode == PROXY) {
            FSDataOutputStream os = secondaryFs.createInternal(toSecondary(f), flag, perm, bufSize, replication,
                    blockSize, progress, checksumOpt, createParent);

            if (clientLog.isLogEnabled()) {
                long logId = IgfsLogger.nextId();

                if (append)
                    clientLog.logAppend(logId, path, PROXY, bufSize); // Don't have stream ID.
                else
                    clientLog.logCreate(logId, path, PROXY, overwrite, bufSize, replication, blockSize);

                return new FSDataOutputStream(new IgfsHadoopProxyOutputStream(os, clientLog, logId));
            } else
                return os;
        } else {
            Map<String, String> permMap = F.asMap(PROP_PERMISSION, toString(perm), PROP_PREFER_LOCAL_WRITES,
                    Boolean.toString(preferLocFileWrites));

            // Create stream and close it in the 'finally' section if any sequential operation failed.
            IgfsHadoopStreamDelegate stream;

            long logId = -1;

            if (append) {
                stream = rmtClient.append(path, create, permMap);

                if (clientLog.isLogEnabled()) {
                    logId = IgfsLogger.nextId();

                    clientLog.logAppend(logId, path, mode, bufSize);
                }

                if (LOG.isDebugEnabled())
                    LOG.debug("Opened output stream in append [path=" + path + ", delegate=" + stream + ']');
            } else {
                stream = rmtClient.create(path, overwrite, colocateFileWrites, replication, blockSize, permMap);

                if (clientLog.isLogEnabled()) {
                    logId = IgfsLogger.nextId();

                    clientLog.logCreate(logId, path, mode, overwrite, bufSize, replication, blockSize);
                }

                if (LOG.isDebugEnabled())
                    LOG.debug("Opened output stream in create [path=" + path + ", delegate=" + stream + ']');
            }

            assert stream != null;

            IgfsHadoopOutputStream igfsOut = new IgfsHadoopOutputStream(stream, LOG, clientLog, logId);

            bufSize = Math.max(64 * 1024, bufSize);

            out = new BufferedOutputStream(igfsOut, bufSize);

            FSDataOutputStream res = new FSDataOutputStream(out, null, 0);

            // Mark stream created successfully.
            out = null;

            return res;
        }
    } finally {
        // Close if failed during stream creation.
        if (out != null)
            U.closeQuiet(out);

        leaveBusy();
    }
}

From source file:org.apache.solr.store.hdfs.HdfsFileWriter.java

License:Apache License

public HdfsFileWriter(FileSystem fileSystem, Path path) throws IOException {
    LOG.debug("Creating writer on {}", path);
    this.path = path;

    Configuration conf = fileSystem.getConf();
    FsServerDefaults fsDefaults = fileSystem.getServerDefaults(path);
    EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE);
    if (Boolean.getBoolean(HDFS_SYNC_BLOCK)) {
        flags.add(CreateFlag.SYNC_BLOCK);
    }
    outputStream = fileSystem.create(path, FsPermission.getDefault().applyUMask(FsPermission.getUMask(conf)),
            flags, fsDefaults.getFileBufferSize(), fsDefaults.getReplication(), fsDefaults.getBlockSize(),
            null);
}

From source file:org.apache.twill.filesystem.FileContextLocation.java

License:Apache License

@Override
public boolean createNew() throws IOException {
    try {
        fc.create(path, EnumSet.of(CreateFlag.CREATE), Options.CreateOpts.createParent()).close();
        return true;
    } catch (FileAlreadyExistsException e) {
        return false;
    }
}

From source file:org.apache.twill.filesystem.FileContextLocation.java

License:Apache License

@Override
public OutputStream getOutputStream() throws IOException {
    return fc.create(path, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
            Options.CreateOpts.createParent());
}