Example usage for org.apache.hadoop.fs FSDataOutputStream FSDataOutputStream

Introduction

On this page you can find example usage for the org.apache.hadoop.fs.FSDataOutputStream constructor FSDataOutputStream(OutputStream out, FileSystem.Statistics stats).

Prototype

public FSDataOutputStream(OutputStream out, FileSystem.Statistics stats) 
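A minimal, self-contained sketch of the constructor (the class name and the "file" scheme label below are illustrative choices, not taken from any of the projects listed under Usage): wrap an existing OutputStream and pass a FileSystem.Statistics instance, or null, as the second argument; when statistics are supplied, bytes written through the stream are counted against that scheme.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;

public class FSDataOutputStreamExample {
    public static void main(String[] args) throws IOException {
        // Any OutputStream can be wrapped; here a simple in-memory buffer.
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();

        // The scheme label is arbitrary for a standalone test.
        FileSystem.Statistics stats = new FileSystem.Statistics("file");

        try (FSDataOutputStream out = new FSDataOutputStream(buffer, stats)) {
            out.write("hello".getBytes(StandardCharsets.UTF_8));
            System.out.println("position: " + out.getPos());                 // 5
            System.out.println("bytes written: " + stats.getBytesWritten()); // 5
        }
    }
}

Several of the examples below pass null for the statistics argument, which is also valid when no per-scheme accounting is needed.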

Usage

From source file: io.prestosql.plugin.hive.s3.PrestoS3FileSystem.java

License: Apache License

@Override
public FSDataOutputStream create(Path path, FsPermission permission, boolean overwrite, int bufferSize,
        short replication, long blockSize, Progressable progress) throws IOException {
    if ((!overwrite) && exists(path)) {
        throw new IOException("File already exists:" + path);
    }

    if (!stagingDirectory.exists()) {
        createDirectories(stagingDirectory.toPath());
    }
    if (!stagingDirectory.isDirectory()) {
        throw new IOException("Configured staging path is not a directory: " + stagingDirectory);
    }
    File tempFile = createTempFile(stagingDirectory.toPath(), "presto-s3-", ".tmp").toFile();

    String key = keyFromPath(qualifiedPath(path));
    return new FSDataOutputStream(new PrestoS3OutputStream(s3, getBucketName(uri), key, tempFile, sseEnabled,
            sseType, sseKmsKeyId, multiPartUploadMinFileSize, multiPartUploadMinPartSize, s3AclType),
            statistics);
}

From source file: io.warp10.continuum.Dump.java

License: Apache License

@Override
public int run(String[] args) throws Exception {

    String dumpurl = args[0];
    String seqfile = args[1];

    //
    // Open output SequenceFile
    //

    Configuration conf = getConf();

    //
    // Open output file
    //

    FSDataOutputStream out = null;

    if ("-".equals(args[args.length - 1])) {
        out = new FSDataOutputStream(System.out, null);
    }

    SequenceFile.Writer writer = SequenceFile.createWriter(conf,
            SequenceFile.Writer.compression(CompressionType.BLOCK, new DefaultCodec()),
            SequenceFile.Writer.keyClass(BytesWritable.class),
            SequenceFile.Writer.valueClass(BytesWritable.class),
            null == out ? SequenceFile.Writer.file(new Path(args[args.length - 1]))
                    : SequenceFile.Writer.stream(out));

    InputStream is = null;

    if (dumpurl.startsWith("http://") || dumpurl.startsWith("https://")) {
        URLConnection conn = new URL(dumpurl).openConnection();
        conn.setDoInput(true);
        conn.connect();
        is = conn.getInputStream();
    } else if ("-".equals(dumpurl)) {
        is = System.in;
    } else {
        is = new FileInputStream(dumpurl);
    }

    BufferedReader br = new BufferedReader(new InputStreamReader(is));

    TSerializer serializer = new TSerializer(new TCompactProtocol.Factory());

    while (true) {
        String line = br.readLine();

        if (null == line) {
            break;
        }

        //
        // Extract ts// class{labels}
        //

        String meta = line.substring(0, line.indexOf('}') + 1);

        //
        // Parse a dummy line 'ts// class{labels} T' to retrieve the Metadata
        //

        GTSEncoder encoder = GTSHelper.parse(null, meta + " T");

        Metadata metadata = encoder.getMetadata();

        // Retrieve potential dummy elevation which will encode the number of datapoints encoded

        GTSDecoder decoder = encoder.getDecoder();
        decoder.next();

        long count = decoder.getElevation();

        //
        // Create a GTSWrapper
        //

        GTSWrapper wrapper = new GTSWrapper();
        wrapper.setMetadata(metadata);
        wrapper.setBase(encoder.getBaseTimestamp());

        if (GeoTimeSerie.NO_ELEVATION != count) {
            wrapper.setCount(count);
        } else {
            wrapper.setCount(0L);
        }

        //
        // Retrieve encoded datapoints
        //

        byte[] datapoints = OrderPreservingBase64
                .decode(line.substring(line.indexOf('}') + 2).getBytes(Charsets.UTF_8));

        writer.append(new BytesWritable(serializer.serialize(wrapper)), new BytesWritable(datapoints));
    }

    writer.close();
    br.close();
    is.close();

    return 0;
}

From source file: org.apache.accumulo.core.client.rfile.RFileWriterBuilder.java

License: Apache License

@Override
public RFileWriter build() throws IOException {
    FileOperations fileops = FileOperations.getInstance();
    AccumuloConfiguration acuconf = AccumuloConfiguration.getDefaultConfiguration();
    HashMap<String, String> userProps = new HashMap<>();
    if (sampler != null) {
        userProps.putAll(new SamplerConfigurationImpl(sampler).toTablePropertiesMap());
    }
    userProps.putAll(tableConfig);

    if (userProps.size() > 0) {
        acuconf = new ConfigurationCopy(Iterables.concat(acuconf, userProps.entrySet()));
    }

    if (out.getOutputStream() != null) {
        FSDataOutputStream fsdo;
        if (out.getOutputStream() instanceof FSDataOutputStream) {
            fsdo = (FSDataOutputStream) out.getOutputStream();
        } else {
            fsdo = new FSDataOutputStream(out.getOutputStream(), new FileSystem.Statistics("foo"));
        }
        return new RFileWriter(fileops.newWriterBuilder().forOutputStream(".rf", fsdo, out.getConf())
                .withTableConfiguration(acuconf).build(), visCacheSize);
    } else {
        return new RFileWriter(
                fileops.newWriterBuilder().forFile(out.path.toString(), out.getFileSystem(), out.getConf())
                        .withTableConfiguration(acuconf).build(),
                visCacheSize);
    }
}

From source file: org.apache.accumulo.core.file.rfile.MultiLevelIndexTest.java

License: Apache License

private void runTest(int maxBlockSize, int num) throws IOException {
    AccumuloConfiguration aconf = AccumuloConfiguration.getDefaultConfiguration();
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    FSDataOutputStream dos = new FSDataOutputStream(baos, new FileSystem.Statistics("a"));
    CachableBlockFile.Writer _cbw = new CachableBlockFile.Writer(PositionedOutputs.wrap(dos), "gz",
            CachedConfiguration.getInstance(), aconf);

    BufferedWriter mliw = new BufferedWriter(new Writer(_cbw, maxBlockSize));

    for (int i = 0; i < num; i++)
        mliw.add(new Key(String.format("%05d000", i)), i, 0, 0, 0);

    mliw.addLast(new Key(String.format("%05d000", num)), num, 0, 0, 0);

    ABlockWriter root = _cbw.prepareMetaBlock("root");
    mliw.close(root);
    root.close();

    _cbw.close();
    dos.close();
    baos.close();

    byte[] data = baos.toByteArray();
    SeekableByteArrayInputStream bais = new SeekableByteArrayInputStream(data);
    FSDataInputStream in = new FSDataInputStream(bais);
    CachableBlockFile.Reader _cbr = new CachableBlockFile.Reader(in, data.length,
            CachedConfiguration.getInstance(), aconf);

    Reader reader = new Reader(_cbr, RFile.RINDEX_VER_8);
    BlockRead rootIn = _cbr.getMetaBlock("root");
    reader.readFields(rootIn);
    rootIn.close();
    IndexIterator liter = reader.lookup(new Key("000000"));
    int count = 0;
    while (liter.hasNext()) {
        assertEquals(count, liter.nextIndex());
        assertEquals(count, liter.peek().getNumEntries());
        assertEquals(count, liter.next().getNumEntries());
        count++;
    }

    assertEquals(num + 1, count);

    while (liter.hasPrevious()) {
        count--;
        assertEquals(count, liter.previousIndex());
        assertEquals(count, liter.peekPrevious().getNumEntries());
        assertEquals(count, liter.previous().getNumEntries());
    }

    assertEquals(0, count);

    // go past the end
    liter = reader.lookup(new Key(String.format("%05d000", num + 1)));
    assertFalse(liter.hasNext());

    Random rand = new Random();
    for (int i = 0; i < 100; i++) {
        int k = rand.nextInt(num * 1000);
        int expected;
        if (k % 1000 == 0)
            expected = k / 1000; // end key is inclusive
        else
            expected = k / 1000 + 1;
        liter = reader.lookup(new Key(String.format("%08d", k)));
        IndexEntry ie = liter.next();
        assertEquals(expected, ie.getNumEntries());
    }

}

From source file: org.apache.cassandra.hadoop.fs.CassandraFileSystem.java

License: Apache License

/**
 * @param permission
 *            Currently ignored.
 */
@Override
public FSDataOutputStream create(Path file, FsPermission permission, boolean overwrite, int bufferSize,
        short replication, long blockSize, Progressable progress) throws IOException {

    INode inode = store.retrieveINode(makeAbsolute(file));
    if (inode != null) {
        if (overwrite) {
            delete(file);
        } else {
            throw new IOException("File already exists: " + file);
        }
    } else {
        Path parent = file.getParent();
        if (parent != null) {
            if (!mkdirs(parent)) {
                throw new IOException("Mkdirs failed to create " + parent.toString());
            }
        }
    }
    return new FSDataOutputStream(new CassandraOutputStream(getConf(), store, makeAbsolute(file), permission,
            blockSize, subBlockSize, progress, bufferSize), statistics);
}

From source file: org.apache.ignite.internal.processors.hadoop.fs.GridHadoopRawLocalFileSystem.java

License: Apache License

/**
 * @param file File.
 * @param append Append flag.
 * @return Output stream.
 * @throws IOException If failed.
 */
private FSDataOutputStream out(File file, boolean append, int bufSize) throws IOException {
    return new FSDataOutputStream(new BufferedOutputStream(new FileOutputStream(file, append),
            bufSize < 32 * 1024 ? 32 * 1024 : bufSize), new Statistics(getUri().getScheme()));
}

From source file: org.apache.metron.writer.hdfs.SourceHandler.java

License: Apache License

private Path createOutputFile() throws IOException {
    Path path = new Path(this.fileNameFormat.getPath(),
            this.fileNameFormat.getName(this.rotation, System.currentTimeMillis()));
    if (fs.getScheme().equals("file")) {
        //in the situation where we're running this in a local filesystem, flushing doesn't work.
        fs.mkdirs(path.getParent());
        this.out = new FSDataOutputStream(new FileOutputStream(path.toString()), null);
    } else {
        this.out = this.fs.create(path);
    }
    return path;
}

From source file: org.apache.nifi.processors.hadoop.SequenceFileWriterImpl.java

License: Apache License

@Override
public FlowFile writeSequenceFile(final FlowFile flowFile, final ProcessSession session,
        final Configuration configuration, final CompressionType compressionType,
        final CompressionCodec compressionCodec) {

    if (flowFile.getSize() > Integer.MAX_VALUE) {
        throw new IllegalArgumentException("Cannot write " + flowFile
                + "to Sequence File because its size is greater than the largest possible Integer");
    }
    final String sequenceFilename = flowFile.getAttribute(CoreAttributes.FILENAME.key()) + ".sf";

    // Analytics running on HDFS want data that is written with a BytesWritable. However, creating a
    // BytesWritable requires that we buffer the entire file into memory in a byte array.
    // We can create an FSFilterableOutputStream to wrap the FSDataOutputStream and use that to replace
    // the InputStreamWritable class name with the BytesWritable class name when we write the header.
    // This allows the Sequence File to say that the Values are of type BytesWritable (so they can be
    // read via the BytesWritable class) while allowing us to stream the data rather than buffering
    // entire files in memory.
    final byte[] toReplace, replaceWith;
    try {
        toReplace = InputStreamWritable.class.getCanonicalName().getBytes("UTF-8");
        replaceWith = BytesWritable.class.getCanonicalName().getBytes("UTF-8");
    } catch (final UnsupportedEncodingException e) {
        // This won't happen.
        throw new RuntimeException("UTF-8 is not a supported Character Format");
    }

    final StopWatch watch = new StopWatch(true);
    FlowFile sfFlowFile = session.write(flowFile, new StreamCallback() {

        @Override
        public void process(InputStream in, OutputStream out) throws IOException {
            // Use a FilterableOutputStream to change 'InputStreamWritable' to 'BytesWritable' - see comment
            // above for an explanation of why we want to do this.
            final ByteFilteringOutputStream bwos = new ByteFilteringOutputStream(out);

            // TODO: Adding this filter could be dangerous... A Sequence File's header contains 3 bytes: "SEQ",
            // followed by 1 byte that is the Sequence File version, followed by 2 "entries." These "entries"
            // contain the size of the Key/Value type and the Key/Value type. So, we will be writing the
            // value type as InputStreamWritable -- which we need to change to BytesWritable. This means that
            // we must also change the "size" that is written, but replacing this single byte could be
            // dangerous. However, we know exactly what will be written to the header, and we limit this at one
            // replacement, so we should be just fine.
            bwos.addFilter(toReplace, replaceWith, 1);
            bwos.addFilter((byte) InputStreamWritable.class.getCanonicalName().length(),
                    (byte) BytesWritable.class.getCanonicalName().length(), 1);

            try (final FSDataOutputStream fsDataOutputStream = new FSDataOutputStream(bwos, new Statistics(""));
                    final SequenceFile.Writer writer = SequenceFile.createWriter(configuration,
                            SequenceFile.Writer.stream(fsDataOutputStream),
                            SequenceFile.Writer.keyClass(Text.class),
                            SequenceFile.Writer.valueClass(InputStreamWritable.class),
                            SequenceFile.Writer.compression(compressionType, compressionCodec))) {

                processInputStream(in, flowFile, writer);

            } finally {
                watch.stop();
            }
        }
    });
    logger.debug("Wrote Sequence File {} ({}).",
            new Object[] { sequenceFilename, watch.calculateDataRate(flowFile.getSize()) });
    return sfFlowFile;
}

From source file: org.apache.orc.bench.core.NullFileSystem.java

License: Apache License

@Override
public FSDataOutputStream create(Path path, FsPermission fsPermission, boolean b, int i, short i1, long l,
        Progressable progressable) throws IOException {
    return new FSDataOutputStream(NULL_OUTPUT, null);
}

From source file: org.apache.orc.bench.core.NullFileSystem.java

License: Apache License

@Override
public FSDataOutputStream append(Path path, int i, Progressable progressable) throws IOException {
    return new FSDataOutputStream(NULL_OUTPUT, null);
}