Example usage for org.apache.hadoop.fs FileSystem create

Introduction

This page lists usage examples for the create method of org.apache.hadoop.fs.FileSystem.

Prototype

public FSDataOutputStream create(Path f, boolean overwrite) throws IOException 

Document

Create an FSDataOutputStream at the indicated Path. When overwrite is true, an existing file at the path is replaced; when false, the call throws an IOException if the file already exists.
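
A minimal sketch of the call, assuming a configured Hadoop client; the output path is hypothetical:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public static void writeExample() throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    Path out = new Path("/tmp/example.txt"); // hypothetical destination
    // overwrite = true replaces an existing file; with overwrite = false,
    // create throws an IOException if the file already exists
    try (FSDataOutputStream os = fs.create(out, true)) {
        os.writeBytes("hello\n");
    }
}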

Usage

From source file: com.splicemachine.derby.impl.io.HdfsDirFile.java

License: Apache License

@Override
public boolean createNewFile() throws IOException {
    FSDataOutputStream os = null;
    try {
        FileSystem fs = getFileSystem();
        os = fs.create(new Path(path), false);
        return true;
    } catch (IOException e) {
        LOG.error(String.format("An exception occurred while creating the path '%s'.", path), e);
        return false;
    } finally {
        if (os != null) {
            os.close();
        }
    }
}
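
Because overwrite is false here, create fails with an IOException when the file already exists, so createNewFile reports false for an already-taken path instead of silently truncating it.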

From source file: com.splicemachine.derby.impl.io.HdfsDirFile.java

License: Apache License

@Override
public OutputStream getOutputStream() throws FileNotFoundException {
    try {
        FileSystem fs = getFileSystem();
        return fs.create(new Path(path), false);
    } catch (FileNotFoundException fnfe) {
        throw fnfe;
    } catch (IOException e) {
        LOG.error(String.format("An exception occurred while creating the file '%s'.", path), e);
        return null;
    }
}
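
Note the asymmetry in the error handling above: FileNotFoundException is rethrown, but any other IOException is logged and turned into a null return, so callers of getOutputStream must null-check the stream before writing.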

From source file: com.splicemachine.hbase.MockSnapshot.java

License: Apache License

public static void createFile(Path p) throws IOException {
    Configuration conf = new Configuration();
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///tmp");
    FileSystem fs = FileSystem.getLocal(conf);

    FSDataOutputStream dos = fs.create(p, true);
    dos.write(0);
    dos.flush();
    dos.close();
}

From source file: com.splout.db.hadoop.TablespaceGenerator.java

License: Apache License

/**
 * Write the partition map and other metadata to the output folder. They will
 * be needed for deploying the dataset to Splout.
 */
protected void writeOutputMetadata(Configuration conf) throws IOException, JSONSerDeException {
    FileSystem fileSystem = outputPath.getFileSystem(conf);

    // Write the Partition map
    Path partitionMapPath = new Path(outputPath, OUT_PARTITION_MAP);
    BufferedWriter writer = new BufferedWriter(
            new OutputStreamWriter(fileSystem.create(partitionMapPath, true)));
    writer.write(JSONSerDe.ser(partitionMap));
    writer.close();

    // Write init statements, if applicable
    if (tablespace.getInitStatements() != null) {
        Path initStatementsPath = new Path(outputPath, OUT_INIT_STATEMENTS);
        writer = new BufferedWriter(new OutputStreamWriter(fileSystem.create(initStatementsPath, true)));
        writer.write(JSONSerDe.ser(tablespace.getInitStatements()));
        writer.close();
    }

    // Write the Engine ID so we know what we are deploying exactly afterwards
    Path enginePath = new Path(outputPath, OUT_ENGINE);
    writer = new BufferedWriter(new OutputStreamWriter(fileSystem.create(enginePath, true)));
    writer.write(tablespace.getEngine().getClass().getName());
    writer.close();
}
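
The three writes above repeat one pattern: create with overwrite = true, write a string, close. A sketch of factoring this into a helper, assuming the same class context plus java.nio.charset.StandardCharsets (the helper name is an assumption, not part of the original class):

private static void writeTextFile(FileSystem fs, Path path, String content) throws IOException {
    // overwrite = true, so metadata left over from a previous run is replaced
    try (BufferedWriter writer = new BufferedWriter(
            new OutputStreamWriter(fs.create(path, true), StandardCharsets.UTF_8))) {
        writer.write(content);
    }
}

try-with-resources also closes the writer when serialization of a later file fails, which the original sequence of explicit close() calls does not guarantee.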

From source file: com.streamsets.pipeline.stage.destination.hdfs.writer.DefaultFsHelper.java

License: Apache License

@Override
public OutputStream create(FileSystem fs, Path path) throws IOException {
    return new HflushableWrapperOutputStream(fs.create(path, false));
}

From source file: com.streamsets.pipeline.stage.destination.hdfs.writer.RecordWriterManager.java

License: Apache License

RecordWriter createWriter(FileSystem fs, Path path, long timeToLiveMillis) throws StageException, IOException {
    switch (fileType) {
    case TEXT:
        OutputStream os = fs.create(path, false);
        if (compressionCodec != null) {
            try {
                os = compressionCodec.createOutputStream(os);
            } catch (UnsatisfiedLinkError unsatisfiedLinkError) {
                throw new StageException(Errors.HADOOPFS_46, compressionType.name(), unsatisfiedLinkError,
                        unsatisfiedLinkError);
            }
        }
        return new RecordWriter(path, timeToLiveMillis, os, generatorFactory);
    case SEQUENCE_FILE:
        Utils.checkNotNull(compressionType, "compressionType");
        Utils.checkNotNull(keyEL, "keyEL");
        Utils.checkArgument(compressionCodec == null || compressionType != SequenceFile.CompressionType.NONE,
                "if using a compressionCodec, compressionType cannot be NULL");
        try {
            SequenceFile.Writer writer = SequenceFile.createWriter(fs, hdfsConf, path, Text.class, Text.class,
                    compressionType, compressionCodec);
            return new RecordWriter(path, timeToLiveMillis, writer, keyEL, generatorFactory, context);
        } catch (UnsatisfiedLinkError unsatisfiedLinkError) {
            throw new StageException(Errors.HADOOPFS_46, compressionType.name(), unsatisfiedLinkError,
                    unsatisfiedLinkError);
        }
    default:
        throw new UnsupportedOperationException(Utils.format("Unsupported file Type '{}'", fileType));
    }
}
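
In the TEXT branch, the codec (when configured) wraps the raw stream returned by fs.create. A minimal sketch of that wiring in isolation, assuming fs, path, and a Hadoop Configuration named conf are in scope; GzipCodec is a stand-in, not necessarily how RecordWriterManager resolves its codec:

import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.util.ReflectionUtils;

// ReflectionUtils.newInstance injects the Configuration the codec requires
CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
OutputStream os = codec.createOutputStream(fs.create(path, false));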

From source file: com.streamsets.pipeline.stage.destination.hdfs.writer.TestRecordWriter.java

License: Apache License

@Test
public void testTextFile() throws Exception {
    FileSystem fs = getRawLocalFileSystem();
    try {
        Path file = new Path(getTestDir(), "file.txt");
        OutputStream os = fs.create(file, false);
        long timeToLive = 10000;
        long expires = System.currentTimeMillis() + timeToLive;
        RecordWriter writer = new RecordWriter(file, timeToLive, os, new DummyDataGeneratorFactory(null));
        Assert.assertTrue(writer.isTextFile());
        Assert.assertFalse(writer.isSeqFile());
        Assert.assertEquals(file, writer.getPath());
        Assert.assertTrue(expires <= writer.getExpiresOn());
        Assert.assertTrue(writer.toString().contains(file.toString()));
        Record record = RecordCreator.create();
        record.set(Field.create("a"));
        writer.write(record);
        record.set(Field.create("z"));
        writer.write(record);
        Assert.assertFalse(writer.isClosed());
        writer.flush();
        Assert.assertTrue(writer.getLength() > 2);
        Assert.assertEquals(2, writer.getRecords());
        writer.close();
        Assert.assertTrue(writer.isClosed());
        try {
            writer.write(record);
            Assert.fail();
        } catch (IOException ex) {
            //NOP
        }
        BufferedReader reader = new BufferedReader(new InputStreamReader(fs.open(file)));
        Assert.assertEquals("a", reader.readLine());
        Assert.assertEquals("z", reader.readLine());
        Assert.assertNull(reader.readLine());
        reader.close();
    } finally {
        fs.close();
    }
}

From source file: com.streamsets.pipeline.stage.destination.hdfs.writer.WholeFileFormatFsHelper.java

License: Apache License

@Override
public OutputStream create(FileSystem fs, Path path) throws IOException {
    //Make sure if the tmp file already exists, overwrite it
    return fs.create(path, true);
}
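
This is the opposite choice from DefaultFsHelper above: passing true means a temporary file left behind by a previous failed run is replaced rather than making create fail.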

From source file: com.talis.hadoop.rdf.ZipUtils.java

License: Apache License

public static void packZipFile(Path perm, Path temp, Configuration conf, FileSystem fs) throws IOException {
    FSDataOutputStream out = null;
    ZipOutputStream zos = null;
    int zipCount = 0;
    LOG.info("Packing zip file for " + perm);
    try {
        out = fs.create(perm, false);
        zos = new ZipOutputStream(out);
        String name = perm.getName().replaceAll(".zip$", "");
        LOG.info("adding index directory" + temp);
        zipCount = zipDirectory(conf, zos, "", temp.toString(), temp);
    } catch (Throwable t) {
        LOG.error("packZipFile exception", t);
        if (t instanceof RuntimeException) {
            throw (RuntimeException) t;
        }
        if (t instanceof IOException) {
            throw (IOException) t;
        }
        throw new IOException(t);
    } finally {
        if (zos != null) {
            if (zipCount == 0) {
                // If no entries were written, close only the underlying stream
                // before deleting, since closing an empty ZipOutputStream throws.
                LOG.error("No entries written to zip file " + perm);
                out.close();
                fs.delete(perm, false);
            } else {
                LOG.info(String.format("Wrote %d items to %s for %s", zipCount, perm, temp));
                zos.close();
            }
        }
    }
}

From source file: com.toy.Client.java

License: Apache License

private void uploadDepAndRegister(Map<String, LocalResource> localResources, ApplicationId appId, FileSystem fs,
        String depname) throws IOException {
    File dep = new File(depname);
    if (!dep.exists())
        throw new IOException(dep.getAbsolutePath() + " does not exist");
    Path dst = new Path(fs.getHomeDirectory(), Constants.TOY_PREFIX + appId.toString() + "/" + dep.getName());
    LOG.info("Copy {} from local filesystem to {} and add to local environment", dep.getName(), dst.toUri());
    FileInputStream input = new FileInputStream(dep);
    final FSDataOutputStream outputStream = fs.create(dst, true);
    ByteStreams.copy(input, outputStream);
    input.close();
    outputStream.close();
    LocalResource amJarRsrc = Records.newRecord(LocalResource.class);
    amJarRsrc.setType(LocalResourceType.FILE);
    amJarRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
    amJarRsrc.setResource(ConverterUtils.getYarnUrlFromPath(dst));
    FileStatus destStatus = fs.getFileStatus(dst);
    amJarRsrc.setTimestamp(destStatus.getModificationTime());
    amJarRsrc.setSize(destStatus.getLen());
    localResources.put(dep.getName(), amJarRsrc);

}
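
The manual stream copy above can also be delegated to the FileSystem API; a sketch of the same upload step (the LocalResource registration that follows would be unchanged):

// delSrc = false keeps the local file; overwrite = true replaces any stale copy
fs.copyFromLocalFile(false, true, new Path(dep.getAbsolutePath()), dst);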