Example usage for org.apache.hadoop.fs FileSystem create

Introduction

On this page you can find usage examples for org.apache.hadoop.fs FileSystem create.

Prototype

public FSDataOutputStream create(Path f, short replication) throws IOException 

Document

Create an FSDataOutputStream at the indicated Path.
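
For reference, here is a minimal, self-contained sketch of calling this overload directly. It is not taken from the projects listed below; the path, replication factor, and file contents are illustrative values you would replace with your own. Note that FileSystem exposes several create overloads, and the usage examples that follow also call variants that take a boolean overwrite flag or a Progressable.

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Illustrative output path; point this at a real HDFS location.
        Path file = new Path("/tmp/create-example.txt");
        FileSystem fs = file.getFileSystem(conf);
        // create(Path, short) opens the file for writing with the given replication
        // factor, overwriting any existing file at that path.
        FSDataOutputStream out = fs.create(file, (short) 2);
        try {
            out.write("hello hdfs".getBytes(StandardCharsets.UTF_8));
        } finally {
            out.close();
        }
    }
}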

Usage

From source file:com.netflix.aegisthus.tools.Utils.java

License:Apache License

public static void copy(Path from, Path to, boolean snappy, Configuration conf) throws IOException {
    FileSystem fromFs = from.getFileSystem(conf);
    FileSystem toFs = to.getFileSystem(conf);

    InputStream in = fromFs.open(from);
    OutputStream out = toFs.create(to, false);
    try {
        if (snappy) {
            in = new SnappyInputStream2(in);
        }
        byte[] buffer = new byte[65536];
        int bytesRead;
        while ((bytesRead = in.read(buffer)) >= 0) {
            if (bytesRead > 0) {
                out.write(buffer, 0, bytesRead);
            }
        }
    } finally {
        in.close();
        out.close();
    }
}

From source file:com.netflix.aegisthus.tools.Utils.java

License:Apache License

public static void copy(Path from, Path to, boolean snappy, TaskAttemptContext ctx) throws IOException {
    FileSystem fromFs = from.getFileSystem(ctx.getConfiguration());
    FileSystem toFs = to.getFileSystem(ctx.getConfiguration());

    if (!to.isAbsolute()) {
        to = new Path(ctx.getConfiguration().get("mapred.working.dir"), to);
    }
    if (!snappy && onSameHdfs(ctx.getConfiguration(), from, to)) {
        LOG.info(String.format("renaming %s to %s", from, to));
        toFs.mkdirs(to.getParent());
        toFs.rename(from, to);
        return;
    }

    InputStream in = fromFs.open(from);
    OutputStream out = toFs.create(to, false);
    try {
        if (snappy) {
            in = new SnappyInputStream2(in);
        }
        byte[] buffer = new byte[65536];
        int bytesRead;
        int count = 0;
        while ((bytesRead = in.read(buffer)) >= 0) {
            if (bytesRead > 0) {
                out.write(buffer, 0, bytesRead);
            }
            if (count++ % 50 == 0) {
                ctx.progress();
            }
        }
    } finally {
        in.close();
        out.close();
    }
}

From source file:com.netflix.bdp.s3.S3MultipartOutputCommitter.java

License:Apache License

protected void commitTaskInternal(final TaskAttemptContext context, Iterable<FileStatus> taskOutput)
        throws IOException {
    Configuration conf = context.getConfiguration();
    final AmazonS3 client = getClient(getOutputPath(context), conf);

    final Path attemptPath = getTaskAttemptPath(context);
    FileSystem attemptFS = attemptPath.getFileSystem(conf);

    // add the commits file to the wrapped committer's task attempt location.
    // this complete file will be committed by the wrapped committer at the end
    // of this method.
    Path commitsAttemptPath = wrappedCommitter.getTaskAttemptPath(context);
    FileSystem commitsFS = commitsAttemptPath.getFileSystem(conf);

    // keep track of unfinished commits in case one fails. if something fails,
    // we will try to abort the ones that had already succeeded.
    final List<S3Util.PendingUpload> commits = Lists.newArrayList();

    boolean threw = true;
    ObjectOutputStream completeUploadRequests = new ObjectOutputStream(
            commitsFS.create(commitsAttemptPath, false));
    try {
        Tasks.foreach(taskOutput).stopOnFailure().throwFailureWhenFinished().executeWith(threadPool)
                .run(new Task<FileStatus, IOException>() {
                    @Override
                    public void run(FileStatus stat) throws IOException {
                        File localFile = new File(URI.create(stat.getPath().toString()).getPath());
                        if (localFile.length() <= 0) {
                            return;
                        }
                        String relative = Paths.getRelativePath(attemptPath, stat.getPath());
                        String partition = getPartition(relative);
                        String key = getFinalKey(relative, context);
                        S3Util.PendingUpload commit = S3Util.multipartUpload(client, localFile, partition,
                                getBucket(context), key, uploadPartSize);
                        commits.add(commit);
                    }
                });

        for (S3Util.PendingUpload commit : commits) {
            completeUploadRequests.writeObject(commit);
        }

        threw = false;

    } finally {
        if (threw) {
            Tasks.foreach(commits).run(new Task<S3Util.PendingUpload, RuntimeException>() {
                @Override
                public void run(S3Util.PendingUpload commit) {
                    S3Util.abortCommit(client, commit);
                }
            });
            try {
                attemptFS.delete(attemptPath, true);
            } catch (Exception e) {
                LOG.error("Failed while cleaning up failed task commit: ", e);
            }
        }
        Closeables.close(completeUploadRequests, threw);
    }

    wrappedCommitter.commitTask(context);

    attemptFS.delete(attemptPath, true);
}

From source file:com.panguso.lc.analysis.format.mapreduce.TextOutputFormat.java

License:Open Source License

/**
 * @param job the task attempt context
 * @throws IOException IOException
 * @throws InterruptedException InterruptedException
 */
public RecordWriter<K, V> getRecordWriter(TaskAttemptContext job) throws IOException, InterruptedException {
    Configuration conf = job.getConfiguration();
    boolean isCompressed = getCompressOutput(job);
    String keyValueSeparator = conf.get("mapred.textoutputformat.separator", separate);
    CompressionCodec codec = null;
    String extension = "";
    if (isCompressed) {
        Class<? extends CompressionCodec> codecClass = getOutputCompressorClass(job, GzipCodec.class);
        codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, conf);
        extension = codec.getDefaultExtension();
    }
    Path file = getDefaultWorkFile(job, extension);
    FileSystem fs = file.getFileSystem(conf);
    if (!isCompressed) {
        FSDataOutputStream fileOut = fs.create(file, false);
        return new LineRecordWriter<K, V>(fileOut, keyValueSeparator);
    } else {
        FSDataOutputStream fileOut = fs.create(file, false);
        return new LineRecordWriter<K, V>(new DataOutputStream(codec.createOutputStream(fileOut)),
                keyValueSeparator);
    }
}

From source file:com.pivotal.gfxd.demo.mapreduce.LoadAverage.java

License:Open Source License

private void writeLastStart(FileSystem hdfs, long timestamp) throws IOException {
    Path file = new Path("/sensorStore/last_mapreduce_timestamp");
    OutputStream os = hdfs.create(file, true);
    BufferedWriter br = new BufferedWriter(new OutputStreamWriter(os));
    br.write(Long.toString(timestamp));
    br.close();
}

From source file:com.ricemap.spateDB.mapred.TextOutputFormat.java

License:Apache License

public RecordWriter<K, V> getRecordWriter(FileSystem ignored, JobConf job, String name, Progressable progress)
        throws IOException {
    boolean isCompressed = getCompressOutput(job);
    String keyValueSeparator = job.get("mapred.textoutputformat.separator", "\t");
    if (!isCompressed) {
        Path file = FileOutputFormat.getTaskOutputPath(job, name);
        FileSystem fs = file.getFileSystem(job);
        FSDataOutputStream fileOut = fs.create(file, progress);
        return new LineRecordWriter<K, V>(fileOut, keyValueSeparator);
    } else {
        Class<? extends CompressionCodec> codecClass = getOutputCompressorClass(job, GzipCodec.class);
        // create the named codec
        CompressionCodec codec = ReflectionUtils.newInstance(codecClass, job);
        // build the filename including the extension
        Path file = FileOutputFormat.getTaskOutputPath(job, name + codec.getDefaultExtension());
        FileSystem fs = file.getFileSystem(job);
        FSDataOutputStream fileOut = fs.create(file, progress);
        return new LineRecordWriter<K, V>(new DataOutputStream(codec.createOutputStream(fileOut)),
                keyValueSeparator);
    }
}

From source file:com.ricemap.spateDB.operations.Plot.java

License:Apache License

public static <S extends Shape> void plotLocal(Path inFile, Path outFile, S shape, int width, int height,
        Color color, boolean showBorders, boolean showBlockCount, boolean showRecordCount) throws IOException {
    FileSystem inFs = inFile.getFileSystem(new Configuration());
    Prism fileMbr = FileMBR.fileMBRLocal(inFs, inFile, shape);
    LOG.info("FileMBR: " + fileMbr);

    // Adjust width and height to maintain aspect ratio
    if ((fileMbr.x2 - fileMbr.x1) / (fileMbr.y2 - fileMbr.y1) > (double) width / height) {
        // Fix width and change height
        height = (int) ((fileMbr.y2 - fileMbr.y1) * width / (fileMbr.x2 - fileMbr.x1));
    } else {
        width = (int) ((fileMbr.x2 - fileMbr.x1) * height / (fileMbr.y2 - fileMbr.y1));
    }

    double scale2 = (double) width * height / ((double) (fileMbr.x2 - fileMbr.x1) * (fileMbr.y2 - fileMbr.y1));

    // Create an image
    BufferedImage image = new BufferedImage(width, height, BufferedImage.TYPE_INT_ARGB);
    Graphics2D graphics = image.createGraphics();
    Color bg_color = new Color(0, 0, 0, 0);
    graphics.setBackground(bg_color);
    graphics.clearRect(0, 0, width, height);
    graphics.setColor(color);

    long fileLength = inFs.getFileStatus(inFile).getLen();
    ShapeRecordReader<S> reader = new ShapeRecordReader<S>(inFs.open(inFile), 0, fileLength);

    Prism cell = reader.createKey();
    while (reader.next(cell, shape)) {
        drawShape(graphics, shape, fileMbr, width, height, scale2);
    }

    reader.close();
    graphics.dispose();
    FileSystem outFs = outFile.getFileSystem(new Configuration());
    OutputStream out = outFs.create(outFile, true);
    ImageIO.write(image, "png", out);
    out.close();
}

From source file:com.soteradefense.dga.hbse.HBSEMasterCompute.java

License:Apache License

/**
 * Writes the various statistics when computation finishes.
 */
private void writeStats() {
    double percentSelected = (double) totalPivotsSelected / this.maxId;
    int time = (int) ((end.getTime() - start.getTime()) / 1000);

    String defaultFS = getDefaultFS(getConf());
    String filename = defaultFS + "/" + outputDir + "/" + HBSEConfigurationConstants.STATS_CSV;
    Path pt = new Path(filename);
    try {
        FileSystem fs = FileSystem.get(new Configuration());
        BufferedWriter br = new BufferedWriter(new OutputStreamWriter(fs.create(pt, true)));
        try {
            br.write("k: " + this.highBetweennessSet.size() + "\n");
            br.write("delta p: " + this.pivotCount + "\n");
            br.write("cutoff: " + this.stabilityCutoff + "\n");
            br.write("counter: " + this.stabilityCounter + "\n");
            br.write("pivots selected: " + totalPivotsSelected + "\n");
            br.write("percent of graph selected: " + percentSelected + "\n");
            br.write("supsersteps: " + this.getSuperstep() + "\n");
            br.write("cycles: " + this.cycle + "\n");
            br.write("run time: " + time + "\n");
        } catch (IOException e) {
            e.printStackTrace();
            throw new IllegalStateException("Could not write to file: " + filename);
        } finally {
            br.close();
        }

    } catch (IOException e) {
        e.printStackTrace();
        throw new IllegalStateException("Could not open file: " + filename);
    }

}

From source file:com.soteradefense.dga.hbse.HBSEMasterCompute.java

License:Apache License

/**
 * Write the high betweenness set to a file in hdfs
 *
 * @param set A set of vertices that contain the highest highbetweenness value.
 */
private void writeHighBetweennessSet(Set<String> set) {
    String defaultFS = getDefaultFS(getConf());
    String filename = defaultFS + "/" + outputDir + "/" + HBSEConfigurationConstants.FINAL_SET_CSV;
    Path pt = new Path(filename);
    try {
        FileSystem fs = FileSystem.get(new Configuration());
        BufferedWriter br = new BufferedWriter(new OutputStreamWriter(fs.create(pt, true)));
        try {
            for (String id : set) {
                br.write(id + "\n");
            }
        } catch (IOException e) {
            e.printStackTrace();
            throw new IllegalStateException("Could not write to file: " + filename);
        } finally {
            br.close();
        }
    } catch (IOException e) {
        e.printStackTrace();
        throw new IllegalStateException("Could not open file: " + filename);
    }

}

From source file:com.soteradefense.dga.louvain.giraph.LouvainMasterCompute.java

License:Apache License

private void writeFile(String path, String message) {
    Path pt = new Path(path);
    logger.debug("Writing file out to {}, message {}", path, message);
    try {
        FileSystem fs = FileSystem.get(new Configuration());
        BufferedWriter br = new BufferedWriter(new OutputStreamWriter(fs.create(pt, true)));
        br.write(message);
        br.close();
    } catch (IOException e) {
        e.printStackTrace();
        throw new IllegalStateException("Could not write to file: " + path);
    }
}