Example usage for org.apache.hadoop.fs FileSystem exists

Introduction

On this page you can find example usages of org.apache.hadoop.fs.FileSystem#exists.

Prototype

public boolean exists(Path f) throws IOException 

Document

Check if a path exists.
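
Before the project-specific examples below, here is a minimal, self-contained sketch of the call. The default Configuration and the path /tmp/example are illustrative assumptions, not taken from any of the projects cited on this page.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ExistsExample {
    public static void main(String[] args) throws IOException {
        // Resolve the default file system from the configuration (fs.defaultFS).
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path path = new Path("/tmp/example"); // hypothetical path
        // exists() returns true if the path names a file or a directory.
        if (fs.exists(path)) {
            System.out.println(path + " exists");
        } else {
            System.out.println(path + " does not exist");
        }
    }
}

Note that an exists() check followed by a dependent create, open, or delete is a check-then-act sequence: on a shared file system another client may change the path in between, so callers should still be prepared for an IOException on the follow-up call, as several of the examples below are.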

Usage

From source file:com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.HoplogUtil.java

License:Apache License

public static long readCleanUpIntervalMillis(FileSystem fs, Path cleanUpIntervalPath) throws IOException {
    if (fs.exists(cleanUpIntervalPath)) {
        FSDataInputStream input = new FSDataInputStream(fs.open(cleanUpIntervalPath));
        long intervalDurationMillis = input.readLong();
        input.close();
        return intervalDurationMillis;
    } else {
        return -1L;
    }
}

From source file:com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.HoplogUtil.java

License:Apache License

public static void exposeCleanupIntervalMillis(FileSystem fs, Path path, long intervalDurationMillis) {
    FSDataInputStream input = null;
    FSDataOutputStream output = null;
    try {
        if (fs.exists(path)) {
            input = new FSDataInputStream(fs.open(path));
            if (intervalDurationMillis == input.readLong()) {
                input.close();
                return;
            }
            input.close();
            fs.delete(path, true);
        }
        output = fs.create(path);
        output.writeLong(intervalDurationMillis);
        output.close();
    } catch (IOException e) {
        // Best effort: give up silently if the interval cannot be read or written.
        return;
    } finally {
        try {
            if (input != null) {
                input.close();
            }
            if (output != null) {
                output.close();
            }
        } catch (IOException e2) {
            // Ignore failures while closing the streams.
        }
    }
}

From source file:com.github.gaoyangthu.demo.mapred.PiEstimator.java

License:Apache License

/**
 * Run a map/reduce job for estimating Pi.
 *
 * @return the estimated value of Pi
 */
public static BigDecimal estimate(int numMaps, long numPoints, JobConf jobConf) throws IOException {
    //setup job conf
    jobConf.setJobName(PiEstimator.class.getSimpleName());

    jobConf.setInputFormat(SequenceFileInputFormat.class);

    jobConf.setOutputKeyClass(BooleanWritable.class);
    jobConf.setOutputValueClass(LongWritable.class);
    jobConf.setOutputFormat(SequenceFileOutputFormat.class);

    jobConf.setMapperClass(PiMapper.class);
    jobConf.setNumMapTasks(numMaps);

    jobConf.setReducerClass(PiReducer.class);
    jobConf.setNumReduceTasks(1);

    // turn off speculative execution, because DFS doesn't handle
    // multiple writers to the same file.
    jobConf.setSpeculativeExecution(false);

    //setup input/output directories
    final Path inDir = new Path(TMP_DIR, "in");
    final Path outDir = new Path(TMP_DIR, "out");
    FileInputFormat.setInputPaths(jobConf, inDir);
    FileOutputFormat.setOutputPath(jobConf, outDir);

    final FileSystem fs = FileSystem.get(jobConf);
    if (fs.exists(TMP_DIR)) {
        throw new IOException(
                "Tmp directory " + fs.makeQualified(TMP_DIR) + " already exists.  Please remove it first.");
    }
    if (!fs.mkdirs(inDir)) {
        throw new IOException("Cannot create input directory " + inDir);
    }

    try {
        //generate an input file for each map task
        for (int i = 0; i < numMaps; ++i) {
            final Path file = new Path(inDir, "part" + i);
            final LongWritable offset = new LongWritable(i * numPoints);
            final LongWritable size = new LongWritable(numPoints);
            final SequenceFile.Writer writer = SequenceFile.createWriter(fs, jobConf, file, LongWritable.class,
                    LongWritable.class, CompressionType.NONE);
            try {
                writer.append(offset, size);
            } finally {
                writer.close();
            }
            System.out.println("Wrote input for Map #" + i);
        }

        //start a map/reduce job
        System.out.println("Starting Job");
        final long startTime = System.currentTimeMillis();
        JobClient.runJob(jobConf);
        final double duration = (System.currentTimeMillis() - startTime) / 1000.0;
        System.out.println("Job Finished in " + duration + " seconds");

        //read outputs
        Path inFile = new Path(outDir, "reduce-out");
        LongWritable numInside = new LongWritable();
        LongWritable numOutside = new LongWritable();
        SequenceFile.Reader reader = new SequenceFile.Reader(fs, inFile, jobConf);
        try {
            reader.next(numInside, numOutside);
        } finally {
            reader.close();
        }

        //compute estimated value
        return BigDecimal.valueOf(4).setScale(20).multiply(BigDecimal.valueOf(numInside.get()))
                .divide(BigDecimal.valueOf(numMaps)).divide(BigDecimal.valueOf(numPoints));
    } finally {
        fs.delete(TMP_DIR, true);
    }
}

From source file:com.github.libsml.commons.util.HadoopUtils.java

License:Apache License

public static void mkdir(Path path, boolean overwrite) throws IOException {

    Configuration config = new Configuration();
    FileSystem fs = path.getFileSystem(config);
    if (fs.exists(path) && !overwrite) {
        throw new IllegalStateException("Mkdir exception:path=" + path.toString() + " exists");
    }
    if (fs.exists(path)) {
        fs.delete(path, true);
    }
    fs.mkdirs(path);
    fs.close();
}

From source file:com.github.libsml.commons.util.HadoopUtils.java

License:Apache License

public static void delete(Configuration conf, Iterable<Path> paths) throws IOException {
    if (conf == null) {
        conf = new Configuration();
    }
    for (Path path : paths) {
        FileSystem fs = path.getFileSystem(conf);
        if (fs.exists(path)) {
            fs.delete(path, true);
        }
    }
}

From source file:com.github.seqware.queryengine.plugins.hbasemr.MRHBasePluginRunner.java

License:Open Source License

public File handleFileResult(Path path) {
    FileSystem fs = null;
    try {
        Path outputPartPath = new Path(path, "part-r-00000");
        // copy file from HDFS to local temporary file
        Logger.getLogger(FeaturesByFilterPlugin.class.getName())
                .info("Source file is " + outputPartPath.toString());
        Configuration conf = new Configuration();

        HBaseStorage.configureHBaseConfig(conf);

        HBaseConfiguration.addHbaseResources(conf);
        fs = FileSystem.get(conf);
        File createTempFile = File.createTempFile("vcf", "out");

        createTempFile.delete();
        Path outPath = new Path(createTempFile.toURI());
        FileSystem localSystem = FileSystem.get(new Configuration());

        Logger.getLogger(FeaturesByFilterPlugin.class.getName())
                .info("Destination file is " + outPath.toString());
        if (!fs.exists(outputPartPath)) {
            Logger.getLogger(FeaturesByFilterPlugin.class.getName()).fatal("Input file not found");
        }

        if (!fs.isFile(outputPartPath)) {
            Logger.getLogger(FeaturesByFilterPlugin.class.getName()).fatal("Input should be a file");
        }

        if (localSystem.exists(outPath)) {
            Logger.getLogger(FeaturesByFilterPlugin.class.getName()).fatal("Output already exists");
        }
        // doesn't quite work yet, no time to finish before poster, check results manually on hdfs

        FileUtil.copy(fs, outputPartPath, localSystem, outPath, true, true, conf);
        return new File(outPath.toUri());
    } catch (IOException ex) {
        Logger.getLogger(VCFDumperPlugin.class.getName()).fatal(null, ex);
    } finally {
        if (fs != null) {
            try {
                fs.delete(path, true);
            } catch (IOException ex) {
                Logger.getLogger(VCFDumperPlugin.class.getName())
                        .warn("IOException when clearing after text output", ex);
            }
        }
    }

    return null;
}

From source file:com.google.cloud.dataflow.contrib.hadoop.HadoopFileSink.java

License:Apache License

@Override
public void validate(PipelineOptions options) {
    try {
        Job job = jobInstance();
        FileSystem fs = FileSystem.get(job.getConfiguration());
        Preconditions.checkState(!fs.exists(new Path(path)), "Output path " + path + " already exists");
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}

From source file:com.google.cloud.dataflow.sdk.io.hdfs.HDFSFileSink.java

License:Apache License

@Override
public void validate(PipelineOptions options) {
    if (validate) {
        try {
            FileSystem fs = FileSystem.get(new URI(path), jobInstance().getConfiguration());
            checkState(!fs.exists(new Path(path)), "Output path %s exists", path);
        } catch (IOException | URISyntaxException e) {
            throw new RuntimeException(e);
        }
    }
}

From source file:com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystemIntegrationTest.java

License:Open Source License

@Test
public void testIOExceptionIsThrowAfterClose() throws IOException, URISyntaxException {
    Configuration conf = getConfigurationWtihImplementation();

    URI fsUri = new URI(String.format("gs://%s/", bucketName));

    FileSystem fs1 = FileSystem.get(fsUri, conf);
    FileSystem fs2 = FileSystem.get(fsUri, conf);

    junit.framework.Assert.assertSame(fs1, fs2);

    fs1.close();

    expectedException.expect(IOException.class);

    fs2.exists(new Path("/SomePath/That/Doesnt/Matter"));
}

From source file:com.google.mr4c.content.HDFSContentFactory.java

License:Open Source License

public boolean exists(URI uri) throws IOException {
    Path path = toPath(uri);
    FileSystem fs = FileSystem.get(uri, m_config);
    return fs.exists(path);
}