Example usage for org.apache.hadoop.fs FileSystem getLocal

List of usage examples for org.apache.hadoop.fs FileSystem getLocal

Introduction

On this page you can find example usages of org.apache.hadoop.fs FileSystem getLocal.

Prototype

public static LocalFileSystem getLocal(Configuration conf) throws IOException 

Document

Get the local FileSystem.
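
As a quick illustration of the prototype above, here is a minimal, self-contained sketch (the class name and the /tmp path are hypothetical, not taken from any of the sources below) that obtains the local FileSystem and round-trips a small string through it:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class GetLocalExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();

        // Obtain a handle to the local file system; it is backed by the
        // machine's own disk, so no HDFS cluster is required.
        LocalFileSystem local = FileSystem.getLocal(conf);

        Path p = new Path("/tmp/getlocal-example.txt");

        // Write a small record, then read it back.
        try (FSDataOutputStream out = local.create(p, true)) {
            out.writeUTF("hello from the local FileSystem");
        }
        try (FSDataInputStream in = local.open(p)) {
            System.out.println(in.readUTF());
        }

        local.delete(p, false);
    }
}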

Usage

From source file:Script.java

License:Open Source License

/** Evaluates the Javascript expressions contained in a
 *  DataInputStream serialized file and passed over the distributed
 *  cache.
 *  @param conf       The Hadoop configuration object
 *  @param pathString The path string of the cached file
 *  @param name       The name of the file added to the cache
 *  @return           The result of the Javascript evaluation
 */
public Object evalCache(Configuration conf, String pathString, String name) throws IOException {
    FSDataInputStream in;
    FileSystem fs = FileSystem.getLocal(conf);
    try {
        Path path = new Path(pathString);
        in = fs.open(path);
    } catch (FileNotFoundException e) { // must be running in standalone mode
        Path path = new Path(Eggshell.SCRIPT_DIR + "/" + name);
        in = fs.open(path); // read it from the eggshell script directory instead
    }
    String buf = in.readUTF();
    in.close();
    return evalString(buf);
}

From source file:TestBytesBloomFilter.java

License:Apache License

public void testSetSanity() throws IOException {
    FileSystem local = FileSystem.getLocal(new Configuration());

    BytesBloomFilter set = new BytesBloomFilter(1000000, 4);
    byte[] arr1 = new byte[] { 1, 2, 3, 4, 5, 6, 7 };
    byte[] arr2 = new byte[] { 11, 12, 5, -2 };
    byte[] arr3 = new byte[] { 3, 4, 5 };
    set.add(arr1);
    set.add(arr2);

    for (byte i = 0; i < (byte) 125; i++) {
        set.add(new byte[] { i });
    }

    assertTrue(set.mayContain(arr1));
    assertTrue(set.mayContain(arr2));

    for (byte i = 0; i < (byte) 125; i++) {
        assertTrue(set.mayContain(new byte[] { i }));
    }

    //technically this assertion could fail (a false positive is possible), but the probability is low and this is a sanity check
    assertFalse(set.mayContain(arr3));

    //now test that we can write and read from file just fine
    local.delete(new Path("/tmp/filter-test.bloomfilter"), false);
    DataOutputStream os = new DataOutputStream(new FileOutputStream("/tmp/filter-test.bloomfilter"));
    set.write(os);
    os.close();

    BytesBloomFilter set2 = new BytesBloomFilter();
    DataInputStream is = new DataInputStream(new FileInputStream("/tmp/filter-test.bloomfilter"));
    set2.readFields(is);
    is.close();

    assertTrue(set2.mayContain(arr1));
    assertTrue(set2.mayContain(arr2));

    for (byte i = 0; i < (byte) 125; i++) {
        assertTrue(set2.mayContain(new byte[] { i }));
    }

    //technically this assertion could fail (a false positive is possible), but the probability is low and this is a sanity check
    assertFalse(set2.mayContain(arr3));

}

From source file:LobFileStressTest.java

License:Apache License

private void writeIntegerFile(boolean compress) throws Exception {
    boolean passed = false;
    try {
        System.out.print("Writing integers file. compress=" + compress + ". ");
        Path p = getPath(compress);
        FileSystem fs = FileSystem.getLocal(conf);
        if (fs.exists(p)) {
            fs.delete(p, false);
        }
        String codecName = compress ? "deflate" : null;
        LobFile.Writer w = LobFile.create(p, conf, false, codecName);

        int numRecords = getNumRecords(compress);
        for (int i = 0; i < numRecords; i++) {
            setLastRecordPos(w.tell(), compress);
            OutputStream os = w.writeBlobRecord(0);
            DataOutputStream dos = new DataOutputStream(os);
            dos.writeInt(i);
            dos.close();
            os.close();
        }

        w.close();
        System.out.println("PASS");
        passed = true;
    } finally {
        if (!passed) {
            allPassed = false;
            System.out.println("FAIL");
        }
    }
}

From source file:LobFileStressTest.java

License:Apache License

private void testBigFile(boolean compress) throws Exception {
    // Write a file containing five records of 5 GB each.

    final int NUM_RECORDS = 5;
    boolean passed = false;

    try {
        System.out.print("Testing large file operations. compress=" + compress + ". ");

        Path p = getBigFilePath(compress);
        long[] startOffsets = new long[NUM_RECORDS];

        // Write the file. Five records, 5 GB apiece.
        System.out.print("Testing write. ");
        FileSystem fs = FileSystem.getLocal(conf);
        if (fs.exists(p)) {
            fs.delete(p, false);
        }
        String codecName = compress ? "deflate" : null;
        System.out.println("record size: " + LARGE_RECORD_LEN);
        LobFile.Writer w = LobFile.create(p, conf, false, codecName);
        for (int i = 0; i < NUM_RECORDS; i++) {
            startOffsets[i] = w.tell();
            System.out.println("Starting record " + i + " at " + startOffsets[i]);
            OutputStream os = w.writeBlobRecord(0);
            for (long v = 0; v < LARGE_RECORD_LEN; v++) {
                long byteVal = (((long) i) + v) & 0xFF;
                os.write((int) byteVal);
            }
            os.close();
        }
        w.close();
        System.out.println("PASS");

        // Iterate past three records, read the fourth.
        System.out.print("Testing iterated skipping. ");
        LobFile.Reader r = LobFile.open(p, conf);
        for (int i = 0; i < 4; i++) {
            r.next();
        }

        checkBigRecord(r, 3);
        System.out.println("PASS");

        // Seek directly to record 2, read it through.
        System.out.print("Testing large backward seek. ");
        r.seek(startOffsets[2]);
        r.next();
        checkBigRecord(r, 2);
        System.out.println("PASS");

        passed = true;
    } finally {
        if (!passed) {
            allPassed = false;
            System.out.println("FAIL");
        }
    }
}

From source file:Assignment3_P2_MergeStockAverageCount.StockPriceMergeDriver.java

/**
 * @param args the command line arguments
 */
public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
    Configuration conf = new Configuration();

    // local file system handle
    FileSystem local = FileSystem.getLocal(conf);

    // hdfs file system handle
    FileSystem hdfs = FileSystem.get(conf);

    // local input directory
    Path inputDir = new Path(args[0]);

    // HDFS path for the merged output file (later used as the job's input)
    Path inputDir1 = new Path(args[1]);

    // local input files in local dir
    FileStatus[] inputFiles = local.listStatus(inputDir);

    // output stream for the merged file on HDFS
    FSDataOutputStream out = hdfs.create(inputDir1);

    // open each file and extract contents of file
    for (int i = 0; i < inputFiles.length; i++) {
        System.out.println("File name ----------------------------------------------------------------> "
                + inputFiles[i].getPath().getName());
        FSDataInputStream in = local.open(inputFiles[i].getPath());
        byte buffer[] = new byte[256];
        int bytesRead = 0;

        // extract all contents of file
        while ((bytesRead = in.read(buffer)) > 0) {
            out.write(buffer, 0, bytesRead);
        }

        // close input stream
        in.close();
    }

    // close the merged output stream so all data is flushed to HDFS
    out.close();

    Job job = Job.getInstance(conf, "Average Stock Price");
    job.setJarByClass(StockPriceMergeDriver.class);
    job.setMapperClass(StockPriceMerge_Mapper.class);
    job.setCombinerClass(StockPriceMerge_Reducer.class);
    job.setReducerClass(StockPriceMerge_Reducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(FloatWritable.class);
    FileInputFormat.addInputPath(job, new Path(args[1])); // above programs output will be input for mapper
    FileOutputFormat.setOutputPath(job, new Path(args[2]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
}

From source file:audr.text.utils.FileUtils.java

License:Open Source License

/**
 * Uploads a local file to HDFS.
 *
 * @param localFile
 *            path of the source file on the local file system
 * @param hadoopFile
 *            destination path on HDFS
 */
public static void uploadFile2HDFS(String localFile, String hadoopFile) {
    try {
        Configuration conf = new Configuration();
        FileSystem src = FileSystem.getLocal(conf);
        FileSystem dst = FileSystem.get(conf);
        Path srcpath = new Path(localFile);
        Path dstpath = new Path(hadoopFile);
        FileUtil.copy(src, srcpath, dst, dstpath, false, conf);
    } catch (Exception e) {
        e.printStackTrace();
    }
}

From source file:backup.namenode.NameNodeBackupBlockCheckProcessor.java

License:Apache License

private Path getLocalSort(String name) throws IOException {
    Path sortDir = conf.getLocalPath(DFS_BACKUP_NAMENODE_LOCAL_DIR_KEY, name);
    LocalFileSystem local = FileSystem.getLocal(conf);
    sortDir = sortDir.makeQualified(local.getUri(), local.getWorkingDirectory());
    local.delete(sortDir, true);
    return sortDir;
}

From source file:backup.store.ExternalExtendedBlockSort.java

License:Apache License

private synchronized void sortIfNeeded() throws IOException {
    for (String blockPoolId : writers.keySet()) {
        Path output = getOutputFilePath(blockPoolId);
        Path input = getInputFilePath(blockPoolId);
        FileSystem fileSystem = output.getFileSystem(conf);
        if (!fileSystem.exists(output) && fileSystem.exists(input)) {
            LocalFileSystem local = FileSystem.getLocal(conf);
            SequenceFile.Sorter sorter = new Sorter(local, ComparableBlock.class, dataClass, conf);
            sorter.sort(input, output);
        }
    }
}

From source file:cascading.flow.hadoop.util.HadoopUtil.java

License:Open Source License

public static Thread getHDFSShutdownHook() {
    Exception caughtException;

    try {
        // we must init the FS so the finalizer is registered
        FileSystem.getLocal(new JobConf());

        Field field = FileSystem.class.getDeclaredField("clientFinalizer");
        field.setAccessible(true);

        Thread finalizer = (Thread) field.get(null);

        if (finalizer != null)
            Runtime.getRuntime().removeShutdownHook(finalizer);

        return finalizer;
    } catch (NoSuchFieldException exception) {
        caughtException = exception;
    } catch (IllegalAccessException exception) {
        caughtException = exception;
    } catch (IOException exception) {
        caughtException = exception;
    }

    LOG.debug("unable to find and remove client hdfs shutdown hook, received exception: {}",
            caughtException.getClass().getName());

    return null;
}

From source file:cascading.flow.hadoop.util.HadoopUtil.java

License:Open Source License

public static LocalFileSystem getLocalFS(Configuration config) {
    try {
        return FileSystem.getLocal(config);
    } catch (IOException exception) {
        throw new FlowException("unable to get handle to underlying filesystem", exception);
    }
}