Example usage for org.apache.hadoop.fs FileSystem getLocal

List of usage examples for org.apache.hadoop.fs FileSystem getLocal

Introduction

On this page you can find example usages of FileSystem.getLocal from org.apache.hadoop.fs.

Prototype

public static LocalFileSystem getLocal(Configuration conf) throws IOException 

Document

Get the local FileSystem.
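
The snippet below is a minimal, self-contained sketch (not taken from any of the projects listed under Usage) showing how the returned LocalFileSystem can be used like any other FileSystem; the class name GetLocalExample and the path under java.io.tmpdir are chosen purely for illustration.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class GetLocalExample {
    public static void main(String[] args) throws IOException {
        // Obtain a handle to the local file system; no cluster is required.
        LocalFileSystem fs = FileSystem.getLocal(new Configuration());

        // Write a small file under the JVM temp directory (illustrative path).
        Path p = new Path(System.getProperty("java.io.tmpdir"), "getlocal-example.txt");
        try (FSDataOutputStream out = fs.create(p, true)) {
            out.writeUTF("hello from the local file system");
        }

        // The usual FileSystem operations all work against local disk.
        System.out.println(p + " exists: " + fs.exists(p));
        fs.delete(p, false);
    }
}

Tests, such as the examples below, commonly rely on this method so that file I/O can be exercised against local disk without starting an HDFS cluster.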

Usage

From source file: com.cloudera.science.avrostarter.MainTest.java

License: Open Source License

@Test
public void testMain() throws Exception {
    Main m = new Main(FileSystem.getLocal(new Configuration()));
    m.run(new String[] { "10", tmpFile.getAbsolutePath() });
}

From source file: com.cloudera.spark.bulkload.TotalOrderPartitioner.java

License: Apache License

/**
   * Read in the partition file and build indexing data structures.
   * If the keytype is {@link BinaryComparable} and
   * <tt>total.order.partitioner.natural.order</tt> is not false, a trie
   * of the first <tt>total.order.partitioner.max.trie.depth</tt>(2) + 1 bytes
   * will be built. Otherwise, keys will be located using a binary search of
   * the partition keyset using the {@link RawComparator}
   * defined for this job. The input file must be sorted with the same
   * comparator and contain {@link Job#getNumReduceTasks()} - 1 keys.
   */
  @SuppressWarnings("unchecked") // keytype from conf not static
  public void setConf(Configuration conf) {
      try {
          this.conf = conf;
          String parts = getPartitionFile(conf);
          final Path partFile = new Path(parts);
          final FileSystem fs = (DEFAULT_PATH.equals(parts)) ? FileSystem.getLocal(conf) // assume in DistributedCache
                  : partFile.getFileSystem(conf);

          Job job = new Job(conf);
          Class<K> keyClass = (Class<K>) job.getMapOutputKeyClass();
          K[] splitPoints = readPartitions(fs, partFile, keyClass, conf);
          if (splitPoints.length != job.getNumReduceTasks() - 1) {
              throw new IOException("Wrong number of partitions in keyset");
          }
          RawComparator<K> comparator = (RawComparator<K>) job.getSortComparator();
          for (int i = 0; i < splitPoints.length - 1; ++i) {
              if (comparator.compare(splitPoints[i], splitPoints[i + 1]) >= 0) {
                  throw new IOException("Split points are out of order");
              }
          }
          boolean natOrder = conf.getBoolean(NATURAL_ORDER, true);
          if (natOrder && BinaryComparable.class.isAssignableFrom(keyClass)) {
              partitions = buildTrie((BinaryComparable[]) splitPoints, 0, splitPoints.length, new byte[0],
                      // Now that blocks of identical splitless trie nodes are 
                      // represented reentrantly, and we develop a leaf for any trie
                      // node with only one split point, the only reason for a depth
                      // limit is to refute stack overflow or bloat in the pathological
                      // case where the split points are long and mostly look like bytes 
                      // iii...iixii...iii   .  Therefore, we make the default depth
                      // limit large but not huge.
                      conf.getInt(MAX_TRIE_DEPTH, 200));
          } else {
              partitions = new BinarySearchNode(splitPoints, comparator);
          }
      } catch (IOException e) {
          throw new IllegalArgumentException("Can't read partitions file", e);
      }
  }

From source file: com.cloudera.sqoop.hive.TestHiveImport.java

License: Apache License

/** Test that we can generate a file containing the DDL and not import. */
@Test
public void testGenerateOnly() throws IOException {
    final String TABLE_NAME = "GenerateOnly";
    setCurTableName(TABLE_NAME);
    setNumCols(1);

    // Figure out where our target generated .q file is going to be.
    SqoopOptions options = getSqoopOptions(getArgv(false, null), new ImportTool());
    Path ddlFile = new Path(new Path(options.getCodeOutputDir()), TABLE_NAME + ".q");
    FileSystem fs = FileSystem.getLocal(new Configuration());

    // If it's already there, remove it before running the test to ensure
    // that it's the current test that generated the file.
    if (fs.exists(ddlFile)) {
        if (!fs.delete(ddlFile, false)) {
            LOG.warn("Could not delete previous ddl file: " + ddlFile);
        }
    }

    // Run a basic import, but specify that we're just generating definitions.
    String[] types = { "INTEGER" };
    String[] vals = { "42" };
    runImportTest(TABLE_NAME, types, vals, null, getCodeGenArgs(), new CodeGenTool());

    // Test that the generated definition file exists.
    assertTrue("Couldn't find expected ddl file", fs.exists(ddlFile));

    Path hiveImportPath = new Path(new Path(options.getWarehouseDir()), TABLE_NAME);
    assertFalse("Import actually happened!", fs.exists(hiveImportPath));
}

From source file: com.cloudera.sqoop.io.TestLobFile.java

License: Apache License

public void setUp() throws Exception {
    conf = new Configuration();
    conf.set("fs.default.name", "file:///");

    fs = FileSystem.getLocal(conf);
    fs.mkdirs(TEMP_BASE_DIR);
}

From source file: com.cloudera.sqoop.io.TestSplittableBufferedWriter.java

License: Apache License

/** Create the directory where we'll write our test files to; and
 * make sure it has no files in it.
 */
private void ensureEmptyWriteDir() throws IOException {
    FileSystem fs = FileSystem.getLocal(getConf());
    Path writeDir = getWritePath();

    fs.mkdirs(writeDir);

    FileStatus[] stats = fs.listStatus(writeDir);

    for (FileStatus stat : stats) {
        if (stat.isDir()) {
            fail("setUp(): Write directory " + writeDir + " contains subdirectories");
        }

        LOG.debug("setUp(): Removing " + stat.getPath());
        if (!fs.delete(stat.getPath(), false)) {
            fail("setUp(): Could not delete residual file " + stat.getPath());
        }
    }

    if (!fs.exists(writeDir)) {
        fail("setUp: Could not create " + writeDir);
    }
}

From source file: com.cloudera.sqoop.io.TestSplittableBufferedWriter.java

License: Apache License

private void verifyFileExists(Path p) throws IOException {
    FileSystem fs = FileSystem.getLocal(getConf());
    assertTrue("File not found: " + p, fs.exists(p));
}

From source file: com.cloudera.sqoop.io.TestSplittableBufferedWriter.java

License: Apache License

private void verifyFileDoesNotExist(Path p) throws IOException {
    FileSystem fs = FileSystem.getLocal(getConf());
    assertFalse("File found: " + p + " and we did not expect it", fs.exists(p));
}

From source file: com.cloudera.sqoop.lib.TestBlobRef.java

License: Apache License

public void testExternalSubdir() throws IOException {
    final byte[] DATA = { 1, 2, 3, 4, 5 };
    final String FILENAME = "_lob/blobdata";

    try {
        doExternalTest(DATA, FILENAME);
    } finally {
        // remove dir we made.
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);
        String tmpDir = System.getProperty("test.build.data", "/tmp/");
        Path lobDir = new Path(new Path(tmpDir), "_lob");
        fs.delete(lobDir, true);
    }
}

From source file: com.cloudera.sqoop.lib.TestClobRef.java

License: Apache License

public void testExternalSubdir() throws IOException {
    final String DATA = "This is the clob data!";
    final String FILENAME = "_lob/clobdata";

    try {
        doExternalTest(DATA, FILENAME);
    } finally {
        // remove dir we made.
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);
        String tmpDir = System.getProperty("test.build.data", "/tmp/");
        Path lobDir = new Path(new Path(tmpDir), "_lob");
        fs.delete(lobDir, true);
    }
}

From source file: com.cloudera.sqoop.mapreduce.db.TestDataDrivenDBInputFormat.java

License: Apache License

public void testDateSplits() throws Exception {
    Statement s = connection.createStatement();
    final String DATE_TABLE = "datetable";
    final String COL = "foo";
    try {
        try {
            // delete the table if it already exists.
            s.executeUpdate("DROP TABLE " + DATE_TABLE);
        } catch (SQLException e) {
            // Ignored; proceed regardless of whether we deleted the table;
            // it may have simply not existed.
        }

        // Create the table.
        s.executeUpdate("CREATE TABLE " + DATE_TABLE + "(" + COL + " TIMESTAMP)");
        s.executeUpdate("INSERT INTO " + DATE_TABLE + " VALUES('2010-04-01')");
        s.executeUpdate("INSERT INTO " + DATE_TABLE + " VALUES('2010-04-02')");
        s.executeUpdate("INSERT INTO " + DATE_TABLE + " VALUES('2010-05-01')");
        s.executeUpdate("INSERT INTO " + DATE_TABLE + " VALUES('2011-04-01')");

        // commit this tx.
        connection.commit();

        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "file:///");
        FileSystem fs = FileSystem.getLocal(conf);
        fs.delete(new Path(OUT_DIR), true);

        // now do a dd import
        Job job = new Job(conf);
        job.setMapperClass(ValMapper.class);
        job.setReducerClass(Reducer.class);
        job.setMapOutputKeyClass(DateCol.class);
        job.setMapOutputValueClass(NullWritable.class);
        job.setOutputKeyClass(DateCol.class);
        job.setOutputValueClass(NullWritable.class);
        job.setNumReduceTasks(1);
        job.getConfiguration().setInt("mapreduce.map.tasks", 2);
        FileOutputFormat.setOutputPath(job, new Path(OUT_DIR));
        DBConfiguration.configureDB(job.getConfiguration(), DRIVER_CLASS, DB_URL, (String) null, (String) null);
        DataDrivenDBInputFormat.setInput(job, DateCol.class, DATE_TABLE, null, COL, COL);

        boolean ret = job.waitForCompletion(true);
        assertTrue("job failed", ret);

        // Check to see that we imported as much as we thought we did.
        assertEquals("Did not get all the records", 4, job.getCounters()
                .findCounter("org.apache.hadoop.mapred.Task$Counter", "REDUCE_OUTPUT_RECORDS").getValue());
    } finally {
        s.close();
    }
}