Example usage for org.apache.hadoop.fs FileSystem mkdirs

Introduction

On this page you can find example usages of org.apache.hadoop.fs.FileSystem#mkdirs.

Prototype

public boolean mkdirs(Path f) throws IOException 

Document

Call #mkdirs(Path, FsPermission) with default permission.
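
For orientation, here is a minimal sketch of the call itself; the path and the 0755 permission value are illustrative assumptions, not taken from the usages below:

public void createOutputDir() throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path dir = new Path("/tmp/example/output"); // hypothetical path
    // mkdirs(Path) creates the directory and any missing parents with
    // the default permission; it returns true if the directory now exists.
    boolean ok = fs.mkdirs(dir);
    // Equivalent call through the two-argument overload, with an
    // assumed explicit permission:
    fs.mkdirs(dir, new FsPermission((short) 0755));
}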

Usage

From source file: com.cloudera.sqoop.manager.NetezzaExportManualTest.java

License: Apache License

protected void createExportFile(ColumnGenerator... extraCols) throws IOException, SQLException {
    String ext = ".txt";

    Path tablePath = getTablePath();
    Path filePath = new Path(tablePath, "part0" + ext);

    Configuration conf = new Configuration();
    if (!BaseSqoopTestCase.isOnPhysicalCluster()) {
        conf.set(CommonArgs.FS_DEFAULT_NAME, CommonArgs.LOCAL_FS);
    }
    FileSystem fs = FileSystem.get(conf);
    fs.mkdirs(tablePath);
    OutputStream os = fs.create(filePath);

    BufferedWriter w = new BufferedWriter(new OutputStreamWriter(os));
    for (int i = 0; i < 3; i++) {
        String line = getRecordLine(i, extraCols);
        w.write(line);
        LOG.debug("Create Export file - Writing line : " + line);
    }
    w.close();
    os.close();
}

From source file: com.cloudera.sqoop.mapreduce.TestImportJob.java

License: Apache License

public void testFailedImportDueToIOException() throws IOException {
    // Make sure that if a MapReduce job to do the import fails due
    // to an IOException, we tell the user about it.

    // Create a table to attempt to import.
    createTableForColType("VARCHAR(32)", "'meep'");

    Configuration conf = new Configuration();

    LogFactory.getLog(getClass()).info(" getWarehouseDir() " + getWarehouseDir());

    // Make the output dir exist so we know the job will fail via IOException.
    Path outputPath = new Path(new Path(getWarehouseDir()), getTableName());
    FileSystem fs = FileSystem.getLocal(conf);
    fs.mkdirs(outputPath);

    assertTrue(fs.exists(outputPath));

    String[] argv = getArgv(true, new String[] { "DATA_COL0" }, conf);

    Sqoop importer = new Sqoop(new ImportTool());
    try {
        int ret = Sqoop.runSqoop(importer, argv);
        assertTrue("Expected ImportException running this job.", 1 == ret);
    } catch (Exception e) {
        // In debug mode, IOException is wrapped in RuntimeException.
        LOG.info("Got exceptional return (expected: ok). msg is: " + e);
    }
}

From source file: com.cloudera.sqoop.mapreduce.TestImportJob.java

License: Apache License

public void testFailedNoColumns() throws IOException {
    // Make sure that if the import is run with an empty list of
    // column names, the job fails and we tell the user about it.

    // Create a table to attempt to import.
    createTableForColType("VARCHAR(32)", "'meep'");

    Configuration conf = new Configuration();

    // Make the output dir exist so we know the job will fail via IOException.
    Path outputPath = new Path(new Path(getWarehouseDir()), getTableName());
    FileSystem fs = FileSystem.getLocal(conf);
    fs.mkdirs(outputPath);
    assertTrue(fs.exists(outputPath));

    String[] argv = getArgv(true, new String[] { "" }, conf);

    Sqoop importer = new Sqoop(new ImportTool());
    try {
        int ret = Sqoop.runSqoop(importer, argv);
        assertTrue("Expected job to fail due to no colnames.", 1 == ret);
    } catch (Exception e) {
        // In debug mode, IOException is wrapped in RuntimeException.
        LOG.info("Got exceptional return (expected: ok). msg is: " + e);
    }
}

From source file: com.cloudera.sqoop.mapreduce.TestImportJob.java

License: Apache License

public void testFailedIllegalColumns() throws IOException {
    // Make sure that if the import references a nonexistent column,
    // the job fails and we tell the user about it.

    // Create a table to attempt to import.
    createTableForColType("VARCHAR(32)", "'meep'");

    Configuration conf = new Configuration();

    // Make the output dir exist so we know the job will fail via IOException.
    Path outputPath = new Path(new Path(getWarehouseDir()), getTableName());
    FileSystem fs = FileSystem.getLocal(conf);
    fs.mkdirs(outputPath);

    assertTrue(fs.exists(outputPath));

    // DATA_COL0 is ok, but zyzzyva is not a valid column name.
    String[] argv = getArgv(true, new String[] { "DATA_COL0", "zyzzyva" }, conf);

    Sqoop importer = new Sqoop(new ImportTool());
    try {
        int ret = Sqoop.runSqoop(importer, argv);
        assertTrue("Expected job to fail due bad colname.", 1 == ret);
    } catch (Exception e) {
        // In debug mode, IOException is wrapped in RuntimeException.
        LOG.info("Got exceptional return (expected: ok). msg is: " + e);
    }
}

From source file: com.cloudera.sqoop.mapreduce.TestImportJob.java

License: Apache License

public void testDuplicateColumns() throws IOException {
    // Make sure that if the import specifies the same column twice,
    // the job fails and we tell the user about it.

    // Create a table to attempt to import.
    createTableForColType("VARCHAR(32)", "'meep'");

    Configuration conf = new Configuration();

    // Make the output dir exist so we know the job will fail via IOException.
    Path outputPath = new Path(new Path(getWarehouseDir()), getTableName());
    FileSystem fs = FileSystem.getLocal(conf);
    fs.mkdirs(outputPath);
    assertTrue(fs.exists(outputPath));

    String[] argv = getArgv(true, new String[] { "DATA_COL0,DATA_COL0" }, conf);

    Sqoop importer = new Sqoop(new ImportTool());
    try {
        int ret = Sqoop.runSqoop(importer, argv);
        assertTrue("Expected job to fail!", 1 == ret);
    } catch (Exception e) {
        // In debug mode, ImportException is wrapped in RuntimeException.
        LOG.info("Got exceptional return (expected: ok). msg is: " + e);
    }
}

From source file: com.cloudera.sqoop.TestAvroExport.java

License: Apache License

/**
 * Create a data file that gets exported to the db.
 * @param fileNum the number of the file (for multi-file export)
 * @param numRecords how many records to write to the file.
 */
protected void createAvroFile(int fileNum, int numRecords, ColumnGenerator... extraCols) throws IOException {

    Path tablePath = getTablePath();
    Path filePath = new Path(tablePath, "part" + fileNum);

    Configuration conf = new Configuration();
    if (!BaseSqoopTestCase.isOnPhysicalCluster()) {
        conf.set(CommonArgs.FS_DEFAULT_NAME, CommonArgs.LOCAL_FS);
    }
    FileSystem fs = FileSystem.get(conf);
    fs.mkdirs(tablePath);
    OutputStream os = fs.create(filePath);

    Schema schema = buildAvroSchema(extraCols);
    DatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<GenericRecord>();
    DataFileWriter<GenericRecord> dataFileWriter = new DataFileWriter<GenericRecord>(datumWriter);
    dataFileWriter.create(schema, os);

    for (int i = 0; i < numRecords; i++) {
        GenericRecord record = new GenericData.Record(schema);
        record.put("id", i);
        record.put("msg", getMsgPrefix() + i);
        addExtraColumns(record, i, extraCols);
        dataFileWriter.append(record);
    }

    dataFileWriter.close();
    os.close();
}

From source file: com.cloudera.sqoop.TestExport.java

License: Apache License

/**
 * Create a data file that gets exported to the db.
 * @param fileNum the number of the file (for multi-file export)
 * @param numRecords how many records to write to the file.
 * @param gzip is true if the file should be gzipped.
 */
protected void createTextFile(int fileNum, int numRecords, boolean gzip, ColumnGenerator... extraCols)
        throws IOException {
    int startId = fileNum * numRecords;

    String ext = ".txt";
    if (gzip) {
        ext = ext + ".gz";
    }
    Path tablePath = getTablePath();
    Path filePath = new Path(tablePath, "part" + fileNum + ext);

    Configuration conf = new Configuration();
    if (!BaseSqoopTestCase.isOnPhysicalCluster()) {
        conf.set(CommonArgs.FS_DEFAULT_NAME, CommonArgs.LOCAL_FS);
    }
    FileSystem fs = FileSystem.get(conf);
    fs.mkdirs(tablePath);
    OutputStream os = fs.create(filePath);
    if (gzip) {
        CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
        CompressionCodec codec = ccf.getCodec(filePath);
        os = codec.createOutputStream(os);
    }
    BufferedWriter w = new BufferedWriter(new OutputStreamWriter(os));
    for (int i = 0; i < numRecords; i++) {
        w.write(getRecordLine(startId + i, extraCols));
    }
    w.close();
    os.close();

    if (gzip) {
        verifyCompressedFile(filePath, numRecords);
    }
}

From source file: com.cloudera.sqoop.TestExport.java

License: Apache License

/**
 * Create a data file in SequenceFile format that gets exported to the db.
 * @param fileNum the number of the file (for multi-file export).
 * @param numRecords how many records to write to the file.
 * @param className the table class name to instantiate and populate
 *          for each record.
 */
private void createSequenceFile(int fileNum, int numRecords, String className) throws IOException {

    try {
        // Instantiate the value record object via reflection.
        Class cls = Class.forName(className, true, Thread.currentThread().getContextClassLoader());
        SqoopRecord record = (SqoopRecord) ReflectionUtils.newInstance(cls, new Configuration());

        // Create the SequenceFile.
        Configuration conf = new Configuration();
        if (!BaseSqoopTestCase.isOnPhysicalCluster()) {
            conf.set(CommonArgs.FS_DEFAULT_NAME, CommonArgs.LOCAL_FS);
        }
        FileSystem fs = FileSystem.get(conf);
        Path tablePath = getTablePath();
        Path filePath = new Path(tablePath, "part" + fileNum);
        fs.mkdirs(tablePath);
        SequenceFile.Writer w = SequenceFile.createWriter(fs, conf, filePath, LongWritable.class, cls);

        // Now write the data.
        int startId = fileNum * numRecords;
        for (int i = 0; i < numRecords; i++) {
            record.parse(getRecordLine(startId + i));
            w.append(new LongWritable(startId + i), record);
        }

        w.close();
    } catch (ClassNotFoundException cnfe) {
        throw new IOException(cnfe);
    } catch (RecordParser.ParseError pe) {
        throw new IOException(pe);
    }
}

From source file: com.cloudera.sqoop.TestTargetDir.java

License: Apache License

/** Test that an existing target-dir breaks the import
 * (reusing an existing directory is only allowed in append mode). */
public void testExistingTargetDir() throws IOException {

    try {
        String targetDir = getWarehouseDir() + "/tempTargetDir";

        ArrayList args = getOutputArgv(true);
        args.add("--target-dir");
        args.add(targetDir);

        // create the target-dir if it does not already exist
        FileSystem fs = FileSystem.get(getConf());
        Path outputPath = new Path(targetDir);
        if (!fs.exists(outputPath)) {
            fs.mkdirs(outputPath);
        }

        String[] argv = (String[]) args.toArray(new String[0]);
        runImport(argv);

        fail("Existing target-dir run without problem report");

    } catch (IOException e) {
        // expected
    }
}

From source file: com.cloudera.sqoop.util.AppendUtils.java

License: Apache License

/**
 * Moves the imported files from temporary directory to specified target-dir,
 * renaming partition number if appending file exists.
 */
public void append() throws IOException {

    SqoopOptions options = context.getOptions();
    FileSystem fs = FileSystem.get(options.getConf());
    Path tempDir = context.getDestination();

    // Try in this order: target-dir or warehouse-dir
    Path userDestDir = null;
    if (options.getTargetDir() != null) {
        userDestDir = new Path(options.getTargetDir());
    } else if (options.getWarehouseDir() != null) {
        userDestDir = new Path(options.getWarehouseDir(), context.getTableName());
    } else {
        userDestDir = new Path(context.getTableName());
    }

    int nextPartition = 0;

    if (!fs.exists(tempDir)) {
        // This occurs if there was no source (tmp) dir. This might happen
        // if the import was an HBase-target import, but the user specified
        // --append anyway. This is a warning, not an error.
        LOG.warn("Cannot append files to target dir; no such directory: " + tempDir);
        return;
    }

    // Create target directory.
    if (!fs.exists(userDestDir)) {
        LOG.info("Creating missing output directory - " + userDestDir.getName());
        fs.mkdirs(userDestDir);
        nextPartition = 0;
    } else {
        LOG.info("Appending to directory " + userDestDir.getName());
        // Get the right next partition for the imported files
        nextPartition = getNextPartition(fs, userDestDir);
    }

    // move files
    moveFiles(fs, tempDir, userDestDir, nextPartition);

    // delete temporary path
    LOG.debug("Deleting temporary folder " + tempDir.getName());
    fs.delete(tempDir, true);
}
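
Across these usages the pattern is consistent: call mkdirs on the destination directory before writing files into it with FileSystem#create, and pair it with FileSystem#exists when the presence of the directory itself carries meaning (as in the failed-import tests and AppendUtils above). Since mkdirs succeeds when the directory already exists, the call is safe to repeat.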