Example usage for org.apache.hadoop.fs.FileSystem#mkdirs

Introduction

On this page you can find usage examples for org.apache.hadoop.fs.FileSystem#mkdirs.

Prototype

public boolean mkdirs(Path f) throws IOException 

Document

Calls #mkdirs(Path, FsPermission) with default permission.
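
Before the collected examples below, here is a minimal, self-contained sketch of both forms of the method against the local file system. The class name MkdirsExample and the path /tmp/mkdirs-example are illustrative placeholders, not taken from any example on this page.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class MkdirsExample {
    public static void main(final String[] args) throws IOException {
        // Placeholder path for this sketch; substitute your own directory.
        final Path dir = new Path("/tmp/mkdirs-example");

        // Use the local file system so the sketch runs without an HDFS cluster.
        final FileSystem fs = FileSystem.getLocal(new Configuration());

        // mkdirs(Path) creates the directory and any missing parents
        // (like `mkdir -p`) and returns true if the operation succeeded.
        if (fs.mkdirs(dir)) {
            System.out.println("Directory ready: " + dir);
        }

        // The overload it delegates to takes an explicit FsPermission
        // instead of the default permission.
        fs.mkdirs(new Path(dir, "restricted"),
                new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE));

        // Clean up the sketch's directory.
        fs.delete(dir, true);
    }
}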

Usage

From source file: gaffer.accumulostore.integration.AddElementsFromHdfsIT.java

License: Apache License

@Test
public void shouldThrowExceptionWhenAddElementsFromHdfsWhenFailureDirectoryContainsFiles() throws Exception {
    final FileSystem fs = FileSystem.getLocal(createLocalConf());
    fs.mkdirs(new Path(failureDir));
    try (final BufferedWriter writer = new BufferedWriter(
            new OutputStreamWriter(fs.create(new Path(failureDir + "/someFile.txt"), true)))) {
        writer.write("Some content");
    }

    try {
        addElementsFromHdfs(ByteEntityKeyPackage.class);
        fail("Exception expected");
    } catch (final OperationException e) {
        assertEquals("Failure directory is not empty: " + failureDir, e.getCause().getMessage());
    }

    try {
        addElementsFromHdfs(ClassicKeyPackage.class);
        fail("Exception expected");
    } catch (final OperationException e) {
        assertEquals("Failure directory is not empty: " + failureDir, e.getCause().getMessage());
    }
}

From source file: gaffer.accumulostore.integration.AddElementsFromHdfsIT.java

License: Apache License

private void createInputFile() throws IOException, StoreException {
    final Path inputPath = new Path(inputDir);
    final Path inputFilePath = new Path(inputDir + "/file.txt");
    final FileSystem fs = FileSystem.getLocal(createLocalConf());
    fs.mkdirs(inputPath);

    try (final BufferedWriter writer = new BufferedWriter(
            new OutputStreamWriter(fs.create(inputFilePath, true)))) {
        for (int i = 0; i < NUM_ENTITIES; i++) {
            writer.write(TestGroups.ENTITY + "," + VERTEX_ID_PREFIX + i + "\n");
        }
    }
}

From source file: gaffer.accumulostore.operation.handler.tool.ImportElementsToAccumulo.java

License: Apache License

@Override
public int run(final String[] strings) throws Exception {
    // Hadoop configuration
    final Configuration conf = getConf();
    final FileSystem fs = FileSystem.get(conf);

    // Make the failure directory
    fs.mkdirs(operation.getFailurePath());
    fs.setPermission(operation.getFailurePath(), new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));

    // Remove the _SUCCESS file to prevent a warning in Accumulo
    fs.delete(new Path(operation.getOutputPath().toString() + "/_SUCCESS"), false);

    // Set all permissions
    IngestUtils.setDirectoryPermsForAccumulo(fs, operation.getOutputPath());

    // Import the files
    connector.tableOperations().importDirectory(table, operation.getOutputPath().toString(),
            operation.getFailurePath().toString(), false);

    // Delete the temporary directories
    fs.delete(operation.getFailurePath(), true);

    return SUCCESS_RESPONSE;
}

From source file: gaffer.accumulostore.operation.hdfs.handler.job.tool.FetchElementsFromHdfsTool.java

License: Apache License

private void checkHdfsDirectories(final AddElementsFromHdfs operation) throws IOException {
    LOGGER.info("Checking that the correct HDFS directories exist");
    final FileSystem fs = FileSystem.get(getConf());

    final Path outputPath = new Path(operation.getOutputPath());
    LOGGER.info("Ensuring output directory {} doesn't exist", outputPath);
    if (fs.exists(outputPath)) {
        if (fs.listFiles(outputPath, true).hasNext()) {
            LOGGER.error("Output directory exists and is not empty: {}", outputPath);
            throw new IllegalArgumentException("Output directory exists and is not empty: " + outputPath);
        }
        LOGGER.info("Output directory exists and is empty so deleting: {}", outputPath);
        fs.delete(outputPath, true);
    }

    final Path failurePath = new Path(operation.getFailurePath());
    LOGGER.info("Ensuring failure directory {} exists", failurePath);
    if (fs.exists(failurePath)) {
        if (fs.listFiles(failurePath, true).hasNext()) {
            LOGGER.error("Failure directory exists and is not empty: {}", failurePath);
            throw new IllegalArgumentException("Failure directory is not empty: " + failurePath);
        }
    } else {
        LOGGER.info("Failure directory doesn't exist so creating: {}", failurePath);
        fs.mkdirs(failurePath);
    }
    IngestUtils.setDirectoryPermsForAccumulo(fs, failurePath);
}

From source file: gaffer.accumulostore.operation.hdfs.handler.tool.ImportElementsToAccumulo.java

License: Apache License

@Override
public int run(final String[] strings) throws Exception {
    // Hadoop configuration
    final Configuration conf = getConf();
    final FileSystem fs = FileSystem.get(conf);

    // Make the failure directory
    fs.mkdirs(operation.getFailurePath());
    fs.setPermission(operation.getFailurePath(), new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));

    // Remove the _SUCCESS file to prevent a warning in Accumulo
    fs.delete(new Path(operation.getOutputPath().toString() + "/_SUCCESS"), false);

    // Set all permissions
    IngestUtils.setDirectoryPermsForAccumulo(fs, operation.getOutputPath());

    // Import the files
    connector.tableOperations().importDirectory(table, operation.getOutputPath().toString(),
            operation.getFailurePath().toString(), false);

    return SUCCESS_RESPONSE;
}

From source file: gobblin.compaction.HdfsWriter.java

License: Open Source License

public static void moveSelectFiles(String extension, String source, String destination) throws IOException {
    FileSystem fs = getFileSystem();
    fs.mkdirs(new Path(destination));
    //RemoteIterator<LocatedFileStatus> iterator = fs.listFiles(new Path(source), false);
    FileStatus[] fileStatuses = fs.listStatus(new Path(source));
    for (FileStatus fileStatus : fileStatuses) {
        Path path = fileStatus.getPath();
        if (!fileStatus.isDir() && path.toString().toLowerCase().endsWith(extension.toLowerCase())) {
            FileUtil.copy(fs, path, fs, new Path(destination), false, true, getConfiguration());
        }
    }
}

From source file: gobblin.compaction.hive.HdfsWriter.java

License: Apache License

public static void moveSelectFiles(String extension, String source, String destination) throws IOException {
    FileSystem fs = getFileSystem();
    fs.mkdirs(new Path(destination));
    FileStatus[] fileStatuses = fs.listStatus(new Path(source));
    for (FileStatus fileStatus : fileStatuses) {
        Path path = fileStatus.getPath();
        if (!fileStatus.isDirectory() && path.toString().toLowerCase().endsWith(extension.toLowerCase())) {
            HadoopUtils.deleteIfExists(fs, new Path(destination), true);
            HadoopUtils.copyPath(fs, path, fs, new Path(destination), getConfiguration());
        }
    }
}

From source file: gobblin.compaction.mapreduce.conditions.RecompactionConditionTest.java

License: Apache License

@Test
public void testRecompactionConditionBasedOnFileCount() {
    try {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        fs.delete(outputLatePath, true);
        fs.mkdirs(outputLatePath);
        RecompactionConditionFactory factory = new RecompactionConditionBasedOnFileCount.Factory();
        RecompactionCondition conditionBasedOnFileCount = factory.createRecompactionCondition(dataset);
        DatasetHelper helper = new DatasetHelper(dataset, fs, Lists.newArrayList("avro"));

        fs.createNewFile(new Path(outputLatePath, new Path("1.avro")));
        fs.createNewFile(new Path(outputLatePath, new Path("2.avro")));
        Assert.assertEquals(conditionBasedOnFileCount.isRecompactionNeeded(helper), false);

        fs.createNewFile(new Path(outputLatePath, new Path("3.avro")));
        Assert.assertEquals(conditionBasedOnFileCount.isRecompactionNeeded(helper), true);

        fs.delete(outputLatePath, true);
    } catch (Exception e) {
        e.printStackTrace();
    }
}

From source file: gobblin.compaction.mapreduce.MRCompactorJobRunnerFilenameRecordCountProviderTest.java

License: Apache License

@Test
public void testFileNameRecordCountProvider() throws IOException {
    String originalFilename = "test.123.avro";
    String suffixPattern = Pattern.quote(".late") + "[\\d]*";

    Path testDir = new Path("/tmp/compactorFilenameRecordCountProviderTest");
    FileSystem fs = FileSystem.getLocal(new Configuration());
    try {
        if (fs.exists(testDir)) {
            fs.delete(testDir, true);
        }
        fs.mkdirs(testDir);

        RecordCountProvider originFileNameFormat = new IngestionRecordCountProvider();

        LateFileRecordCountProvider lateFileRecordCountProvider = new LateFileRecordCountProvider(
                originFileNameFormat);

        Path firstOutput = lateFileRecordCountProvider.constructLateFilePath(originalFilename, fs, testDir);

        Assert.assertEquals(new Path(testDir, originalFilename), firstOutput);
        Assert.assertEquals(123, lateFileRecordCountProvider.getRecordCount(firstOutput));

        fs.create(firstOutput);
        Pattern pattern1 = Pattern.compile(
                Pattern.quote(Files.getNameWithoutExtension(originalFilename)) + suffixPattern + "\\.avro");
        Path secondOutput = lateFileRecordCountProvider.constructLateFilePath(firstOutput.getName(), fs,
                testDir);
        Assert.assertEquals(testDir, secondOutput.getParent());
        Assert.assertTrue(pattern1.matcher(secondOutput.getName()).matches());
        Assert.assertEquals(123, lateFileRecordCountProvider.getRecordCount(secondOutput));

        fs.create(secondOutput);
        Pattern pattern2 = Pattern.compile(
                Files.getNameWithoutExtension(originalFilename) + suffixPattern + suffixPattern + "\\.avro");
        Path thirdOutput = lateFileRecordCountProvider.constructLateFilePath(secondOutput.getName(), fs,
                testDir);
        Assert.assertEquals(testDir, thirdOutput.getParent());
        Assert.assertTrue(pattern2.matcher(thirdOutput.getName()).matches());
        Assert.assertEquals(123, lateFileRecordCountProvider.getRecordCount(thirdOutput));
    } finally {
        fs.delete(testDir, true);
    }
}

From source file: gobblin.config.store.hdfs.SimpleHdfsConfigureStoreFactoryTest.java

License: Apache License

@Test
public void testGetDefaults() throws URISyntaxException, ConfigStoreCreationException, IOException {
    Path configStoreDir = new Path(SimpleHDFSConfigStore.CONFIG_STORE_NAME);
    FileSystem localFS = FileSystem.getLocal(new Configuration());

    try {
        Assert.assertTrue(localFS.mkdirs(configStoreDir));

        SimpleLocalHDFSConfigStoreFactory simpleLocalHDFSConfigStoreFactory = new SimpleLocalHDFSConfigStoreFactory();

        URI configKey = new URI(simpleLocalHDFSConfigStoreFactory.getScheme(), "", "", "", "");
        SimpleHDFSConfigStore simpleHDFSConfigStore = simpleLocalHDFSConfigStoreFactory
                .createConfigStore(configKey);

        Assert.assertEquals(simpleHDFSConfigStore.getStoreURI().getScheme(),
                simpleLocalHDFSConfigStoreFactory.getScheme());
        Assert.assertNull(simpleHDFSConfigStore.getStoreURI().getAuthority());
        Assert.assertEquals(simpleHDFSConfigStore.getStoreURI().getPath(), System.getProperty("user.dir"));
    } finally {
        localFS.delete(configStoreDir, true);
    }
}