Example usage for org.apache.hadoop.fs.permission FsPermission getDirDefault

List of usage examples for org.apache.hadoop.fs.permission FsPermission getDirDefault

Introduction

On this page you can find example usage for org.apache.hadoop.fs.permission FsPermission getDirDefault.

Prototype

public static FsPermission getDirDefault() 

Source Link

Document

Get the default permission for directory.

Usage

From source file:com.ibm.crail.hdfs.CrailHadoopFileSystem.java

License:Apache License

/**
 * Creates a file at {@code path}, retrying once after creating the parent directory
 * when the first attempt fails because the parent is missing.
 *
 * <p>Note: the {@code permission}, {@code overwrite}, {@code replication},
 * {@code blockSize} and {@code progress} arguments are not consulted by this
 * implementation; permissions/replication are handled by Crail itself.
 *
 * @return a stream writing into the newly created Crail file
 * @throws IOException if the create fails for any reason other than a missing parent,
 *         or if the retry after creating the parent also fails
 */
@Override
public FSDataOutputStream create(Path path, FsPermission permission, boolean overwrite, int bufferSize,
        short replication, long blockSize, Progressable progress) throws IOException {
    CrailFile fileInfo = null;
    try {
        fileInfo = dfs.create(path.toUri().getRawPath(), CrailNodeType.DATAFILE, CrailStorageClass.PARENT,
                CrailLocationClass.PARENT).get().asFile();
    } catch (Exception e) {
        // e.getMessage() can be null (e.g. for NullPointerException); guard so a
        // missing message surfaces as an IOException instead of a secondary NPE.
        String message = e.getMessage();
        if (message != null && message.contains(RpcErrors.messages[RpcErrors.ERR_PARENT_MISSING])) {
            // Parent directory missing — fall through to create it and retry below.
            fileInfo = null;
        } else {
            throw new IOException(e);
        }
    }

    if (fileInfo == null) {
        // First attempt failed because the parent was missing: create it, then retry once.
        Path parent = path.getParent();
        this.mkdirs(parent, FsPermission.getDirDefault());
        try {
            fileInfo = dfs.create(path.toUri().getRawPath(), CrailNodeType.DATAFILE, CrailStorageClass.PARENT,
                    CrailLocationClass.PARENT).get().asFile();
        } catch (Exception e) {
            throw new IOException(e);
        }
    }

    CrailBufferedOutputStream outputStream = null;
    if (fileInfo != null) {
        try {
            // Persist the directory entry before handing out a writer.
            fileInfo.syncDir();
            outputStream = fileInfo.getBufferedOutputStream(Integer.MAX_VALUE);
        } catch (Exception e) {
            throw new IOException(e);
        }
    }

    if (outputStream != null) {
        return new CrailHDFSOutputStream(outputStream, statistics);
    } else {
        throw new IOException("Failed to create file, path " + path.toString());
    }
}

From source file:com.ibm.crail.hdfs.CrailHadoopFileSystem.java

License:Apache License

/**
 * Lists the entries under {@code path} (a directory or multi-file) as HDFS
 * {@link FileStatus} records.
 *
 * <p>Permissions are reported as the HDFS defaults chosen by node type — the
 * underlying store's actual permissions are not consulted here.
 *
 * @throws FileNotFoundException if the lookup or listing fails for any reason;
 *         the underlying exception is attached as the cause
 */
@Override
public FileStatus[] listStatus(Path path) throws FileNotFoundException, IOException {
    try {
        CrailNode node = dfs.lookup(path.toUri().getRawPath()).get();
        Iterator<String> iter = node.getType() == CrailNodeType.DIRECTORY ? node.asDirectory().listEntries()
                : node.asMultiFile().listEntries();
        ArrayList<FileStatus> statusList = new ArrayList<FileStatus>();
        while (iter.hasNext()) {
            String filepath = iter.next();
            CrailNode directFile = dfs.lookup(filepath).get();
            if (directFile != null) {
                // Default permission by node type (no per-file permissions visible here).
                FsPermission permission = directFile.getType().isDirectory() ? FsPermission.getDirDefault()
                        : FsPermission.getFileDefault();
                FileStatus status = new FileStatus(directFile.getCapacity(), directFile.getType().isContainer(),
                        CrailConstants.SHADOW_REPLICATION, CrailConstants.BLOCK_SIZE,
                        directFile.getModificationTime(), directFile.getModificationTime(), permission,
                        CrailConstants.USER, CrailConstants.USER,
                        new Path(filepath).makeQualified(this.getUri(), this.workingDir));
                statusList.add(status);
            }
        }
        return statusList.toArray(new FileStatus[0]);
    } catch (Exception e) {
        // Preserve the root cause instead of discarding it; FileNotFoundException has
        // no cause-taking constructor, so attach it via initCause.
        FileNotFoundException fnfe = new FileNotFoundException(path.toUri().getRawPath());
        fnfe.initCause(e);
        throw fnfe;
    }
}

From source file:com.ibm.crail.hdfs.CrailHadoopFileSystem.java

License:Apache License

/**
 * Resolves the node at {@code path} and reports it as an HDFS {@link FileStatus}.
 * Permissions are the HDFS defaults chosen by node type; modification time is
 * reported for both mtime and atime.
 *
 * @throws FileNotFoundException if no node exists at the path
 * @throws IOException if the lookup itself fails
 */
@Override
public FileStatus getFileStatus(Path path) throws IOException {
    final String rawPath = path.toUri().getRawPath();
    CrailNode node;
    try {
        node = dfs.lookup(rawPath).get();
    } catch (Exception e) {
        throw new IOException(e);
    }
    if (node == null) {
        throw new FileNotFoundException("File does not exist: " + path);
    }
    final FsPermission permission = node.getType().isDirectory() ? FsPermission.getDirDefault()
            : FsPermission.getFileDefault();
    final long modTime = node.getModificationTime();
    return new FileStatus(node.getCapacity(), node.getType().isContainer(),
            CrailConstants.SHADOW_REPLICATION, CrailConstants.BLOCK_SIZE, modTime, modTime, permission,
            CrailConstants.USER, CrailConstants.USER, path.makeQualified(this.getUri(), this.workingDir));
}

From source file:com.ibm.crail.hdfs.CrailHDFS.java

License:Apache License

/**
 * Creates a file at {@code path}, retrying once after creating the parent directory
 * when the first attempt fails because the parent is missing.
 *
 * <p>Note: {@code flag}, {@code absolutePermission}, {@code replication},
 * {@code blockSize}, {@code progress}, {@code checksumOpt} and {@code createParent}
 * are not consulted by this implementation — the parent is always created on demand.
 *
 * @return a stream writing into the newly created Crail file
 * @throws IOException if creation fails for any reason other than a missing parent,
 *         or if the retry after creating the parent also fails
 */
@Override
public FSDataOutputStream createInternal(Path path, EnumSet<CreateFlag> flag, FsPermission absolutePermission,
        int bufferSize, short replication, long blockSize, Progressable progress, ChecksumOpt checksumOpt,
        boolean createParent) throws AccessControlException, FileAlreadyExistsException, FileNotFoundException,
        ParentNotDirectoryException, UnsupportedFileSystemException, UnresolvedLinkException, IOException {
    CrailFile fileInfo = null;
    try {
        fileInfo = dfs.create(path.toUri().getRawPath(), CrailNodeType.DATAFILE, CrailStorageClass.PARENT,
                CrailLocationClass.PARENT).get().asFile();
    } catch (Exception e) {
        // e.getMessage() can be null (e.g. for NullPointerException); guard so a
        // missing message surfaces as an IOException instead of a secondary NPE.
        String message = e.getMessage();
        if (message != null && message.contains(RpcErrors.messages[RpcErrors.ERR_PARENT_MISSING])) {
            // Parent directory missing — fall through to create it and retry below.
            fileInfo = null;
        } else {
            throw new IOException(e);
        }
    }

    if (fileInfo == null) {
        // First attempt failed because the parent was missing: create it, then retry once.
        Path parent = path.getParent();
        this.mkdir(parent, FsPermission.getDirDefault(), true);
        try {
            fileInfo = dfs.create(path.toUri().getRawPath(), CrailNodeType.DATAFILE, CrailStorageClass.PARENT,
                    CrailLocationClass.PARENT).get().asFile();
        } catch (Exception e) {
            throw new IOException(e);
        }
    }

    if (fileInfo == null) {
        throw new IOException("Failed to create file, path " + path.toString());
    }

    CrailBufferedOutputStream outputStream;
    try {
        // Persist the directory entry before handing out a writer.
        fileInfo.syncDir();
        outputStream = fileInfo.getBufferedOutputStream(Integer.MAX_VALUE);
    } catch (Exception e) {
        throw new IOException(e);
    }

    if (outputStream == null) {
        throw new IOException("Failed to create file, path " + path.toString());
    }
    return new CrailHDFSOutputStream(outputStream, statistics);
}

From source file:com.ibm.crail.hdfs.CrailHDFS.java

License:Apache License

/**
 * Resolves the node at {@code path} and reports it as an HDFS {@link FileStatus}.
 * Permissions are the HDFS defaults chosen by node type; modification time is
 * reported for both mtime and atime.
 *
 * @throws FileNotFoundException if no node exists at the path
 * @throws IOException if the lookup itself fails
 */
@Override
public FileStatus getFileStatus(Path path)
        throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException {
    final String rawPath = path.toUri().getRawPath();
    CrailNode node;
    try {
        node = dfs.lookup(rawPath).get();
    } catch (Exception e) {
        throw new IOException(e);
    }
    if (node == null) {
        throw new FileNotFoundException("filename " + path);
    }

    final FsPermission permission = node.getType().isDirectory() ? FsPermission.getDirDefault()
            : FsPermission.getFileDefault();
    final long modTime = node.getModificationTime();
    return new FileStatus(node.getCapacity(), node.getType().isContainer(),
            CrailConstants.SHADOW_REPLICATION, CrailConstants.BLOCK_SIZE, modTime, modTime, permission,
            CrailConstants.USER, CrailConstants.USER, path.makeQualified(this.getUri(), this.workingDir));
}

From source file:com.ibm.crail.hdfs.CrailHDFS.java

License:Apache License

/**
 * Lists the entries under {@code path} (a directory or multi-file) as HDFS
 * {@link FileStatus} records.
 *
 * <p>Permissions are reported as the HDFS defaults chosen by node type — the
 * underlying store's actual permissions are not consulted here.
 *
 * @throws FileNotFoundException if the lookup or listing fails for any reason;
 *         the underlying exception is attached as the cause
 */
@Override
public FileStatus[] listStatus(Path path)
        throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException {
    try {
        CrailNode node = dfs.lookup(path.toUri().getRawPath()).get();
        Iterator<String> iter = node.getType() == CrailNodeType.DIRECTORY ? node.asDirectory().listEntries()
                : node.asMultiFile().listEntries();
        ArrayList<FileStatus> statusList = new ArrayList<FileStatus>();
        while (iter.hasNext()) {
            String filepath = iter.next();
            CrailNode directFile = dfs.lookup(filepath).get();
            if (directFile != null) {
                // Default permission by node type (no per-file permissions visible here).
                FsPermission permission = directFile.getType().isDirectory() ? FsPermission.getDirDefault()
                        : FsPermission.getFileDefault();
                FileStatus status = new FileStatus(directFile.getCapacity(), directFile.getType().isContainer(),
                        CrailConstants.SHADOW_REPLICATION, CrailConstants.BLOCK_SIZE,
                        directFile.getModificationTime(), directFile.getModificationTime(), permission,
                        CrailConstants.USER, CrailConstants.USER,
                        new Path(filepath).makeQualified(this.getUri(), workingDir));
                statusList.add(status);
            }
        }
        return statusList.toArray(new FileStatus[0]);
    } catch (Exception e) {
        // Preserve the root cause instead of discarding it; FileNotFoundException has
        // no cause-taking constructor, so attach it via initCause.
        FileNotFoundException fnfe = new FileNotFoundException(path.toUri().getRawPath());
        fnfe.initCause(e);
        throw fnfe;
    }
}

From source file:com.ikanow.aleph2.storage_service_hdfs.services.TestMockHdfsStorageSystem.java

License:Apache License

/**
 * Verifies that secondary storage buffers created directly on the filesystem are
 * discovered by {@code getSecondaryBuffers}, that duplicates across raw/json suffixes
 * are de-duplicated, and that a buffer disappears from the listing only once it has
 * been removed from every suffix directory.
 */
@Test
public void test_basic_secondaryBuffers()
        throws AccessControlException, FileAlreadyExistsException, FileNotFoundException,
        ParentNotDirectoryException, UnsupportedFileSystemException, IllegalArgumentException, IOException {
    // 0) Setup: point every storage root at the local temp dir and build a mock service.
    final String temp_dir = System.getProperty("java.io.tmpdir") + File.separator;

    final GlobalPropertiesBean globals = BeanTemplateUtils.build(GlobalPropertiesBean.class)
            .with(GlobalPropertiesBean::local_yarn_config_dir, temp_dir)
            .with(GlobalPropertiesBean::distributed_root_dir, temp_dir)
            .with(GlobalPropertiesBean::local_root_dir, temp_dir)
            .with(GlobalPropertiesBean::distributed_root_dir, temp_dir).done().get();

    final MockHdfsStorageService storage_service = new MockHdfsStorageService(globals);

    // Some buckets

    final DataBucketBean bucket = BeanTemplateUtils.build(DataBucketBean.class)
            .with(DataBucketBean::full_name, "/test/storage/bucket")
            .with(DataBucketBean::data_schema, BeanTemplateUtils.build(DataSchemaBean.class).done().get())
            .done().get();

    setup_bucket(storage_service, bucket, Collections.emptyList());

    // Get primary buffer doesn't work:

    assertFalse(storage_service.getDataService().get().getPrimaryBufferName(bucket).isPresent());

    // Add some secondary buffers and check they get picked up

    final FileContext dfs = storage_service.getUnderlyingPlatformDriver(FileContext.class, Optional.empty())
            .get();
    final String bucket_root = storage_service.getBucketRootPath() + "/" + bucket.full_name();
    dfs.mkdir(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_RAW_SECONDARY + "test1"),
            FsPermission.getDirDefault(), true);
    //(skip the current dir once just to check it doesn't cause problems)
    dfs.mkdir(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON_SECONDARY + "test2"),
            FsPermission.getDirDefault(), true);
    dfs.mkdir(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON), FsPermission.getDirDefault(),
            true);
    dfs.mkdir(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_PROCESSED_SECONDARY + "test3"),
            FsPermission.getDirDefault(), true);
    dfs.mkdir(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_PROCESSED),
            FsPermission.getDirDefault(), true);

    // All three buffers should be discovered regardless of which suffix they live under.
    assertEquals(Arrays.asList("test1", "test2", "test3"), storage_service.getDataService().get()
            .getSecondaryBuffers(bucket).stream().sorted().collect(Collectors.toList()));

    //(check dedups): "test1" now exists under two suffixes but must be listed once.
    dfs.mkdir(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON_SECONDARY + "test1"),
            FsPermission.getDirDefault(), true);

    assertEquals(Arrays.asList("test1", "test2", "test3"), storage_service.getDataService().get()
            .getSecondaryBuffers(bucket).stream().sorted().collect(Collectors.toList()));

    try {
        dfs.delete(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_PROCESSED_SECONDARY + "test3"),
                true);
    } catch (Exception e) {
        // best-effort delete; the assertion below checks the observable effect
    }

    // "test3" was only under PROCESSED_SECONDARY, so it should now be gone.
    assertEquals(Arrays.asList("test1", "test2"), storage_service.getDataService().get()
            .getSecondaryBuffers(bucket).stream().sorted().collect(Collectors.toList()));

    try {
        dfs.delete(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_RAW_SECONDARY + "test1"), true);
    } catch (Exception e) {
        // best-effort delete; the assertion below checks the observable effect
    }

    // "test1" still exists under JSON_SECONDARY (added above), so it remains listed.
    assertEquals(Arrays.asList("test1", "test2"), storage_service.getDataService().get()
            .getSecondaryBuffers(bucket).stream().sorted().collect(Collectors.toList()));
}

From source file:com.ikanow.aleph2.storage_service_hdfs.services.TestMockHdfsStorageSystem.java

License:Apache License

@Test
public void test_switching_secondaryBuffers() throws AccessControlException, FileAlreadyExistsException,
        FileNotFoundException, ParentNotDirectoryException, UnsupportedFileSystemException,
        IllegalArgumentException, IOException, InterruptedException, ExecutionException {
    // 0) Setup/*from   w w w.  j a v a 2  s.  com*/
    final String temp_dir = System.getProperty("java.io.tmpdir") + File.separator;

    final GlobalPropertiesBean globals = BeanTemplateUtils.build(GlobalPropertiesBean.class)
            .with(GlobalPropertiesBean::local_yarn_config_dir, temp_dir)
            .with(GlobalPropertiesBean::distributed_root_dir, temp_dir)
            .with(GlobalPropertiesBean::local_root_dir, temp_dir)
            .with(GlobalPropertiesBean::distributed_root_dir, temp_dir).done().get();

    final MockHdfsStorageService storage_service = new MockHdfsStorageService(globals);

    // Some buckets

    final DataBucketBean bucket = BeanTemplateUtils.build(DataBucketBean.class)
            .with(DataBucketBean::full_name, "/test/storage/bucket")
            .with(DataBucketBean::data_schema, BeanTemplateUtils.build(DataSchemaBean.class).done().get())
            .done().get();

    setup_bucket(storage_service, bucket, Collections.emptyList());

    final FileContext dfs = storage_service.getUnderlyingPlatformDriver(FileContext.class, Optional.empty())
            .get();
    final String bucket_root = storage_service.getBucketRootPath() + "/" + bucket.full_name();
    dfs.mkdir(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_RAW + "test_exdir"),
            FsPermission.getDirDefault(), true);
    dfs.mkdir(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_RAW_SECONDARY + "test1"),
            FsPermission.getDirDefault(), true);
    //(skip the current dir once just to check it doesn't cause problems)
    dfs.mkdir(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON_SECONDARY + "test2"),
            FsPermission.getDirDefault(), true);
    dfs.create(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON_SECONDARY + "test2/test2.json"),
            EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)).close();
    dfs.mkdir(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON + "test_exdir"),
            FsPermission.getDirDefault(), true);
    dfs.mkdir(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_PROCESSED_SECONDARY + "test3"),
            FsPermission.getDirDefault(), true);
    dfs.mkdir(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_PROCESSED + "test_exdir"),
            FsPermission.getDirDefault(), true);

    // (retire the primary, copy test2 across)
    {
        BasicMessageBean res1 = storage_service.getDataService().get()
                .switchCrudServiceToPrimaryBuffer(bucket, Optional.of("test2"), Optional.empty()).get();
        System.out.println("(res1 = " + res1.message() + ")");
        assertTrue("Request returns: " + res1.message(), res1.success());
    }
    assertTrue(doesDirExist(dfs, new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON)));
    assertTrue(
            doesFileExist(dfs, new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON + "test2.json")));
    assertTrue(doesDirExist(dfs, new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_RAW)));
    assertTrue(doesDirExist(dfs, new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_PROCESSED)));
    assertFalse(doesDirExist(dfs,
            new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON_SECONDARY + "test2")));
    assertFalse(doesDirExist(dfs,
            new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_RAW_SECONDARY + "test2")));
    assertFalse(doesDirExist(dfs,
            new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_PROCESSED_SECONDARY + "test2")));
    assertTrue(doesDirExist(dfs, new Path(
            bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON_SECONDARY + "former_current/test_exdir")));
    assertTrue(doesDirExist(dfs, new Path(
            bucket_root + IStorageService.STORED_DATA_SUFFIX_RAW_SECONDARY + "former_current/test_exdir")));
    assertTrue(doesDirExist(dfs, new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_PROCESSED_SECONDARY
            + "former_current/test_exdir")));
    {
        BasicMessageBean res2 = storage_service.getDataService().get()
                .switchCrudServiceToPrimaryBuffer(bucket, Optional.of("test3"), Optional.of("ex_primary"))
                .get();
        System.out.println("(res2 = " + res2.message() + ")");
        assertTrue("Request returns: " + res2.message(), res2.success());
    }
    assertTrue(doesDirExist(dfs,
            new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON_SECONDARY + "ex_primary")));
    assertTrue(doesDirExist(dfs,
            new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_RAW_SECONDARY + "ex_primary")));
    assertTrue(doesDirExist(dfs,
            new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_PROCESSED_SECONDARY + "ex_primary")));
    assertTrue(doesFileExist(dfs, new Path(
            bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON_SECONDARY + "ex_primary/test2.json")));

    // return to the primary, delete the current
    {
        BasicMessageBean res3 = storage_service.getDataService().get()
                .switchCrudServiceToPrimaryBuffer(bucket, Optional.of("former_current"), Optional.of("")).get();
        System.out.println("(res3 = " + res3.message() + ")");
        assertTrue("Request returns: " + res3.message(), res3.success());
        assertTrue(doesDirExist(dfs,
                new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON + "/test_exdir")));
        assertTrue(doesDirExist(dfs,
                new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_RAW + "/test_exdir")));
        assertTrue(doesDirExist(dfs,
                new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_PROCESSED + "/test_exdir")));
    }
}

From source file:org.apache.falcon.regression.core.util.HadoopUtil.java

License:Apache License

/**
 * Copies given data to hdfs location./*from w  w  w . jav a 2 s.co  m*/
 * @param fs target filesystem
 * @param dstHdfsDir destination dir
 * @param data source location
 * @param overwrite do we want to overwrite the data
 * @throws IOException
 */
public static void writeDataForHive(final FileSystem fs, final String dstHdfsDir, final CharSequence data,
        boolean overwrite) throws IOException {
    LOGGER.info(String.format("Writing data %s to hdfs location %s", data, dstHdfsDir));
    final File tempFile = File.createTempFile(UUID.randomUUID().toString().split("-")[0], ".dat");
    FileUtils.write(tempFile, data);
    if (overwrite) {
        HadoopUtil.deleteDirIfExists(dstHdfsDir, fs);
    }
    try {
        fs.mkdirs(new Path(dstHdfsDir));
    } catch (Exception e) {
        //ignore
    }
    fs.setPermission(new Path(dstHdfsDir), FsPermission.getDirDefault());
    HadoopUtil.copyDataToFolder(fs, dstHdfsDir, tempFile.getAbsolutePath());
    if (!tempFile.delete()) {
        LOGGER.warn("Deletion of " + tempFile + " failed.");
    }
}

From source file:org.apache.falcon.regression.hive.dr.HiveObjectCreator.java

License:Apache License

/**
 * Creates an external Hive table backed by freshly uploaded click data.
 * @param connection jdbc connection object to use for issuing queries to hive
 * @param fs filesystem object to upload the data
 * @param clickDataLocation location to upload the data to (recreated from scratch)
 * @param tableName name of the external table to create
 * @throws IOException
 * @throws SQLException
 */
static void createExternalTable(Connection connection, FileSystem fs, String clickDataLocation,
        String tableName) throws IOException, SQLException {
    HadoopUtil.deleteDirIfExists(clickDataLocation, fs);
    fs.mkdirs(new Path(clickDataLocation));
    fs.setPermission(new Path(clickDataLocation), FsPermission.getDirDefault());
    // Two rows of ctrl-A (0x01) delimited data, Hive's default field separator.
    // StringBuilder suffices here: single-threaded, no synchronization needed.
    writeDataForHive(fs, clickDataLocation, new StringBuilder("click1").append((char) 0x01).append("01:01:01")
            .append("\n").append("click2").append((char) 0x01).append("02:02:02"), true);
    //clusterFS.setPermission(new Path(clickDataPart2), FsPermission.getFileDefault());
    // NOTE(review): SQL is built by string concatenation; acceptable only because
    // tableName/clickDataLocation are test-controlled — do not pass untrusted input.
    runSql(connection, "create external table " + tableName + " (data string, time string) " + "location '"
            + clickDataLocation + "'");
    runSql(connection, "select * from " + tableName);
}