Example usage for org.apache.hadoop.fs LocatedFileStatus getPermission

Introduction

On this page you can find usage examples for org.apache.hadoop.fs.LocatedFileStatus#getPermission.

Prototype

public FsPermission getPermission() 

Document

Get FsPermission associated with the file.
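Before the real-world examples, here is a minimal, self-contained sketch of the call. The directory /tmp/data and the default Configuration are hypothetical choices for illustration, not taken from the examples below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class PermissionLister {
    public static void main(String[] args) throws Exception {
        // Assumes whatever file system the default configuration points at.
        FileSystem fs = FileSystem.get(new Configuration());
        // listFiles returns LocatedFileStatus entries; true = recurse into subdirectories.
        RemoteIterator<LocatedFileStatus> it = fs.listFiles(new Path("/tmp/data"), true);
        while (it.hasNext()) {
            LocatedFileStatus status = it.next();
            // getPermission() returns the FsPermission recorded for the entry, e.g. rw-r--r--.
            System.out.println(status.getPath() + " -> " + status.getPermission());
        }
    }
}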

Usage

From source file: org.apache.drill.exec.rpc.user.TemporaryTablesAutomaticDropTest.java

License: Apache License

private File createAndCheckSessionTemporaryLocation(String suffix, File schemaLocation) throws Exception {
    String temporaryTableName = "temporary_table_automatic_drop_" + suffix;
    File sessionTemporaryLocation = schemaLocation.toPath().resolve(SESSION_UUID.toString()).toFile();

    test("create TEMPORARY table %s.%s as select 'A' as c1 from (values(1))", DFS_TMP_SCHEMA,
            temporaryTableName);//from   w w w.ja  v  a 2  s.co m

    FileSystem fs = getLocalFileSystem();
    Path sessionPath = new Path(sessionTemporaryLocation.getAbsolutePath());
    assertTrue("Session temporary location should exist", fs.exists(sessionPath));
    assertEquals("Directory permission should match", StorageStrategy.TEMPORARY.getFolderPermission(),
            fs.getFileStatus(sessionPath).getPermission());
    // The temporary table directory under the session location is named by a UUID
    // derived from the table name, not by the session UUID again.
    Path tempTablePath = new Path(sessionPath, UUID.nameUUIDFromBytes(temporaryTableName.getBytes()).toString());
    assertTrue("Temporary table location should exist", fs.exists(tempTablePath));
    assertEquals("Directory permission should match", StorageStrategy.TEMPORARY.getFolderPermission(),
            fs.getFileStatus(tempTablePath).getPermission());
    RemoteIterator<LocatedFileStatus> fileIterator = fs.listFiles(tempTablePath, false);
    while (fileIterator.hasNext()) {
        LocatedFileStatus file = fileIterator.next();
        assertEquals("File permission should match", StorageStrategy.TEMPORARY.getFilePermission(),
                file.getPermission());
    }
    return sessionTemporaryLocation;
}
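In assertions like the ones above, FsPermission implements equals(), so the value returned by getPermission() can be compared directly against an expected permission. A minimal illustration follows; the octal value 0640 is an arbitrary example, not Drill's StorageStrategy.TEMPORARY setting.

import org.apache.hadoop.fs.permission.FsPermission;

public class FsPermissionDemo {
    public static void main(String[] args) {
        // Built from an octal short (rw-r-----).
        FsPermission expected = new FsPermission((short) 0640);
        // valueOf parses an ls-style string; equals() compares the underlying bits,
        // which is what assertEquals relies on in the test above.
        System.out.println(expected.equals(FsPermission.valueOf("-rw-r-----"))); // true
    }
}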

From source file: org.apache.falcon.service.SharedLibraryHostingService.java

License: Apache License

private void pushExtensionArtifactsToCluster(final Cluster cluster, final FileSystem clusterFs)
        throws FalconException {
    if (!Services.get().isRegistered(ExtensionService.SERVICE_NAME)) {
        LOG.info("ExtensionService not registered, return");
        return;
    }

    ExtensionStore store = ExtensionStore.get();
    if (!store.isExtensionStoreInitialized()) {
        LOG.info(
                "Extension store not initialized by Extension service. Make sure Extension service is added in "
                        + "start up properties");
        return;
    }

    final String filterPath = "/apps/falcon/extensions/mirroring/";
    Path extensionStorePath = store.getExtensionStorePath();
    LOG.info("extensionStorePath :{}", extensionStorePath);
    FileSystem falconFileSystem = HadoopClientFactory.get().createFalconFileSystem(extensionStorePath.toUri());
    String nameNode = StringUtils
            .removeEnd(falconFileSystem.getConf().get(HadoopClientFactory.FS_DEFAULT_NAME_KEY), File.separator);

    String clusterStorageUrl = StringUtils.removeEnd(ClusterHelper.getStorageUrl(cluster), File.separator);

    // If default fs for Falcon server is same as cluster fs abort copy
    if (nameNode.equalsIgnoreCase(clusterStorageUrl)) {
        LOG.info("clusterStorageUrl :{} same return", clusterStorageUrl);
        return;
    }

    try {
        RemoteIterator<LocatedFileStatus> fileStatusListIterator = falconFileSystem
                .listFiles(extensionStorePath, true);

        while (fileStatusListIterator.hasNext()) {
            LocatedFileStatus srcfileStatus = fileStatusListIterator.next();
            Path filePath = Path.getPathWithoutSchemeAndAuthority(srcfileStatus.getPath());

            if (filePath != null && filePath.toString().startsWith(filterPath)) {
                /* HiveDR uses filter path as store path in DRStatusStore, so skip it. Copy only the extension
                 artifacts */
                continue;
            }

            if (srcfileStatus.isDirectory()) {
                if (!clusterFs.exists(filePath)) {
                    HadoopClientFactory.mkdirs(clusterFs, filePath, srcfileStatus.getPermission());
                }
            } else {
                if (clusterFs.exists(filePath)) {
                    FileStatus targetfstat = clusterFs.getFileStatus(filePath);
                    if (targetfstat.getLen() == srcfileStatus.getLen()) {
                        continue;
                    }
                }

                Path parentPath = filePath.getParent();
                if (!clusterFs.exists(parentPath)) {
                    FsPermission dirPerm = falconFileSystem.getFileStatus(parentPath).getPermission();
                    HadoopClientFactory.mkdirs(clusterFs, parentPath, dirPerm);
                }

                FileUtil.copy(falconFileSystem, srcfileStatus, clusterFs, filePath, false, true,
                        falconFileSystem.getConf());
                FileUtil.chmod(clusterFs.makeQualified(filePath).toString(),
                        srcfileStatus.getPermission().toString());
            }
        }
    } catch (IOException | InterruptedException e) {
        throw new FalconException("Failed to copy extension artifacts to cluster " + cluster.getName(), e);
    }
}
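The heart of this example is the copy-then-chmod step: each file is copied across filesystems and the source permission obtained from getPermission() is re-applied on the target. The sketch below condenses that pattern; it uses FileSystem.setPermission in place of FileUtil.chmod, and the srcFs/dstFs/src/dst names are placeholders rather than Falcon's.

import java.io.IOException;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

public final class PermissionPreservingCopy {
    // Copies src on srcFs to dst on dstFs, then re-applies the source permission.
    // deleteSource=false, overwrite=true mirror the FileUtil.copy flags used above.
    public static void copyWithPermission(FileSystem srcFs, Path src, FileSystem dstFs, Path dst)
            throws IOException {
        FileStatus srcStatus = srcFs.getFileStatus(src);
        FileUtil.copy(srcFs, srcStatus, dstFs, dst, false, true, srcFs.getConf());
        // setPermission applies the FsPermission returned by getPermission().
        dstFs.setPermission(dst, srcStatus.getPermission());
    }
}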