Example usage for org.apache.hadoop.fs FileStatus getOwner

List of usage examples for org.apache.hadoop.fs FileStatus getOwner

Introduction

On this page you can find example usages of org.apache.hadoop.fs.FileStatus.getOwner().

Prototype

public String getOwner() 

Document

Get the owner of the file.

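Before the project-specific examples below, here is a minimal, self-contained sketch of calling getOwner(): it resolves a FileStatus from a FileSystem and prints the owner, group, and permission. The configuration, the path "/tmp/example.txt", and the class name are illustrative placeholders, not taken from any of the examples on this page.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileStatusOwnerExample {
    public static void main(String[] args) throws IOException {
        // Obtain the file system backed by the default Hadoop configuration
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // Look up the status of a path; replace with a path that exists on your file system
        Path path = new Path("/tmp/example.txt");
        FileStatus status = fs.getFileStatus(path);

        // getOwner() returns the owner name reported by the underlying file system
        System.out.println("Owner: " + status.getOwner());
        System.out.println("Group: " + status.getGroup());
        System.out.println("Permission: " + status.getPermission());
    }
}
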
Usage

From source file:org.apache.drill.exec.impersonation.BaseTestImpersonation.java

License:Apache License

protected static void createView(final String viewOwner, final String viewGroup, final short viewPerms,
        final String newViewName, final String fromSourceSchema, final String fromSourceTableName)
        throws Exception {
    updateClient(viewOwner);
    test(String.format("ALTER SESSION SET `%s`='%o';", ExecConstants.NEW_VIEW_DEFAULT_PERMS_KEY, viewPerms));
    test(String.format("CREATE VIEW %s.%s AS SELECT * FROM %s.%s;", getWSSchema(viewOwner), newViewName,
            fromSourceSchema, fromSourceTableName));

    // Verify the view file created has the expected permissions and ownership
    Path viewFilePath = new Path(getUserHome(viewOwner), newViewName + DotDrillType.VIEW.getEnding());
    FileStatus status = fs.getFileStatus(viewFilePath);
    assertEquals(viewGroup, status.getGroup());
    assertEquals(viewOwner, status.getOwner());
    assertEquals(viewPerms, status.getPermission().toShort());
}

From source file:org.apache.drill.exec.impersonation.TestInboundImpersonation.java

License:Apache License

private static void createTestData() throws Exception {
    // Create table accessible only by OWNER
    final String tableName = "lineitem";
    updateClient(OWNER, OWNER_PASSWORD);
    test("USE " + getWSSchema(OWNER));
    test(String.format("CREATE TABLE %s as SELECT * FROM cp.`tpch/%s.parquet`;", tableName, tableName));

    // Change the ownership and permissions manually.
    // Currently there is no option to specify the default permissions and ownership for new tables.
    final Path tablePath = new Path(getUserHome(OWNER), tableName);
    fs.setOwner(tablePath, OWNER, DATA_GROUP);
    fs.setPermission(tablePath, new FsPermission((short) 0700));

    // Create a view on top of lineitem table; allow IMPERSONATION_TARGET to read the view
    // /user/user0_1    u0_lineitem    750    user0_1:group0_1
    final String viewName = "u0_lineitem";
    test(String.format("ALTER SESSION SET `%s`='%o';", ExecConstants.NEW_VIEW_DEFAULT_PERMS_KEY, (short) 0750));
    test(String.format("CREATE VIEW %s.%s AS SELECT l_orderkey, l_partkey FROM %s.%s;", getWSSchema(OWNER),
            viewName, getWSSchema(OWNER), "lineitem"));
    // Verify the view file created has the expected permissions and ownership
    final Path viewFilePath = new Path(getUserHome(OWNER), viewName + DotDrillType.VIEW.getEnding());
    final FileStatus status = fs.getFileStatus(viewFilePath);
    assertEquals(org1Groups[0], status.getGroup());
    assertEquals(OWNER, status.getOwner());
    assertEquals((short) 0750, status.getPermission().toShort());

    // Authorize PROXY_NAME to impersonate TARGET_NAME
    updateClient(UserAuthenticatorTestImpl.PROCESS_USER, UserAuthenticatorTestImpl.PROCESS_USER_PASSWORD);
    test("ALTER SYSTEM SET `%s`='%s'", ExecConstants.IMPERSONATION_POLICIES_KEY,
            "[ { proxy_principals : { users: [\"" + PROXY_NAME + "\" ] }," + "target_principals : { users : [\""
                    + TARGET_NAME + "\"] } } ]");
}

From source file:org.apache.drill.exec.planner.sql.handlers.ShowFileHandler.java

License:Apache License

@Override
public PhysicalPlan getPlan(SqlNode sqlNode) throws ValidationException, RelConversionException, IOException {

    SqlIdentifier from = ((SqlShowFiles) sqlNode).getDb();

    DrillFileSystem fs = null;
    String defaultLocation = null;
    String fromDir = "./";

    SchemaPlus defaultSchema = config.getConverter().getDefaultSchema();
    SchemaPlus drillSchema = defaultSchema;

    // Show files can be used without from clause, in which case we display the files in the default schema
    if (from != null) {
        // We are not sure if the full from clause is just the schema or includes table name,
        // first try to see if the full path specified is a schema
        drillSchema = SchemaUtilites.findSchema(defaultSchema, from.names);
        if (drillSchema == null) {
            // Entire from clause is not a schema, try to obtain the schema without the last part of the specified clause.
            drillSchema = SchemaUtilites.findSchema(defaultSchema,
                    from.names.subList(0, from.names.size() - 1));
            fromDir = fromDir + from.names.get((from.names.size() - 1));
        }

        if (drillSchema == null) {
            throw UserException.validationError().message("Invalid FROM/IN clause [%s]", from.toString())
                    .build(logger);
        }
    }

    WorkspaceSchema wsSchema;
    try {
        wsSchema = (WorkspaceSchema) drillSchema.unwrap(AbstractSchema.class).getDefaultSchema();
    } catch (ClassCastException e) {
        throw UserException.validationError().message(
                "SHOW FILES is supported in workspace type schema only. Schema [%s] is not a workspace schema.",
                SchemaUtilites.getSchemaPath(drillSchema)).build(logger);
    }

    // Get the file system object
    fs = wsSchema.getFS();

    // Get the default path
    defaultLocation = wsSchema.getDefaultLocation();

    List<ShowFilesCommandResult> rows = new ArrayList<>();

    for (FileStatus fileStatus : fs.list(false, new Path(defaultLocation, fromDir))) {
        ShowFilesCommandResult result = new ShowFilesCommandResult(fileStatus.getPath().getName(),
                fileStatus.isDir(), !fileStatus.isDir(), fileStatus.getLen(), fileStatus.getOwner(),
                fileStatus.getGroup(), fileStatus.getPermission().toString(), fileStatus.getAccessTime(),
                fileStatus.getModificationTime());
        rows.add(result);
    }
    return DirectPlan.createDirectPlan(context.getCurrentEndpoint(), rows.iterator(),
            ShowFilesCommandResult.class);
}

From source file:org.apache.drill.exec.store.parquet.metadata.Metadata.java

License:Apache License

/**
 * Get the metadata for a single file
 */
private ParquetFileMetadata_v3 getParquetFileMetadata_v3(ParquetTableMetadata_v3 parquetTableMetadata,
        final FileStatus file, final FileSystem fs, boolean allColumns, Set<String> columnSet)
        throws IOException, InterruptedException {
    final ParquetMetadata metadata;
    final UserGroupInformation processUserUgi = ImpersonationUtil.getProcessUserUGI();
    final Configuration conf = new Configuration(fs.getConf());
    try {
        metadata = processUserUgi.doAs((PrivilegedExceptionAction<ParquetMetadata>) () -> {
            try (ParquetFileReader parquetFileReader = ParquetFileReader
                    .open(HadoopInputFile.fromStatus(file, conf), readerConfig.toReadOptions())) {
                return parquetFileReader.getFooter();
            }
        });
    } catch (Exception e) {
        logger.error(
                "Exception while reading footer of parquet file [Details - path: {}, owner: {}] as process user {}",
                file.getPath(), file.getOwner(), processUserUgi.getShortUserName(), e);
        throw e;
    }

    MessageType schema = metadata.getFileMetaData().getSchema();

    Map<SchemaPath, ColTypeInfo> colTypeInfoMap = new HashMap<>();
    for (String[] path : schema.getPaths()) {
        colTypeInfoMap.put(SchemaPath.getCompoundPath(path), getColTypeInfo(schema, schema, path, 0));
    }

    List<RowGroupMetadata_v3> rowGroupMetadataList = Lists.newArrayList();

    ArrayList<SchemaPath> ALL_COLS = new ArrayList<>();
    ALL_COLS.add(SchemaPath.STAR_COLUMN);
    ParquetReaderUtility.DateCorruptionStatus containsCorruptDates = ParquetReaderUtility
            .detectCorruptDates(metadata, ALL_COLS, readerConfig.autoCorrectCorruptedDates());
    logger.debug("Contains corrupt dates: {}.", containsCorruptDates);

    for (BlockMetaData rowGroup : metadata.getBlocks()) {
        List<ColumnMetadata_v3> columnMetadataList = new ArrayList<>();
        long length = 0;
        for (ColumnChunkMetaData col : rowGroup.getColumns()) {
            String[] columnName = col.getPath().toArray();
            SchemaPath columnSchemaName = SchemaPath.getCompoundPath(columnName);
            ColTypeInfo colTypeInfo = colTypeInfoMap.get(columnSchemaName);

            ColumnTypeMetadata_v3 columnTypeMetadata = new ColumnTypeMetadata_v3(columnName,
                    col.getPrimitiveType().getPrimitiveTypeName(), colTypeInfo.originalType,
                    colTypeInfo.precision, colTypeInfo.scale, colTypeInfo.repetitionLevel,
                    colTypeInfo.definitionLevel);

            if (parquetTableMetadata.columnTypeInfo == null) {
                parquetTableMetadata.columnTypeInfo = new ConcurrentHashMap<>();
            }
            parquetTableMetadata.columnTypeInfo.put(new ColumnTypeMetadata_v3.Key(columnTypeMetadata.name),
                    columnTypeMetadata);
            // Store column metadata only if allColumns is set to true or if the column belongs to the subset of columns specified in the refresh command
            if (allColumns || columnSet == null || !allColumns && columnSet != null && columnSet.size() > 0
                    && columnSet.contains(columnSchemaName.getRootSegmentPath())) {
                Statistics<?> stats = col.getStatistics();
                // Save the column schema info. We'll merge it into one list
                Object minValue = null;
                Object maxValue = null;
                long numNulls = -1;
                boolean statsAvailable = stats != null && !stats.isEmpty();
                if (statsAvailable) {
                    if (stats.hasNonNullValue()) {
                        minValue = stats.genericGetMin();
                        maxValue = stats.genericGetMax();
                        if (containsCorruptDates == ParquetReaderUtility.DateCorruptionStatus.META_SHOWS_CORRUPTION
                                && columnTypeMetadata.originalType == OriginalType.DATE) {
                            minValue = ParquetReaderUtility.autoCorrectCorruptedDate((Integer) minValue);
                            maxValue = ParquetReaderUtility.autoCorrectCorruptedDate((Integer) maxValue);
                        }
                    }
                    numNulls = stats.getNumNulls();
                }
                ColumnMetadata_v3 columnMetadata = new ColumnMetadata_v3(columnTypeMetadata.name,
                        col.getPrimitiveType().getPrimitiveTypeName(), minValue, maxValue, numNulls);
                columnMetadataList.add(columnMetadata);
            }
            length += col.getTotalSize();
        }

        // DRILL-5009: Skip the RowGroup if it is empty
        // Note we still read the schema even if there are no values in the RowGroup
        if (rowGroup.getRowCount() == 0) {
            continue;
        }
        RowGroupMetadata_v3 rowGroupMeta = new RowGroupMetadata_v3(rowGroup.getStartingPos(), length,
                rowGroup.getRowCount(), getHostAffinity(file, fs, rowGroup.getStartingPos(), length),
                columnMetadataList);

        rowGroupMetadataList.add(rowGroupMeta);
    }
    Path path = Path.getPathWithoutSchemeAndAuthority(file.getPath());

    return new ParquetFileMetadata_v3(path, file.getLen(), rowGroupMetadataList);
}

From source file:org.apache.falcon.entity.FileSystemStorage.java

License:Apache License

@Override
public void validateACL(AccessControlList acl) throws FalconException {
    try {
        for (Location location : getLocations()) {
            String pathString = getRelativePath(location);
            Path path = new Path(pathString);
            FileSystem fileSystem = HadoopClientFactory.get().createProxiedFileSystem(path.toUri(), getConf());
            if (fileSystem.exists(path)) {
                FileStatus fileStatus = fileSystem.getFileStatus(path);
                Set<String> groups = CurrentUser.getGroupNames();

                if (fileStatus.getOwner().equals(acl.getOwner()) || groups.contains(acl.getGroup())) {
                    return;
                }

                LOG.error(
                        "Permission denied: Either Feed ACL owner {} or group {} doesn't "
                                + "match the actual file owner {} or group {} for file {}",
                        acl, acl.getGroup(), fileStatus.getOwner(), fileStatus.getGroup(), path);
                throw new FalconException("Permission denied: Either Feed ACL owner " + acl + " or group "
                        + acl.getGroup() + " doesn't match the actual " + "file owner " + fileStatus.getOwner()
                        + " or group " + fileStatus.getGroup() + "  for file " + path);
            }
        }
    } catch (IOException e) {
        LOG.error("Can't validate ACL on storage {}", getStorageUrl(), e);
        throw new RuntimeException("Can't validate storage ACL (URI " + getStorageUrl() + ")", e);
    }
}

From source file:org.apache.falcon.entity.parser.ClusterEntityParser.java

License:Apache License

private void checkPathOwnerAndPermission(String clusterName, String location, FileSystem fs,
        FsPermission expectedPermission) throws ValidationException {

    Path locationPath = new Path(location);
    try {
        if (!fs.exists(locationPath)) {
            throw new ValidationException(
                    "Location " + location + " for cluster " + clusterName + " must exist.");
        }

        // falcon owns this path on each cluster
        final String loginUser = UserGroupInformation.getLoginUser().getShortUserName();
        FileStatus fileStatus = fs.getFileStatus(locationPath);
        final String locationOwner = fileStatus.getOwner();
        if (!locationOwner.equals(loginUser)) {
            LOG.error("Owner of the location {} is {} for cluster {}. Current user {} is not the owner of the "
                    + "location.", locationPath, locationOwner, clusterName, loginUser);
            throw new ValidationException("Path [" + locationPath + "] on the cluster [" + clusterName
                    + "] has " + "owner [" + locationOwner + "]. Current user [" + loginUser
                    + "] is not the owner of the " + "path");
        }
        String errorMessage = "Path " + locationPath + " has permissions: "
                + fileStatus.getPermission().toString() + ", should be " + expectedPermission;
        if (fileStatus.getPermission().toShort() != expectedPermission.toShort()) {
            LOG.error(errorMessage);
            throw new ValidationException(errorMessage);
        }
        // try to list to see if the user is able to write to this folder
        fs.listStatus(locationPath);
    } catch (IOException e) {
        throw new ValidationException("Unable to validate the location with path: " + location + " for cluster:"
                + clusterName + " due to transient failures ", e);
    }
}

From source file:org.apache.falcon.entity.parser.ClusterEntityParserTest.java

License:Apache License

/**
 * A lightweight unit test for a cluster where the working location type is missing.
 * It should be generated automatically.
 * Extensive tests are found in ClusterEntityValidationIT.
 */
@Test
public void testClusterWithOnlyStaging() throws Exception {
    ClusterEntityParser clusterEntityParser = Mockito
            .spy((ClusterEntityParser) EntityParserFactory.getParser(EntityType.CLUSTER));
    Cluster cluster = (Cluster) this.dfsCluster.getCluster().copy();
    Locations locations = getClusterLocations("staging2", null);
    cluster.setLocations(locations);
    Mockito.doNothing().when(clusterEntityParser).validateWorkflowInterface(cluster);
    Mockito.doNothing().when(clusterEntityParser).validateMessagingInterface(cluster);
    Mockito.doNothing().when(clusterEntityParser).validateRegistryInterface(cluster);
    String stagingPath = ClusterHelper.getLocation(cluster, ClusterLocationType.STAGING).getPath();
    this.dfsCluster.getFileSystem().mkdirs(new Path(stagingPath), HadoopClientFactory.ALL_PERMISSION);
    clusterEntityParser.validate(cluster);
    String workingDirPath = cluster.getLocations().getLocations().get(0).getPath() + "/working";
    Assert.assertEquals(ClusterHelper.getLocation(cluster, ClusterLocationType.WORKING).getPath(),
            workingDirPath);
    FileStatus workingDirStatus = this.dfsCluster.getFileSystem().getFileLinkStatus(new Path(workingDirPath));
    Assert.assertTrue(workingDirStatus.isDirectory());
    Assert.assertEquals(workingDirStatus.getPermission(), HadoopClientFactory.READ_EXECUTE_PERMISSION);
    Assert.assertEquals(workingDirStatus.getOwner(), UserGroupInformation.getLoginUser().getShortUserName());

    FileStatus emptyDirStatus = this.dfsCluster.getFileSystem()
            .getFileStatus(new Path(stagingPath + "/" + ClusterHelper.EMPTY_DIR_NAME));
    Assert.assertEquals(emptyDirStatus.getPermission(), HadoopClientFactory.READ_ONLY_PERMISSION);
    Assert.assertEquals(emptyDirStatus.getOwner(), UserGroupInformation.getLoginUser().getShortUserName());

    String stagingSubdirFeed = cluster.getLocations().getLocations().get(0).getPath()
            + "/falcon/workflows/feed";
    String stagingSubdirProcess = cluster.getLocations().getLocations().get(0).getPath()
            + "/falcon/workflows/process";
    FileStatus stagingSubdirFeedStatus = this.dfsCluster.getFileSystem()
            .getFileLinkStatus(new Path(stagingSubdirFeed));
    FileStatus stagingSubdirProcessStatus = this.dfsCluster.getFileSystem()
            .getFileLinkStatus(new Path(stagingSubdirProcess));
    Assert.assertTrue(stagingSubdirFeedStatus.isDirectory());
    Assert.assertEquals(stagingSubdirFeedStatus.getPermission(), HadoopClientFactory.ALL_PERMISSION);
    Assert.assertTrue(stagingSubdirProcessStatus.isDirectory());
    Assert.assertEquals(stagingSubdirProcessStatus.getPermission(), HadoopClientFactory.ALL_PERMISSION);
}

From source file:org.apache.falcon.hadoop.JailedFileSystem.java

License:Apache License

@Override
public FileStatus[] listStatus(Path f) throws IOException {
    FileStatus[] fileStatuses = localFS.listStatus(toLocalPath(f));
    if (fileStatuses == null || fileStatuses.length == 0) {
        return fileStatuses;
    } else {
        FileStatus[] jailFileStatuses = new FileStatus[fileStatuses.length];
        for (int index = 0; index < fileStatuses.length; index++) {
            FileStatus status = fileStatuses[index];
            jailFileStatuses[index] = new FileStatus(status.getLen(), status.isDirectory(),
                    status.getReplication(), status.getBlockSize(), status.getModificationTime(),
                    status.getAccessTime(), status.getPermission(), status.getOwner(), status.getGroup(),
                    fromLocalPath(status.getPath()).makeQualified(this.getUri(), this.getWorkingDirectory()));
        }
        return jailFileStatuses;
    }
}

From source file:org.apache.falcon.hadoop.JailedFileSystem.java

License:Apache License

@Override
public FileStatus getFileStatus(Path f) throws IOException {
    FileStatus status = localFS.getFileStatus(toLocalPath(f));
    if (status == null) {
        return null;
    }
    return new FileStatus(status.getLen(), status.isDirectory(), status.getReplication(), status.getBlockSize(),
            status.getModificationTime(), status.getAccessTime(), status.getPermission(), status.getOwner(),
            status.getGroup(),
            fromLocalPath(status.getPath()).makeQualified(this.getUri(), this.getWorkingDirectory()));
}

From source file:org.apache.falcon.oozie.feed.OozieFeedWorkflowBuilderTest.java

License:Apache License

private void verifyWorkflowUMask(FileSystem fs, COORDINATORAPP coord, String defaultUMask) throws IOException {
    Assert.assertEquals(fs.getConf().get("fs.permissions.umask-mode"), defaultUMask);

    String appPath = coord.getAction().getWorkflow().getAppPath().replace("${nameNode}", "");
    Path wfPath = new Path(appPath);
    FileStatus[] fileStatuses = fs.listStatus(wfPath);
    for (FileStatus fileStatus : fileStatuses) {
        Assert.assertEquals(fileStatus.getOwner(), CurrentUser.getProxyUGI().getShortUserName());

        final FsPermission permission = fileStatus.getPermission();
        if (!fileStatus.isDirectory()) {
            Assert.assertEquals(permission.toString(),
                    HadoopClientFactory.getFileDefaultPermission(fs.getConf()).toString());
        }
    }
}