Example usage for org.apache.hadoop.fs.permission FsAction READ_WRITE

List of usage examples for org.apache.hadoop.fs.permission FsAction READ_WRITE

Introduction

In this page you can find the example usage for org.apache.hadoop.fs.permission FsAction READ_WRITE.

Prototype

FsAction READ_WRITE

To view the source code for org.apache.hadoop.fs.permission FsAction READ_WRITE, click the Source Link below.

Click Source Link

Usage

From source file:TestParascaleFileStatus.java

License:Apache License

public void testLoadPermissionInfo() {
    final Path path = new Path("/foo/bar");
    {
        // Listing line with user=rw-, group=r-x, other=r--.
        final ParascaleFileStatusMock status = new ParascaleFileStatusMock(10, false, 2, 32 * 1024 * 1024,
                System.currentTimeMillis(), path);
        status.permissionString = "-rw-r-xr-- 1 parascale parascale 0 Sep  9 12:37 16:43 bar";
        final FsPermission perms = status.getPermission();
        assertEquals(FsAction.READ_WRITE, perms.getUserAction());
        assertEquals(FsAction.READ_EXECUTE, perms.getGroupAction());
        assertEquals(FsAction.READ, perms.getOtherAction());
    }
    {
        // Listing line with user=rw-, group=-wx, other=r--; also check owner and block size.
        final ParascaleFileStatusMock status = new ParascaleFileStatusMock(10, false, 2, 32 * 1024 * 1024,
                System.currentTimeMillis(), path);
        status.permissionString = "-rw--wxr-- 1 parascale parascale 0 Sep  9 12:37 16:43 bar";
        assertEquals(32 * 1024 * 1024, status.getBlockSize());
        assertEquals("parascale", status.getOwner());
        final FsPermission perms = status.getPermission();
        assertEquals(FsAction.READ_WRITE, perms.getUserAction());
        assertEquals(FsAction.WRITE_EXECUTE, perms.getGroupAction());
        assertEquals(FsAction.READ, perms.getOtherAction());
    }
    // Permission info must be parsed lazily, exactly once, and shared by
    // getPermission()/getOwner()/getGroup().
    final ParascaleFileStatusMock lazyStatus = new ParascaleFileStatusMock(10, false, 2, 32 * 1024 * 1024,
            System.currentTimeMillis(), path);
    lazyStatus.permissionString = "-rw-r-xr-- 1 parascale parascale 0 Sep  9 12:37 16:43 bar";
    assertEquals("permissions already loaded - should be lazy", 0, lazyStatus.count.get());
    lazyStatus.getPermission();
    assertEquals("permissions loaded more than once", 1, lazyStatus.count.get());
    lazyStatus.getOwner();
    assertEquals("permissions loaded more than once", 1, lazyStatus.count.get());
    lazyStatus.getGroup();
    assertEquals("permissions loaded more than once", 1, lazyStatus.count.get());
}

From source file:co.cask.cdap.app.runtime.spark.distributed.SparkExecutionServiceTest.java

License:Apache License

@Test
public void testWriteCredentials() throws Exception {
    ProgramRunId runId = new ProgramRunId("ns", "app", ProgramType.SPARK, "test", RunIds.generate().getId());

    // Start a service that doesn't support workflow token
    SparkExecutionService executionService = new SparkExecutionService(locationFactory,
            InetAddress.getLoopbackAddress().getCanonicalHostName(), runId, null);
    executionService.startAndWait();
    try {
        SparkExecutionClient executionClient = new SparkExecutionClient(executionService.getBaseURI(), runId);

        Location credentialsLocation = locationFactory.create(UUID.randomUUID().toString())
                .append("credentials");
        executionClient.writeCredentials(credentialsLocation);

        // The credentials file must be owner read/write only (i.e. mode 600).
        FileStatus credentialsStatus = dfsCluster.getFileSystem()
                .getFileStatus(new Path(credentialsLocation.toURI()));
        Assert.assertEquals(FsAction.READ_WRITE, credentialsStatus.getPermission().getUserAction());
        Assert.assertEquals(FsAction.NONE, credentialsStatus.getPermission().getGroupAction());
        Assert.assertEquals(FsAction.NONE, credentialsStatus.getPermission().getOtherAction());

        // The written file must round-trip back into a Credentials instance.
        Credentials readBack = new Credentials();
        try (DataInputStream input = new DataInputStream(credentialsLocation.getInputStream())) {
            readBack.readTokenStorageStream(input);
        }

        // Call complete to notify the service it has been stopped
        executionClient.completed(null);
    } finally {
        executionService.stopAndWait();
    }
}

From source file:com.cloudera.hoop.client.fs.TestHoopFileSystem.java

License:Open Source License

/**
 * Creates a file through the Hoop (HTTP) FileSystem with owner-only rw permission,
 * then re-reads it through the native Hadoop FileSystem and verifies replication,
 * block size, permission and content.
 *
 * @param path the file path to create.
 * @param override whether an existing file at the path may be overwritten.
 * @throws Exception on any test failure.
 */
private void testCreate(Path path, boolean override) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.http.impl", HoopFileSystem.class.getName());
    FileSystem fs = FileSystem.get(getJettyURL().toURI(), conf);
    FsPermission permission = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
    try {
        // replication=2, block size=100MB; buffer size 1024 bytes.
        try (OutputStream os = fs.create(new Path(path.toUri().getPath()), permission, override, 1024,
                (short) 2, 100 * 1024 * 1024, null)) {
            os.write(1);
        }
    } finally {
        // Ensure the HTTP filesystem is closed even if create/write fails.
        fs.close();
    }

    fs = FileSystem.get(getHadoopConf());
    try {
        FileStatus status = fs.getFileStatus(path);
        // JUnit convention: expected value first, actual second.
        Assert.assertEquals(2, status.getReplication());
        Assert.assertEquals(100 * 1024 * 1024, status.getBlockSize());
        Assert.assertEquals(permission, status.getPermission());
        try (InputStream is = fs.open(path)) {
            Assert.assertEquals(1, is.read());
        }
    } finally {
        fs.close();
    }
}

From source file:com.cloudera.hoop.client.fs.TestHoopFileSystem.java

License:Open Source License

/**
 * Sets an owner-only rw permission on a file through the Hoop (HTTP) FileSystem and
 * verifies, through the native Hadoop FileSystem, that the permission was applied.
 *
 * @throws Exception on any test failure.
 */
private void testSetPermission() throws Exception {
    // Create the target file via the native Hadoop filesystem.
    FileSystem fs = FileSystem.get(getHadoopConf());
    Path path = new Path(getHadoopTestDir(), "foo.txt");
    try {
        try (OutputStream os = fs.create(path)) {
            os.write(1);
        }
    } finally {
        fs.close();
    }

    // Change the permission through the HTTP filesystem.
    Configuration conf = new Configuration();
    conf.set("fs.http.impl", HoopFileSystem.class.getName());
    fs = FileSystem.get(getJettyURL().toURI(), conf);
    FsPermission permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
    try {
        fs.setPermission(path, permission1);
    } finally {
        fs.close();
    }

    // Read the permission back natively and compare.
    fs = FileSystem.get(getHadoopConf());
    FileStatus status1;
    try {
        status1 = fs.getFileStatus(path);
    } finally {
        fs.close();
    }
    FsPermission permission2 = status1.getPermission();
    // JUnit convention: expected value first, actual second.
    Assert.assertEquals(permission1, permission2);
}

From source file:com.cloudera.impala.analysis.AlterTableAddPartitionStmt.java

License:Apache License

@Override
public void analyze(Analyzer analyzer) throws AnalysisException {
    super.analyze(analyzer);
    if (!ifNotExists_) {
        partitionSpec_.setPartitionShouldNotExist();
    }
    partitionSpec_.setPrivilegeRequirement(Privilege.ALTER);
    partitionSpec_.analyze(analyzer);

    if (location_ != null) {
        location_.analyze(analyzer, Privilege.ALL, FsAction.READ_WRITE);
    }

    final Table targetTable = getTargetTable();
    // Caching is requested either explicitly (CACHED IN ...) or inherited from a
    // table that is already marked cached.
    final boolean cacheRequested;
    if (cacheOp_ != null) {
        cacheOp_.analyze(analyzer);
        cacheRequested = cacheOp_.shouldCache();
    } else {
        cacheRequested = (targetTable instanceof HdfsTable) && ((HdfsTable) targetTable).isMarkedCached();
    }
    if (!cacheRequested) return;

    if (!(targetTable instanceof HdfsTable)) {
        throw new AnalysisException("Caching must target a HDFS table: " + targetTable.getFullName());
    }
    final HdfsTable hdfsTable = (HdfsTable) targetTable;
    // With an explicit location, that location must be cacheable; otherwise the
    // table's own location must be.
    final boolean cacheable = (location_ != null) ? FileSystemUtil.isPathCacheable(location_.getPath())
            : hdfsTable.isLocationCacheable();
    if (!cacheable) {
        throw new AnalysisException(String.format(
                "Location '%s' cannot be cached. "
                        + "Please retry without caching: ALTER TABLE %s ADD PARTITION ... UNCACHED",
                (location_ != null) ? location_.toString() : hdfsTable.getLocation(),
                targetTable.getFullName()));
    }
}

From source file:com.cloudera.impala.analysis.AlterTableSetLocationStmt.java

License:Apache License

@Override
public void analyze(Analyzer analyzer) throws AnalysisException {
    super.analyze(analyzer);
    location_.analyze(analyzer, Privilege.ALL, FsAction.READ_WRITE);

    final Table targetTable = getTargetTable();
    Preconditions.checkNotNull(targetTable);
    // Cached-location restrictions only apply to HDFS-backed tables.
    if (!(targetTable instanceof HdfsTable)) return;

    final HdfsTable hdfsTable = (HdfsTable) targetTable;
    final PartitionSpec spec = getPartitionSpec();
    if (spec != null) {
        // Targeting a partition rather than a table.
        final HdfsPartition partition = hdfsTable.getPartition(spec.getPartitionSpecKeyValues());
        Preconditions.checkNotNull(partition);
        if (partition.isMarkedCached()) {
            throw new AnalysisException(String.format("Target partition is cached, "
                    + "please uncache before changing the location using: ALTER TABLE %s %s "
                    + "SET UNCACHED", targetTable.getFullName(), spec.toSql()));
        }
    } else if (hdfsTable.isMarkedCached()) {
        throw new AnalysisException(String.format(
                "Target table is cached, please "
                        + "uncache before changing the location using: ALTER TABLE %s SET UNCACHED",
                targetTable.getFullName()));
    }
}

From source file:com.cloudera.impala.analysis.CreateTableLikeStmt.java

License:Apache License

@Override
public void analyze(Analyzer analyzer) throws AnalysisException {
    Preconditions.checkState(tableName_ != null && !tableName_.isEmpty());
    Preconditions.checkState(srcTableName_ != null && !srcTableName_.isEmpty());
    // Resolving the source table also verifies the user may view its metadata.
    srcDbName_ = analyzer.getTable(srcTableName_, Privilege.VIEW_METADATA).getDb().getName();
    tableName_.analyze();
    dbName_ = analyzer.getTargetDbName(tableName_);
    owner_ = analyzer.getUser().getName();

    final boolean targetExists = analyzer.dbContainsTable(dbName_, tableName_.getTbl(), Privilege.CREATE);
    if (targetExists && !ifNotExists_) {
        throw new AnalysisException(
                Analyzer.TBL_ALREADY_EXISTS_ERROR_MSG + String.format("%s.%s", dbName_, getTbl()));
    }
    final String qualifiedName = dbName_ + "." + tableName_.getTbl();
    analyzer.addAccessEvent(
            new TAccessEvent(qualifiedName, TCatalogObjectType.TABLE, Privilege.CREATE.toString()));

    // An explicit LOCATION requires full read/write access on the path.
    if (location_ != null) {
        location_.analyze(analyzer, Privilege.ALL, FsAction.READ_WRITE);
    }
}

From source file:com.cloudera.impala.analysis.CreateTableStmt.java

License:Apache License

@Override
public void analyze(Analyzer analyzer) throws AnalysisException {
    super.analyze(analyzer);
    Preconditions.checkState(tableName_ != null && !tableName_.isEmpty());
    // Fully qualify the table name before any further validation.
    tableName_ = analyzer.getFqTableName(tableName_);
    tableName_.analyze();
    owner_ = analyzer.getUser().getName();

    // Metastore limits the length of property keys/values; fail early if exceeded.
    MetaStoreUtil.checkShortPropertyMap("Property", tblProperties_);
    MetaStoreUtil.checkShortPropertyMap("Serde property", serdeProperties_);

    if (analyzer.dbContainsTable(tableName_.getDb(), tableName_.getTbl(), Privilege.CREATE) && !ifNotExists_) {
        throw new AnalysisException(Analyzer.TBL_ALREADY_EXISTS_ERROR_MSG + tableName_);
    }

    // Record the CREATE access for auditing/lineage.
    analyzer.addAccessEvent(
            new TAccessEvent(tableName_.toString(), TCatalogObjectType.TABLE, Privilege.CREATE.toString()));

    // Only Avro tables can have empty column defs because they can infer them from
    // the Avro schema.
    if (columnDefs_.isEmpty() && fileFormat_ != THdfsFileFormat.AVRO) {
        throw new AnalysisException("Table requires at least 1 column");
    }

    // An explicit LOCATION requires full read/write access on the path.
    if (location_ != null) {
        location_.analyze(analyzer, Privilege.ALL, FsAction.READ_WRITE);
    }

    analyzeRowFormat(analyzer);

    // Check that all the column names are valid and unique.
    analyzeColumnDefs(analyzer);

    // Kudu tables are detected via their storage handler property; DISTRIBUTE BY
    // is only legal for them.
    if (getTblProperties() != null
            && KuduTable.KUDU_STORAGE_HANDLER.equals(getTblProperties().get(KuduTable.KEY_STORAGE_HANDLER))) {
        analyzeKuduTable(analyzer);
    } else if (distributeParams_ != null) {
        throw new AnalysisException("Only Kudu tables can use DISTRIBUTE BY clause.");
    }

    // For Avro, derive the column definitions from the Avro schema and re-validate
    // them (names/uniqueness) after merging in any serde comments.
    if (fileFormat_ == THdfsFileFormat.AVRO) {
        columnDefs_ = analyzeAvroSchema(analyzer);
        if (columnDefs_.isEmpty()) {
            throw new AnalysisException("An Avro table requires column definitions or an Avro schema.");
        }
        AvroSchemaUtils.setFromSerdeComment(columnDefs_);
        analyzeColumnDefs(analyzer);
    }

    // CACHED IN: verify the cache op and, if caching, that the location is cacheable.
    if (cachingOp_ != null) {
        cachingOp_.analyze(analyzer);
        if (cachingOp_.shouldCache() && location_ != null
                && !FileSystemUtil.isPathCacheable(location_.getPath())) {
            throw new AnalysisException(String.format(
                    "Location '%s' cannot be cached. "
                            + "Please retry without caching: CREATE TABLE %s ... UNCACHED",
                    location_.toString(), tableName_));
        }
    }

    // Analyze 'skip.header.line.format' property.
    if (tblProperties_ != null) {
        AlterTableSetTblProperties.analyzeSkipHeaderLineCount(tblProperties_);
    }
}

From source file:com.cloudera.recordbreaker.fisheye.AccessController.java

License:Open Source License

/**
 * Returns whether the current user may read the given file.
 *
 * <p>Access is granted when the file is world-readable, or when the current
 * logged-in user owns the file and the owner bits include read.  FsAction is a
 * bitmask enum, so {@code action.implies(FsAction.READ)} covers READ,
 * READ_EXECUTE, READ_WRITE and ALL in one check.
 *
 * @param fs summary of the file whose permissions are examined.
 * @return true if the current user has read access.
 */
public boolean hasReadAccess(FileSummary fs) {
    String fileOwner = fs.getOwner();
    FsPermission fsp = fs.getPermissions();

    // Check world-readable
    if (fsp.getOtherAction().implies(FsAction.READ)) {
        return true;
    }

    // Check group-readable
    // REMIND -- mjc -- implement group-readable testing when we have the user database
    // that will tell us the current logged-in-user's groups (fs.getGroup() will be
    // needed then).

    // Check owner-readable
    if (currentUser != null && currentUser.equals(fileOwner)) {
        return fsp.getUserAction().implies(FsAction.READ);
    }

    return false;
}

From source file:com.moz.fiji.mapreduce.tools.FijiBulkLoad.java

License:Apache License

/**
 * Helper method used by recursiveGrantAllReadWritePermissions to actually grant the
 * additional read and write permissions to all.  It deals with FileStatus objects
 * since that is the object that supports listStatus.
 *
 * @param hdfs The FileSystem on which the file exists.
 * @param status The status of the file whose permissions are checked and on whose children
 *     this method is called recursively.
 * @throws IOException on IOException./*from www  .  ja  v a  2 s . c om*/
 */
private void recursiveGrantAllReadWritePermissions(FileSystem hdfs, FileStatus status) throws IOException {
    final FsPermission currentPermissions = status.getPermission();
    if (!currentPermissions.getOtherAction().implies(FsAction.READ_WRITE)) {
        LOG.info("Adding a+rw to permissions for {}: {}", status.getPath(), currentPermissions);
        hdfs.setPermission(status.getPath(),
                new FsPermission(currentPermissions.getUserAction(),
                        currentPermissions.getGroupAction().or(FsAction.READ_WRITE),
                        currentPermissions.getOtherAction().or(FsAction.READ_WRITE)));
    }
    // Recurse into any files and directories in the path.
    // We must use listStatus because listFiles does not list subdirectories.
    FileStatus[] subStatuses = hdfs.listStatus(status.getPath());
    for (FileStatus subStatus : subStatuses) {
        if (!subStatus.equals(status)) {
            recursiveGrantAllReadWritePermissions(hdfs, subStatus);
        }
    }
}