Example usage for org.apache.hadoop.fs.permission FsAction READ

List of usage examples for org.apache.hadoop.fs.permission FsAction READ

Introduction

In this page you can find the example usage for org.apache.hadoop.fs.permission FsAction READ.

Prototype

FsAction READ

Click the Source Link below to view the source code for org.apache.hadoop.fs.permission FsAction READ.

Click Source Link

Usage

From source file:TestParascaleFileStatus.java

License:Apache License

public void testLoadPermissionInfo() {
    final Path p = new Path("/foo/bar");
    {
        // An "-rw-r-xr--" listing line must decode into the matching
        // user/group/other FsAction triple.
        final ParascaleFileStatusMock status = new ParascaleFileStatusMock(10, false, 2, 32 * 1024 * 1024,
                System.currentTimeMillis(), p);
        status.permissionString = "-rw-r-xr-- 1 parascale parascale 0 Sep  9 12:37 16:43 bar";
        final FsPermission perm = status.getPermission();
        assertEquals(FsAction.READ_WRITE, perm.getUserAction());
        assertEquals(FsAction.READ_EXECUTE, perm.getGroupAction());
        assertEquals(FsAction.READ, perm.getOtherAction());
    }
    {
        // A different bit pattern ("-rw--wxr--") plus owner/blocksize checks.
        final ParascaleFileStatusMock status = new ParascaleFileStatusMock(10, false, 2, 32 * 1024 * 1024,
                System.currentTimeMillis(), p);
        status.permissionString = "-rw--wxr-- 1 parascale parascale 0 Sep  9 12:37 16:43 bar";
        assertEquals(32 * 1024 * 1024, status.getBlockSize());
        assertEquals("parascale", status.getOwner());
        final FsPermission perm = status.getPermission();
        assertEquals(FsAction.READ_WRITE, perm.getUserAction());
        assertEquals(FsAction.WRITE_EXECUTE, perm.getGroupAction());
        assertEquals(FsAction.READ, perm.getOtherAction());
    }
    // Permission info must be loaded lazily and exactly once, regardless of
    // which accessor (permission/owner/group) triggers or follows the load.
    final ParascaleFileStatusMock status = new ParascaleFileStatusMock(10, false, 2, 32 * 1024 * 1024,
            System.currentTimeMillis(), p);
    status.permissionString = "-rw-r-xr-- 1 parascale parascale 0 Sep  9 12:37 16:43 bar";
    assertEquals("permissions already loaded - should be lazy", 0, status.count.get());
    status.getPermission();
    assertEquals("permissions loaded more than once", 1, status.count.get());
    status.getOwner();
    assertEquals("permissions loaded more than once", 1, status.count.get());
    status.getGroup();
    assertEquals("permissions loaded more than once", 1, status.count.get());
}

From source file:alluxio.underfs.hdfs.acl.SupportedHdfsAclProvider.java

License:Apache License

/**
 * Reads the HDFS ACL of {@code path} and translates it into Alluxio's
 * access and default ACL representations.
 *
 * @param hdfs the HDFS file system to query
 * @param path the path whose ACL is read
 * @return a pair of (access ACL, default ACL); the default ACL is null for
 *         files, and both are null when HDFS ACLs are disabled
 * @throws IOException if the HDFS calls fail for reasons other than ACLs
 *         being disabled
 */
@Override
public Pair<AccessControlList, DefaultAccessControlList> getAcl(FileSystem hdfs, String path)
        throws IOException {
    AclStatus hdfsAcl;
    Path filePath = new Path(path);
    boolean isDir = hdfs.isDirectory(filePath);
    try {
        hdfsAcl = hdfs.getAclStatus(filePath);
    } catch (AclException e) {
        // When dfs.namenode.acls.enabled is false, getAclStatus throws AclException.
        return new Pair<>(null, null);
    }
    AccessControlList acl = new AccessControlList();
    DefaultAccessControlList defaultAcl = new DefaultAccessControlList();

    acl.setOwningUser(hdfsAcl.getOwner());
    acl.setOwningGroup(hdfsAcl.getGroup());
    defaultAcl.setOwningUser(hdfsAcl.getOwner());
    defaultAcl.setOwningGroup(hdfsAcl.getGroup());
    for (AclEntry entry : hdfsAcl.getEntries()) {
        alluxio.security.authorization.AclEntry.Builder builder = new alluxio.security.authorization.AclEntry.Builder();
        builder.setType(getAclEntryType(entry));
        builder.setSubject(entry.getName() == null ? "" : entry.getName());
        FsAction permission = entry.getPermission();
        // BUGFIX: these checks used to be an if/else-if chain, so an entry
        // granting multiple actions (e.g. rwx) only recorded READ. Each
        // permission bit must be translated independently.
        if (permission.implies(FsAction.READ)) {
            builder.addAction(AclAction.READ);
        }
        if (permission.implies(FsAction.WRITE)) {
            builder.addAction(AclAction.WRITE);
        }
        if (permission.implies(FsAction.EXECUTE)) {
            builder.addAction(AclAction.EXECUTE);
        }
        if (entry.getScope().equals(AclEntryScope.ACCESS)) {
            acl.setEntry(builder.build());
        } else {
            // default ACL, must be a directory
            defaultAcl.setEntry(builder.build());
        }
    }
    if (isDir) {
        return new Pair<>(acl, defaultAcl);
    } else {
        // a null defaultACL indicates this is a file
        return new Pair<>(acl, null);
    }
}

From source file:com.cloudera.hadoop.hdfs.nfs.nfs4.handlers.TestACCESSHandler.java

License:Apache License

/**
 * Exhaustively checks that ACCESSHandler.getPermsForUserGroup maps HDFS
 * user/group/other permission bits to the expected NFS4 ACCESS bitmask.
 * The mocked file is owned by root:wheel; the requesting identity varies
 * between "root"/"notroot" and group "wheel"/"notwheel".
 */
@Test
public void testPerms() throws Exception {
    List<PermTest> perms = Lists.newArrayList();
    // read for owner when owner
    perms.add(new PermTest("root", "wheel", new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE),
            NFS_ACCESS_READ | NFS_ACCESS_LOOKUP));
    // read for group when owner
    perms.add(new PermTest("root", "wheel", new FsPermission(FsAction.NONE, FsAction.READ, FsAction.NONE),
            NFS_ACCESS_READ | NFS_ACCESS_LOOKUP));
    // read for other when owner
    perms.add(new PermTest("root", "wheel", new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.READ),
            NFS_ACCESS_READ | NFS_ACCESS_LOOKUP));
    // read for other when not owner
    perms.add(new PermTest("notroot", "wheel", new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.READ),
            NFS_ACCESS_READ | NFS_ACCESS_LOOKUP));
    // read for other when not in group
    perms.add(new PermTest("root", "notwheel", new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.READ),
            NFS_ACCESS_READ | NFS_ACCESS_LOOKUP));
    // read for other when not owner or group
    perms.add(new PermTest("notroot", "notwheel", new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.READ),
            NFS_ACCESS_READ | NFS_ACCESS_LOOKUP));

    // write for owner when owner
    perms.add(new PermTest("root", "wheel", new FsPermission(FsAction.WRITE, FsAction.NONE, FsAction.NONE),
            NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | NFS_ACCESS_DELETE));
    // write for group when owner
    perms.add(new PermTest("root", "wheel", new FsPermission(FsAction.NONE, FsAction.WRITE, FsAction.NONE),
            NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | NFS_ACCESS_DELETE));
    // write for other when owner
    perms.add(new PermTest("root", "wheel", new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.WRITE),
            NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | NFS_ACCESS_DELETE));
    // write for other when not owner (non-owners do not get NFS_ACCESS_DELETE)
    perms.add(new PermTest("notroot", "wheel", new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.WRITE),
            NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND));
    // write for other when not in group (still owner, so DELETE is granted)
    perms.add(new PermTest("root", "notwheel", new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.WRITE),
            NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | NFS_ACCESS_DELETE));
    // write for other when not owner or group
    perms.add(
            new PermTest("notroot", "notwheel", new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.WRITE),
                    NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND));

    // execute for owner when owner
    perms.add(new PermTest("root", "wheel", new FsPermission(FsAction.EXECUTE, FsAction.NONE, FsAction.NONE),
            NFS_ACCESS_EXECUTE));
    // execute for group when owner
    perms.add(new PermTest("root", "wheel", new FsPermission(FsAction.NONE, FsAction.EXECUTE, FsAction.NONE),
            NFS_ACCESS_EXECUTE));
    // execute for other when owner
    perms.add(new PermTest("root", "wheel", new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.EXECUTE),
            NFS_ACCESS_EXECUTE));
    // execute for other when not owner
    perms.add(new PermTest("notroot", "wheel", new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.EXECUTE),
            NFS_ACCESS_EXECUTE));
    // execute for other when not in group
    perms.add(new PermTest("root", "notwheel", new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.EXECUTE),
            NFS_ACCESS_EXECUTE));
    // execute for other when not owner or group
    perms.add(new PermTest("notroot", "notwheel",
            new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.EXECUTE), NFS_ACCESS_EXECUTE));
    // no perms but owner, this might be rethought?
    perms.add(new PermTest("root", "wheel", new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.NONE), 0));
    // all for user/group but requester matches neither user nor group
    perms.add(new PermTest("notroot", "notwheel", new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.NONE),
            0));
    // all for user but requester is not the owner (group bits grant nothing here)
    perms.add(
            new PermTest("notroot", "wheel", new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE), 0));
    // owner has all, is owner
    perms.add(new PermTest("root", "wheel", new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE),
            NFS_ACCESS_READ | NFS_ACCESS_LOOKUP | NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | NFS_ACCESS_DELETE
                    | NFS_ACCESS_EXECUTE));
    // group has all is owner
    perms.add(new PermTest("root", "wheel", new FsPermission(FsAction.NONE, FsAction.ALL, FsAction.NONE),
            NFS_ACCESS_READ | NFS_ACCESS_LOOKUP | NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | NFS_ACCESS_DELETE
                    | NFS_ACCESS_EXECUTE));
    // other has all is owner
    perms.add(new PermTest("root", "wheel", new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.ALL),
            NFS_ACCESS_READ | NFS_ACCESS_LOOKUP | NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | NFS_ACCESS_DELETE
                    | NFS_ACCESS_EXECUTE));

    for (PermTest permTest : perms) {
        // The mocked file status reports this case's permission bits.
        when(filePermissions.toShort()).thenReturn(permTest.perm.toShort());
        int result = ACCESSHandler.getPermsForUserGroup(permTest.user, new String[] { permTest.group },
                fileStatus);
        // Compare as binary strings so a failure message shows which bits differ.
        assertEquals(permTest.toString(), Integer.toBinaryString(permTest.result),
                Integer.toBinaryString(result));
    }
}

From source file:com.cloudera.impala.analysis.CreateFunctionStmtBase.java

License:Apache License

/**
 * Analyzes a CREATE FUNCTION statement: validates the function name,
 * signature and location, registers the required privileges, and fills in
 * the catalog function object ({@code fn_}).
 *
 * @param analyzer the analyzer providing catalog and authorization access
 * @throws AnalysisException if the name clashes with a builtin, the
 *         function already exists (without IF NOT EXISTS), a type is
 *         unsupported, or a native function lacks a signature
 */
@Override
public void analyze(Analyzer analyzer) throws AnalysisException {
    // Validate function name is legal
    fnName_.analyze(analyzer);

    if (hasSignature()) {
        // Validate function arguments and return type.
        args_.analyze(analyzer);
        retTypeDef_.analyze(analyzer);
        fn_ = createFunction(fnName_, args_.getArgTypes(), retTypeDef_.getType(), args_.hasVarArgs());
    } else {
        // No signature: e.g. a JAVA function added by class name only.
        fn_ = createFunction(fnName_, null, null, false);
    }

    // For now, if authorization is enabled, the user needs ALL on the server
    // to create functions.
    // TODO: this is not the right granularity but acceptable for now.
    analyzer.registerPrivReq(new PrivilegeRequest(new AuthorizeableFn(fn_.signatureString()), Privilege.ALL));

    // Builtins may never be shadowed by user functions.
    Db builtinsDb = analyzer.getCatalog().getDb(Catalog.BUILTINS_DB);
    if (builtinsDb.containsFunction(fn_.getName())) {
        throw new AnalysisException(
                "Function cannot have the same name as a builtin: " + fn_.getFunctionName().getFunction());
    }

    // Resolving the target db also registers the CREATE privilege request.
    db_ = analyzer.getDb(fn_.dbName(), Privilege.CREATE);
    Function existingFn = db_.getFunction(fn_, Function.CompareMode.IS_INDISTINGUISHABLE);
    if (existingFn != null && !ifNotExists_) {
        throw new AnalysisException(Analyzer.FN_ALREADY_EXISTS_ERROR_MSG + existingFn.signatureString());
    }

    // The binary location must be readable; CREATE privilege is also checked.
    location_.analyze(analyzer, Privilege.CREATE, FsAction.READ);
    fn_.setLocation(location_);

    // Check the file type from the binary type to infer the type of the UDA
    fn_.setBinaryType(getBinaryType());

    // Forbid unsupported and complex types.
    if (hasSignature()) {
        List<Type> refdTypes = Lists.newArrayList(fn_.getReturnType());
        refdTypes.addAll(Lists.newArrayList(fn_.getArgs()));
        for (Type t : refdTypes) {
            if (!t.isSupported() || t.isComplexType()) {
                throw new AnalysisException(
                        String.format("Type '%s' is not supported in UDFs/UDAs.", t.toSql()));
            }
        }
    } else if (fn_.getBinaryType() != TFunctionBinaryType.JAVA) {
        // Only JAVA functions may omit the signature.
        throw new AnalysisException(
                String.format("Native functions require a return type and/or " + "argument types: %s",
                        fn_.getFunctionName()));
    }

    // Check if the function can be persisted. We persist all native/IR functions
    // and also JAVA functions added without signature. Only JAVA functions added
    // with signatures aren't persisted.
    if (getBinaryType() == TFunctionBinaryType.JAVA && hasSignature()) {
        fn_.setIsPersistent(false);
    } else {
        fn_.setIsPersistent(true);
    }
}

From source file:com.cloudera.impala.analysis.CreateTableDataSrcStmt.java

License:Apache License

/**
 * Analyzes CREATE TABLE ... PRODUCED BY DATA SOURCE: resolves the data
 * source from the catalog, validates column types, and stores the data
 * source's properties in the table metadata.
 */
@Override
public void analyze(Analyzer analyzer) throws AnalysisException {
    super.analyze(analyzer);
    final String dataSourceName = getTblProperties().get(TBL_PROP_DATA_SRC_NAME);
    final DataSource dataSource = analyzer.getCatalog().getDataSource(dataSourceName);
    if (dataSource == null) {
        throw new AnalysisException("Data source does not exist: " + dataSourceName);
    }

    // Reject column types the external data source API cannot produce.
    for (ColumnDef col : getColumnDefs()) {
        if (!DataSourceTable.isSupportedColumnType(col.getType())) {
            throw new AnalysisException("Tables produced by an external data source do "
                    + "not support the column type: " + col.getType());
        }
    }

    // Add table properties from the DataSource catalog object now that we have access
    // to the catalog. These are stored in the table metadata because DataSource catalog
    // objects are not currently persisted.
    final String dataSrcLocation = dataSource.getLocation();
    getTblProperties().put(TBL_PROP_LOCATION, dataSrcLocation);
    getTblProperties().put(TBL_PROP_CLASS, dataSource.getClassName());
    getTblProperties().put(TBL_PROP_API_VER, dataSource.getApiVersion());
    // The data source binary must exist and be readable.
    new HdfsUri(dataSrcLocation).analyze(analyzer, Privilege.ALL, FsAction.READ);
    // TODO: check class exists and implements API version
}

From source file:com.cloudera.impala.analysis.FunctionCallExpr.java

License:Apache License

/**
 * Analyzes a function call expression: resolves the function in the
 * catalog, validates aggregate/analytic usage, applies implicit casts and
 * determines the result type. Registers DB and (for UDFs) URI privilege
 * requests with the analyzer.
 *
 * @throws AnalysisException if the function is unknown, used illegally
 *         (e.g. nested aggregates, missing OVER clause), or UDF execution
 *         is disabled
 */
@Override
public void analyze(Analyzer analyzer) throws AnalysisException {
    if (isAnalyzed_)
        return;
    super.analyze(analyzer);
    fnName_.analyze(analyzer);

    if (isMergeAggFn_) {
        // This is the function call expr after splitting up to a merge aggregation.
        // The function has already been analyzed so just do the minimal sanity
        // check here.
        AggregateFunction aggFn = (AggregateFunction) fn_;
        Preconditions.checkNotNull(aggFn);
        Type intermediateType = aggFn.getIntermediateType();
        if (intermediateType == null)
            intermediateType = type_;
        Preconditions.checkState(!type_.isWildcardDecimal());
        return;
    }

    Type[] argTypes = collectChildReturnTypes();

    // User needs DB access.
    Db db = analyzer.getDb(fnName_.getDb(), Privilege.VIEW_METADATA, true);
    if (!db.containsFunction(fnName_.getFunction())) {
        throw new AnalysisException(fnName_ + "() unknown");
    }

    if (fnName_.getFunction().equals("count") && params_.isDistinct()) {
        // Treat COUNT(DISTINCT ...) special because of how we do the rewrite.
        // There is no version of COUNT() that takes more than 1 argument but after
        // the rewrite, we only need count(*).
        // TODO: fix how we rewrite count distinct.
        argTypes = new Type[0];
        Function searchDesc = new Function(fnName_, argTypes, Type.INVALID, false);
        fn_ = db.getFunction(searchDesc, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
        type_ = fn_.getReturnType();
        // Make sure BE doesn't see any TYPE_NULL exprs
        for (int i = 0; i < children_.size(); ++i) {
            if (getChild(i).getType().isNull()) {
                uncheckedCastChild(ScalarType.BOOLEAN, i);
            }
        }
        return;
    }

    // TODO: We allow implicit cast from string->timestamp but only
    // support avg(timestamp). This means avg(string_col) would work
    // from our casting rules. This is not right.
    // We need to revisit where implicit casts are allowed for string
    // to timestamp
    if (fnName_.getFunction().equalsIgnoreCase("avg") && children_.size() == 1
            && children_.get(0).getType().isStringType()) {
        throw new AnalysisException("AVG requires a numeric or timestamp parameter: " + toSql());
    }

    // Resolve the function by signature, allowing implicit casts of the args.
    Function searchDesc = new Function(fnName_, argTypes, Type.INVALID, false);
    fn_ = db.getFunction(searchDesc, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
    if (fn_ == null || (!isInternalFnCall_ && !fn_.userVisible())) {
        throw new AnalysisException(getFunctionNotFoundError(argTypes));
    }

    // Throw AnalysisException if the execution of UDF is disabled.
    if (fn_.getBinaryType() != TFunctionBinaryType.BUILTIN && analyzer.isUDFDisabled()) {
        throw new AnalysisException("Execution of UDFs is currently disabled for security "
                + "reasons. If the UDFs registered in the Hive Metastore are known to be safe, "
                + "execution can be enabled by setting '-rs_disable_udf' to false.");
    }

    if (isAggregateFunction()) {
        // subexprs must not contain aggregates
        if (TreeNode.contains(children_, Expr.isAggregatePredicate())) {
            throw new AnalysisException(
                    "aggregate function must not contain aggregate parameters: " + this.toSql());
        }

        // .. or analytic exprs
        if (Expr.contains(children_, AnalyticExpr.class)) {
            throw new AnalysisException(
                    "aggregate function must not contain analytic parameters: " + this.toSql());
        }

        // The catalog contains count() with no arguments to handle count(*) but don't
        // accept count().
        // TODO: can this be handled more cleanly. It does seem like a special case since
        // no other aggregate functions (currently) can accept '*'.
        if (fnName_.getFunction().equalsIgnoreCase("count") && !params_.isStar() && children_.size() == 0) {
            throw new AnalysisException("count() is not allowed.");
        }

        // TODO: the distinct rewrite does not handle this but why?
        if (params_.isDistinct()) {
            if (fnName_.getFunction().equalsIgnoreCase("group_concat")) {
                throw new AnalysisException("GROUP_CONCAT() does not support DISTINCT.");
            }
            if (fn_.getBinaryType() != TFunctionBinaryType.BUILTIN) {
                throw new AnalysisException("User defined aggregates do not support DISTINCT.");
            }
        }

        // DISTINCT is a no-op for e.g. min/max; drop it so no rewrite happens.
        AggregateFunction aggFn = (AggregateFunction) fn_;
        if (aggFn.ignoresDistinct())
            params_.setIsDistinct(false);
    }

    if (isScalarFunction())
        validateScalarFnParams(params_);
    // Pure analytic functions (e.g. rank()) are only legal with an OVER clause.
    if (fn_ instanceof AggregateFunction && ((AggregateFunction) fn_).isAnalyticFn()
            && !((AggregateFunction) fn_).isAggregateFn() && !isAnalyticFnCall_) {
        throw new AnalysisException("Analytic function requires an OVER clause: " + toSql());
    }

    castForFunctionCall(false);
    type_ = fn_.getReturnType();
    if (type_.isDecimal() && type_.isWildcardDecimal()) {
        type_ = resolveDecimalReturnType(analyzer);
    }

    // We do not allow any function to return a type CHAR or VARCHAR
    // TODO add support for CHAR(N) and VARCHAR(N) return values in post 2.0,
    // support for this was not added to the backend in 2.0
    if (type_.isWildcardChar() || type_.isWildcardVarchar()) {
        type_ = ScalarType.STRING;
    }

    // User needs uri access.
    // Here we will do the permission check against URI for the user and register the
    // uri privilege request to the analyzer, which will authorize all accesses later.
    if (fn_.getLocation() != null) {
        fn_.getLocation().analyze(analyzer, Privilege.ALL, FsAction.READ);
    }
}

From source file:com.cloudera.recordbreaker.fisheye.AccessController.java

License:Open Source License

/**
 * Returns whether the current user may read the given file.
 *
 * A file is readable if its "other" bits grant read, or if the current
 * user is the file's owner and the "user" bits grant read. Group-based
 * access is not yet implemented (see REMIND below).
 *
 * @param fs summary of the file whose permissions are checked
 * @return true if the file is readable by the current user
 */
public boolean hasReadAccess(FileSummary fs) {
    String fileOwner = fs.getOwner();
    FsPermission fsp = fs.getPermissions();

    // Check world-readable. FsAction.implies(READ) is true exactly for
    // READ, READ_WRITE, READ_EXECUTE and ALL, replacing the previous
    // four-way == enumeration of those constants.
    if (fsp.getOtherAction().implies(FsAction.READ)) {
        return true;
    }

    // Check group-readable
    // REMIND -- mjc -- implement group-readable testing when we have the user database
    // that will tell us the current logged-in-user's groups.
    // (fs.getGroup() will be needed here once groups are available.)

    // Check owner-readable
    if (currentUser != null && currentUser.equals(fileOwner)) {
        return fsp.getUserAction().implies(FsAction.READ);
    }

    return false;
}

From source file:com.datatorrent.stram.util.FSUtil.java

License:Apache License

/**
 * Download the file from dfs to local file.
 *
 * @param fs the source (DFS) file system
 * @param destinationPath local directory the file is copied into; created if absent
 * @param destinationFile name of the local file to create inside destinationPath
 * @param dfsFile path of the source file on the DFS
 * @param conf configuration used for the copy
 * @return the local file that was written
 * @throws IOException if the copy or permission changes fail
 */
public static File copyToLocalFileSystem(FileSystem fs, String destinationPath, String destinationFile,
        String dfsFile, Configuration conf) throws IOException {
    File destinationDir = new File(destinationPath);
    if (!destinationDir.exists() && !destinationDir.mkdirs()) {
        throw new RuntimeException("Unable to create local directory");
    }
    RawLocalFileSystem localFileSystem = new RawLocalFileSystem();
    try {
        // allow app user to access local dir
        FsPermission permissions = new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE);
        localFileSystem.setPermission(new Path(destinationDir.getAbsolutePath()), permissions);

        Path dfsFilePath = new Path(dfsFile);
        File localFile = new File(destinationDir, destinationFile);
        // BUGFIX: FileUtil.copy returns a success flag that was previously
        // ignored; fail loudly instead of returning a file that may not exist.
        if (!FileUtil.copy(fs, dfsFilePath, localFile, false, conf)) {
            throw new IOException("Failed to copy " + dfsFile + " to " + localFile.getAbsolutePath());
        }
        // set permissions on actual file to be read-only for user
        permissions = new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE);
        localFileSystem.setPermission(new Path(localFile.getAbsolutePath()), permissions);
        return localFile;
    } finally {
        // Always release the local file system handle, even if the copy fails.
        localFileSystem.close();
    }
}

From source file:com.inmobi.conduit.distcp.tools.mapred.TestCopyMapper.java

License:Apache License

/**
 * Verifies that CopyMapper, running as an unprivileged remote user
 * ("guest"), can copy a source file that is world-readable (r--r--r--)
 * into a world-writable target directory.
 */
@Test
public void testCopyReadableFiles() {
    try {
        deleteState();
        createSourceData();

        // Run the mapper as a user with no special privileges.
        UserGroupInformation tmpUser = UserGroupInformation.createRemoteUser("guest");

        final CopyMapper copyMapper = new CopyMapper();

        // The mapper context must be created under the guest user's identity.
        final Mapper<Text, FileStatus, NullWritable, Text>.Context context = tmpUser
                .doAs(new PrivilegedAction<Mapper<Text, FileStatus, NullWritable, Text>.Context>() {
                    @Override
                    public Mapper<Text, FileStatus, NullWritable, Text>.Context run() {
                        try {
                            StatusReporter reporter = new StubStatusReporter();
                            InMemoryWriter writer = new InMemoryWriter();
                            return getMapperContext(copyMapper, reporter, writer);
                        } catch (Exception e) {
                            LOG.error("Exception encountered ", e);
                            throw new RuntimeException(e);
                        }
                    }
                });

        touchFile(SOURCE_PATH + "/src/file.gz");
        mkdirs(TARGET_PATH);
        // Source is world-readable (r--r--r--); target dir is 0777 (511 decimal).
        cluster.getFileSystem().setPermission(new Path(SOURCE_PATH + "/src/file.gz"),
                new FsPermission(FsAction.READ, FsAction.READ, FsAction.READ));
        cluster.getFileSystem().setPermission(new Path(TARGET_PATH), new FsPermission((short) 511));

        // File system handle as seen by the guest user.
        final FileSystem tmpFS = tmpUser.doAs(new PrivilegedAction<FileSystem>() {
            @Override
            public FileSystem run() {
                try {
                    return FileSystem.get(configuration);
                } catch (IOException e) {
                    LOG.error("Exception encountered ", e);
                    Assert.fail("Test failed: " + e.getMessage());
                    throw new RuntimeException("Test ought to fail here");
                }
            }
        });

        // The actual copy must succeed under the guest identity.
        tmpUser.doAs(new PrivilegedAction<Integer>() {
            @Override
            public Integer run() {
                try {
                    copyMapper.setup(context);
                    copyMapper.map(new Text("/src/file.gz"),
                            tmpFS.getFileStatus(new Path(SOURCE_PATH + "/src/file.gz")), context);
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
                return null;
            }
        });
    } catch (Exception e) {
        LOG.error("Exception encountered ", e);
        Assert.fail("Test failed: " + e.getMessage());
    }
}

From source file:com.inmobi.conduit.distcp.tools.mapred.TestCopyMapper.java

License:Apache License

/**
 * Verifies that CopyMapper, running as an unprivileged remote user
 * ("guest") with all file attributes preserved, handles the case where
 * source and target already exist with identical read-only permissions —
 * the copy should be skipped rather than fail on the unwritable target.
 */
@Test
public void testSkipCopyNoPerms() {
    try {
        deleteState();
        createSourceData();

        final InMemoryWriter writer = new InMemoryWriter();
        // Run the mapper as a user with no special privileges.
        UserGroupInformation tmpUser = UserGroupInformation.createRemoteUser("guest");

        final CopyMapper copyMapper = new CopyMapper();

        // The mapper context must be created under the guest user's identity.
        final Mapper<Text, FileStatus, NullWritable, Text>.Context context = tmpUser
                .doAs(new PrivilegedAction<Mapper<Text, FileStatus, NullWritable, Text>.Context>() {
                    @Override
                    public Mapper<Text, FileStatus, NullWritable, Text>.Context run() {
                        try {
                            StatusReporter reporter = new StubStatusReporter();
                            return getMapperContext(copyMapper, reporter, writer);
                        } catch (Exception e) {
                            LOG.error("Exception encountered ", e);
                            throw new RuntimeException(e);
                        }
                    }
                });

        // Request preservation of every file attribute, including permissions.
        EnumSet<DistCpOptions.FileAttribute> preserveStatus = EnumSet.allOf(DistCpOptions.FileAttribute.class);

        context.getConfiguration().set(DistCpConstants.CONF_LABEL_PRESERVE_STATUS,
                DistCpUtils.packAttributes(preserveStatus));

        // Both source and target exist with identical r--r--r-- permissions.
        touchFile(SOURCE_PATH + "/src/file.gz");
        touchFile(TARGET_PATH + "/src/file.gz");
        cluster.getFileSystem().setPermission(new Path(SOURCE_PATH + "/src/file.gz"),
                new FsPermission(FsAction.READ, FsAction.READ, FsAction.READ));
        cluster.getFileSystem().setPermission(new Path(TARGET_PATH + "/src/file.gz"),
                new FsPermission(FsAction.READ, FsAction.READ, FsAction.READ));

        // File system handle as seen by the guest user.
        final FileSystem tmpFS = tmpUser.doAs(new PrivilegedAction<FileSystem>() {
            @Override
            public FileSystem run() {
                try {
                    return FileSystem.get(configuration);
                } catch (IOException e) {
                    LOG.error("Exception encountered ", e);
                    Assert.fail("Test failed: " + e.getMessage());
                    throw new RuntimeException("Test ought to fail here");
                }
            }
        });

        // The map call must complete (skip) under the guest identity.
        tmpUser.doAs(new PrivilegedAction<Integer>() {
            @Override
            public Integer run() {
                try {
                    copyMapper.setup(context);
                    copyMapper.map(new Text("/src/file.gz"),
                            tmpFS.getFileStatus(new Path(SOURCE_PATH + "/src/file.gz")), context);
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
                return null;
            }
        });
    } catch (Exception e) {
        LOG.error("Exception encountered ", e);
        Assert.fail("Test failed: " + e.getMessage());
    }
}