Example usage for org.apache.hadoop.fs FileSystem setPermission

List of usage examples for org.apache.hadoop.fs FileSystem setPermission

Introduction

On this page you can find usage examples for the org.apache.hadoop.fs.FileSystem method setPermission.

Prototype

public void setPermission(Path p, FsPermission permission) throws IOException 

Document

Set permission of a path.
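
As a quick orientation before the examples below, here is a minimal, self-contained sketch of calling setPermission directly. The path and the octal mode "644" are illustrative assumptions, not values taken from the examples on this page:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class SetPermissionExample {
    public static void main(String[] args) throws Exception {
        // Connect to the default file system configured in core-site.xml.
        FileSystem fs = FileSystem.get(new Configuration());
        // Hypothetical path; substitute a file your user owns.
        Path path = new Path("/tmp/example.txt");
        // FsPermission accepts an octal string such as "644" (rw-r--r--).
        fs.setPermission(path, new FsPermission("644"));
        fs.close();
    }
}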

Usage

From source file:com.pinterest.hdfsbackup.distcp.DistCp.java

License:Apache License

private static void updatePermissions(FileStatus src, FileStatus dst, EnumSet<FileAttribute> preseved,
        FileSystem destFileSys) throws IOException {
    String owner = null;
    String group = null;
    if (preseved.contains(FileAttribute.USER) && !src.getOwner().equals(dst.getOwner())) {
        owner = src.getOwner();
    }
    if (preseved.contains(FileAttribute.GROUP) && !src.getGroup().equals(dst.getGroup())) {
        group = src.getGroup();
    }
    if (owner != null || group != null) {
        destFileSys.setOwner(dst.getPath(), owner, group);
    }
    if (preseved.contains(FileAttribute.PERMISSION) && !src.getPermission().equals(dst.getPermission())) {
        destFileSys.setPermission(dst.getPath(), src.getPermission());
    }
}

From source file:com.redsqirl.workflow.server.connect.HDFSInterface.java

License:Open Source License

/**
 * Change the permissions of a path
 * 
 * @param fs
 * @param path
 * @param permission
 * @param recursive
 * @return Error Message
 */
protected String changePermission(FileSystem fs, Path path, String permission, boolean recursive) {
    String error = null;
    try {
        FileStatus stat = fs.getFileStatus(path);
        if (stat.getOwner().equals(System.getProperty("user.name"))) {
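            // Recurse into the children first; the directory itself is changed last.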
            if (recursive) {
                FileStatus[] child = fs.listStatus(path);
                for (int i = 0; i < child.length && error == null; ++i) {
                    error = changePermission(fs, child[i].getPath(), permission, recursive);
                }
            }
            if (error == null) {
                logger.debug("1 ----- path " + path.getName() + " new perms " + permission);
                fs.setPermission(path, new FsPermission(permission));
            }
        } else {
            error = LanguageManagerWF.getText("HdfsInterface.changeprop.ownererror",
                    new Object[] { path.toString() });
        }
    } catch (IOException e) {
        logger.error("Cannot operate on the file or directory: " + path.toString());
        logger.error(e.getMessage());
        error = LanguageManagerWF.getText("HdfsInterface.changeprop.fileaccess", new Object[] { path });
    }
    if (error != null) {
        logger.debug(error);
    }
    return error;
}
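
Note that this helper signals failure through its return value rather than by throwing: null means success, and the recursive walk stops at the first child that returns an error message.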

From source file:com.redsqirl.workflow.server.connect.HDFSInterface.java

License:Open Source License

/**
 * Change the permission of a path
 * 
 * @param path
 * @param permission
 * @param recursive
 * @return Error Message
 */
protected String changePermission(Path path, String permission, boolean recursive) {
    String error = null;
    try {
        logger.debug("1 " + path.getName());
        FileSystem fs = NameNodeVar.getFS();
        FileStatus stat = fs.getFileStatus(path);
        if (stat.getOwner().equals(System.getProperty("user.name"))) {
            FileStatus[] child = fs.listStatus(path);
            if (recursive) {
                logger.debug("children : " + child.length);
                for (int i = 0; i < child.length && error == null; ++i) {
                    error = changePermission(fs, child[i].getPath(), permission, recursive);
                }
            }
            if (error == null) {
                logger.debug("set permissions  : " + path.toString() + " , "
                        + new FsPermission(permission).toString());
                fs.setPermission(path, new FsPermission(permission));
                logger.debug(getProperties(path.getName()));
            }
        } else {
            error = LanguageManagerWF.getText("HdfsInterface.changeprop.ownererror",
                    new Object[] { path.toString() });
        }
        // fs.close();
    } catch (IOException e) {
        logger.error("Cannot operate on the file or directory: " + path.toString());
        logger.error(e.getMessage());
        error = LanguageManagerWF.getText("HdfsInterface.changeprop.fileaccess", new Object[] { path });
    }
    if (error != null) {
        logger.debug(error);
    }
    return error;
}

From source file:com.redsqirl.workflow.server.connect.jdbc.JdbcStore.java

License:Open Source License

public static String writePassword(String connectionName, JdbcDetails details) {
    String passwordPathStr = "/user/" + System.getProperty("user.name") + "/.redsqirl/jdbc_password/password_"
            + connectionName;
    Path passwordPath = new Path(passwordPathStr);

    try {
        FileSystem fileSystem = NameNodeVar.getFS();
        if (fileSystem.exists(passwordPath)) {
            BufferedReader br = new BufferedReader(new InputStreamReader(fileSystem.open(passwordPath)));
            String line = br.readLine();
            if (line == null || !line.equals(details.getPassword())) {
                fileSystem.delete(passwordPath, false);
            }
            br.close();
        }
        if (!fileSystem.exists(passwordPath) && details.getPassword() != null) {
            if (!fileSystem.exists(passwordPath.getParent())) {
                fileSystem.mkdirs(passwordPath.getParent());
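                // Octal "700": read, write and execute for the owner only.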
                fileSystem.setPermission(passwordPath.getParent(), new FsPermission("700"));
            }
            FSDataOutputStream out = fileSystem.create(passwordPath);
            out.write(details.getPassword().getBytes());
            out.close();
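            // Octal "400": the stored password becomes read-only, and only for its owner.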
            fileSystem.setPermission(passwordPath, new FsPermission("400"));
        }
    } catch (Exception e) {
        logger.error(e, e);
    }
    return passwordPathStr;
}

From source file:com.revolutionanalytics.hadoop.hdfs.FileUtils.java

License:Apache License

public static void setPermissions(FileSystem fs, String[] p, String[] s) throws IOException {
    for (int i = 0; i < p.length; i++) {
        fs.setPermission(new Path(p[i]), new org.apache.hadoop.fs.permission.FsPermission(s[i]));
    }
}
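
In this bulk helper, p and s are parallel arrays: each path p[i] receives the permission string s[i] (for example "644"), so both arrays must have the same length.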

From source file:com.streamsets.pipeline.stage.destination.hdfs.metadataexecutor.HdfsMetadataExecutor.java

License:Apache License

@Override
public void write(Batch batch) throws StageException {
    final ELVars variables = getContext().createELVars();
    final FileSystem fs = hdfsConnection.getFs();

    Iterator<Record> it = batch.getRecords();
    while (it.hasNext()) {
        Record record = it.next();
        RecordEL.setRecordInContext(variables, record);

        // Execute all configured HDFS metadata operations as target user
        try {
            hdfsConnection.getUGI().doAs((PrivilegedExceptionAction<Void>) () -> {
                Path workingFile = new Path(evaluate(variables, "filePath", actions.filePath));
                LOG.info("Working on file: " + workingFile);

                // Create empty file if configured
                if (actions.taskType == TaskType.CREATE_EMPTY_FILE) {
                    ensureDirectoryExists(fs, workingFile.getParent());
                    if (!fs.createNewFile(workingFile)) {
                        throw new IOException("Can't create file (probably already exists): " + workingFile);
                    }
                }

                if (actions.taskType == TaskType.CHANGE_EXISTING_FILE
                        && (actions.shouldMoveFile || actions.shouldRename)) {
                    Path newPath = workingFile.getParent();
                    String newName = workingFile.getName();
                    if (actions.shouldMoveFile) {
                        newPath = new Path(evaluate(variables, "newLocation", actions.newLocation));
                    }
                    if (actions.shouldRename) {
                        newName = evaluate(variables, "newName", actions.newName);
                    }

                    Path destinationFile = new Path(newPath, newName);
                    ensureDirectoryExists(fs, newPath);

                    LOG.debug("Renaming to: {}", destinationFile);
                    if (!fs.rename(workingFile, destinationFile)) {
                        throw new IOException(
                                Utils.format("Can't rename '{}' to '{}''", workingFile, destinationFile));
                    }
                    workingFile = destinationFile;
                }

                if (actions.taskType.isOneOf(TaskType.CHANGE_EXISTING_FILE, TaskType.CREATE_EMPTY_FILE)) {
                    if (actions.shouldChangeOwnership) {
                        String newOwner = evaluate(variables, "newOwner", actions.newOwner);
                        String newGroup = evaluate(variables, "newGroup", actions.newGroup);
                        LOG.debug("Applying ownership: user={} and group={}", newOwner, newGroup);
                        fs.setOwner(workingFile, newOwner, newGroup);
                    }

                    if (actions.shouldSetPermissions) {
                        String stringPerms = evaluate(variables, "newPermissions", actions.newPermissions);
                        FsPermission fsPerms = HdfsUtils.parseFsPermission(stringPerms);
                        LOG.debug("Applying permissions: {} loaded from value '{}'", fsPerms, stringPerms);
                        fs.setPermission(workingFile, fsPerms);
                    }

                    if (actions.shouldSetAcls) {
                        String stringAcls = evaluate(variables, "newAcls", actions.newAcls);
                        List<AclEntry> acls = AclEntry.parseAclSpec(stringAcls, true);
                        LOG.debug("Applying ACLs: {}", stringAcls);
                        fs.setAcl(workingFile, acls);
                    }
                }

                if (actions.taskType == TaskType.REMOVE_FILE) {
                    fs.delete(workingFile, true);
                }

                // Issue event with the final file name (e.g. the renamed one if applicable)
                actions.taskType.getEventCreator().create(getContext()).with("filepath", workingFile.toString())
                        .with("filename", workingFile.getName()).createAndSend();

                LOG.debug("Done changing metadata on file: {}", workingFile);
                return null;
            });
        } catch (Throwable e) {
            // Hadoop libraries will wrap any non InterruptedException, RuntimeException, Error or IOException to UndeclaredThrowableException,
            // so we manually unwrap it here and properly propagate it to user.
            if (e instanceof UndeclaredThrowableException) {
                e = e.getCause();
            }
            LOG.error("Failure when applying metadata changes to HDFS", e);
            errorRecordHandler.onError(
                    new OnRecordErrorException(record, HdfsMetadataErrors.HDFS_METADATA_000, e.getMessage()));
        }
    }
}
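
Because everything above runs inside hdfsConnection.getUGI().doAs(...), the setPermission call, like the ownership and ACL changes, executes as the configured Hadoop user rather than as the JVM process owner.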

From source file:com.streamsets.pipeline.stage.destination.hdfs.metadataxecutor.HdfsMetadataExecutor.java

License:Apache License

@Override
public void write(Batch batch) throws StageException {
    final ELVars variables = getContext().createELVars();
    final FileSystem fs = hdfsConnection.getFs();

    Iterator<Record> it = batch.getRecords();
    while (it.hasNext()) {
        Record record = it.next();
        RecordEL.setRecordInContext(variables, record);

        // Execute all configured HDFS metadata operations as target user
        try {
            hdfsConnection.getUGI().doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    Path workingFile = new Path(evaluate(variables, "filePath", actions.filePath));
                    LOG.info("Working on file: " + workingFile);

                    if (actions.shouldMoveFile) {
                        Path destinationFile = new Path(
                                evaluate(variables, "newLocation", actions.newLocation));

                        Path destinationParent = destinationFile.getParent();
                        if (!fs.exists(destinationParent)) {
                            LOG.debug("Creating parent directory for destination file: {}", destinationParent);
                            if (!fs.mkdirs(destinationParent)) {
                                throw new IOException("Can't create directory: " + destinationParent);
                            }
                        }

                        LOG.debug("Renaming to: {}", destinationFile);
                        if (!fs.rename(workingFile, destinationFile)) {
                            throw new IOException("Can't rename file to: " + destinationFile);
                        }
                        workingFile = destinationFile;
                    }

                    if (actions.shouldChangeOwnership) {
                        String newOwner = evaluate(variables, "newOwner", actions.newOwner);
                        String newGroup = evaluate(variables, "newGroup", actions.newGroup);
                        LOG.debug("Applying ownership: user={} and group={}", newOwner, newGroup);
                        fs.setOwner(workingFile, newOwner, newGroup);
                    }

                    if (actions.shouldSetPermissions) {
                        String stringPerms = evaluate(variables, "newPermissions", actions.newPermissions);
                        FsPermission fsPerms = new FsPermission(stringPerms);
                        LOG.debug("Applying permissions: {} loaded from value '{}'", fsPerms, stringPerms);
                        fs.setPermission(workingFile, fsPerms);
                    }

                    if (actions.shouldSetAcls) {
                        String stringAcls = evaluate(variables, "newAcls", actions.newAcls);
                        List<AclEntry> acls = AclEntry.parseAclSpec(stringAcls, true);
                        LOG.debug("Applying ACLs: {}", stringAcls);
                        fs.setAcl(workingFile, acls);
                    }

                    // Issue event with the final file name (e.g. the renamed one if applicable)
                    EventRecord event = getContext().createEventRecord("file-changed", 1);
                    event.set(Field.create(Field.Type.MAP, new ImmutableMap.Builder<String, Field>()
                            .put("filepath", Field.create(Field.Type.STRING, workingFile.toString())).build()));
                    getContext().toEvent(event);

                    LOG.debug("Done changing metadata on file: {}", workingFile);
                    return null;
                }
            });
        } catch (Exception e) {
            LOG.error("Failure when applying metadata changes to HDFS", e);
            errorRecordHandler.onError(
                    new OnRecordErrorException(record, HdfsMetadataErrors.HDFS_METADATA_000, e.getMessage()));
        }
    }
}

From source file:com.streamsets.pipeline.stage.destination.hdfs.writer.WholeFileFormatFsHelper.java

License:Apache License

@Override
public Path renameAndGetPath(FileSystem fs, Path tempPath) throws IOException, StageException {
    Path finalPath = getRenamablePath(fs, tempPath);
    if (!fs.rename(tempPath, finalPath)) {
        throw new IOException(Utils.format("Could not rename '{}' to '{}'", tempPath, finalPath));
    }

    //updatePermissions
    if (fsPermissions != null) {
        fs.setPermission(finalPath, fsPermissions);
    }

    fsPermissions = null;

    //Throw file copied event here.
    context.toEvent(wholeFileEventRecord);

    return finalPath;
}
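
Note the ordering: the cached fsPermissions are applied to the final path only after the rename succeeds, and the field is then cleared so a stale value cannot carry over to the next file.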

From source file:com.yahoo.glimmer.util.ComputeHashTool.java

License:Open Source License

public long buildHash(FileSystem fs, String srcFilename, Long numElements, boolean generateUnsigned,
        boolean generateSigned, final Charset charset, boolean writeInfoFile)
        throws IOException, ClassNotFoundException {
    final MapReducePartInputStreamEnumeration inputStreamEnumeration;
    try {
        inputStreamEnumeration = new MapReducePartInputStreamEnumeration(fs, new Path(srcFilename));
    } catch (IOException e) {
        throw new RuntimeException("Failed to open " + srcFilename, e);
    }

    LineReaderCollection inCollection = new LineReaderCollection(new LineReaderCollection.ReaderFactory() {
        @Override
        public Reader newReader() {
            inputStreamEnumeration.reset();
            return new InputStreamReader(new SequenceInputStream(inputStreamEnumeration), charset);
        }
    });

    String destFilename = inputStreamEnumeration.removeCompressionSuffixIfAny(srcFilename);
    Path unsigendPath = new Path(destFilename + DOT_UNSIGNED);

    HollowTrieMonotoneMinimalPerfectHashFunction<CharSequence> unsignedHash;
    if (generateUnsigned) {
        //       if (numElements != null) {
        //      LOGGER.info("\tBuilding unsigned hash with given number of elements:" + numElements);
        //       } else {
        //      LOGGER.info("\tBuilding unsigned hash. Getting number of elements from collection...");
        //      long timeToGetSize = System.currentTimeMillis();
        //      numElements = inCollection.size64();
        //      timeToGetSize = System.currentTimeMillis() - timeToGetSize;
        //      LOGGER.info("\tNumber of elements is " + numElements + " found in " + timeToGetSize / 1000 + " seconds");
        //       }
        //       unsignedHash = new LcpMonotoneMinimalPerfectHashFunction<CharSequence>(inCollection, numElements, TransformationStrategies.prefixFreeUtf16());
        unsignedHash = new HollowTrieMonotoneMinimalPerfectHashFunction<CharSequence>(inCollection,
                TransformationStrategies.prefixFreeUtf32());
        LOGGER.info("\tSaving unsigned hash as " + unsigendPath.toString());
        writeMapToFile(unsignedHash, fs, unsigendPath);
    } else {
        LOGGER.info("\tLoading unsigned hash from " + unsigendPath.toString());
        unsignedHash = (HollowTrieMonotoneMinimalPerfectHashFunction<CharSequence>) readMpHashFromFile(fs,
                unsigendPath);
    }

    if (generateSigned) {
        LOGGER.info("\tBuilding signed hash...");
        //       ShiftAddXorSignedStringMap signedHash = new ShiftAddXorSignedStringMap(inCollection.iterator(), unsignedHash, signatureWidth);
        Path signedPath = new Path(destFilename + DOT_SIGNED);
        DataOutputStream signedDataOutputStream = null;
        try {
            signedDataOutputStream = new DataOutputStream(
                    new FastBufferedOutputStream(createOutputStream(fs, signedPath)));
            LongBigListSignedStringMap.sign(inCollection.iterator(), signedDataOutputStream, null);
        } finally {
            if (signedDataOutputStream != null) {
                signedDataOutputStream.close();
            }
        }

        LOGGER.info("\tSaving signed hash as " + signedPath.toString());
    }

    if (writeInfoFile) {
        Path infoPath = new Path(destFilename + DOT_MAPINFO);
        FSDataOutputStream infoStream = fs.create(infoPath, true);// overwrite
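        // ALL_PERMISSIONS is an FsPermission constant defined elsewhere in this class (not shown in this excerpt).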
        fs.setPermission(infoPath, ALL_PERMISSIONS);
        OutputStreamWriter infoWriter = new OutputStreamWriter(infoStream);
        infoWriter.write("size\t");
        infoWriter.write(Long.toString(unsignedHash.size64()));
        infoWriter.write("\n");
        infoWriter.write("unsignedBits\t");
        infoWriter.write(Long.toString((unsignedHash).numBits()));
        infoWriter.write("\n");
        if (generateSigned) {
            infoWriter.write("signedWidth\t64\n");
        }
        infoWriter.close();
        infoStream.close();
    }

    return unsignedHash.size64();
}

From source file:com.yahoo.glimmer.util.ComputeHashTool.java

License:Open Source License

private static OutputStream createOutputStream(FileSystem fs, Path path) throws IOException {
    FSDataOutputStream outStream = fs.create(path, true);// overwrite;
    fs.setPermission(path, ALL_PERMISSIONS);
    return outStream;
}