Example usage for org.apache.hadoop.fs FileSystem setPermission

Introduction

On this page you can find example usage for org.apache.hadoop.fs FileSystem setPermission.

Prototype

public void setPermission(Path p, FsPermission permission) throws IOException 

Document

Set permission of a path.
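
Before the project examples below, here is a minimal, self-contained sketch of the call. The class name and path are illustrative assumptions, not code from any of the projects; the three FsPermission constructions mirror the forms used in the examples below (an octal string, an explicit FsAction triple, and the unix symbolic form accepted by FsPermission.valueOf):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class SetPermissionExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // Hypothetical path; setPermission changes an existing path, it does not create one.
        Path path = new Path("/tmp/example.txt");

        fs.setPermission(path, new FsPermission("644"));             // octal string
        fs.setPermission(path, new FsPermission(FsAction.READ_WRITE, // owner, group, other
                FsAction.READ, FsAction.READ));
        fs.setPermission(path, FsPermission.valueOf("-rw-r--r--"));  // unix symbolic form

        fs.close();
    }
}

Note that on HDFS the call typically succeeds only for the owner of the path or a superuser, and the mode of newly created files is still subject to fs.permissions.umask-mode at create time.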

Usage

From source file:org.apache.tez.tests.MiniTezClusterWithTimeline.java

License:Apache License

@Override
public void serviceInit(Configuration conf) throws Exception {
    conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_TEZ_FRAMEWORK_NAME);
    // Use libs from cluster since no build is available
    conf.setBoolean(TezConfiguration.TEZ_USE_CLUSTER_HADOOP_LIBS, true);
    // blacklisting disabled to prevent scheduling issues
    conf.setBoolean(TezConfiguration.TEZ_AM_NODE_BLACKLISTING_ENABLED, false);
    if (conf.get(MRJobConfig.MR_AM_STAGING_DIR) == null) {
        conf.set(MRJobConfig.MR_AM_STAGING_DIR,
                new File(getTestWorkDir(), "apps_staging_dir" + Path.SEPARATOR).getAbsolutePath());
    }

    if (conf.get(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC) == null) {
        // nothing defined. set quick delete value
        conf.setLong(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 0L);
    }

    File appJarLocalFile = new File(MiniTezClusterWithTimeline.APPJAR);

    if (!appJarLocalFile.exists()) {
        String message = "TezAppJar " + MiniTezClusterWithTimeline.APPJAR + " not found. Exiting.";
        LOG.info(message);
        throw new TezUncheckedException(message);
    } else {
        LOG.info("Using Tez AppJar: " + appJarLocalFile.getAbsolutePath());
    }

    FileSystem fs = FileSystem.get(conf);
    Path testRootDir = fs.makeQualified(new Path("target", getName() + "-tmpDir"));
    Path appRemoteJar = new Path(testRootDir, "TezAppJar.jar");
    // Copy AppJar and make it public.
    Path appMasterJar = new Path(MiniTezClusterWithTimeline.APPJAR);
    fs.copyFromLocalFile(appMasterJar, appRemoteJar);
    fs.setPermission(appRemoteJar, new FsPermission("777"));

    conf.set(TezConfiguration.TEZ_LIB_URIS, appRemoteJar.toUri().toString());
    LOG.info("Set TEZ-LIB-URI to: " + conf.get(TezConfiguration.TEZ_LIB_URIS));

    // Disable both physical and virtual memory monitoring for containers.
    conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
    conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false);

    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "000");

    try {
        Path stagingPath = FileContext.getFileContext(conf)
                .makeQualified(new Path(conf.get(MRJobConfig.MR_AM_STAGING_DIR)));
        /*
         * Re-configure the staging path on Windows if the file system is localFs.
         * We need to use an absolute path that contains the drive letter. The unit
         * test could run on a different drive than the AM. We can run into the
         * issue that job files are localized to the drive where the test runs on,
         * while the AM starts on a different drive and fails to find the job
         * metafiles. Using an absolute path avoids this ambiguity.
         */
        if (Path.WINDOWS) {
            if (LocalFileSystem.class.isInstance(stagingPath.getFileSystem(conf))) {
                conf.set(MRJobConfig.MR_AM_STAGING_DIR,
                        new File(conf.get(MRJobConfig.MR_AM_STAGING_DIR)).getAbsolutePath());
            }
        }
        FileContext fc = FileContext.getFileContext(stagingPath.toUri(), conf);
        if (fc.util().exists(stagingPath)) {
            LOG.info(stagingPath + " exists! deleting...");
            fc.delete(stagingPath, true);
        }
        LOG.info("mkdir: " + stagingPath);
        fc.mkdir(stagingPath, null, true);

        // mkdir the done directory as well
        String doneDir = JobHistoryUtils.getConfiguredHistoryServerDoneDirPrefix(conf);
        Path doneDirPath = fc.makeQualified(new Path(doneDir));
        fc.mkdir(doneDirPath, null, true);
    } catch (IOException e) {
        throw new TezUncheckedException("Could not create staging directory. ", e);
    }
    conf.set(MRConfig.MASTER_ADDRESS, "test");

    // Configure the shuffle service in the NM.
    conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,
            new String[] { ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID });
    conf.setClass(
            String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID),
            ShuffleHandler.class, Service.class);

    // Non-standard shuffle port
    conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);

    conf.setClass(YarnConfiguration.NM_CONTAINER_EXECUTOR, DefaultContainerExecutor.class,
            ContainerExecutor.class);

    // TestMRJobs is for testing non-uberized operation only; see TestUberAM
    // for corresponding uberized tests.
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    super.serviceInit(conf);
}

From source file:org.bgi.flexlab.gaea.data.mapreduce.input.header.SamHdfsFileHeader.java

License:Open Source License

public static void writeHeader(Configuration conf, SAMFileHeader header, Path output) {
    Path rankSumTestObjPath = null;
    FsAction[] v = FsAction.values(); // v[7] is FsAction.ALL (rwx)
    StringBuilder uri = new StringBuilder();
    uri.append(output);
    if (!uri.toString().endsWith(Path.SEPARATOR)) {
        // HDFS paths always use "/"; the platform file.separator would be wrong on Windows.
        uri.append(Path.SEPARATOR);
    }
    uri.append(BAM_HEADER_FILE_NAME);
    conf.set(BAM_HEADER_FILE_NAME, uri.toString());
    rankSumTestObjPath = new Path(uri.toString());
    FileSystem fs = null;
    try {
        fs = rankSumTestObjPath.getFileSystem(conf);
        FsPermission permission = new FsPermission(v[7], v[7], v[7]);
        if (!fs.exists(output)) {
            fs.mkdirs(output, permission);
        } else {
            fs.setPermission(output, permission);
        }

        SamFileHeaderCodec.writeHeader(header, fs.create(rankSumTestObjPath));
    } catch (IOException e) {
        throw new RuntimeException(e); // keep the original cause and stack trace
    } finally {
        if (fs != null) {
            try {
                fs.close();
            } catch (IOException ioe) {
                throw new RuntimeException(ioe);
            }
        }
    }
}

From source file:org.opencloudengine.garuda.backend.hdfs.HdfsServiceImpl.java

License:Open Source License

private void _setPermission(String path, String permission) throws Exception {
    if (StringUtils.isEmpty(permission)) {
        return;
    }
    FileSystem fs = fileSystemFactory.getFileSystem();
    Path fsPath = new Path(path);
    if (!fs.exists(fsPath)) {
        this.notFoundException(fsPath.toString());
    }
    FsPermission fsPermission = new FsPermission(permission);
    fs.setPermission(fsPath, fsPermission);
    fs.close();
}

From source file:org.pentaho.di.job.entries.hadooptransjobexecutor.DistributedCacheUtil.java

License:Apache License

/**
 * Stages the source file or folder to a Hadoop file system and sets its permission and replication value appropriately
 * to be used with the Distributed Cache. WARNING: This will delete the contents of dest before staging the archive.
 *
 * @param source    File or folder to copy to the file system. If it is a folder all contents will be copied into dest.
 * @param fs        Hadoop file system to store the contents of the archive in
 * @param dest      Destination to copy source into. If source is a file, the new file name will be exactly dest. If source
 *                  is a folder its contents will be copied into dest. For more info see
 *                  {@link FileSystem#copyFromLocalFile(org.apache.hadoop.fs.Path, org.apache.hadoop.fs.Path)}.
 * @param overwrite Should an existing file or folder be overwritten? If not, an exception will be thrown.
 * @throws IOException         Destination exists and is not a directory
 * @throws KettleFileException Source does not exist, or destination exists and overwrite is false.
 */
public void stageForCache(FileObject source, FileSystem fs, Path dest, boolean overwrite)
        throws IOException, KettleFileException {
    if (!source.exists()) {
        throw new KettleFileException(BaseMessages.getString(DistributedCacheUtil.class,
                "DistributedCacheUtil.SourceDoesNotExist", source));
    }

    if (fs.exists(dest)) {
        if (overwrite) {
            // It is a directory, clear it out
            fs.delete(dest, true);
        } else {
            throw new KettleFileException(BaseMessages.getString(DistributedCacheUtil.class,
                    "DistributedCacheUtil.DestinationExists", dest.toUri().getPath()));
        }
    }

    // Use the same replication we'd use for submitting jobs
    short replication = (short) fs.getConf().getInt("mapred.submit.replication", 10);

    Path local = new Path(source.getURL().getPath());
    fs.copyFromLocalFile(local, dest);
    fs.setPermission(dest, CACHED_FILE_PERMISSION);
    fs.setReplication(dest, replication);
}

From source file:org.pentaho.hadoop.shim.common.DistributedCacheUtilImpl.java

License:Apache License

/**
 * Stages the source file or folder to a Hadoop file system and sets its permission and replication value
 * appropriately to be used with the Distributed Cache. WARNING: This will delete the contents of dest before staging
 * the archive.
 *
 * @param source    File or folder to copy to the file system. If it is a folder all contents will be copied into
 *                  dest.
 * @param fs        Hadoop file system to store the contents of the archive in
 * @param dest      Destination to copy source into. If source is a file, the new file name will be exactly dest. If
 *                  source is a folder its contents will be copied into dest. For more info see {@link
 *                  FileSystem#copyFromLocalFile(org.apache.hadoop.fs.Path, org.apache.hadoop.fs.Path)}.
 * @param overwrite Should an existing file or folder be overwritten? If not, an exception will be thrown.
 * @param isPublic  If true the staged file is given PUBLIC_CACHED_FILE_PERMISSION, otherwise CACHED_FILE_PERMISSION
 * @throws IOException         Destination exists and is not a directory
 * @throws KettleFileException Source does not exist, or destination exists and overwrite is false.
 */
public void stageForCache(FileObject source, FileSystem fs, Path dest, boolean overwrite, boolean isPublic)
        throws IOException, KettleFileException {
    if (!source.exists()) {
        throw new KettleFileException(BaseMessages.getString(DistributedCacheUtilImpl.class,
                "DistributedCacheUtil.SourceDoesNotExist", source));
    }

    if (fs.exists(dest)) {
        if (overwrite) {
            // It is a directory, clear it out
            fs.delete(dest, true);
        } else {
            throw new KettleFileException(BaseMessages.getString(DistributedCacheUtilImpl.class,
                    "DistributedCacheUtil.DestinationExists", dest.toUri().getPath()));
        }
    }

    // Use the same replication we'd use for submitting jobs
    short replication = (short) fs.getConf().getInt("mapred.submit.replication", 10);

    if (source.getURL().toString().endsWith(CONFIG_PROPERTIES)) {
        copyConfigProperties(source, fs, dest);
    } else {
        Path local = new Path(source.getURL().getPath());
        fs.copyFromLocalFile(local, dest);
    }

    if (isPublic) {
        fs.setPermission(dest, PUBLIC_CACHED_FILE_PERMISSION);
    } else {
        fs.setPermission(dest, CACHED_FILE_PERMISSION);
    }
    fs.setReplication(dest, replication);
}

From source file:org.pentaho.hadoop.shim.hsp101.HadoopShim.java

License:Apache License

@Override
public void onLoad(HadoopConfiguration config, HadoopConfigurationFileSystemManager fsm) throws Exception {
    fsm.addProvider(config, "hdfs", config.getIdentifier(), new HDFSFileProvider());
    setDistributedCacheUtil(new DistributedCacheUtilImpl(config) {
        /**
         * Default permission for cached files
         * <p/>
         * Not using FsPermission.createImmutable due to EOFExceptions when using it with Hadoop 0.20.2
         */
        private final FsPermission CACHED_FILE_PERMISSION = new FsPermission((short) 0755);

        public void addFileToClassPath(Path file, Configuration conf) throws IOException {
            String classpath = conf.get("mapred.job.classpath.files");
            conf.set("mapred.job.classpath.files", classpath == null ? file.toString()
                    : classpath + getClusterPathSeparator() + file.toString());
            FileSystem fs = FileSystem.get(conf);
            URI uri = fs.makeQualified(file).toUri();

            DistributedCache.addCacheFile(uri, conf);
        }

        /**
         * Stages the source file or folder to a Hadoop file system and sets its permission and replication
         * value appropriately to be used with the Distributed Cache. WARNING: This will delete the contents of
         * dest before staging the archive.
         *
         * @param source    File or folder to copy to the file system. If it is a folder all contents will be
         *                  copied into dest.
         * @param fs        Hadoop file system to store the contents of the archive in
         * @param dest      Destination to copy source into. If source is a file, the new file name will be
         *                  exactly dest. If source is a folder its contents will be copied into dest. For more
         *                  info see {@link FileSystem#copyFromLocalFile(org.apache.hadoop.fs.Path,
         *                  org.apache.hadoop.fs.Path)}.
         * @param overwrite Should an existing file or folder be overwritten? If not, an exception will be
         *                  thrown.
         * @throws IOException         Destination exists and is not a directory
         * @throws KettleFileException Source does not exist, or destination exists and overwrite is false.
         */
        public void stageForCache(FileObject source, FileSystem fs, Path dest, boolean overwrite)
                throws IOException, KettleFileException {
            if (!source.exists()) {
                throw new KettleFileException(BaseMessages.getString(DistributedCacheUtilImpl.class,
                        "DistributedCacheUtil.SourceDoesNotExist", source));
            }

            if (fs.exists(dest)) {
                if (overwrite) {
                    // It is a directory, clear it out
                    fs.delete(dest, true);
                } else {
                    throw new KettleFileException(BaseMessages.getString(DistributedCacheUtilImpl.class,
                            "DistributedCacheUtil.DestinationExists", dest.toUri().getPath()));
                }
            }

            // Use the same replication we'd use for submitting jobs
            short replication = (short) fs.getConf().getInt("mapred.submit.replication", 10);

            copyFile(source, fs, dest, overwrite);
            fs.setReplication(dest, replication);
        }

        private void copyFile(FileObject source, FileSystem fs, Path dest, boolean overwrite)
                throws IOException {
            if (source.getType() == FileType.FOLDER) {
                fs.mkdirs(dest);
                fs.setPermission(dest, CACHED_FILE_PERMISSION);
                for (FileObject fileObject : source.getChildren()) {
                    copyFile(fileObject, fs, new Path(dest, fileObject.getName().getBaseName()), overwrite);
                }
            } else {
                try (FSDataOutputStream fsDataOutputStream = fs.create(dest, overwrite)) {
                    IOUtils.copy(source.getContent().getInputStream(), fsDataOutputStream);
                    fs.setPermission(dest, CACHED_FILE_PERMISSION);
                }
            }
        }

        public String getClusterPathSeparator() {
            return System.getProperty("hadoop.cluster.path.separator", ",");
        }
    });
}

From source file:org.trafodion.sql.HBaseAccess.HBulkLoadClient.java

License:Apache License

public boolean doBulkLoad(String prepLocation, String tableName, boolean quasiSecure, boolean snapshot)
        throws Exception {
    if (logger.isDebugEnabled())
        logger.debug("HBulkLoadClient.doBulkLoad() - start");
    if (logger.isDebugEnabled())
        logger.debug("HBulkLoadClient.doBulkLoad() - Prep Location: " + prepLocation + ", Table Name:"
                + tableName + ", quasisecure : " + quasiSecure + ", snapshot: " + snapshot);

    HTable table = new HTable(config, tableName);
    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(config);
    Path prepPath = new Path(prepLocation);
    prepPath = prepPath.makeQualified(prepPath.toUri(), null);
    FileSystem prepFs = FileSystem.get(prepPath.toUri(), config);

    Path[] hFams = FileUtil.stat2Paths(prepFs.listStatus(prepPath));

    if (quasiSecure) {
        throw new Exception(
                "HBulkLoadClient.doBulkLoad() - cannot perform load. Trafodion on secure HBase mode is not implemented yet");
    } else {
        if (logger.isDebugEnabled())
            logger.debug("HBulkLoadClient.doBulkLoad() - adjusting hfiles permissions");
        for (Path hfam : hFams) {
            Path[] hfiles = FileUtil.stat2Paths(prepFs.listStatus(hfam));
            prepFs.setPermission(hfam, PERM_ALL_ACCESS);
            for (Path hfile : hfiles) {
                if (logger.isDebugEnabled())
                    logger.debug("HBulkLoadClient.doBulkLoad() - adjusting hfile permissions:" + hfile);
                prepFs.setPermission(hfile, PERM_ALL_ACCESS);
            }
            //create _tmp dir used as temp space for Hfile processing
            FileSystem.mkdirs(prepFs, new Path(hfam, "_tmp"), PERM_ALL_ACCESS);
        }
        if (logger.isDebugEnabled())
            logger.debug(
                    "HBulkLoadClient.doBulkLoad() - bulk load started. Loading directly from preparation directory");
        doSnapshotNBulkLoad(prepPath, tableName, table, loader, snapshot);
        if (logger.isDebugEnabled())
            logger.debug("HBulkLoadClient.doBulkLoad() - bulk load is done ");
    }
    return true;
}

From source file:org.trustedanalytics.samples.services.HdfsService.java

License:Apache License

/**
 * Create a file.
 *
 * @param fs configured Hadoop FileSystem
 * @param filePath path to the file
 * @param text file content
 * @return full hdfs path to the file
 * @throws IOException io exception
 */
private Path createFile(FileSystem fs, String filePath, String text) throws IOException {
    Path path = new Path(filePath);
    try (OutputStream os = fs.create(path)) {
        os.write(text.getBytes(Charset.forName("UTF-8")));
        fs.setPermission(path, FsPermission.valueOf("-rwxrwxrwx"));
        return fs.getFileStatus(path).getPath();
    }
}

From source file:org.trustedanalytics.samples.services.HdfsService.java

License:Apache License

/**
 * Create a directory inside the file system.
 *
 * @param fs configured Hadoop FileSystem
 * @param filePath path to the directory
 * @return path to the directory
 * @throws IOException io exception
 */
private Path createDirectory(FileSystem fs, String filePath) throws IOException {
    Path path = new Path(filePath);
    fs.mkdirs(path);
    fs.setPermission(path, FsPermission.valueOf("drwxrwxrwx"));
    fs.modifyAclEntries(path, FsPermissionHelper
            .getDefaultAclsForTechnicalUsers(FsPermissionHelper.getToolUsers(), FsAction.ALL));
    fs.modifyAclEntries(path,
            FsPermissionHelper.getAclsForTechnicalUsers(FsPermissionHelper.getToolUsers(), FsAction.ALL));
    return fs.getFileStatus(path).getPath();
}

From source file:org.wso2.carbon.hdfs.mgt.HDFSAdmin.java

License:Open Source License

/**
 * Rename a file or a folder using the source and destination paths of the
 * given FS object
 *
 * @param srcPath
 *            current path and file name of the file to be renamed
 * @param dstPath
 *            new path and file name
 * @return true if the rename is successful
 * @throws HDFSServerManagementException
 */

public boolean renameFile(String srcPath, String dstPath) throws HDFSServerManagementException {

    FsPermission fp = HDFSConstants.DEFAULT_FILE_PERMISSION;
    FileSystem hdfsFS = null;
    Path src = new Path(srcPath);
    Path dest = new Path(dstPath);
    boolean fileExists = false;
    try {
        hdfsFS = hdfsAdminHelperInstance.getFSforUser();
    } catch (IOException e) {
        String msg = "Error occurred while trying to mount file system.";
        handleException(msg, e);
    }
    boolean renamed = false;
    try {
        if (hdfsFS != null && !hdfsFS.exists(dest)) {
            renamed = hdfsFS.rename(src, dest);
            hdfsFS.setPermission(dest, fp);
        } else {
            fileExists = true;
        }
    } catch (IOException e) {
        String msg = "Error occurred while trying to rename file.";
        handleException(msg, e);
    }
    handleItemExistState(fileExists, true, false);
    return renamed;
}