Example usage for org.apache.hadoop.fs FileSystem setPermission

Introduction

On this page you can find example usages of org.apache.hadoop.fs.FileSystem#setPermission, collected from open-source projects.

Prototype

public void setPermission(Path p, FsPermission permission) throws IOException 

Document

Set permission of a path.
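
Before the project-specific snippets below, here is a minimal self-contained sketch of the call. It assumes a reachable default file system; the path and permission bits are illustrative assumptions, not taken from any of the projects.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class SetPermissionSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration()); // uses fs.defaultFS
        Path path = new Path("/tmp/example"); // hypothetical path, assumed to exist

        // Three equivalent ways to express rwxrwxr-x (0775), all of which
        // appear in one form or another in the examples below:
        fs.setPermission(path, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.READ_EXECUTE));
        fs.setPermission(path, FsPermission.valueOf("-rwxrwxr-x"));
        fs.setPermission(path, new FsPermission((short) 0775));

        fs.close();
    }
}

Note that setPermission changes only the mode bits; transferring ownership requires a separate setOwner call, as several of the examples below demonstrate.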

Usage

From source file:com.yata.core.HDFSManager.java

License:Apache License

/**
 *
 * @param hdfsTestDataSourceFile
 * @param hdfsTestDataTargetFile
 * @throws OozieClientException
 *
 * hadoop fs -cp /projects/ddsw/dev/data/backup/dealer_hierarchy/<<DOMAIN_NAME>>/<<FILE_NAME>> /projects/ddsw/dev/data/raw/nas/<<DOMAIN_NAME>>
 */
public void copyHDFSData(String hdfsTestDataSourceFile, String hdfsTestDataTargetFile)
        throws OozieClientException {

    System.out.println("copyHDFSData@" + className + " : Loading Test Data From :-> " + hdfsTestDataSourceFile
            + " : Into :-> " + hdfsTestDataTargetFile);

    FileSystem hdfs = null;
    Path hdfsTestDataSource = null;
    Path hdfsTestDataTarget = null;

    try {

        hdfs = getHdfsFileSytem();

        System.out.println("copyHDFSData@" + className + " : HDFS :-> " + hdfs);

        System.out.println("copyHDFSData@" + className + " : HDFSHomeDirectory :-> " + hdfs.getHomeDirectory());
        System.out.println("copyHDFSData@" + className + " : HDFS-URI :-> " + hdfs.getUri());
        System.out.println(
                "copyHDFSData@" + className + " : HDFSWorkingDirectory :-> " + hdfs.getWorkingDirectory());
        System.out.println("copyHDFSData@" + className + " : HDFS : " + hdfs + " : Exists :-> "
                + hdfs.exists(hdfs.getHomeDirectory()));

        hdfsTestDataSource = new Path(hdfs.getUri().getPath() + hdfsTestDataSourceFile);
        hdfsTestDataTarget = new Path(hdfs.getUri().getPath() + hdfsTestDataTargetFile);

        System.out.println("copyHDFSData@" + className + " : HDFS TEST DATA : " + hdfsTestDataSource
                + " : Exists :-> " + hdfs.exists(hdfsTestDataSource));
        System.out.println("copyHDFSData@" + className + " : HDFS DOMAIN DATA : " + hdfsTestDataTarget
                + " : Exists :-> " + hdfs.exists(hdfsTestDataTarget));

    } catch (IOException e) {

        e.printStackTrace();
        throw new OozieClientException("ERR_CODE_1218",
                "copyHDFSData@" + className + " : IOException while getting HDFS FileSystem - EXITING...");
    }

    FileUtil hdfsUtil = new FileUtil();

    try {

        hdfsUtil.copy(hdfs, hdfsTestDataSource, hdfs, hdfsTestDataTarget, false, true, hdfs.getConf());

        System.out.println("copyHDFSData@" + className + " : NOW : HDFS TEST DATA : " + hdfsTestDataSource
                + " : Exists :-> " + hdfs.exists(hdfsTestDataSource));
        System.out.println("copyHDFSData@" + className + " : HDFS DOMAIN DATA : " + hdfsTestDataTarget
                + " : Exists :-> " + hdfs.exists(hdfsTestDataTarget));

    } catch (IOException e) {

        e.printStackTrace();
        throw new OozieClientException("ERR_CODE_1218",
                "copyHDFSData@" + className + " : IOException while Copying HDFS Data - EXITING...");
    }

    /**
     * IMPORTANT
     * If the source data file on HDFS is not owned by the Hive/Hadoop user, use the call below to
     * change its permissions so that the Hive/Hadoop user can move/delete the file once processed.
     */
    try {

        hdfs.setPermission(hdfsTestDataTarget,
                new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.READ_EXECUTE));
    } catch (IOException e) {

        e.printStackTrace();
        throw new OozieClientException("ERR_CODE_1218", "copyHDFSData@" + className
                + " : IOException while Changing HDFS File Permissions - EXITING...");
    }

}

From source file:etl.cmd.test.XFsTestCase.java

License:Apache License

private Path initFileSystem(FileSystem fs) throws Exception {
    Path path = new Path(fs.getWorkingDirectory(), java.util.UUID.randomUUID().toString());
    Path testDirInFs = fs.makeQualified(path);
    System.out.println(XLog.format("Setting FS testcase work dir[{0}]", testDirInFs));
    if (fs.exists(testDirInFs)) {
        setAllPermissions(fs, testDirInFs);
    }
    fs.delete(testDirInFs, true);
    if (!fs.mkdirs(path)) {
        throw new IOException(XLog.format("Could not create FS testcase dir [{0}]", testDirInFs));
    }
    fs.setOwner(testDirInFs, getTestUser(), getTestGroup());
    fs.setPermission(testDirInFs, FsPermission.valueOf("-rwxrwx--x"));
    return testDirInFs;
}

From source file:etl.cmd.test.XFsTestCase.java

License:Apache License

private void setAllPermissions(FileSystem fileSystem, Path path) throws IOException {
    FsPermission fsPermission = new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE);
    try {
        fileSystem.setPermission(path, fsPermission);
    } catch (IOException ex) {
        // NOP: best-effort permission change; failures are ignored so the walk continues
    }
    FileStatus fileStatus = fileSystem.getFileStatus(path);
    if (fileStatus.isDir()) {
        for (FileStatus status : fileSystem.listStatus(path)) {
            setAllPermissions(fileSystem, status.getPath());
        }
    }
}

From source file:etl.cmd.test.XTestCase.java

License:Apache License

private void setUpEmbeddedHadoop(String testCaseDir) throws Exception {
    if (dfsCluster == null && mrCluster == null) {
        if (System.getProperty("hadoop.log.dir") == null) {
            System.setProperty("hadoop.log.dir", testCaseDir);
        }
        String oozieUser = getOozieUser();
        JobConf conf = createDFSConfig();
        String[] userGroups = new String[] { getTestGroup(), getTestGroup2() };
        UserGroupInformation.createUserForTesting(oozieUser, userGroups);
        UserGroupInformation.createUserForTesting(getTestUser(), userGroups);
        UserGroupInformation.createUserForTesting(getTestUser2(), userGroups);
        UserGroupInformation.createUserForTesting(getTestUser3(), new String[] { "users" });

        try {
            MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
            dfsCluster = builder.build();
            FileSystem fileSystem = dfsCluster.getFileSystem();
            fileSystem.mkdirs(new Path("target/test-data"));
            fileSystem.mkdirs(new Path("target/test-data" + "/minicluster/mapred"));
            fileSystem.mkdirs(new Path("/user"));
            fileSystem.mkdirs(new Path("/tmp"));
            fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
            fileSystem.setPermission(new Path("target/test-data"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("target/test-data" + "/minicluster"),
                    FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("target/test-data" + "/minicluster/mapred"),
                    FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));

            mrCluster = MiniMRClientClusterFactory.create(this.getClass(), 1, conf);
            Configuration jobConf = mrCluster.getConfig();
            System.setProperty(OOZIE_TEST_JOB_TRACKER, jobConf.get("mapreduce.jobtracker.address"));
            String rmAddress = jobConf.get("yarn.resourcemanager.address");
            log.info("Job tracker: " + rmAddress);
            if (rmAddress != null) {
                System.setProperty(OOZIE_TEST_JOB_TRACKER, rmAddress);
            }
            System.setProperty(OOZIE_TEST_NAME_NODE, jobConf.get("fs.defaultFS"));
            ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
        } catch (Exception ex) {
            shutdownMiniCluster();
            throw ex;
        }
        new MiniClusterShutdownMonitor().start();
    }
}

From source file:etl.cmd.test.XTestCase.java

License:Apache License

private void setUpEmbeddedHadoop2() throws Exception {
    if (dfsCluster != null && dfsCluster2 == null) {
        // Trick the DFS location for MiniDFSCluster, since it doesn't accept the location as input
        String testBuildDataSaved = System.getProperty("test.build.data", "build/test/data");
        try {
            System.setProperty("test.build.data", FilenameUtils.concat(testBuildDataSaved, "2"));
            // Only a DFS cluster is created, based on current needs
            MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(createDFSConfig());
            dfsCluster2 = builder.build();
            FileSystem fileSystem = dfsCluster2.getFileSystem();
            fileSystem.mkdirs(new Path("target/test-data"));
            fileSystem.mkdirs(new Path("/user"));
            fileSystem.mkdirs(new Path("/tmp"));
            fileSystem.setPermission(new Path("target/test-data"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
            System.setProperty(OOZIE_TEST_NAME_NODE2, fileSystem.getConf().get("fs.defaultFS"));
        } catch (Exception ex) {
            shutdownMiniCluster2();
            throw ex;
        } finally {
            // Restore previous value
            System.setProperty("test.build.data", testBuildDataSaved);
        }
    }
}

From source file:eu.stratosphere.yarn.Client.java

License:Apache License

public void run(String[] args) throws Exception {

    if (UserGroupInformation.isSecurityEnabled()) {
        throw new RuntimeException("Stratosphere YARN client does not have security support right now."
                + "File a bug, we will fix it asap");
    }
    //Utils.logFilesInCurrentDirectory(LOG);
    //
    //   Command Line Options
    //
    Options options = new Options();
    options.addOption(VERBOSE);
    options.addOption(STRATOSPHERE_CONF_DIR);
    options.addOption(STRATOSPHERE_JAR);
    options.addOption(JM_MEMORY);
    options.addOption(TM_MEMORY);
    options.addOption(TM_CORES);
    options.addOption(CONTAINER);
    options.addOption(GEN_CONF);
    options.addOption(QUEUE);
    options.addOption(QUERY);
    options.addOption(SHIP_PATH);

    CommandLineParser parser = new PosixParser();
    CommandLine cmd = null;
    try {
        cmd = parser.parse(options, args);
    } catch (MissingOptionException moe) {
        System.out.println(moe.getMessage());
        printUsage();
        System.exit(1);
    }

    if (System.getProperty("log4j.configuration") == null) {
        Logger root = Logger.getRootLogger();
        root.removeAllAppenders();
        PatternLayout layout = new PatternLayout("%d{HH:mm:ss,SSS} %-5p %-60c %x - %m%n");
        ConsoleAppender appender = new ConsoleAppender(layout, "System.err");
        root.addAppender(appender);
        if (cmd.hasOption(VERBOSE.getOpt())) {
            root.setLevel(Level.DEBUG);
            LOG.debug("CLASSPATH: " + System.getProperty("java.class.path"));
        } else {
            root.setLevel(Level.INFO);
        }
    }

    // Jar Path
    Path localJarPath;
    if (cmd.hasOption(STRATOSPHERE_JAR.getOpt())) {
        String userPath = cmd.getOptionValue(STRATOSPHERE_JAR.getOpt());
        if (!userPath.startsWith("file://")) {
            userPath = "file://" + userPath;
        }
        localJarPath = new Path(userPath);
    } else {
        localJarPath = new Path(
                "file://" + Client.class.getProtectionDomain().getCodeSource().getLocation().getPath());
    }

    if (cmd.hasOption(GEN_CONF.getOpt())) {
        LOG.info("Placing default configuration in current directory");
        File outFile = generateDefaultConf(localJarPath);
        LOG.info("File written to " + outFile.getAbsolutePath());
        System.exit(0);
    }

    // Conf Path 
    Path confPath = null;
    String confDirPath = "";
    if (cmd.hasOption(STRATOSPHERE_CONF_DIR.getOpt())) {
        confDirPath = cmd.getOptionValue(STRATOSPHERE_CONF_DIR.getOpt()) + "/";
        File confFile = new File(confDirPath + CONFIG_FILE_NAME);
        if (!confFile.exists()) {
            LOG.fatal("Unable to locate configuration file in " + confFile);
            System.exit(1);
        }
        confPath = new Path(confFile.getAbsolutePath());
    } else {
        System.out.println("No configuration file has been specified");

        // no configuration path given.
        // -> see if there is one in the current directory
        File currDir = new File(".");
        File[] candidates = currDir.listFiles(new FilenameFilter() {
            @Override
            public boolean accept(final File dir, final String name) {
                return name != null && name.endsWith(".yaml");
            }
        });
        if (candidates == null || candidates.length == 0) {
            System.out.println(
                    "No configuration file has been found in current directory.\n" + "Copying default.");
            File outFile = generateDefaultConf(localJarPath);
            confPath = new Path(outFile.toURI());
        } else {
            if (candidates.length > 1) {
                System.out.println("Multiple .yaml configuration files were found in the current directory\n"
                        + "Please specify one explicitly");
                System.exit(1);
            } else if (candidates.length == 1) {
                confPath = new Path(candidates[0].toURI());
            }
        }
    }
    List<File> shipFiles = new ArrayList<File>();
    // path to directory to ship
    if (cmd.hasOption(SHIP_PATH.getOpt())) {
        String shipPath = cmd.getOptionValue(SHIP_PATH.getOpt());
        File shipDir = new File(shipPath);
        if (shipDir.isDirectory()) {
            shipFiles = new ArrayList<File>(Arrays.asList(shipDir.listFiles(new FilenameFilter() {
                @Override
                public boolean accept(File dir, String name) {
                    return !(name.equals(".") || name.equals(".."));
                }
            })));
        } else {
            LOG.warn("Ship directory is not a directory!");
        }
    }
    boolean hasLog4j = false;
    //check if there is a log4j file
    if (confDirPath.length() > 0) {
        File l4j = new File(confDirPath + "/log4j.properties");
        if (l4j.exists()) {
            shipFiles.add(l4j);
            hasLog4j = true;
        }
    }

    // queue
    String queue = "default";
    if (cmd.hasOption(QUEUE.getOpt())) {
        queue = cmd.getOptionValue(QUEUE.getOpt());
    }

    // JobManager Memory
    int jmMemory = 512;
    if (cmd.hasOption(JM_MEMORY.getOpt())) {
        jmMemory = Integer.valueOf(cmd.getOptionValue(JM_MEMORY.getOpt()));
    }

    // Task Managers memory
    int tmMemory = 1024;
    if (cmd.hasOption(TM_MEMORY.getOpt())) {
        tmMemory = Integer.valueOf(cmd.getOptionValue(TM_MEMORY.getOpt()));
    }

    // Task Managers vcores
    int tmCores = 1;
    if (cmd.hasOption(TM_CORES.getOpt())) {
        tmCores = Integer.valueOf(cmd.getOptionValue(TM_CORES.getOpt()));
    }
    Utils.getStratosphereConfiguration(confPath.toUri().getPath());
    int jmPort = GlobalConfiguration.getInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, 0);
    if (jmPort == 0) {
        LOG.warn("Unable to find job manager port in configuration!");
        jmPort = ConfigConstants.DEFAULT_JOB_MANAGER_IPC_PORT;
    }
    conf = Utils.initializeYarnConfiguration();

    // initialize HDFS
    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem 
    // Create a local resource to point to the destination jar path 
    final FileSystem fs = FileSystem.get(conf);

    if (fs.getScheme().startsWith("file")) {
        LOG.warn("The file system scheme is '" + fs.getScheme() + "'. This indicates that the "
                + "specified Hadoop configuration path is wrong and the sytem is using the default Hadoop configuration values."
                + "The Stratosphere YARN client needs to store its files in a distributed file system");
    }

    // Create yarnClient
    final YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();

    // Query cluster for metrics
    if (cmd.hasOption(QUERY.getOpt())) {
        showClusterMetrics(yarnClient);
    }
    if (!cmd.hasOption(CONTAINER.getOpt())) {
        LOG.fatal("Missing required argument " + CONTAINER.getOpt());
        printUsage();
        yarnClient.stop();
        System.exit(1);
    }

    // TM Count
    final int taskManagerCount = Integer.valueOf(cmd.getOptionValue(CONTAINER.getOpt()));

    System.out.println("Using values:");
    System.out.println("\tContainer Count = " + taskManagerCount);
    System.out.println("\tJar Path = " + localJarPath.toUri().getPath());
    System.out.println("\tConfiguration file = " + confPath.toUri().getPath());
    System.out.println("\tJobManager memory = " + jmMemory);
    System.out.println("\tTaskManager memory = " + tmMemory);
    System.out.println("\tTaskManager cores = " + tmCores);

    // Create application via yarnClient
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    Resource maxRes = appResponse.getMaximumResourceCapability();
    if (tmMemory > maxRes.getMemory() || tmCores > maxRes.getVirtualCores()) {
        LOG.fatal("The cluster does not have the requested resources for the TaskManagers available!\n"
                + "Maximum Memory: " + maxRes.getMemory() + ", Maximum Cores: " + tmCores);
        yarnClient.stop();
        System.exit(1);
    }
    if (jmMemory > maxRes.getMemory()) {
        LOG.fatal("The cluster does not have the requested resources for the JobManager available!\n"
                + "Maximum Memory: " + maxRes.getMemory());
        yarnClient.stop();
        System.exit(1);
    }
    int totalMemoryRequired = jmMemory + tmMemory * taskManagerCount;
    ClusterResourceDescription freeClusterMem = getCurrentFreeClusterResources(yarnClient);
    if (freeClusterMem.totalFreeMemory < totalMemoryRequired) {
        LOG.fatal("This YARN session requires " + totalMemoryRequired + "MB of memory in the cluster. "
                + "There are currently only " + freeClusterMem.totalFreeMemory + "MB available.");
        yarnClient.stop();
        System.exit(1);
    }
    if (tmMemory > freeClusterMem.containerLimit) {
        LOG.fatal("The requested amount of memory for the TaskManagers (" + tmMemory + "MB) is more than "
                + "the largest possible YARN container: " + freeClusterMem.containerLimit);
        yarnClient.stop();
        System.exit(1);
    }
    if (jmMemory > freeClusterMem.containerLimit) {
        LOG.fatal("The requested amount of memory for the JobManager (" + jmMemory + "MB) is more than "
                + "the largest possible YARN container: " + freeClusterMem.containerLimit);
        yarnClient.stop();
        System.exit(1);
    }

    // respect custom JVM options in the YAML file
    final String javaOpts = GlobalConfiguration.getString(ConfigConstants.STRATOSPHERE_JVM_OPTIONS, "");

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    String amCommand = "$JAVA_HOME/bin/java" + " -Xmx" + Utils.calculateHeapSize(jmMemory) + "M " + javaOpts;
    if (hasLog4j) {
        amCommand += " -Dlog.file=\"" + ApplicationConstants.LOG_DIR_EXPANSION_VAR
                + "/jobmanager-log4j.log\" -Dlog4j.configuration=file:log4j.properties";
    }
    amCommand += " eu.stratosphere.yarn.ApplicationMaster" + " " + " 1>"
            + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager-stdout.log" + " 2>"
            + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager-stderr.log";
    amContainer.setCommands(Collections.singletonList(amCommand));

    System.err.println("amCommand=" + amCommand);

    // Set-up ApplicationSubmissionContext for the application
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    final ApplicationId appId = appContext.getApplicationId();

    // Setup jar for ApplicationMaster
    LocalResource appMasterJar = Records.newRecord(LocalResource.class);
    LocalResource stratosphereConf = Records.newRecord(LocalResource.class);
    Path remotePathJar = Utils.setupLocalResource(conf, fs, appId.toString(), localJarPath, appMasterJar,
            fs.getHomeDirectory());
    Path remotePathConf = Utils.setupLocalResource(conf, fs, appId.toString(), confPath, stratosphereConf,
            fs.getHomeDirectory());
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>(2);
    localResources.put("stratosphere.jar", appMasterJar);
    localResources.put("stratosphere-conf.yaml", stratosphereConf);

    // setup security tokens (code from apache storm)
    final Path[] paths = new Path[3 + shipFiles.size()];
    StringBuffer envShipFileList = new StringBuffer();
    // upload ship files
    for (int i = 0; i < shipFiles.size(); i++) {
        File shipFile = shipFiles.get(i);
        LocalResource shipResources = Records.newRecord(LocalResource.class);
        Path shipLocalPath = new Path("file://" + shipFile.getAbsolutePath());
        paths[3 + i] = Utils.setupLocalResource(conf, fs, appId.toString(), shipLocalPath, shipResources,
                fs.getHomeDirectory());
        localResources.put(shipFile.getName(), shipResources);

        envShipFileList.append(paths[3 + i]);
        if (i + 1 < shipFiles.size()) {
            envShipFileList.append(',');
        }
    }

    paths[0] = remotePathJar;
    paths[1] = remotePathConf;
    paths[2] = new Path(fs.getHomeDirectory(), ".stratosphere/" + appId.toString() + "/");
    FsPermission permission = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
    fs.setPermission(paths[2], permission); // set permission for path.
    Utils.setTokensFor(amContainer, paths, this.conf);

    amContainer.setLocalResources(localResources);
    fs.close();

    // Setup CLASSPATH for ApplicationMaster
    Map<String, String> appMasterEnv = new HashMap<String, String>();
    Utils.setupEnv(conf, appMasterEnv);
    // set configuration values
    appMasterEnv.put(Client.ENV_TM_COUNT, String.valueOf(taskManagerCount));
    appMasterEnv.put(Client.ENV_TM_CORES, String.valueOf(tmCores));
    appMasterEnv.put(Client.ENV_TM_MEMORY, String.valueOf(tmMemory));
    appMasterEnv.put(Client.STRATOSPHERE_JAR_PATH, remotePathJar.toString());
    appMasterEnv.put(Client.ENV_APP_ID, appId.toString());
    appMasterEnv.put(Client.ENV_CLIENT_HOME_DIR, fs.getHomeDirectory().toString());
    appMasterEnv.put(Client.ENV_CLIENT_SHIP_FILES, envShipFileList.toString());
    appMasterEnv.put(Client.ENV_CLIENT_USERNAME, UserGroupInformation.getCurrentUser().getShortUserName());

    amContainer.setEnvironment(appMasterEnv);

    // Set up resource type requirements for ApplicationMaster
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(jmMemory);
    capability.setVirtualCores(1);

    appContext.setApplicationName("Stratosphere"); // application name
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    appContext.setQueue(queue);

    // file that we write into the conf/ dir containing the jobManager address.
    final File addrFile = new File(confDirPath + CliFrontend.JOBMANAGER_ADDRESS_FILE);

    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            try {
                LOG.info("Killing the Stratosphere-YARN application.");
                yarnClient.killApplication(appId);
                LOG.info("Deleting files in " + paths[2]);
                FileSystem shutFS = FileSystem.get(conf);
                shutFS.delete(paths[2], true); // delete conf and jar file.
                shutFS.close();
            } catch (Exception e) {
                LOG.warn("Exception while killing the YARN application", e);
            }
            try {
                addrFile.delete();
            } catch (Exception e) {
                LOG.warn("Exception while deleting the jobmanager address file", e);
            }
            LOG.info("YARN Client is shutting down");
            yarnClient.stop();
        }
    });

    LOG.info("Submitting application master " + appId);
    yarnClient.submitApplication(appContext);
    ApplicationReport appReport = yarnClient.getApplicationReport(appId);
    YarnApplicationState appState = appReport.getYarnApplicationState();
    boolean told = false;
    char[] el = { '/', '|', '\\', '-' };
    int i = 0;
    while (appState != YarnApplicationState.FINISHED && appState != YarnApplicationState.KILLED
            && appState != YarnApplicationState.FAILED) {
        if (!told && appState == YarnApplicationState.RUNNING) {
            System.err
                    .println("Stratosphere JobManager is now running on " + appReport.getHost() + ":" + jmPort);
            System.err.println("JobManager Web Interface: " + appReport.getTrackingUrl());
            // write jobmanager connect information

            PrintWriter out = new PrintWriter(addrFile);
            out.println(appReport.getHost() + ":" + jmPort);
            out.close();
            addrFile.setReadable(true, false); // readable for all.
            told = true;
        }
        if (!told) {
            System.err.print(el[i++] + "\r");
            if (i == el.length) {
                i = 0;
            }
            Thread.sleep(500); // wait for the application to switch to RUNNING
        } else {
            Thread.sleep(5000);
        }

        appReport = yarnClient.getApplicationReport(appId);
        appState = appReport.getYarnApplicationState();
    }

    LOG.info("Application " + appId + " finished with" + " state " + appState + " at "
            + appReport.getFinishTime());
    if (appState == YarnApplicationState.FAILED || appState == YarnApplicationState.KILLED) {
        LOG.warn("Application failed. Diagnostics " + appReport.getDiagnostics());
    }

}

From source file:fr.ens.biologie.genomique.eoulsan.modules.mgmt.hadoop.DistCp.java

License:LGPL

private static void updatePermissions(final FileStatus src, final FileStatus dst,
        final EnumSet<FileAttribute> preserved, final FileSystem destFileSys) throws IOException {
    String owner = null;
    String group = null;
    if (preserved.contains(FileAttribute.USER) && !src.getOwner().equals(dst.getOwner())) {
        owner = src.getOwner();
    }
    if (preserved.contains(FileAttribute.GROUP) && !src.getGroup().equals(dst.getGroup())) {
        group = src.getGroup();
    }
    if (owner != null || group != null) {
        destFileSys.setOwner(dst.getPath(), owner, group);
    }
    if (preserved.contains(FileAttribute.PERMISSION) && !src.getPermission().equals(dst.getPermission())) {
        destFileSys.setPermission(dst.getPath(), src.getPermission());
    }
}

From source file:gaffer.accumulo.bulkimport.MoveIntoAccumulo.java

License:Apache License

public int run(String[] args) throws Exception {
    // Usage
    if (args.length < 3) {
        System.err.println("Usage: " + MoveIntoAccumulo.class.getName()
                + " <inputpath> <failurepath> <accumulo_properties_file>");
        return 1;
    }

    // Gets paths
    Path inputPath = new Path(args[0]);
    Path failurePath = new Path(args[1]);
    String accumuloPropertiesFile = args[2];

    // Hadoop configuration
    Configuration conf = getConf();
    FileSystem fs = FileSystem.get(conf);

    // Connect to Accumulo
    AccumuloConfig accConf = new AccumuloConfig(accumuloPropertiesFile);
    Connector conn = Accumulo.connect(accConf);
    String tableName = accConf.getTable();

    // Check if the table exists
    if (!conn.tableOperations().exists(tableName)) {
        System.err.println("Table " + tableName + " does not exist - create the table before running this");
        return 1;
    }

    // Make the failure directory
    fs.mkdirs(failurePath);
    fs.setPermission(failurePath, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));

    // Remove the _SUCCESS file to prevent warning in accumulo
    fs.delete(new Path(inputPath + "/_SUCCESS"), false);

    // Set all permissions
    IngestUtils.setDirectoryPermsForAccumulo(fs, inputPath);

    // Import the files
    conn.tableOperations().importDirectory(tableName, inputPath.toString(), failurePath.toString(), false);

    // Delete the temporary directories
    fs.delete(failurePath, true);

    return 0;
}

From source file:gaffer.accumulo.utils.IngestUtils.java

License:Apache License

/**
 * Modify the permissions on a directory and its contents to allow Accumulo
 * access.
 * 
 * @param fs  The FileSystem where the directory is
 * @param dirPath  The path to the directory
 * @throws IOException
 */
public static void setDirectoryPermsForAccumulo(FileSystem fs, Path dirPath) throws IOException {
    if (!fs.getFileStatus(dirPath).isDir()) {
        throw new RuntimeException(dirPath + " is not a directory");
    }
    fs.setPermission(dirPath, ACC_DIR_PERMS);
    for (FileStatus file : fs.listStatus(dirPath)) {
        fs.setPermission(file.getPath(), ACC_FILE_PERMS);
    }
}
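
The ACC_DIR_PERMS and ACC_FILE_PERMS constants are defined elsewhere in IngestUtils and are not part of this excerpt. Purely as an illustrative assumption (the project's actual values may differ), they could be declared like this:

// Illustrative assumption only; the real definitions live elsewhere in IngestUtils.
private static final FsPermission ACC_DIR_PERMS = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
private static final FsPermission ACC_FILE_PERMS = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);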

From source file:gaffer.accumulostore.operation.handler.tool.ImportElementsToAccumulo.java

License:Apache License

@Override
public int run(final String[] strings) throws Exception {
    // Hadoop configuration
    final Configuration conf = getConf();
    final FileSystem fs = FileSystem.get(conf);

    // Make the failure directory
    fs.mkdirs(operation.getFailurePath());
    fs.setPermission(operation.getFailurePath(), new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));

    // Remove the _SUCCESS file to prevent warning in accumulo
    fs.delete(new Path(operation.getOutputPath().toString() + "/_SUCCESS"), false);

    // Set all permissions
    IngestUtils.setDirectoryPermsForAccumulo(fs, operation.getOutputPath());

    // Import the files
    connector.tableOperations().importDirectory(table, operation.getOutputPath().toString(),
            operation.getFailurePath().toString(), false);

    // Delete the temporary directories
    fs.delete(operation.getFailurePath(), true);

    return SUCCESS_RESPONSE;
}