Example usage for org.apache.hadoop.fs FileUtil chmod

List of usage examples for org.apache.hadoop.fs FileUtil chmod

Introduction

On this page you can find example usages of org.apache.hadoop.fs.FileUtil.chmod.

Prototype

public static int chmod(String filename, String perm) throws IOException, InterruptedException 

Document

Change the permissions on a filename.
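
A minimal sketch of the call is shown below; the class name and script path are hypothetical and used only for illustration. It changes the permissions of a local file using an octal mode string. Symbolic modes such as "u+x" or "a+x" work the same way, as several of the snippets in the Usage section show.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.fs.FileUtil;

public class ChmodSketch {
    public static void main(String[] args) throws IOException, InterruptedException {
        // Hypothetical local script path, for illustration only.
        File script = new File("/tmp/run-task.sh");

        // "700": owner read/write/execute, no access for group or others.
        int exitCode = FileUtil.chmod(script.getAbsolutePath(), "700");
        System.out.println("chmod exit code: " + exitCode);
    }
}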

Usage

From source file: it.crs4.pydoop.mapreduce.pipes.TestPipeApplication.java

License: Apache License

private File getFileCommand(String clazz) throws Exception {
    String classpath = System.getProperty("java.class.path");
    File fCommand = new File(workSpace + File.separator + "cache.sh");
    fCommand.deleteOnExit();
    if (!fCommand.getParentFile().exists()) {
        fCommand.getParentFile().mkdirs();
    }
    fCommand.createNewFile();
    OutputStream os = new FileOutputStream(fCommand);
    os.write("#!/bin/sh \n".getBytes());
    if (clazz == null) {
        os.write(("ls ").getBytes());
    } else {
        os.write(("java -cp " + classpath + " " + clazz).getBytes());
    }
    os.flush();
    os.close();
    FileUtil.chmod(fCommand.getAbsolutePath(), "700");
    return fCommand;
}

From source file: it.crs4.pydoop.pipes.Application.java

License: Apache License

/**
 * Start the child process to handle the task for us.
 * @param conf the task's configuration
 * @param recordReader the fake record reader to update progress with
 * @param output the collector to send output to
 * @param reporter the reporter for the task
 * @param outputKeyClass the class of the output keys
 * @param outputValueClass the class of the output values
 * @throws IOException
 * @throws InterruptedException
 */
Application(JobConf conf, RecordReader<FloatWritable, NullWritable> recordReader,
        OutputCollector<K2, V2> output, Reporter reporter, Class<? extends K2> outputKeyClass,
        Class<? extends V2> outputValueClass) throws IOException, InterruptedException {
    serverSocket = new ServerSocket(0);
    Map<String, String> env = new HashMap<String, String>();
    // add TMPDIR environment variable with the value of java.io.tmpdir
    env.put("TMPDIR", System.getProperty("java.io.tmpdir"));
    env.put(Submitter.PORT, Integer.toString(serverSocket.getLocalPort()));

    TaskAttemptID taskid = TaskAttemptID.forName(conf.get(MRJobConfig.TASK_ATTEMPT_ID));

    // get the task's working directory
    String workDir = LocalJobRunner.getLocalTaskDir(conf.getUser(), taskid.getJobID().toString(),
            taskid.getTaskID().toString(), false);

    //Add token to the environment if security is enabled
    Token<JobTokenIdentifier> jobToken = TokenCache.getJobToken(conf.getCredentials());
    // This password is used as shared secret key between this application and
    // child pipes process
    byte[] password = jobToken.getPassword();

    String localPasswordFile = new File(workDir, "jobTokenPassword").getAbsolutePath();
    writePasswordToLocalFile(localPasswordFile, password, conf);
    env.put("hadoop.pipes.shared.secret.location", localPasswordFile);

    List<String> cmd = new ArrayList<String>();
    String interpretor = conf.get(Submitter.INTERPRETOR);
    if (interpretor != null) {
        cmd.add(interpretor);
    }
    String executable = DistributedCache.getLocalCacheFiles(conf)[0].toString();
    if (!(new File(executable).canExecute())) {
        // LinuxTaskController sets +x permissions on all distcache files already.
        // In case of DefaultTaskController, set permissions here.
        FileUtil.chmod(executable, "u+x");
    }
    cmd.add(executable);
    // wrap the command in a stdout/stderr capture
    // we are starting map/reduce task of the pipes job. this is not a cleanup
    // attempt. 
    File stdout = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDOUT);
    File stderr = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDERR);
    long logLength = TaskLog.getTaskLogLength(conf);
    cmd = TaskLog.captureOutAndError(null, cmd, stdout, stderr, logLength, false);

    process = runClient(cmd, env);
    clientSocket = serverSocket.accept();

    String challenge = getSecurityChallenge();
    String digestToSend = createDigest(password, challenge);
    String digestExpected = createDigest(password, digestToSend);

    handler = new OutputHandler<K2, V2>(output, reporter, recordReader, digestExpected);
    K2 outputKey = (K2) ReflectionUtils.newInstance(outputKeyClass, conf);
    V2 outputValue = (V2) ReflectionUtils.newInstance(outputValueClass, conf);
    downlink = new BinaryProtocol<K1, V1, K2, V2>(clientSocket, handler, outputKey, outputValue, conf);

    downlink.authenticate(digestToSend, challenge);
    waitForAuthentication();
    LOG.debug("Authentication succeeded");
    downlink.start();
    downlink.setJobConf(conf);
}

From source file: org.apache.avro.mapred.tether.TetheredProcess.java

License: Apache License

private Process startSubprocess(JobConf job) throws IOException, InterruptedException {
    // get the executable command
    List<String> command = new ArrayList<String>();

    String executable = "";
    if (job.getBoolean(TetherJob.TETHER_EXEC_CACHED, false)) {
        //we want to use the cached executable
        Path[] localFiles = DistributedCache.getLocalCacheFiles(job);
        if (localFiles == null) { // until MAPREDUCE-476
            URI[] files = DistributedCache.getCacheFiles(job);
            localFiles = new Path[] { new Path(files[0].toString()) };
        }
        executable = localFiles[0].toString();
        FileUtil.chmod(executable.toString(), "a+x");
    } else {
        executable = job.get(TetherJob.TETHER_EXEC);
    }

    command.add(executable);

    // Add the executable arguments. We assume the arguments are separated by
    // newlines, so we split the argument string on newlines and add each
    // token to the command. We need to do it this way because
    // TaskLog.captureOutAndError will put quote marks around each argument, so
    // if we pass a single string containing all arguments it gets quoted
    // incorrectly.
    String args = job.get(TetherJob.TETHER_EXEC_ARGS);
    String[] aparams = args.split("\n");
    for (int i = 0; i < aparams.length; i++) {
        aparams[i] = aparams[i].trim();
        if (aparams[i].length() > 0) {
            command.add(aparams[i]);
        }
    }

    if (System.getProperty("hadoop.log.dir") == null && System.getenv("HADOOP_LOG_DIR") != null)
        System.setProperty("hadoop.log.dir", System.getenv("HADOOP_LOG_DIR"));

    // wrap the command in a stdout/stderr capture
    TaskAttemptID taskid = TaskAttemptID.forName(job.get("mapred.task.id"));
    File stdout = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDOUT);
    File stderr = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDERR);
    long logLength = TaskLog.getTaskLogLength(job);
    command = TaskLog.captureOutAndError(command, stdout, stderr, logLength);
    stdout.getParentFile().mkdirs();
    stderr.getParentFile().mkdirs();

    // add output server's port to env
    Map<String, String> env = new HashMap<String, String>();
    env.put("AVRO_TETHER_OUTPUT_PORT", Integer.toString(outputServer.getPort()));

    // start child process
    ProcessBuilder builder = new ProcessBuilder(command);
    System.out.println(command);
    builder.environment().putAll(env);
    return builder.start();
}

From source file: org.apache.falcon.service.SharedLibraryHostingService.java

License: Apache License

private void pushExtensionArtifactsToCluster(final Cluster cluster, final FileSystem clusterFs)
        throws FalconException {
    if (!Services.get().isRegistered(ExtensionService.SERVICE_NAME)) {
        LOG.info("ExtensionService not registered, return");
        return;
    }

    ExtensionStore store = ExtensionStore.get();
    if (!store.isExtensionStoreInitialized()) {
        LOG.info(
                "Extension store not initialized by Extension service. Make sure Extension service is added in "
                        + "start up properties");
        return;
    }

    final String filterPath = "/apps/falcon/extensions/mirroring/";
    Path extensionStorePath = store.getExtensionStorePath();
    LOG.info("extensionStorePath :{}", extensionStorePath);
    FileSystem falconFileSystem = HadoopClientFactory.get().createFalconFileSystem(extensionStorePath.toUri());
    String nameNode = StringUtils
            .removeEnd(falconFileSystem.getConf().get(HadoopClientFactory.FS_DEFAULT_NAME_KEY), File.separator);

    String clusterStorageUrl = StringUtils.removeEnd(ClusterHelper.getStorageUrl(cluster), File.separator);

    // If default fs for Falcon server is same as cluster fs abort copy
    if (nameNode.equalsIgnoreCase(clusterStorageUrl)) {
        LOG.info("clusterStorageUrl :{} same return", clusterStorageUrl);
        return;
    }

    try {
        RemoteIterator<LocatedFileStatus> fileStatusListIterator = falconFileSystem
                .listFiles(extensionStorePath, true);

        while (fileStatusListIterator.hasNext()) {
            LocatedFileStatus srcfileStatus = fileStatusListIterator.next();
            Path filePath = Path.getPathWithoutSchemeAndAuthority(srcfileStatus.getPath());

            if (filePath != null && filePath.toString().startsWith(filterPath)) {
                /* HiveDR uses filter path as store path in DRStatusStore, so skip it. Copy only the extension
                 artifacts */
                continue;
            }

            if (srcfileStatus.isDirectory()) {
                if (!clusterFs.exists(filePath)) {
                    HadoopClientFactory.mkdirs(clusterFs, filePath, srcfileStatus.getPermission());
                }
            } else {
                if (clusterFs.exists(filePath)) {
                    FileStatus targetfstat = clusterFs.getFileStatus(filePath);
                    if (targetfstat.getLen() == srcfileStatus.getLen()) {
                        continue;
                    }
                }

                Path parentPath = filePath.getParent();
                if (!clusterFs.exists(parentPath)) {
                    FsPermission dirPerm = falconFileSystem.getFileStatus(parentPath).getPermission();
                    HadoopClientFactory.mkdirs(clusterFs, parentPath, dirPerm);
                }

                FileUtil.copy(falconFileSystem, srcfileStatus, clusterFs, filePath, false, true,
                        falconFileSystem.getConf());
                FileUtil.chmod(clusterFs.makeQualified(filePath).toString(),
                        srcfileStatus.getPermission().toString());
            }
        }
    } catch (IOException | InterruptedException e) {
        throw new FalconException("Failed to copy extension artifacts to cluster " + cluster.getName(), e);
    }
}

From source file: org.apache.hama.pipes.Application.java

License: Apache License

/**
 * Start the child process to handle the task for us.
 *
 * @param peer the current peer including the task's configuration
 * @throws InterruptedException
 * @throws IOException
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
Application(BSPPeer<K1, V1, K2, V2, BytesWritable> peer) throws IOException, InterruptedException {

    Map<String, String> env = new HashMap<String, String>();
    boolean streamingEnabled = peer.getConfiguration().getBoolean("hama.streaming.enabled", false);

    if (!streamingEnabled) {
        serverSocket = new ServerSocket(0);
        env.put("hama.pipes.command.port", Integer.toString(serverSocket.getLocalPort()));
    }
    // add TMPDIR environment variable with the value of java.io.tmpdir
    env.put("TMPDIR", System.getProperty("java.io.tmpdir"));

    /* Set Logging Environment from Configuration */
    env.put("hama.pipes.logging", peer.getConfiguration().getBoolean("hama.pipes.logging", false) ? "1" : "0");
    LOG.debug("DEBUG hama.pipes.logging: " + peer.getConfiguration().getBoolean("hama.pipes.logging", false));

    List<String> cmd = new ArrayList<String>();
    String interpretor = peer.getConfiguration().get("hama.pipes.executable.interpretor");
    if (interpretor != null) {
        cmd.add(interpretor);
    }

    String executable = null;
    try {
        LOG.debug("DEBUG LocalCacheFilesCount: "
                + DistributedCache.getLocalCacheFiles(peer.getConfiguration()).length);
        for (Path u : DistributedCache.getLocalCacheFiles(peer.getConfiguration()))
            LOG.debug("DEBUG LocalCacheFiles: " + u);

        executable = DistributedCache.getLocalCacheFiles(peer.getConfiguration())[0].toString();

        LOG.info("executable: " + executable);

    } catch (Exception e) {
        LOG.error("Executable: " + executable + " fs.default.name: "
                + peer.getConfiguration().get("fs.default.name"));

        throw new IOException("Executable is missing!");
    }

    if (!new File(executable).canExecute()) {
        // LinuxTaskController sets +x permissions on all distcache files already.
        // In case of DefaultTaskController, set permissions here.
        FileUtil.chmod(executable, "u+x");
    }
    cmd.add(executable);

    String additionalArgs = peer.getConfiguration().get("hama.pipes.executable.args");
    // if true, we are resolving filenames with the linked paths in
    // DistributedCache
    boolean resolveArguments = peer.getConfiguration().getBoolean("hama.pipes.resolve.executable.args", false);
    if (additionalArgs != null && !additionalArgs.isEmpty()) {
        String[] split = additionalArgs.split(" ");
        for (String s : split) {
            if (resolveArguments) {
                for (Path u : DistributedCache.getLocalCacheFiles(peer.getConfiguration())) {
                    if (u.getName().equals(s)) {
                        LOG.info("Resolved argument \"" + s + "\" with fully qualified path \"" + u.toString()
                                + "\"!");
                        cmd.add(u.toString());
                        break;
                    }
                }
            } else {
                cmd.add(s);
            }
        }
    }

    // wrap the command in a stdout/stderr capture
    TaskAttemptID taskid = peer.getTaskId();
    File stdout = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDOUT);
    File stderr = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDERR);
    // Get the desired maximum length of task's logs.
    long logLength = TaskLog.getTaskLogLength(peer.getConfiguration());
    if (!streamingEnabled) {
        cmd = TaskLog.captureOutAndError(null, cmd, stdout, stderr, logLength);
    } else {
        // use tee in streaming to get the output to file
        cmd = TaskLog.captureOutAndErrorTee(null, cmd, stdout, stderr, logLength);
    }

    if (!stdout.getParentFile().exists()) {
        stdout.getParentFile().mkdirs();
        LOG.info("STDOUT: " + stdout.getParentFile().getAbsolutePath() + " created!");
    }
    LOG.info("STDOUT: " + stdout.getAbsolutePath());

    if (!stderr.getParentFile().exists()) {
        stderr.getParentFile().mkdirs();
        LOG.info("STDERR: " + stderr.getParentFile().getAbsolutePath() + " created!");
    }
    LOG.info("STDERR: " + stderr.getAbsolutePath());

    LOG.info("DEBUG: cmd: " + cmd);

    process = runClient(cmd, env); // fork c++ binary

    try {
        if (streamingEnabled) {
            downlink = new StreamingProtocol(peer, process.getOutputStream(), process.getInputStream());
        } else {
            LOG.info("DEBUG: waiting for Client at " + serverSocket.getLocalSocketAddress());
            serverSocket.setSoTimeout(2000);
            clientSocket = serverSocket.accept();
            downlink = new BinaryProtocol<K1, V1, K2, V2>(peer, clientSocket.getOutputStream(),
                    clientSocket.getInputStream());
        }
        downlink.start();

    } catch (SocketException e) {
        throw new SocketException("Timeout: client pipes application did not connect!");
    }
}

From source file: org.apache.hama.pipes.PipesApplication.java

License: Apache License

private List<String> setupCommand(Configuration conf) throws IOException, InterruptedException {

    List<String> cmd = new ArrayList<String>();
    String interpretor = conf.get("hama.pipes.executable.interpretor");
    if (interpretor != null) {
        cmd.add(interpretor);
    }

    String executable = null;
    try {
        if (DistributedCache.getLocalCacheFiles(conf) != null) {
            LOG.debug("DEBUG LocalCacheFilesCount: " + DistributedCache.getLocalCacheFiles(conf).length);
            for (Path u : DistributedCache.getLocalCacheFiles(conf))
                LOG.debug("DEBUG LocalCacheFiles: " + u);

            executable = DistributedCache.getLocalCacheFiles(conf)[0].toString();

            LOG.debug("DEBUG: executable: " + executable);
        } else {
            LOG.debug("DEBUG: DistributedCache.getLocalCacheFiles(conf) returns null.");
            throw new IOException("Executable is missing!");
        }
    } catch (Exception e) {
        LOG.error("Executable: " + executable + " fs.default.name: " + conf.get("fs.default.name"));

        throw new IOException("Executable is missing!");
    }

    if (!new File(executable).canExecute()) {
        // LinuxTaskController sets +x permissions on all distcache files already.
        // In case of DefaultTaskController, set permissions here.
        FileUtil.chmod(executable, "u+x");
    }

    cmd.add(executable);

    String additionalArgs = conf.get("hama.pipes.executable.args");
    // if true, we are resolving filenames with the linked paths in
    // DistributedCache
    boolean resolveArguments = conf.getBoolean("hama.pipes.resolve.executable.args", false);
    if (additionalArgs != null && !additionalArgs.isEmpty()) {
        String[] split = additionalArgs.split(" ");
        for (String s : split) {
            if (resolveArguments) {
                for (Path u : DistributedCache.getLocalCacheFiles(conf)) {
                    if (u.getName().equals(s)) {
                        LOG.info("Resolved argument \"" + s + "\" with fully qualified path \"" + u.toString()
                                + "\"!");
                        cmd.add(u.toString());
                        break;
                    }
                }
            } else {
                cmd.add(s);
            }
        }
    }

    return cmd;
}

From source file: org.apache.pig.backend.hadoop.streaming.HadoopExecutableManager.java

License: Apache License

public void configure(POStream stream) throws IOException, ExecException {
    super.configure(stream);

    // Chmod +x the executable
    File executable = new File(command.getExecutable());
    if (executable.isAbsolute()) {
        // we don't own it. Hope it is executable ...
    } else {
        try {
            FileUtil.chmod(executable.toString(), "a+x");
        } catch (InterruptedException ie) {
            int errCode = 6013;
            String msg = "Unable to chmod " + executable + ". Thread interrupted.";
            throw new ExecException(msg, errCode, PigException.REMOTE_ENVIRONMENT, ie);
        }
    }

    // Save a copy of the JobConf
    job = PigMapReduce.sJobConfInternal.get();

    // Save the output directory for the Pig Script
    scriptOutputDir = job.get("pig.streaming.task.output.dir");
    scriptLogDir = job.get("pig.streaming.log.dir", "_logs");

    // Save the taskid
    // TODO Get an equivalent property in Tez mode (currently this returns null)
    taskId = job.get(MRConfiguration.TASK_ID);
}

From source file: org.deeplearning4j.hadoop.util.HdfsUtils.java

License: Apache License

public static void ensureUserDirExists(Configuration conf) throws Exception {
    FileSystem fs = getFileSystem(conf);
    if (!fs.exists(new Path(prependUserPath("")))) {
        boolean dirs = fs.mkdirs(new Path(prependUserPath("")));
        if (!dirs)
            throw new IllegalStateException("Couldn't make " + prependUserPath(""));
        FileUtil.chmod(prependUserPath(""), "777");
    }

}

From source file: origin.hadoop.yarn.unmanagedamlauncher.UnmanagedAMLauncher.java

License: Apache License

public void launchAM(ApplicationAttemptId attemptId) throws IOException, YarnException {
    Credentials credentials = new Credentials();
    Token<AMRMTokenIdentifier> token = rmClient.getAMRMToken(attemptId.getApplicationId());
    // Service will be empty but that's okay, we are just passing down only
    // AMRMToken down to the real AM which eventually sets the correct
    // service-address.
    credentials.addToken(token.getService(), token);
    File tokenFile = File.createTempFile("unmanagedAMRMToken", "", new File(System.getProperty("user.dir")));
    try {
        FileUtil.chmod(tokenFile.getAbsolutePath(), "600");
    } catch (InterruptedException ex) {
        throw new RuntimeException(ex);
    }
    tokenFile.deleteOnExit();
    DataOutputStream os = new DataOutputStream(new FileOutputStream(tokenFile, true));
    credentials.writeTokenStorageToStream(os);
    os.close();

    Map<String, String> env = System.getenv();
    ArrayList<String> envAMList = new ArrayList<String>();
    boolean setClasspath = false;
    for (Map.Entry<String, String> entry : env.entrySet()) {
        String key = entry.getKey();
        String value = entry.getValue();
        if (key.equals("CLASSPATH")) {
            setClasspath = true;
            if (classpath != null) {
                value = value + File.pathSeparator + classpath;
            }
        }
        envAMList.add(key + "=" + value);
    }

    if (!setClasspath && classpath != null) {
        envAMList.add("CLASSPATH=" + classpath);
    }
    ContainerId containerId = ContainerId.newInstance(attemptId, 0);

    String hostname = InetAddress.getLocalHost().getHostName();
    envAMList.add(Environment.CONTAINER_ID.name() + "=" + containerId);
    envAMList.add(Environment.NM_HOST.name() + "=" + hostname);
    envAMList.add(Environment.NM_HTTP_PORT.name() + "=0");
    envAMList.add(Environment.NM_PORT.name() + "=0");
    envAMList.add(Environment.LOCAL_DIRS.name() + "= /tmp");
    envAMList.add(ApplicationConstants.APP_SUBMIT_TIME_ENV + "=" + System.currentTimeMillis());

    envAMList.add(ApplicationConstants.CONTAINER_TOKEN_FILE_ENV_NAME + "=" + tokenFile.getAbsolutePath());

    String[] envAM = new String[envAMList.size()];
    Process amProc = Runtime.getRuntime().exec(amCmd, envAMList.toArray(envAM));

    final BufferedReader errReader = new BufferedReader(new InputStreamReader(amProc.getErrorStream()));
    final BufferedReader inReader = new BufferedReader(new InputStreamReader(amProc.getInputStream()));

    // read error and input streams as this would free up the buffers
    // free the error stream buffer
    Thread errThread = new Thread() {
        @Override
        public void run() {
            try {
                String line = errReader.readLine();
                while ((line != null) && !isInterrupted()) {
                    System.err.println(line);
                    line = errReader.readLine();
                }
            } catch (IOException ioe) {
                LOG.warn("Error reading the error stream", ioe);
            }
        }
    };
    Thread outThread = new Thread() {
        @Override
        public void run() {
            try {
                String line = inReader.readLine();
                while ((line != null) && !isInterrupted()) {
                    System.out.println(line);
                    line = inReader.readLine();
                }
            } catch (IOException ioe) {
                LOG.warn("Error reading the out stream", ioe);
            }
        }
    };
    try {
        errThread.start();
        outThread.start();
    } catch (IllegalStateException ise) {
    }

    // wait for the process to finish and check the exit code
    try {
        int exitCode = amProc.waitFor();
        LOG.info("AM process exited with value: " + exitCode);
    } catch (InterruptedException e) {
        e.printStackTrace();
    } finally {
        amCompleted = true;
    }

    try {
        // make sure that the error thread exits
        // on Windows these threads sometimes get stuck and hang the execution
        // timeout and join later after destroying the process.
        errThread.join();
        outThread.join();
        errReader.close();
        inReader.close();
    } catch (InterruptedException ie) {
        LOG.info("ShellExecutor: Interrupted while reading the error/out stream", ie);
    } catch (IOException ioe) {
        LOG.warn("Error while closing the error/out stream", ioe);
    }
    amProc.destroy();
}