Example usage for org.apache.hadoop.util Shell WINDOWS

Introduction

This page collects example usages of the org.apache.hadoop.util Shell.WINDOWS field, drawn from open-source projects.

Prototype

public static final boolean WINDOWS
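
The field is a public static constant on org.apache.hadoop.util.Shell that is true when the JVM is running on Windows; it is resolved once at class-load time. A minimal sketch of the typical branching idiom (the class name and command strings below are illustrative, not taken from the sources on this page):

import org.apache.hadoop.util.Shell;

public class OsAwareCommands {

    // Return a platform-appropriate directory-listing command.
    public static String[] listDirectoryCommand() {
        return Shell.WINDOWS
                ? new String[] { "cmd", "/c", "dir" } // Windows built-in via cmd
                : new String[] { "ls", "-al" };       // POSIX equivalent
    }
}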

Usage

From source file:ApplicationMaster.java

License:Apache License

/**
 * Dump out contents of $CWD and the environment to stdout for debugging
 */
private void dumpOutDebugInfo() {

    LOG.info("Dump debug output");
    Map<String, String> envs = System.getenv();
    for (Map.Entry<String, String> env : envs.entrySet()) {
        LOG.info("System env: key=" + env.getKey() + ", val=" + env.getValue());
        System.out.println("System env: key=" + env.getKey() + ", val=" + env.getValue());
    }

    BufferedReader buf = null;
    try {
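        // Pick the platform-appropriate directory listing: "cmd /c dir" on
        // Windows, "ls -al" everywhere else.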
        String lines = Shell.WINDOWS ? Shell.execCommand("cmd", "/c", "dir") : Shell.execCommand("ls", "-al");
        buf = new BufferedReader(new StringReader(lines));
        String line = "";
        while ((line = buf.readLine()) != null) {
            LOG.info("System CWD content: " + line);
            System.out.println("System CWD content: " + line);
        }
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        IOUtils.cleanup(LOG, buf);
    }
}

From source file:com.bigjob.Client.java

License:Apache License

/**
 * Main run function for the client.
 * @return true if application completed successfully
 * @throws IOException
 * @throws YarnException
 */
public boolean run() throws IOException, YarnException {

    LOG.info("Running Client");
    yarnClient.start();

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM (RM)" + ", numNodeManagers="
            + clusterMetrics.getNumNodeManagers());

    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    LOG.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress"
                + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers"
                + node.getNumContainers());
    }

    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity="
            + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
            + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount="
            + queueInfo.getChildQueues().size());

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                    + userAcl.name());
        }
    }

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    // TODO get min/max resource capabilities from RM and change memory ask if needed
    // If we do not have min/max, we may not be able to correctly request 
    // the required resources from the RM for the app master
    // Memory ask has to be a multiple of min and less than max. 
    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    // A resource ask cannot exceed the max. 
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified="
                + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    int maxVCores = appResponse.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max virtual cores capabililty of resources in this cluster " + maxVCores);

    if (amVCores > maxVCores) {
        LOG.info("AM virtual cores specified above max threshold of cluster. " + "Using max value."
                + ", specified=" + amVCores + ", max=" + maxVCores);
        amVCores = maxVCores;
    }

    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();
    appContext.setApplicationName(appName);

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of the local resources         
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem 
    // Create a local resource to point to the destination jar path 
    //    if (dfsUrl!=null && dfsUrl.equals("")==false){
    //       conf.set("fs.defaultFS", dfsUrl);
    //    }
    FileSystem fs = FileSystem.get(conf);
    addToLocalResources(fs, appMasterJar, appMasterJarPath, appId.getId(), localResources, null);

    // Set the log4j properties if needed 
    if (!log4jPropFile.isEmpty()) {
        addToLocalResources(fs, log4jPropFile, log4jPath, appId.getId(), localResources, null);
    }

    // The shell script has to be made available on the final container(s)
    // where it will be executed. 
    // To do this, we need to first copy into the filesystem that is visible 
    // to the yarn framework. 
    // We do not need to set this as a local resource for the application 
    // master as the application master does not need it.       
    String hdfsShellScriptLocation = "";
    long hdfsShellScriptLen = 0;
    long hdfsShellScriptTimestamp = 0;
    if (!shellScriptPath.isEmpty()) {
        Path shellSrc = new Path(shellScriptPath);
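        // Choose the platform-appropriate script name for the copy in HDFS
        // (a Windows .bat script or a Unix shell script).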
        String shellPathSuffix = appName + "/" + appId.getId() + "/"
                + (Shell.WINDOWS ? windowBatPath : linuxShellPath);
        Path shellDst = new Path(fs.getHomeDirectory(), shellPathSuffix);
        fs.copyFromLocalFile(false, true, shellSrc, shellDst);
        hdfsShellScriptLocation = shellDst.toUri().toString();
        FileStatus shellFileStatus = fs.getFileStatus(shellDst);
        hdfsShellScriptLen = shellFileStatus.getLen();
        hdfsShellScriptTimestamp = shellFileStatus.getModificationTime();
    }

    if (!shellCommand.isEmpty()) {
        addToLocalResources(fs, null, shellCommandPath, appId.getId(), localResources, shellCommand);
    }

    if (shellArgs.length > 0) {
        addToLocalResources(fs, null, shellArgsPath, appId.getId(), localResources,
                StringUtils.join(shellArgs, " "));
    }
    // Set local resource info into app master container launch context
    amContainer.setLocalResources(localResources);

    // Set the necessary security tokens as needed
    //amContainer.setContainerTokens(containerToken);

    // Set the env variables to be setup in the env where the application master will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();

    // put location of shell script into env
    // using the env info, the application master will create the correct local resource for the 
    // eventual containers that will be launched to execute the shell scripts
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTLOCATION, hdfsShellScriptLocation);
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTTIMESTAMP, Long.toString(hdfsShellScriptTimestamp));
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTLEN, Long.toString(hdfsShellScriptLen));

    // Add AppMaster.jar location to classpath       
    // At some point we should not be required to add 
    // the hadoop specific classpaths to the env. 
    // It should be provided out of the box. 
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$()).append(File.pathSeparatorChar)
            .append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");

    // add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
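        // Note: the literal ':' assumes a Unix-style path separator; this
        // branch only runs under the mini YARN cluster used in tests.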
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }

    env.put("CLASSPATH", classPathEnv.toString());

    amContainer.setEnvironment(env);

    // Set the necessary command to execute the application master 
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);

    // Set java executable command 
    LOG.info("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    // Set class name 
    vargs.add(appMasterMainClass);
    // Set params for Application Master
    vargs.add("--container_memory " + String.valueOf(containerMemory));
    vargs.add("--container_vcores " + String.valueOf(containerVirtualCores));
    vargs.add("--num_containers " + String.valueOf(numContainers));
    vargs.add("--priority " + String.valueOf(shellCmdPriority));

    for (Map.Entry<String, String> entry : shellEnv.entrySet()) {
        vargs.add("--shell_env " + entry.getKey() + "=" + entry.getValue());
    }
    if (debugFlag) {
        vargs.add("--debug");
    }

    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    amContainer.setCommands(commands);

    // Set up resource type requirements
    // For now, both memory and vcores are supported, so we set memory and 
    // vcores requirements
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMemory);
    capability.setVirtualCores(amVCores);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application
    // Not needed in this scenario
    // amContainer.setServiceData(serviceData);

    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    // TODO - what is the range for priority? how to decide? 
    pri.setPriority(amPriority);
    appContext.setPriority(pri);

    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);

    // Submit the application to the applications manager
    // SubmitApplicationResponse submitResp = applicationsManager.submitApplication(appRequest);
    // Ignore the response as either a valid response object is returned on success 
    // or an exception thrown to denote some form of a failure
    LOG.info("Submitting application to ASM");

    yarnClient.submitApplication(appContext);

    // TODO
    // Try submitting the same request again
    // app submission failure?

    // Monitor the application
    //return monitorApplication(appId);
    System.out.println("ApplicationId:" + appId);
    return true;
}

From source file:com.cloudera.sqoop.io.TestNamedFifo.java

License:Apache License

public void testNamedFifo() throws Exception {

    if (Shell.WINDOWS) {
        // NamedFifo uses Linux specific commands like mknod
        // and mkfifo, so skip the test on Windows OS
        LOG.warn("Named FIFO is not supported on Windows. Skipping test");
        return;
    }

    File root = new File(TEMP_BASE_DIR.toString());
    File fifo = new File(root, "foo-fifo");

    NamedFifo nf = new NamedFifo(fifo);
    nf.create();

    File returned = nf.getFile();

    // These should be the same object.
    assertEquals(fifo, returned);

    ReaderThread rt = new ReaderThread(returned);
    WriterThread wt = new WriterThread(returned);

    rt.start();
    wt.start();

    rt.join();
    wt.join();

    IOException rex = rt.getException();
    IOException wex = wt.getException();

    if (null != rex) {
        LOG.error("reader exception: " + StringUtils.stringifyException(rex));
    }

    if (null != wex) {
        LOG.error("writer exception: " + StringUtils.stringifyException(wex));
    }

    assertNull(rex);
    assertNull(wex);
}

From source file:com.cloudera.sqoop.orm.TestClassWriter.java

License:Apache License

/**
 * Run a test to verify that we can generate code and that it emits the
 * output files where we expect them.
 * @return the compiled jar file
 */
private File runGenerationTest(String[] argv, String classNameToCheck) {
    File codeGenDirFile = new File(CODE_GEN_DIR);
    File classGenDirFile = new File(JAR_GEN_DIR);

    try {
        options = new ImportTool().parseArguments(argv, null, options, true);
    } catch (Exception e) {
        LOG.error("Could not parse options: " + e.toString());
    }

    CompilationManager compileMgr = new CompilationManager(options);
    ClassWriter writer = new ClassWriter(options, manager, HsqldbTestServer.getTableName(), compileMgr);

    try {
        writer.generate();
        compileMgr.compile();
        compileMgr.jar();
    } catch (IOException ioe) {
        LOG.error("Got IOException: " + ioe.toString());
        fail("Got IOException: " + ioe.toString());
    }

    String classFileNameToCheck = classNameToCheck.replace('.', File.separatorChar);
    LOG.debug("Class file to check for: " + classFileNameToCheck);

    // Check that all the files we expected to generate (.java, .class, .jar)
    // exist.
    File tableFile = new File(codeGenDirFile, classFileNameToCheck + ".java");
    assertTrue("Cannot find generated source file for table!", tableFile.exists());
    LOG.debug("Found generated source: " + tableFile);

    File tableClassFile = new File(classGenDirFile, classFileNameToCheck + ".class");
    assertTrue("Cannot find generated class file for table!", tableClassFile.exists());
    LOG.debug("Found generated class: " + tableClassFile);

    File jarFile = new File(compileMgr.getJarFilename());
    assertTrue("Cannot find compiled jar", jarFile.exists());
    LOG.debug("Found generated jar: " + jarFile);

    // check that the .class file made it into the .jar by enumerating
    // available entries in the jar file.
    boolean foundCompiledClass = false;
    if (Shell.WINDOWS) {
        // In Windows OS, elements in jar files still need to have a path
        // separator of '/' rather than the default File.separator which is '\'
        classFileNameToCheck = classFileNameToCheck.replace(File.separator, "/");
    }
    try {
        JarInputStream jis = new JarInputStream(new FileInputStream(jarFile));

        LOG.debug("Jar file has entries:");
        while (true) {
            JarEntry entry = jis.getNextJarEntry();
            if (null == entry) {
                // no more entries.
                break;
            }

            if (entry.getName().equals(classFileNameToCheck + ".class")) {
                foundCompiledClass = true;
                LOG.debug(" * " + entry.getName());
            } else {
                LOG.debug("   " + entry.getName());
            }
        }

        jis.close();
    } catch (IOException ioe) {
        fail("Got IOException iterating over Jar file: " + ioe.toString());
    }

    assertTrue("Cannot find .class file " + classFileNameToCheck + ".class in jar file", foundCompiledClass);

    LOG.debug("Found class in jar - test success!");
    return jarFile;
}

From source file:com.lenovo.tensorhusky.common.utils.ProcessIdFileReader.java

License:Apache License

/**
 * Get the process id from the specified file path. Parses each line to find
 * a valid number and returns the first one found.
 *
 * @param path the pid file to read
 * @param check on non-Windows hosts, whether to require that the line parse
 *              as a positive number
 * @return the process id if obtained from the specified path, else null
 * @throws IOException
 */
public static String getProcessId(Path path, boolean check) throws IOException {
    if (path == null) {
        throw new IOException("Trying to access process id from a null path");
    }

    LOG.debug("Accessing pid from pid file " + path);
    String processId = null;
    BufferedReader bufReader = null;

    try {
        File file = new File(path.toString());
        if (file.exists()) {
            FileInputStream fis = new FileInputStream(file);
            bufReader = new BufferedReader(new InputStreamReader(fis, "UTF-8"));

            while (true) {
                String line = bufReader.readLine();
                if (line == null) {
                    break;
                }
                String temp = line.trim();
                if (!temp.isEmpty()) {
                    if (Shell.WINDOWS) {
                        // On Windows the pid file is expected to hold a
                        // container ID rather than a numeric pid; accept the
                        // first non-empty line as-is.
                        processId = temp;
                        break;
                    } else {
                        // Otherwise, find first line containing a numeric pid.
                        if (check) {
                            try {
                                Long pid = Long.valueOf(temp);
                                if (pid > 0) {
                                    processId = temp;
                                    break;
                                }
                            } catch (Exception e) {
                                // do nothing
                            }
                        } else {
                            processId = temp;
                        }
                    }
                }
            }
        }
    } finally {
        if (bufReader != null) {
            bufReader.close();
        }
    }
    LOG.debug("Got pid " + (processId != null ? processId : "null") + " from path " + path);
    return processId;
}
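
A hedged usage sketch (the pid-file location and logger are illustrative, not from the source above):

// Read a pid file, requiring the content to parse as a positive number
// on non-Windows hosts.
String pid = ProcessIdFileReader.getProcessId(new Path("/var/run/container.pid"), true);
if (pid == null) {
    LOG.warn("No usable pid found in pid file");
}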

From source file:com.lenovo.tensorhusky.common.utils.WindowsBasedProcessTree.java

License:Apache License

public static boolean isAvailable() {
    if (Shell.WINDOWS) {
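        // Probe the winutils.exe helper by invoking its "help" command and
        // checking its output for a known usage string.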
        ShellCommandExecutor shellExecutor = new ShellCommandExecutor(new String[] { Shell.WINUTILS, "help" });
        try {
            shellExecutor.execute();
        } catch (IOException e) {
            LOG.error(StringUtils.stringifyException(e));
        } finally {
            String output = shellExecutor.getOutput();
            if (output != null && output.contains("Prints to stdout a list of processes in the task")) {
                return true;
            }
        }
    }
    return false;
}

From source file:io.hops.tensorflow.ApplicationMaster.java

License:Apache License

/**
 * Dump out contents of $CWD and the environment to stdout for debugging
 */
private void dumpOutDebugInfo() {
    LOG.info("Dump debug output");
    Map<String, String> envs = System.getenv();
    for (Map.Entry<String, String> env : envs.entrySet()) {
        LOG.info("System env: key=" + env.getKey() + ", val=" + env.getValue());
        System.out.println("System env: key=" + env.getKey() + ", val=" + env.getValue());
    }

    BufferedReader buf = null;
    try {
        String lines = Shell.WINDOWS ? Shell.execCommand("cmd", "/c", "dir") : Shell.execCommand("ls", "-al");
        buf = new BufferedReader(new StringReader(lines));
        String line = "";
        while ((line = buf.readLine()) != null) {
            LOG.info("System CWD content: " + line);
            System.out.println("System CWD content: " + line);
        }
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        IOUtils.cleanup(LOG, buf);
    }
}

From source file:io.hops.tensorflow.TestUtils.java

License:Apache License

public static int verifyContainerLog(MiniYARNCluster yarnCluster, int containerNum,
        List<String> expectedContent, boolean count, String expectedWord) {
    File logFolder = new File(yarnCluster.getNodeManager(0).getConfig().get(YarnConfiguration.NM_LOG_DIRS,
            YarnConfiguration.DEFAULT_NM_LOG_DIRS));

    File[] listOfFiles = logFolder.listFiles();
    int currentContainerLogFileIndex = -1;
    for (int i = listOfFiles.length - 1; i >= 0; i--) {
        if (listOfFiles[i].listFiles().length == containerNum + 1) {
            currentContainerLogFileIndex = i;
            break;
        }
    }
    Assert.assertTrue(currentContainerLogFileIndex != -1);
    File[] containerFiles = listOfFiles[currentContainerLogFileIndex].listFiles();

    int numOfWords = 0;
    for (int i = 0; i < containerFiles.length; i++) {
        for (File output : containerFiles[i].listFiles()) {
            if (output.getName().trim().contains("stdout")) {
                BufferedReader br = null;
                List<String> stdOutContent = new ArrayList<String>();
                try {

                    String sCurrentLine;
                    br = new BufferedReader(new FileReader(output));
                    int numOfline = 0;
                    while ((sCurrentLine = br.readLine()) != null) {
                        if (count) {
                            if (sCurrentLine.contains(expectedWord)) {
                                numOfWords++;
                            }
                        } else if (output.getName().trim().equals("stdout")) {
                            if (!Shell.WINDOWS) {
                                Assert.assertEquals("The current is" + sCurrentLine,
                                        expectedContent.get(numOfline), sCurrentLine.trim());
                                numOfline++;
                            } else {
                                stdOutContent.add(sCurrentLine.trim());
                            }
                        }
                    }
                    /* When a .bat script is executed via "cmd /c", all of its
                     * output is emitted at once, which makes a line-by-line
                     * comparison unreliable. Instead, simply check that the
                     * script's output contains all of the expected messages.
                     */
                    if (Shell.WINDOWS && !count && output.getName().trim().equals("stdout")) {
                        Assert.assertTrue(stdOutContent.containsAll(expectedContent));
                    }
                } catch (IOException e) {
                    e.printStackTrace();
                } finally {
                    try {
                        if (br != null) {
                            br.close();
                        }
                    } catch (IOException ex) {
                        ex.printStackTrace();
                    }
                }
            }
        }
    }
    return numOfWords;
}

From source file:it.crs4.pydoop.mapreduce.pipes.TaskLog.java

License:Apache License

/**
 * Construct the command line for running the task JVM.
 * @param setup The setup commands for the execed process.
 * @param cmd The command and the arguments that should be run
 * @param stdoutFilename The filename that stdout should be saved to
 * @param stderrFilename The filename that stderr should be saved to
 * @param tailLength The length of the tail to be saved.
 * @param useSetsid Whether to launch the command under setsid, when available
 * @return the command line as a String
 * @throws IOException
 */
static String buildCommandLine(List<String> setup, List<String> cmd, File stdoutFilename, File stderrFilename,
        long tailLength, boolean useSetsid) throws IOException {

    String stdout = FileUtil.makeShellPath(stdoutFilename);
    String stderr = FileUtil.makeShellPath(stderrFilename);
    StringBuffer mergedCmd = new StringBuffer();

    // Export the pid of taskJvm to env variable JVM_PID.
    // Currently pid is not used on Windows
    if (!Shell.WINDOWS) {
        mergedCmd.append(" export JVM_PID=`echo $$` ; ");
    }

    if (setup != null && setup.size() > 0) {
        mergedCmd.append(addCommand(setup, false));
        mergedCmd.append(";");
    }
    if (tailLength > 0) {
        mergedCmd.append("(");
    } else if (ProcessTree.isSetsidAvailable && useSetsid && !Shell.WINDOWS) {
        mergedCmd.append("exec setsid ");
    } else {
        mergedCmd.append("exec ");
    }
    mergedCmd.append(addCommand(cmd, true));
    mergedCmd.append(" < /dev/null ");
    if (tailLength > 0) {
        mergedCmd.append(" | ");
        mergedCmd.append(tailCommand);
        mergedCmd.append(" -c ");
        mergedCmd.append(tailLength);
        mergedCmd.append(" >> ");
        mergedCmd.append(stdout);
        mergedCmd.append(" ; exit $PIPESTATUS ) 2>&1 | ");
        mergedCmd.append(tailCommand);
        mergedCmd.append(" -c ");
        mergedCmd.append(tailLength);
        mergedCmd.append(" >> ");
        mergedCmd.append(stderr);
        mergedCmd.append(" ; exit $PIPESTATUS");
    } else {
        mergedCmd.append(" 1>> ");
        mergedCmd.append(stdout);
        mergedCmd.append(" 2>> ");
        mergedCmd.append(stderr);
    }
    return mergedCmd.toString();
}

From source file:org.apache.falcon.hive.util.HiveDRUtils.java

License:Apache License

public static Configuration getDefaultConf() throws IOException {
    Configuration conf = new Configuration();

    if (System.getProperty("oozie.action.conf.xml") != null) {
        Path confPath = new Path("file:///", System.getProperty("oozie.action.conf.xml"));

        final boolean actionConfExists = confPath.getFileSystem(conf).exists(confPath);
        LOG.info("Oozie Action conf {} found ? {}", confPath, actionConfExists);
        if (actionConfExists) {
            LOG.info("Oozie Action conf found, adding path={}, conf={}", confPath, conf.toString());
            conf.addResource(confPath);
        }
    }

    String tokenFile = System.getenv("HADOOP_TOKEN_FILE_LOCATION");
    if (StringUtils.isNotBlank(tokenFile)) {
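        // On Windows the environment value may arrive wrapped in double
        // quotes; strip them before handing the path to the configuration.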
        if (Shell.WINDOWS) {
            if (tokenFile.charAt(0) == '"') {
                tokenFile = tokenFile.substring(1);
            }
            if (tokenFile.charAt(tokenFile.length() - 1) == '"') {
                tokenFile = tokenFile.substring(0, tokenFile.length() - 1);
            }
        }

        conf.set("mapreduce.job.credentials.binary", tokenFile);
        System.setProperty("mapreduce.job.credentials.binary", tokenFile);
        conf.set("tez.credentials.path", tokenFile);
        System.setProperty("tez.credentials.path", tokenFile);
    }

    return conf;
}