Example usage for org.apache.hadoop.fs FileSystem isFile

Introduction

This page collects usage examples for org.apache.hadoop.fs.FileSystem#isFile.

Prototype

@Deprecated
public boolean isFile(Path f) throws IOException 

Document

Returns true iff the named path is a regular file. The method is deprecated; the Hadoop javadoc recommends getFileStatus(Path) (and FileStatus#isFile) instead.
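
Before the full examples, a minimal self-contained sketch (the path and the default-filesystem setup are illustrative, not taken from the sources below) shows the deprecated call next to its recommended equivalent:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf); // default file system from the configuration
        Path p = new Path("/tmp/example.txt"); // illustrative path

        // Deprecated but still functional: true iff p exists and is a regular file.
        boolean viaIsFile = fs.isFile(p);

        // Recommended replacement: getFileStatus(Path) throws FileNotFoundException when
        // the path does not exist, so guard with exists() or catch that exception.
        boolean viaStatus = fs.exists(p) && fs.getFileStatus(p).isFile();

        System.out.println(viaIsFile + " " + viaStatus);
    }
}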

Usage

From source file: org.apache.oozie.action.hadoop.FsELFunctions.java

License: Apache License

/**
 * Return the total size of all files directly in the directory (not recursive).
 *
 * @param pathUri file system path URI.
 * @return the size of all files in the directory, or -1 if the directory does not exist or is a regular file.
 * @throws Exception if the URI is malformed or the file system cannot be accessed.
 */
public static long fs_dirSize(String pathUri) throws Exception {
    URI uri = new URI(pathUri);
    String path = uri.getPath();
    long size = -1;
    try {
        FileSystem fs = getFileSystem(uri);
        Path p = new Path(path);
        if (fs.exists(p) && !fs.isFile(p)) {
            FileStatus[] stati = fs.listStatus(p);
            size = 0;
            if (stati != null) {
                for (FileStatus status : stati) {
                    if (!status.isDir()) {
                        size += status.getLen();
                    }
                }
            }
        }
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
    return size;
}
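
The exists()/isFile() pair above makes two name-node calls before the listing. A hypothetical variant (not part of FsELFunctions) folds both checks into a single getFileStatus call, treating FileNotFoundException as the missing-path case; it assumes the java.io and org.apache.hadoop.fs imports are in place:

public static long dirSize(FileSystem fs, Path p) throws IOException {
    try {
        FileStatus stat = fs.getFileStatus(p); // throws FileNotFoundException if p is absent
        if (stat.isFile()) {
            return -1; // a regular file, not a directory
        }
        long size = 0;
        for (FileStatus status : fs.listStatus(p)) {
            if (status.isFile()) {
                size += status.getLen(); // count only direct children that are files
            }
        }
        return size;
    } catch (FileNotFoundException e) {
        return -1; // the directory does not exist
    }
}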

From source file: org.apache.oozie.action.hadoop.JavaActionExecutor.java

License: Apache License

@SuppressWarnings("unchecked")
JobConf createLauncherConf(FileSystem actionFs, Context context, WorkflowAction action, Element actionXml,
        Configuration actionConf) throws ActionExecutorException {
    try {

        // app path could be a file
        Path appPathRoot = new Path(context.getWorkflow().getAppPath());
        if (actionFs.isFile(appPathRoot)) {
            appPathRoot = appPathRoot.getParent();
        }

        // launcher job configuration
        JobConf launcherJobConf = createBaseHadoopConf(context, actionXml);
        // cancel the delegation token on a launcher job, which stays alive until the child job(s) finish;
        // otherwise (for a map-reduce action), do not cancel it, so as not to disturb the running child job
        launcherJobConf.setBoolean("mapreduce.job.complete.cancel.delegation.tokens", true);
        setupLauncherConf(launcherJobConf, actionXml, appPathRoot, context);

        String launcherTag = null;
        // Extracting tag and appending action name to maintain the uniqueness.
        if (context.getVar(ActionStartXCommand.OOZIE_ACTION_YARN_TAG) != null) {
            launcherTag = context.getVar(ActionStartXCommand.OOZIE_ACTION_YARN_TAG);
        } else { //Keeping it to maintain backward compatibility with test cases.
            launcherTag = action.getId();
        }

        // Properties for when a launcher job's AM gets restarted
        if (ConfigurationService.getBoolean(HADOOP_YARN_KILL_CHILD_JOBS_ON_AMRESTART)) {
            // the launcher time filter is required to prune the search for the launcher tag.
            // Use the coordinator action nominal time as the launcher time, since the child job cannot launch
            // before the nominal time. The workflow creation time is good enough when the workflow runs
            // independently or is rerun from a failed node.
            long launcherTime = System.currentTimeMillis();
            String coordActionNominalTime = context.getProtoActionConf()
                    .get(CoordActionStartXCommand.OOZIE_COORD_ACTION_NOMINAL_TIME);
            if (coordActionNominalTime != null) {
                launcherTime = Long.parseLong(coordActionNominalTime);
            } else if (context.getWorkflow().getCreatedTime() != null) {
                launcherTime = context.getWorkflow().getCreatedTime().getTime();
            }
            LauncherMapperHelper.setupYarnRestartHandling(launcherJobConf, actionConf, launcherTag,
                    launcherTime);
        } else {
            LOG.info(MessageFormat.format("{0} is set to false, not setting YARN restart properties",
                    HADOOP_YARN_KILL_CHILD_JOBS_ON_AMRESTART));
        }

        String actionShareLibProperty = actionConf.get(ACTION_SHARELIB_FOR + getType());
        if (actionShareLibProperty != null) {
            launcherJobConf.set(ACTION_SHARELIB_FOR + getType(), actionShareLibProperty);
        }
        setLibFilesArchives(context, actionXml, appPathRoot, launcherJobConf);

        String jobName = launcherJobConf.get(HADOOP_JOB_NAME);
        if (jobName == null || jobName.isEmpty()) {
            jobName = XLog.format("oozie:launcher:T={0}:W={1}:A={2}:ID={3}", getType(),
                    context.getWorkflow().getAppName(), action.getName(), context.getWorkflow().getId());
            launcherJobConf.setJobName(jobName);
        }

        // Inject Oozie job information if enabled.
        injectJobInfo(launcherJobConf, actionConf, context, action);

        injectLauncherCallback(context, launcherJobConf);

        String jobId = context.getWorkflow().getId();
        String actionId = action.getId();
        Path actionDir = context.getActionDir();
        String recoveryId = context.getRecoveryId();

        // Getting the prepare XML from the action XML
        Namespace ns = actionXml.getNamespace();
        Element prepareElement = actionXml.getChild("prepare", ns);
        String prepareXML = "";
        if (prepareElement != null) {
            if (prepareElement.getChildren().size() > 0) {
                prepareXML = XmlUtils.prettyPrint(prepareElement).toString().trim();
            }
        }
        LauncherMapperHelper.setupLauncherInfo(launcherJobConf, jobId, actionId, actionDir, recoveryId,
                actionConf, prepareXML);

        // Set the launcher Main Class
        LauncherMapperHelper.setupMainClass(launcherJobConf, getLauncherMain(launcherJobConf, actionXml));
        LauncherMapperHelper.setupLauncherURIHandlerConf(launcherJobConf);
        LauncherMapperHelper.setupMaxOutputData(launcherJobConf, maxActionOutputLen);
        LauncherMapperHelper.setupMaxExternalStatsSize(launcherJobConf, maxExternalStatsSize);
        LauncherMapperHelper.setupMaxFSGlob(launcherJobConf, maxFSGlobMax);

        List<Element> list = actionXml.getChildren("arg", ns);
        String[] args = new String[list.size()];
        for (int i = 0; i < list.size(); i++) {
            args[i] = list.get(i).getTextTrim();
        }
        LauncherMapperHelper.setupMainArguments(launcherJobConf, args);

        // Make mapred.child.java.opts and mapreduce.map.java.opts equal, but give values from the latter priority; also append
        // <java-opt> and <java-opts> and give those highest priority
        StringBuilder opts = new StringBuilder(launcherJobConf.get(HADOOP_CHILD_JAVA_OPTS, ""));
        if (launcherJobConf.get(HADOOP_MAP_JAVA_OPTS) != null) {
            opts.append(" ").append(launcherJobConf.get(HADOOP_MAP_JAVA_OPTS));
        }
        List<Element> javaopts = actionXml.getChildren("java-opt", ns);
        for (Element opt : javaopts) {
            opts.append(" ").append(opt.getTextTrim());
        }
        Element opt = actionXml.getChild("java-opts", ns);
        if (opt != null) {
            opts.append(" ").append(opt.getTextTrim());
        }
        launcherJobConf.set(HADOOP_CHILD_JAVA_OPTS, opts.toString().trim());
        launcherJobConf.set(HADOOP_MAP_JAVA_OPTS, opts.toString().trim());

        // setting for uber mode
        if (launcherJobConf.getBoolean(HADOOP_YARN_UBER_MODE, false)) {
            if (checkPropertiesToDisableUber(launcherJobConf)) {
                launcherJobConf.setBoolean(HADOOP_YARN_UBER_MODE, false);
            } else {
                updateConfForUberMode(launcherJobConf);
            }
        }
        updateConfForJavaTmpDir(launcherJobConf);
        injectLauncherTimelineServiceEnabled(launcherJobConf, actionConf);

        // properties from action that are needed by the launcher (e.g. QUEUE NAME, ACLs)
        // maybe we should add queue to the WF schema, below job-tracker
        actionConfToLauncherConf(actionConf, launcherJobConf);

        return launcherJobConf;
    } catch (Exception ex) {
        throw convertException(ex);
    }
}

From source file: org.apache.oozie.action.hadoop.JavaActionExecutor.java

License: Apache License

public void submitLauncher(FileSystem actionFs, Context context, WorkflowAction action)
        throws ActionExecutorException {
    JobClient jobClient = null;
    boolean exception = false;
    try {
        Path appPathRoot = new Path(context.getWorkflow().getAppPath());

        // app path could be a file
        if (actionFs.isFile(appPathRoot)) {
            appPathRoot = appPathRoot.getParent();
        }

        Element actionXml = XmlUtils.parseXml(action.getConf());

        // action job configuration
        Configuration actionConf = loadHadoopDefaultResources(context, actionXml);
        setupActionConf(actionConf, context, actionXml, appPathRoot);
        LOG.debug("Setting LibFilesArchives ");
        setLibFilesArchives(context, actionXml, appPathRoot, actionConf);

        String jobName = actionConf.get(HADOOP_JOB_NAME);
        if (jobName == null || jobName.isEmpty()) {
            jobName = XLog.format("oozie:action:T={0}:W={1}:A={2}:ID={3}", getType(),
                    context.getWorkflow().getAppName(), action.getName(), context.getWorkflow().getId());
            actionConf.set(HADOOP_JOB_NAME, jobName);
        }

        injectActionCallback(context, actionConf);

        if (actionConf.get(ACL_MODIFY_JOB) == null || actionConf.get(ACL_MODIFY_JOB).trim().equals("")) {
            // ONLY in the case where the user has not explicitly
            // set the modify-job ACL
            if (context.getWorkflow().getAcl() != null) {
                // setting the group owning the Oozie job to allow anybody in that
                // group to modify the jobs.
                actionConf.set(ACL_MODIFY_JOB, context.getWorkflow().getAcl());
            }
        }

        // Setting the credential properties in launcher conf
        JobConf credentialsConf = null;
        HashMap<String, CredentialsProperties> credentialsProperties = setCredentialPropertyToActionConf(
                context, action, actionConf);
        if (credentialsProperties != null) {

            // Add more credential tokens if the action needs them
            credentialsConf = new JobConf(false);
            XConfiguration.copy(actionConf, credentialsConf);
            setCredentialTokens(credentialsConf, context, action, credentialsProperties);

            // insert conf to action conf from credentialsConf
            for (Entry<String, String> entry : credentialsConf) {
                if (actionConf.get(entry.getKey()) == null) {
                    actionConf.set(entry.getKey(), entry.getValue());
                }
            }
        }

        JobConf launcherJobConf = createLauncherConf(actionFs, context, action, actionXml, actionConf);

        LOG.debug("Creating Job Client for action " + action.getId());
        jobClient = createJobClient(context, launcherJobConf);
        String launcherId = LauncherMapperHelper.getRecoveryId(launcherJobConf, context.getActionDir(),
                context.getRecoveryId());
        boolean alreadyRunning = launcherId != null;
        RunningJob runningJob;

        // if user-retry is on, always submit new launcher
        boolean isUserRetry = ((WorkflowActionBean) action).isUserRetry();

        if (alreadyRunning && !isUserRetry) {
            runningJob = jobClient.getJob(JobID.forName(launcherId));
            if (runningJob == null) {
                String jobTracker = launcherJobConf.get(HADOOP_JOB_TRACKER);
                throw new ActionExecutorException(ActionExecutorException.ErrorType.ERROR, "JA017",
                        "unknown job [{0}@{1}], cannot recover", launcherId, jobTracker);
            }
        } else {
            LOG.debug("Submitting the job through Job Client for action " + action.getId());

            // setting up propagation of the delegation token.
            HadoopAccessorService has = Services.get().get(HadoopAccessorService.class);
            Token<DelegationTokenIdentifier> mrdt = jobClient
                    .getDelegationToken(has.getMRDelegationTokenRenewer(launcherJobConf));
            launcherJobConf.getCredentials().addToken(HadoopAccessorService.MR_TOKEN_ALIAS, mrdt);

            // insert credentials tokens to launcher job conf if needed
            if (needInjectCredentials() && credentialsConf != null) {
                for (Token<? extends TokenIdentifier> tk : credentialsConf.getCredentials().getAllTokens()) {
                    Text fauxAlias = new Text(tk.getKind() + "_" + tk.getService());
                    LOG.debug("ADDING TOKEN: " + fauxAlias);
                    launcherJobConf.getCredentials().addToken(fauxAlias, tk);
                }
            } else {
                LOG.info("No need to inject credentials.");
            }
            runningJob = jobClient.submitJob(launcherJobConf);
            if (runningJob == null) {
                throw new ActionExecutorException(ActionExecutorException.ErrorType.ERROR, "JA017",
                        "Error submitting launcher for action [{0}]", action.getId());
            }
            launcherId = runningJob.getID().toString();
            LOG.debug("After submission get the launcherId " + launcherId);
        }

        String jobTracker = launcherJobConf.get(HADOOP_JOB_TRACKER);
        String consoleUrl = runningJob.getTrackingURL();
        context.setStartData(launcherId, jobTracker, consoleUrl);
    } catch (Exception ex) {
        exception = true;
        throw convertException(ex);
    } finally {
        if (jobClient != null) {
            try {
                jobClient.close();
            } catch (Exception e) {
                if (exception) {
                    LOG.error("JobClient error: ", e);
                } else {
                    throw convertException(e);
                }
            }
        }
    }
}

From source file: org.apache.oozie.command.bundle.BundleSubmitXCommand.java

License: Apache License

/**
 * Merge default configuration with user-defined configuration.
 *
 * @throws CommandException thrown if failed to merge configuration
 */
protected void mergeDefaultConfig() throws CommandException {
    Path configDefault = null;
    try {
        String bundleAppPathStr = conf.get(OozieClient.BUNDLE_APP_PATH);
        Path bundleAppPath = new Path(bundleAppPathStr);
        String user = ParamChecker.notEmpty(conf.get(OozieClient.USER_NAME), OozieClient.USER_NAME);
        HadoopAccessorService has = Services.get().get(HadoopAccessorService.class);
        Configuration fsConf = has.createJobConf(bundleAppPath.toUri().getAuthority());
        FileSystem fs = has.createFileSystem(user, bundleAppPath.toUri(), fsConf);

        // app path could be a directory
        if (!fs.isFile(bundleAppPath)) {
            configDefault = new Path(bundleAppPath, CONFIG_DEFAULT);
        } else {
            configDefault = new Path(bundleAppPath.getParent(), CONFIG_DEFAULT);
        }

        if (fs.exists(configDefault)) {
            Configuration defaultConf = new XConfiguration(fs.open(configDefault));
            PropertiesUtils.checkDisallowedProperties(defaultConf, DISALLOWED_DEFAULT_PROPERTIES);
            XConfiguration.injectDefaults(defaultConf, conf);
        } else {
            LOG.info("configDefault Doesn't exist " + configDefault);
        }
        PropertiesUtils.checkDisallowedProperties(conf, DISALLOWED_USER_PROPERTIES);
    } catch (IOException e) {
        throw new CommandException(ErrorCode.E0702,
                e.getMessage() + " : Problem reading default config " + configDefault, e);
    } catch (HadoopAccessorException e) {
        throw new CommandException(e);
    }
    LOG.debug("Merged CONF :" + XmlUtils.prettyPrint(conf).toString());
}
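
The "app path could be a file or a directory" resolution above recurs in several of the examples that follow. A small hypothetical helper (not present in the Oozie codebase) captures the idiom:

// Resolve the directory holding an application's auxiliary files, given that the
// configured app path may point at the app directory or directly at its XML file.
private static Path resolveAppDir(FileSystem fs, Path appPath) throws IOException {
    return fs.isFile(appPath) ? appPath.getParent() : appPath;
}

With it, the lookup above reduces to new Path(resolveAppDir(fs, bundleAppPath), CONFIG_DEFAULT).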

From source file: org.apache.oozie.command.bundle.BundleSubmitXCommand.java

License: Apache License

/**
 * Read bundle definition.
 *
 * @param appPath application path.
 * @return bundle definition.
 * @throws BundleJobException thrown if the definition could not be read.
 */
protected String readDefinition(String appPath) throws BundleJobException {
    String user = ParamChecker.notEmpty(conf.get(OozieClient.USER_NAME), OozieClient.USER_NAME);
    //Configuration confHadoop = CoordUtils.getHadoopConf(conf);
    try {
        URI uri = new URI(appPath);
        LOG.debug("user =" + user);
        HadoopAccessorService has = Services.get().get(HadoopAccessorService.class);
        Configuration fsConf = has.createJobConf(uri.getAuthority());
        FileSystem fs = has.createFileSystem(user, uri, fsConf);
        Path appDefPath = null;

        // app path could be a directory
        Path path = new Path(uri.getPath());
        if (!fs.isFile(path)) {
            appDefPath = new Path(path, BUNDLE_XML_FILE);
        } else {
            appDefPath = path;
        }

        Reader reader = new InputStreamReader(fs.open(appDefPath));
        StringWriter writer = new StringWriter();
        IOUtils.copyCharStream(reader, writer);
        return writer.toString();
    } catch (IOException ex) {
        LOG.warn("IOException :" + XmlUtils.prettyPrint(conf), ex);
        throw new BundleJobException(ErrorCode.E1301, ex.getMessage(), ex);
    } catch (URISyntaxException ex) {
        LOG.warn("URISyException :" + ex.getMessage());
        throw new BundleJobException(ErrorCode.E1302, appPath, ex.getMessage(), ex);
    } catch (HadoopAccessorException ex) {
        throw new BundleJobException(ex);
    } catch (Exception ex) {
        LOG.warn("Exception :", ex);
        throw new BundleJobException(ErrorCode.E1301, ex.getMessage(), ex);
    }
}

From source file: org.apache.oozie.command.coord.CoordSubmitXCommand.java

License: Apache License

/**
 * Merge default configuration with user-defined configuration.
 *
 * @throws CommandException thrown if failed to read or merge configurations
 */
protected void mergeDefaultConfig() throws CommandException {
    Path configDefault = null;
    try {
        String coordAppPathStr = conf.get(OozieClient.COORDINATOR_APP_PATH);
        Path coordAppPath = new Path(coordAppPathStr);
        String user = ParamChecker.notEmpty(conf.get(OozieClient.USER_NAME), OozieClient.USER_NAME);
        HadoopAccessorService has = Services.get().get(HadoopAccessorService.class);
        Configuration fsConf = has.createJobConf(coordAppPath.toUri().getAuthority());
        FileSystem fs = has.createFileSystem(user, coordAppPath.toUri(), fsConf);

        // app path could be a directory
        if (!fs.isFile(coordAppPath)) {
            configDefault = new Path(coordAppPath, CONFIG_DEFAULT);
        } else {
            configDefault = new Path(coordAppPath.getParent(), CONFIG_DEFAULT);
        }

        if (fs.exists(configDefault)) {
            Configuration defaultConf = new XConfiguration(fs.open(configDefault));
            PropertiesUtils.checkDisallowedProperties(defaultConf, DISALLOWED_DEFAULT_PROPERTIES);
            XConfiguration.injectDefaults(defaultConf, conf);
        } else {
            LOG.info("configDefault Doesn't exist " + configDefault);
        }
        PropertiesUtils.checkDisallowedProperties(conf, DISALLOWED_USER_PROPERTIES);

        // Resolving all variables in the job properties.
        // This ensures the Hadoop Configuration semantics are preserved.
        XConfiguration resolvedVarsConf = new XConfiguration();
        for (Map.Entry<String, String> entry : conf) {
            resolvedVarsConf.set(entry.getKey(), conf.get(entry.getKey()));
        }
        conf = resolvedVarsConf;
    } catch (IOException e) {
        throw new CommandException(ErrorCode.E0702,
                e.getMessage() + " : Problem reading default config " + configDefault, e);
    } catch (HadoopAccessorException e) {
        throw new CommandException(e);
    }
    LOG.debug("Merged CONF :" + XmlUtils.prettyPrint(conf).toString());
}

From source file: org.apache.oozie.command.coord.CoordSubmitXCommand.java

License: Apache License

/**
 * Read coordinator definition.
 *
 * @param appPath application path.
 * @return coordinator definition.
 * @throws CoordinatorJobException thrown if the definition could not be read.
 */
protected String readDefinition(String appPath) throws CoordinatorJobException {
    String user = ParamChecker.notEmpty(conf.get(OozieClient.USER_NAME), OozieClient.USER_NAME);
    // Configuration confHadoop = CoordUtils.getHadoopConf(conf);
    try {
        URI uri = new URI(appPath);
        LOG.debug("user =" + user);
        HadoopAccessorService has = Services.get().get(HadoopAccessorService.class);
        Configuration fsConf = has.createJobConf(uri.getAuthority());
        FileSystem fs = has.createFileSystem(user, uri, fsConf);
        Path appDefPath = null;

        // app path could be a directory
        Path path = new Path(uri.getPath());
        // check that the file exists (needed for a dataset include file; the app XML was already checked)
        if (!fs.exists(path)) {
            throw new URISyntaxException(path.toString(), "path does not exist: " + path.toString());
        }
        if (!fs.isFile(path)) {
            appDefPath = new Path(path, COORDINATOR_XML_FILE);
        } else {
            appDefPath = path;
        }

        Reader reader = new InputStreamReader(fs.open(appDefPath));
        StringWriter writer = new StringWriter();
        IOUtils.copyCharStream(reader, writer);
        return writer.toString();
    } catch (IOException ex) {
        LOG.warn("IOException :" + XmlUtils.prettyPrint(conf), ex);
        throw new CoordinatorJobException(ErrorCode.E1001, ex.getMessage(), ex);
    } catch (URISyntaxException ex) {
        LOG.warn("URISyException :" + ex.getMessage());
        throw new CoordinatorJobException(ErrorCode.E1002, appPath, ex.getMessage(), ex);
    } catch (HadoopAccessorException ex) {
        throw new CoordinatorJobException(ex);
    } catch (Exception ex) {
        LOG.warn("Exception :", ex);
        throw new CoordinatorJobException(ErrorCode.E1001, ex.getMessage(), ex);
    }
}

From source file: org.apache.oozie.command.wf.ReRunXCommand.java

License: Apache License

private void setupReRun() throws CommandException {
    InstrumentUtils.incrJobCounter(getName(), 1, getInstrumentation());
    LogUtils.setLogInfo(wfBean);
    WorkflowInstance oldWfInstance = this.wfBean.getWorkflowInstance();
    WorkflowInstance newWfInstance;
    String appPath = null;

    WorkflowAppService wps = Services.get().get(WorkflowAppService.class);
    try {
        XLog.Info.get().setParameter(DagXLogInfoService.TOKEN, conf.get(OozieClient.LOG_TOKEN));
        WorkflowApp app = wps.parseDef(conf, null);
        XConfiguration protoActionConf = wps.createProtoActionConf(conf, true);
        WorkflowLib workflowLib = Services.get().get(WorkflowStoreService.class).getWorkflowLibWithNoDB();

        appPath = conf.get(OozieClient.APP_PATH);
        URI uri = new URI(appPath);
        HadoopAccessorService has = Services.get().get(HadoopAccessorService.class);
        Configuration fsConf = has.createJobConf(uri.getAuthority());
        FileSystem fs = has.createFileSystem(wfBean.getUser(), uri, fsConf);

        Path configDefault = null;
        // app path could be a directory
        Path path = new Path(uri.getPath());
        if (!fs.isFile(path)) {
            configDefault = new Path(path, SubmitXCommand.CONFIG_DEFAULT);
        } else {
            configDefault = new Path(path.getParent(), SubmitXCommand.CONFIG_DEFAULT);
        }

        if (fs.exists(configDefault)) {
            Configuration defaultConf = new XConfiguration(fs.open(configDefault));
            PropertiesUtils.checkDisallowedProperties(defaultConf, DISALLOWED_DEFAULT_PROPERTIES);
            XConfiguration.injectDefaults(defaultConf, conf);
        }

        PropertiesUtils.checkDisallowedProperties(conf, DISALLOWED_USER_PROPERTIES);

        // Resolving all variables in the job properties. This ensures the Hadoop Configuration semantics are
        // preserved. The Configuration.get function within XConfiguration.resolve() works recursively to get the
        // final value corresponding to a key in the map. Resetting the conf to contain all the resolved values is
        // necessary to ensure propagation of Oozie properties to Hadoop calls downstream.
        conf = ((XConfiguration) conf).resolve();

        try {
            newWfInstance = workflowLib.createInstance(app, conf, jobId);
        } catch (WorkflowException e) {
            throw new CommandException(e);
        }
        String appName = ELUtils.resolveAppName(app.getName(), conf);
        if (SLAService.isEnabled()) {
            Element wfElem = XmlUtils.parseXml(app.getDefinition());
            ELEvaluator evalSla = SubmitXCommand.createELEvaluatorForGroup(conf, "wf-sla-submit");
            Element eSla = XmlUtils.getSLAElement(wfElem);
            String jobSlaXml = null;
            if (eSla != null) {
                jobSlaXml = SubmitXCommand.resolveSla(eSla, evalSla);
            }
            writeSLARegistration(wfElem, jobSlaXml, newWfInstance.getId(),
                    conf.get(SubWorkflowActionExecutor.PARENT_ID), conf.get(OozieClient.USER_NAME), appName,
                    evalSla);
        }
        wfBean.setAppName(appName);
        wfBean.setProtoActionConf(protoActionConf.toXmlString());
    } catch (WorkflowException ex) {
        throw new CommandException(ex);
    } catch (IOException ex) {
        throw new CommandException(ErrorCode.E0803, ex.getMessage(), ex);
    } catch (HadoopAccessorException ex) {
        throw new CommandException(ex);
    } catch (URISyntaxException ex) {
        throw new CommandException(ErrorCode.E0711, appPath, ex.getMessage(), ex);
    } catch (Exception ex) {
        throw new CommandException(ErrorCode.E1007, ex.getMessage(), ex);
    }

    for (int i = 0; i < actions.size(); i++) {
        // Skip deleting the sub-workflow when the rerun-failed-nodes option has been provided, since the same
        // action will be used to rerun the job.
        if (!nodesToSkip.contains(actions.get(i).getName())
                && !(conf.getBoolean(OozieClient.RERUN_FAIL_NODES, false)
                        && SubWorkflowActionExecutor.ACTION_TYPE.equals(actions.get(i).getType()))) {
            deleteList.add(actions.get(i));
            LOG.info("Deleting Action[{0}] for re-run", actions.get(i).getId());
        } else {
            copyActionData(newWfInstance, oldWfInstance);
        }
    }

    wfBean.setAppPath(conf.get(OozieClient.APP_PATH));
    wfBean.setConf(XmlUtils.prettyPrint(conf).toString());
    wfBean.setLogToken(conf.get(OozieClient.LOG_TOKEN, ""));
    wfBean.setUser(conf.get(OozieClient.USER_NAME));
    String group = ConfigUtils.getWithDeprecatedCheck(conf, OozieClient.JOB_ACL, OozieClient.GROUP_NAME, null);
    wfBean.setGroup(group);
    wfBean.setExternalId(conf.get(OozieClient.EXTERNAL_ID));
    wfBean.setEndTime(null);
    wfBean.setRun(wfBean.getRun() + 1);
    wfBean.setStatus(WorkflowJob.Status.PREP);
    wfBean.setWorkflowInstance(newWfInstance);

    try {
        wfBean.setLastModifiedTime(new Date());
        updateList.add(new UpdateEntry<WorkflowJobQuery>(WorkflowJobQuery.UPDATE_WORKFLOW_RERUN, wfBean));
        // call JPAExecutor to do the bulk writes
        BatchQueryExecutor.getInstance().executeBatchInsertUpdateDelete(null, updateList, deleteList);
    } catch (JPAExecutorException je) {
        throw new CommandException(je);
    } finally {
        updateParentIfNecessary(wfBean);
    }

}

From source file: org.apache.oozie.command.wf.SubmitXCommand.java

License: Apache License

@Override
protected String execute() throws CommandException {
    InstrumentUtils.incrJobCounter(getName(), 1, getInstrumentation());
    WorkflowAppService wps = Services.get().get(WorkflowAppService.class);
    try {
        XLog.Info.get().setParameter(DagXLogInfoService.TOKEN, conf.get(OozieClient.LOG_TOKEN));
        String user = conf.get(OozieClient.USER_NAME);
        URI uri = new URI(conf.get(OozieClient.APP_PATH));
        HadoopAccessorService has = Services.get().get(HadoopAccessorService.class);
        Configuration fsConf = has.createJobConf(uri.getAuthority());
        FileSystem fs = has.createFileSystem(user, uri, fsConf);

        Path configDefault = null;
        Configuration defaultConf = null;
        // app path could be a directory
        Path path = new Path(uri.getPath());
        if (!fs.isFile(path)) {
            configDefault = new Path(path, CONFIG_DEFAULT);
        } else {
            configDefault = new Path(path.getParent(), CONFIG_DEFAULT);
        }

        if (fs.exists(configDefault)) {
            try {
                defaultConf = new XConfiguration(fs.open(configDefault));
                PropertiesUtils.checkDisallowedProperties(defaultConf, DISALLOWED_DEFAULT_PROPERTIES);
                XConfiguration.injectDefaults(defaultConf, conf);
            } catch (IOException ex) {
                throw new IOException("default configuration file, " + ex.getMessage(), ex);
            }
        }

        WorkflowApp app = wps.parseDef(conf, defaultConf);
        XConfiguration protoActionConf = wps.createProtoActionConf(conf, true);
        WorkflowLib workflowLib = Services.get().get(WorkflowStoreService.class).getWorkflowLibWithNoDB();

        PropertiesUtils.checkDisallowedProperties(conf, DISALLOWED_USER_PROPERTIES);

        // Resolving all variables in the job properties.
        // This ensures the Hadoop Configuration semantics are preserved.
        XConfiguration resolvedVarsConf = new XConfiguration();
        for (Map.Entry<String, String> entry : conf) {
            resolvedVarsConf.set(entry.getKey(), conf.get(entry.getKey()));
        }
        conf = resolvedVarsConf;

        WorkflowInstance wfInstance;
        try {
            wfInstance = workflowLib.createInstance(app, conf);
        } catch (WorkflowException e) {
            throw new StoreException(e);
        }

        Configuration conf = wfInstance.getConf();
        // System.out.println("WF INSTANCE CONF:");
        // System.out.println(XmlUtils.prettyPrint(conf).toString());

        WorkflowJobBean workflow = new WorkflowJobBean();
        workflow.setId(wfInstance.getId());
        workflow.setAppName(ELUtils.resolveAppName(app.getName(), conf));
        workflow.setAppPath(conf.get(OozieClient.APP_PATH));
        workflow.setConf(XmlUtils.prettyPrint(conf).toString());
        workflow.setProtoActionConf(protoActionConf.toXmlString());
        workflow.setCreatedTime(new Date());
        workflow.setLastModifiedTime(new Date());
        workflow.setLogToken(conf.get(OozieClient.LOG_TOKEN, ""));
        workflow.setStatus(WorkflowJob.Status.PREP);
        workflow.setRun(0);
        workflow.setUser(conf.get(OozieClient.USER_NAME));
        workflow.setGroup(conf.get(OozieClient.GROUP_NAME));
        workflow.setWorkflowInstance(wfInstance);
        workflow.setExternalId(conf.get(OozieClient.EXTERNAL_ID));
        // Set parent id if it doesn't already have one (for subworkflows)
        if (workflow.getParentId() == null) {
            workflow.setParentId(conf.get(SubWorkflowActionExecutor.PARENT_ID));
        }
        // Set to coord action Id if workflow submitted through coordinator
        if (workflow.getParentId() == null) {
            workflow.setParentId(parentId);
        }

        LogUtils.setLogInfo(workflow);
        LOG.debug("Workflow record created, Status [{0}]", workflow.getStatus());
        Element wfElem = XmlUtils.parseXml(app.getDefinition());
        ELEvaluator evalSla = createELEvaluatorForGroup(conf, "wf-sla-submit");
        String jobSlaXml = verifySlaElements(wfElem, evalSla);
        if (!dryrun) {
            writeSLARegistration(wfElem, jobSlaXml, workflow.getId(), workflow.getParentId(),
                    workflow.getUser(), workflow.getGroup(), workflow.getAppName(), LOG, evalSla);
            workflow.setSlaXml(jobSlaXml);
            // System.out.println("SlaXml :"+ slaXml);

            //store.insertWorkflow(workflow);
            insertList.add(workflow);
            JPAService jpaService = Services.get().get(JPAService.class);
            if (jpaService != null) {
                try {
                    BatchQueryExecutor.getInstance().executeBatchInsertUpdateDelete(insertList, null, null);
                } catch (JPAExecutorException je) {
                    throw new CommandException(je);
                }
            } else {
                LOG.error(ErrorCode.E0610);
                return null;
            }

            return workflow.getId();
        } else {
            // Checking variable substitution for dryrun
            ActionExecutorContext context = new ActionXCommand.ActionExecutorContext(workflow, null, false,
                    false);
            Element workflowXml = XmlUtils.parseXml(app.getDefinition());
            removeSlaElements(workflowXml);
            String workflowXmlString = XmlUtils.removeComments(XmlUtils.prettyPrint(workflowXml).toString());
            workflowXmlString = context.getELEvaluator().evaluate(workflowXmlString, String.class);
            workflowXml = XmlUtils.parseXml(workflowXmlString);

            Iterator<Element> it = workflowXml.getDescendants(new ElementFilter("job-xml"));

            // Checking all variable substitutions in job-xml files
            while (it.hasNext()) {
                Element e = it.next();
                String jobXml = e.getTextTrim();
                Path xmlPath = new Path(workflow.getAppPath(), jobXml);
                Configuration jobXmlConf = new XConfiguration(fs.open(xmlPath));

                String jobXmlConfString = XmlUtils.prettyPrint(jobXmlConf).toString();
                jobXmlConfString = XmlUtils.removeComments(jobXmlConfString);
                context.getELEvaluator().evaluate(jobXmlConfString, String.class);
            }

            return "OK";
        }
    } catch (WorkflowException ex) {
        throw new CommandException(ex);
    } catch (HadoopAccessorException ex) {
        throw new CommandException(ex);
    } catch (Exception ex) {
        throw new CommandException(ErrorCode.E0803, ex.getMessage(), ex);
    }
}

From source file: org.apache.oozie.service.AuthorizationService.java

License: Apache License

/**
 * Check if the user+group is authorized to use the specified application. <p> The check is done against the
 * file system permissions of the workflow application.
 *
 * @param user user name.
 * @param group group name.
 * @param appPath application path.
 * @param jobConf job configuration.
 * @throws AuthorizationException thrown if the user is not authorized for the app.
 */
public void authorizeForApp(String user, String group, String appPath, Configuration jobConf)
        throws AuthorizationException {
    try {
        HadoopAccessorService has = Services.get().get(HadoopAccessorService.class);
        URI uri = new Path(appPath).toUri();
        Configuration fsConf = has.createJobConf(uri.getAuthority());
        FileSystem fs = has.createFileSystem(user, uri, fsConf);

        Path path = new Path(appPath);
        try {
            if (!fs.exists(path)) {
                incrCounter(INSTR_FAILED_AUTH_COUNTER, 1);
                throw new AuthorizationException(ErrorCode.E0504, appPath);
            }
            Path wfXml = new Path(path, "workflow.xml");
            if (!fs.exists(wfXml)) {
                incrCounter(INSTR_FAILED_AUTH_COUNTER, 1);
                throw new AuthorizationException(ErrorCode.E0505, appPath);
            }
            if (!fs.isFile(wfXml)) {
                incrCounter(INSTR_FAILED_AUTH_COUNTER, 1);
                throw new AuthorizationException(ErrorCode.E0506, appPath);
            }
            fs.open(wfXml).close();
        }
        // TODO: change this to the new exception once support for Hadoop 0.18 is dropped
        catch (org.apache.hadoop.fs.permission.AccessControlException ex) {
            incrCounter(INSTR_FAILED_AUTH_COUNTER, 1);
            throw new AuthorizationException(ErrorCode.E0507, appPath, ex.getMessage(), ex);
        }
    } catch (IOException ex) {
        incrCounter(INSTR_FAILED_AUTH_COUNTER, 1);
        throw new AuthorizationException(ErrorCode.E0501, ex.getMessage(), ex);
    } catch (HadoopAccessorException e) {
        throw new AuthorizationException(e);
    }
}