Example usage for org.apache.hadoop.fs Path isUriPathAbsolute

List of usage examples for org.apache.hadoop.fs Path isUriPathAbsolute

Introduction

On this page you can find example usages of org.apache.hadoop.fs Path isUriPathAbsolute.

Prototype

public boolean isUriPathAbsolute() 

Document

Returns true if the path component (i.e. directory) of this URI is absolute.
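
For orientation, here is a minimal standalone sketch (not taken from the sources below; host and directory names are illustrative) showing what the method returns for a few inputs:

import org.apache.hadoop.fs.Path;

public class IsUriPathAbsoluteDemo {
    public static void main(String[] args) {
        // Relative path component: no leading slash.
        System.out.println(new Path("data/checkpoints").isUriPathAbsolute()); // false
        // Absolute path component, even though there is no scheme or authority.
        System.out.println(new Path("/user/alice/data").isUriPathAbsolute()); // true
        // Fully qualified URI: the path component "/user/alice/data" is absolute.
        System.out.println(new Path("hdfs://namenode:8020/user/alice/data").isUriPathAbsolute()); // true
    }
}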

Usage

From source file:org.apache.reef.io.checkpoint.fs.FSCheckpointService.java

License:Apache License

public CheckpointWriteChannel create() throws IOException {

    final String name = namingPolicy.getNewName();

    final Path p = new Path(name);
    if (p.isUriPathAbsolute()) {
        throw new IOException("Checkpoint cannot be an absolute path");
    }
    return createInternal(new Path(base, p));
}
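
In other words (illustrative names, not from the original source), a relative checkpoint name is created under the configured base directory, while an absolute one is rejected:

// name = "checkpoint_42"      -> createInternal(new Path(base, "checkpoint_42"))
// name = "/tmp/checkpoint_42" -> IOException("Checkpoint cannot be an absolute path")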

From source file:org.apache.sentry.core.common.utils.PathUtils.java

License:Apache License

/**
 * Make fully qualified URI if Scheme and/or Authority is missing,
 * based on the default file system Scheme and Authority.
 * Notes:
 * a) input URI path must be absolute; otherwise return null.
 * b) Path.makeQualified() provides no assurance that the
 *    default file system Scheme and Authority values are not null.
 *
 * @param uriName The Uri name.
 * @return Returns the fully qualified URI or null if URI path is not absolute.
 * @throws IOException
 */
private static URI makeFullQualifiedURI(String uriName) throws IOException {
    Path uriPath = new Path(uriName);
    if (isNormalized(uriName) && uriPath.isUriPathAbsolute()) {
        // add scheme and/or authority if either is missing
        if ((uriPath.toUri().getScheme() == null || uriPath.toUri().getAuthority() == null)) {
            URI defaultUri = FileSystem.getDefaultUri(CONF);
            uriPath = uriPath.makeQualified(defaultUri, uriPath);
        }
        return uriPath.toUri();
    } else { // relative URI path is unacceptable
        return null;
    }
}
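
The same qualify-if-absolute pattern can be exercised on its own. The sketch below is not the Sentry helper itself, and the configuration and default URI values are assumptions:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class QualifyUriSketch {
    // Qualify a scheme-less but URI-path-absolute name against the default file system.
    static URI qualify(Configuration conf, String uriName) {
        Path p = new Path(uriName);
        if (!p.isUriPathAbsolute()) {
            return null; // relative URI path is unacceptable, as in the method above
        }
        if (p.toUri().getScheme() == null || p.toUri().getAuthority() == null) {
            URI defaultUri = FileSystem.getDefaultUri(conf); // e.g. hdfs://namenode:8020
            p = p.makeQualified(defaultUri, new Path("/"));
        }
        return p.toUri();
    }
}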

From source file:org.apache.sentry.provider.file.SimpleFileProviderBackend.java

License:Apache License

/**
 * Relative for our purposes is no scheme, no authority
 * and a non-absolute path portion.
 */
private boolean isRelative(Path path) {
    URI uri = path.toUri();
    return uri.getAuthority() == null && uri.getScheme() == null && !path.isUriPathAbsolute();
}
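
Under that definition (values below are illustrative, not from the original source):

// isRelative(new Path("subdir/policy.ini"))          -> true
// isRelative(new Path("/etc/sentry/policy.ini"))     -> false (URI path is absolute)
// isRelative(new Path("hdfs://nn:8020/policy.ini"))  -> false (scheme and authority present)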

From source file:uk.ac.gla.terrier.probos.controller.ControllerServer.java

License:Open Source License

public ControllerServer(Configuration _hconf) throws IOException {
    this.yConf = new YarnConfiguration(_hconf);
    yConf.addResource("yarn-site.xml");
    UserGroupInformation.setConfiguration(yConf);

    this.pConf = new PConfiguration(_hconf);

    //do the Kerberos authentication
    if (UserGroupInformation.isSecurityEnabled()) {
        final String principal = pConf.get(PConfiguration.KEY_CONTROLLER_PRINCIPAL);
        String keytab = pConf.get(PConfiguration.KEY_CONTROLLER_KEYTAB);
        File fKeytab = new File(keytab);
        if (!fKeytab.exists()) {
            if (!fKeytab.isAbsolute()) {
                keytab = System.getProperty("probos.conf") + '/' + keytab;
                fKeytab = new File(keytab);
                pConf.set(PConfiguration.KEY_CONTROLLER_KEYTAB, keytab);
        }
            if (!fKeytab.exists())
                throw new FileNotFoundException("Could not find keytab file " + keytab);
        }

        LOG.debug("Starting login for " + principal + " using keytab " + keytab);
        SecurityUtil.login(pConf, PConfiguration.KEY_CONTROLLER_KEYTAB, PConfiguration.KEY_CONTROLLER_PRINCIPAL,
                Utils.getHostname());
        LOG.info("Switched principal to " + UserGroupInformation.getCurrentUser().getUserName());
    }

    this.mClient = MailClient.getMailClient(this.pConf);
    final String bindAddress = pConf.get(PConfiguration.KEY_CONTROLLER_BIND_ADDRESS);
    if (bindAddress == null)
        throw new IllegalArgumentException(PConfiguration.KEY_CONTROLLER_BIND_ADDRESS + " cannot be null");

    secretManager = new ControllerAPISecretManager(
            //delegationKeyUpdateInterval
            //renewal interval for delegation token
            7 * 24 * 3600 * 1000, //Yarn default is 7 day

            //delegationTokenMaxLifetime -- maximum lifetime for which a delegation token is valid
            //i.e. how long can we keep renewing the token for?
            14 * 24 * 3600 * 1000, //Yarn default is 14 days

            //delegationTokenRenewInterval -- how long should a token last?
            7 * 24 * 3600 * 1000, //Yarn default is 7 day

            //delegationTokenRemoverScanInterval -- how often are expired keys removed?
            3600 * 1000); //Yarn default is 1 hour

    //build the client rpc server: 8027
    int port = pConf.getInt(PConfiguration.KEY_CONTROLLER_PORT, 8027);
    LOG.info("Starting RPC server for " + PBSClient.class.getSimpleName() + " on port " + port);
    clientRpcserver = new RPC.Builder(yConf).setInstance(this).setBindAddress(bindAddress)
            .setProtocol(PBSClient.class).setPort(port).setSecretManager(secretManager).
            //setVerbose(true).
            build();
    System.setProperty("hadoop.policy.file", Constants.PRODUCT_NAME + "-policy.xml");
    clientRpcserver.refreshServiceAclWithLoadedConfiguration(yConf, new ControllerPolicyProvider());

    //build the master rpc server: 8028
    port = Constants.CONTROLLER_MASTER_PORT_OFFSET + pConf.getInt(PConfiguration.KEY_CONTROLLER_PORT, 8027);
    LOG.info("Starting RPC server for " + PBSMasterClient.class.getSimpleName() + " on port " + port);
    masterRpcserver = new RPC.Builder(yConf).setInstance(new ApplicationMasterAPI()).setBindAddress(bindAddress)
            .setProtocol(PBSMasterClient.class).setPort(port).setSecretManager(secretManager).
            //setVerbose(true).
            build();
    masterRpcserver.refreshServiceAclWithLoadedConfiguration(yConf, new ControllerPolicyProvider());

    port = Constants.CONTROLLER_INTERACTIVE_PORT_OFFSET
            + pConf.getInt(PConfiguration.KEY_CONTROLLER_PORT, 8027);
    LOG.info("Starting RPC server for " + PBSInteractiveClient.class.getSimpleName() + " on port " + port);
    //build the interactive rpc server: 8026
    interactiveRpcserver = new RPC.Builder(yConf).setInstance(new InteractiveTaskAPI())
            .setBindAddress(bindAddress).setProtocol(PBSInteractiveClient.class).setPort(port)
            .setSecretManager(secretManager).
            //setVerbose(true).
            build();
    interactiveRpcserver.refreshServiceAclWithLoadedConfiguration(yConf, new ControllerPolicyProvider());

    //build the webapp UI server
    final List<Entry<String, HttpServlet>> controllerServlets = new ArrayList<>();
    controllerServlets
            .add(new MapEntry<String, HttpServlet>("/", new QstatServlet("/", controllerServlets, this)));
    controllerServlets.add(
            new MapEntry<String, HttpServlet>("/pbsnodes", new PbsnodesServlet("/", controllerServlets, this)));
    //metrics is the Servlet from metrics.dropwizard for accessing metrics
    controllerServlets.add(new MapEntry<String, HttpServlet>("/metrics", new MetricsServlet(metrics)));
    //this is the hadoop servlet for accessing anything defined in JMX
    controllerServlets.add(new MapEntry<String, HttpServlet>("/jmx", new JMXJsonServlet()));
    final int httpport = pConf.getInt(PConfiguration.KEY_CONTROLLER_HTTP_PORT,
            Constants.DEFAULT_CONTROLLER_PORT + Constants.CONTROLLER_HTTP_PORT_OFFSET);
    LOG.info("Starting Jetty ProbosControllerHttp on port " + httpport);
    webServer = new WebServer("ProbosControllerHttp", controllerServlets, httpport);
    webServer.init(pConf);

    //this thread detects yarn jobs that have ended
    watcherThread = new Thread(new ControllerWatcher());
    watcherThread.setName(ControllerWatcher.class.getSimpleName());

    //ensure we have the directory
    Path _probosFolder = new Path(pConf.get(PConfiguration.KEY_CONTROLLER_JOBDIR));
    FileSystem controllerFS = FileSystem.get(yConf);
    if (!_probosFolder.isUriPathAbsolute()) {
        _probosFolder = _probosFolder.makeQualified(controllerFS.getUri(), controllerFS.getWorkingDirectory());
        assert _probosFolder.isUriPathAbsolute();
    }
    probosFolder = _probosFolder;
    if (!controllerFS.exists(probosFolder)) {
        throw new IllegalArgumentException(probosFolder.toString() + " does not exist");
    }

    //now initialise the metrics

    //jobs.queued.size
    metrics.register(MetricRegistry.name(ControllerServer.class, "jobs", "queued.size"), new Gauge<Integer>() {
        @Override
        public Integer getValue() {
            int sum = 0;
            for (int i : user2QueuedCount.values())
                sum += i;
            return sum;
        }
    });
    //jobs.size
    metrics.register(MetricRegistry.name(ControllerServer.class, "jobs", "size"), new Gauge<Integer>() {
        @Override
        public Integer getValue() {
            return jobArray.size();
        }
    });
    //jobs.held.size
    metrics.register(MetricRegistry.name(ControllerServer.class, "jobs", "held.size"), new Gauge<Integer>() {
        @Override
        public Integer getValue() {
            return jobHolds.size();
        }
    });

    //nodes.size
    metrics.register(MetricRegistry.name(ControllerServer.class, "nodes", "size"), new Gauge<Integer>() {
        @Override
        public Integer getValue() {
            try {
                return getNodesStatus().length;
            } catch (Exception e) {
                return 0;
            }
        }
    });

    //nodes.free.size
    metrics.register(MetricRegistry.name(ControllerServer.class, "nodes", "free.size"), new Gauge<Integer>() {
        @Override
        public Integer getValue() {
            try {
                PBSNodeStatus[] nodes = getNodesStatus();
                int count = 0;
                for (PBSNodeStatus n : nodes)
                    if ("free".equals(n.getState()))
                        count++;
                return count;
            } catch (Exception e) {
                return 0;
            }
        }
    });

    runningJobs = metrics.counter(MetricRegistry.name(ControllerServer.class, "jobs", "running.counter"));
    rejectedJobs = metrics.counter(MetricRegistry.name(ControllerServer.class, "jobs", "rejected.counter"));
    killedJobs = metrics.counter(MetricRegistry.name(ControllerServer.class, "jobs", "killed.counter"));
    mailEvents = metrics.counter(MetricRegistry.name(ControllerServer.class, "mails", "counter"));
    mailFailures = metrics.counter(MetricRegistry.name(ControllerServer.class, "mails", "failure.counter"));

}
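
The job-directory handling above reduces to the following standalone sketch (class and method names are illustrative, assuming a Configuration whose fs.defaultFS points at the intended file system):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class JobDirResolver {
    // Qualify a possibly relative configured directory against the controller's
    // file system, then verify that it exists before using it.
    static Path resolveJobDir(Configuration conf, String configured) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        Path dir = new Path(configured);
        if (!dir.isUriPathAbsolute()) {
            dir = dir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
        }
        if (!fs.exists(dir)) {
            throw new IllegalArgumentException(dir + " does not exist");
        }
        return dir;
    }
}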

From source file:uk.ac.gla.terrier.probos.controller.KittenUtils2.java

License:Open Source License

protected void printTaskContainer(String jt_id, Path targetScript, PrintWriter w, String prefix,
        Map<String, String> extraEnv, NodeRequest nr) {
    //its important to have an absolute path, lets just check this here
    assert targetScript.isUriPathAbsolute() : targetScript.toString() + " is not absolute";

    w.println(prefix + "resources = {");
    w.println(prefix + " [\"job.SC\"] = { hdfs = \"" + targetScript.toString() + "\" },");
    w.println(prefix + "},");

    String shell = job.getShell();
    if (shell == null) {
        shell = "${SHELL}";
    }

    //configure where the output
    String stdOutErrRedirect = null;
    String stdOutErrCopy = null;
    String jobJoin = job.getJoin();
    if (jobJoin == null) {
        stdOutErrRedirect = "1>> <LOG_DIR>/stdout 2>> <LOG_DIR>/stderr";
        stdOutErrCopy = prepareCopy(pConf, "<LOG_DIR>/stdout", job.getOutput_Path() + jt_id) + " ; " + " "
                + prepareCopy(pConf, "<LOG_DIR>/stderr", job.getError_Path() + jt_id);
    } else if (jobJoin.equals("oe")) {
        stdOutErrRedirect = "1>> <LOG_DIR>/stdout 2>&1 ";
        stdOutErrCopy = prepareCopy(pConf, "<LOG_DIR>/stdout", job.getOutput_Path() + jt_id);
    } else if (jobJoin.equals("eo")) {
        stdOutErrRedirect = "2>> <LOG_DIR>/stdout 1>&2 ";
        stdOutErrCopy = prepareCopy(pConf, "<LOG_DIR>/stderr", job.getError_Path() + jt_id);
    } else {
        throw new IllegalArgumentException("invalid job join: " + jobJoin);
    }

    //find where we are going to start the job
    String initdir = job.getVariable_List().get("PBS_O_INITDIR");

    //TODO: in the following PBS_O_HOME is a hack. YARN overwrites the HOME env var,
    //(see ContainerLaunch.java:690)
    //so we assume, possibly unreasonably, that the user's home directory on 
    //the execution node is the same as on the submission host
    String finalCommand = " " + "cd " + (initdir != null ? initdir : " ${PBS_O_HOME}") + "; " //change to the correct folder
            + shell + " ${PBS_YARN_WORKDIR}/job.SC" // run the command
            + " " + stdOutErrRedirect + " ; " //capture the stdout & stderr
            + " EXIT=\\\\${?}; "
            + (DEBUG_TASKS ? "echo Exit code was \\\\${EXIT} 1>> <LOG_DIR>/stdout 2>> <LOG_DIR>/stderr;" : "")
            + " " + stdOutErrCopy + " ; "; //copy the stdout & stderr back to the submission node:/path

    if (DEBUG_TASKS)
        finalCommand = "echo '" + finalCommand + "' 1>> <LOG_DIR>/stdout 2>><LOG_DIR>/stderr ; " + finalCommand;

    final String finalCmd = finalCommentPrefix() + finalCommand + finalCommentSuffix() + " exit \\\\${EXIT}; ";
    printContainer(jt_id, finalCmd, w, prefix, extraEnv, nr);
}