Example usage for org.apache.hadoop.yarn.conf YarnConfiguration RM_SCHEDULER_MINIMUM_ALLOCATION_MB

Introduction

This page collects example usages of org.apache.hadoop.yarn.conf.YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB from open source projects.

Prototype

public static final String RM_SCHEDULER_MINIMUM_ALLOCATION_MB

Document

Minimum request grant-able by the RM scheduler.
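
Before the project examples, here is a minimal, self-contained sketch of how this constant is typically read together with its companion default, DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB (1024 MB in Hadoop 2.x); the constant resolves to the property key yarn.scheduler.minimum-allocation-mb. The class name and the request size below are illustrative, not taken from any of the sources on this page.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class MinimumAllocationProbe {
    public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();
        // Resolve the configured floor, falling back to YARN's built-in default.
        int minMb = conf.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
                YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
        // Schedulers normalize container requests up to a multiple of the minimum;
        // the WorkflowDriver example below performs the same rounding by hand.
        int desiredMb = 1500; // hypothetical request; assumes minMb > 0
        int grantedMb = (desiredMb % minMb == 0) ? desiredMb : (desiredMb / minMb + 1) * minMb;
        System.out.println("minimum=" + minMb + " MB, request=" + desiredMb + " MB, granted=" + grantedMb + " MB");
    }
}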

Usage

From source file:com.cloudera.llama.am.MiniLlama.java

License:Apache License

public static Configuration createMiniLlamaConf(Configuration conf, int nodes) {
    ParamChecker.notNull(conf, "conf");
    ParamChecker.greaterThan(nodes, 0, "nodes");
    conf.set(ServerConfiguration.CONFIG_DIR_KEY, "");
    conf.setIfUnset(LlamaAM.RM_CONNECTOR_CLASS_KEY, YarnRMConnector.class.getName());
    conf.setInt(MINI_CLUSTER_NODES_KEY, nodes);
    conf.setIfUnset(S_CONF.getPropertyName(ServerConfiguration.SERVER_ADDRESS_KEY), "localhost:0");
    conf.setIfUnset(S_CONF.getPropertyName(ServerConfiguration.SERVER_ADMIN_ADDRESS_KEY), "localhost:0");
    conf.setIfUnset(S_CONF.getPropertyName(ServerConfiguration.HTTP_ADDRESS_KEY), "localhost:0");
    conf.setBoolean(YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME, true);
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, 0);
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 0);

    conf.set(YarnRMConnector.HADOOP_USER_NAME_KEY, System.getProperty("user.name"));
    return conf;
}
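
Note the pattern in this and the next example: the memory and vcore minimums are both set to 0 so the embedded mini cluster can grant arbitrarily small allocations. This is a test-oriented choice; the examples further down use small non-zero floors (64, 128, or 512 MB) instead.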

From source file:com.cloudera.llama.am.MiniLlama.java

License:Apache License

private Configuration startMiniHadoop() throws Exception {
    int clusterNodes = getConf().getInt(MINI_CLUSTER_NODES_KEY, 1);
    if (System.getProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA) == null) {
        String testBuildData = new File("target").getAbsolutePath();
        System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, testBuildData);
    }
    //to trigger hdfs-site.xml registration as default resource
    new HdfsConfiguration();
    Configuration conf = new YarnConfiguration();
    String llamaProxyUser = System.getProperty("user.name");
    conf.set("hadoop.security.authentication", "simple");
    conf.set("hadoop.proxyuser." + llamaProxyUser + ".hosts", "*");
    conf.set("hadoop.proxyuser." + llamaProxyUser + ".groups", "*");
    String[] userGroups = new String[] { "g" };
    UserGroupInformation.createUserForTesting(llamaProxyUser, userGroups);

    int hdfsPort = 0;
    String fsUri = conf.get("fs.defaultFS");
    if (fsUri != null && !fsUri.equals("file:///")) {
        int i = fsUri.lastIndexOf(":");
        if (i > -1) {
            try {
                hdfsPort = Integer.parseInt(fsUri.substring(i + 1));
            } catch (Exception ex) {
                throw new RuntimeException(
                        "Could not parse port from Hadoop's 'fs.defaultFS' property: " + fsUri);
            }
        }
    }
    miniHdfs = new MiniDFSCluster(hdfsPort, conf, clusterNodes, !skipDfsFormat, true, null, null);
    miniHdfs.waitActive();
    conf = miniHdfs.getConfiguration(0);
    miniYarn = new MiniYARNCluster("minillama", clusterNodes, 1, 1);
    conf.setBoolean(YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME, true);
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, 0);
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 0);

    miniYarn.init(conf);
    miniYarn.start();
    conf = miniYarn.getConfig();

    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
    return conf;
}

From source file:com.cloudera.llama.am.yarn.TestLlamaAMWithYarn.java

License:Apache License

private Configuration createMiniYarnConfig(boolean usePortInName) throws Exception {
    Configuration conf = new YarnConfiguration();
    conf.set("yarn.scheduler.fair.allocation.file", "test-fair-scheduler.xml");
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 0);
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, 0);
    conf.setClass(YarnConfiguration.RM_SCHEDULER, FairScheduler.class, FairScheduler.class);

    //proxy user config
    String llamaProxyUser = System.getProperty("user.name");
    conf.set("hadoop.security.authentication", "simple");
    conf.set("hadoop.proxyuser." + llamaProxyUser + ".hosts", "*");
    conf.set("hadoop.proxyuser." + llamaProxyUser + ".groups", "*");
    String[] userGroups = new String[] { "g" };
    UserGroupInformation.createUserForTesting(llamaProxyUser, userGroups);
    conf.setBoolean(YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME, usePortInName);
    return conf;
}

From source file:com.datatorrent.stram.StramMiniClusterTest.java

License:Apache License

@BeforeClass
public static void setup() throws InterruptedException, IOException {
    LOG.info("Starting up YARN cluster");
    conf = StramClientUtils.addDTDefaultResources(conf);
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64);
    conf.setInt("yarn.nodemanager.vmem-pmem-ratio", 20); // workaround to avoid containers being killed because java allocated too much vmem
    conf.setStrings("yarn.scheduler.capacity.root.queues", "default");
    conf.setStrings("yarn.scheduler.capacity.root.default.capacity", "100");

    StringBuilder adminEnv = new StringBuilder(1024);
    if (System.getenv("JAVA_HOME") == null) {
        adminEnv.append("JAVA_HOME=").append(System.getProperty("java.home"));
        adminEnv.append(",");
    }
    adminEnv.append("MALLOC_ARENA_MAX=4"); // see MAPREDUCE-3068, MAPREDUCE-3065
    adminEnv.append(",");
    adminEnv.append("CLASSPATH=").append(getTestRuntimeClasspath());

    conf.set(YarnConfiguration.NM_ADMIN_USER_ENV, adminEnv.toString());

    if (yarnCluster == null) {
        yarnCluster = new MiniYARNCluster(StramMiniClusterTest.class.getName(), 1, 1, 1);
        yarnCluster.init(conf);
        yarnCluster.start();
    }

    conf = yarnCluster.getConfig();
    URL url = Thread.currentThread().getContextClassLoader().getResource("yarn-site.xml");
    if (url == null) {
        LOG.error("Could not find 'yarn-site.xml' dummy file in classpath");
        throw new RuntimeException("Could not find 'yarn-site.xml' dummy file in classpath");
    }
    File confFile = new File(url.getPath());
    yarnCluster.getConfig().set("yarn.application.classpath", confFile.getParent());
    OutputStream os = new FileOutputStream(confFile);
    LOG.debug("Conf file: {}", confFile);
    yarnCluster.getConfig().writeXml(os);
    os.close();

    try {
        Thread.sleep(2000);
    } catch (InterruptedException e) {
        LOG.info("setup thread sleep interrupted. message=" + e.getMessage());
    }
}

From source file:com.ibm.bi.dml.yarn.ropt.YarnClusterAnalyzer.java

License:Open Source License

/**
 * Analyzes properties of Yarn cluster and Hadoop configurations.
 */
public static void analyzeYarnCluster(YarnClient yarnClient, YarnConfiguration conf, boolean verbose) {
    try {
        List<NodeReport> nodesReport = yarnClient.getNodeReports();
        if (verbose)
            System.out.println("There are " + nodesReport.size() + " nodes in the cluster");
        if (nodesReport.isEmpty())
            throw new YarnException("There are zero available nodes in the yarn cluster");

        nodesMaxPhySorted = new ArrayList<Long>(nodesReport.size());
        clusterTotalMem = 0;
        clusterTotalCores = 0;
        clusterTotalNodes = 0;
        minimumMRContainerPhyMB = -1;
        for (NodeReport node : nodesReport) {
            Resource resource = node.getCapability();
            Resource used = node.getUsed();
            if (used == null)
                used = Resource.newInstance(0, 0);
            int mb = resource.getMemory();
            int cores = resource.getVirtualCores();
            if (mb <= 0)
                throw new YarnException("A node has non-positive memory " + mb);

            int myMinMRPhyMB = mb / cores / CPU_HYPER_FACTOR;
            if (minimumMRContainerPhyMB < myMinMRPhyMB)
                minimumMRContainerPhyMB = myMinMRPhyMB; // minimumMRContainerPhyMB needs to be the largest among the mins

            clusterTotalMem += (long) mb * 1024 * 1024;
            nodesMaxPhySorted.add((long) mb * 1024 * 1024);
            clusterTotalCores += cores;
            clusterTotalNodes++;
            if (verbose)
                System.out.println("\t" + node.getNodeId() + " has " + mb + " MB (" + used.getMemory()
                        + " MB used) memory and " + resource.getVirtualCores() + " (" + used.getVirtualCores()
                        + " used) cores");

        }
        Collections.sort(nodesMaxPhySorted, Collections.reverseOrder());

        nodesMaxBudgetSorted = new ArrayList<Double>(nodesMaxPhySorted.size());
        for (int i = 0; i < nodesMaxPhySorted.size(); i++)
            nodesMaxBudgetSorted.add(ResourceOptimizer.phyToBudget(nodesMaxPhySorted.get(i)));

        _remotePar = nodesReport.size();
        if (_remotePar == 0)
            throw new YarnException("There are no available nodes in the yarn cluster");

        // Now get the default cluster settings
        _remoteMRSortMem = (1024 * 1024) * conf.getLong("io.sort.mb", 100); //100MB

        //handle jvm max mem (map mem budget is relevant for map-side distcache and parfor)
        //(for robustness we probe both: child and map configuration parameters)
        String javaOpts1 = conf.get("mapred.child.java.opts"); //internally mapred/mapreduce synonym
        String javaOpts2 = conf.get("mapreduce.map.java.opts", null); //internally mapred/mapreduce synonym
        String javaOpts3 = conf.get("mapreduce.reduce.java.opts", null); //internally mapred/mapreduce synonym
        if (javaOpts2 != null) //specific value overrides generic
            _remoteJVMMaxMemMap = extractMaxMemoryOpt(javaOpts2);
        else
            _remoteJVMMaxMemMap = extractMaxMemoryOpt(javaOpts1);
        if (javaOpts3 != null) //specific value overrides generic
            _remoteJVMMaxMemReduce = extractMaxMemoryOpt(javaOpts3);
        else
            _remoteJVMMaxMemReduce = extractMaxMemoryOpt(javaOpts1);

        //HDFS blocksize
        String blocksize = conf.get(MRConfigurationNames.DFS_BLOCK_SIZE, "134217728");
        _blocksize = Long.parseLong(blocksize);

        // Scheduler minimum/maximum container sizes (configured in MB, stored here in bytes)
        minimalPhyAllocate = (long) 1024 * 1024
                * conf.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
                        YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
        maximumPhyAllocate = (long) 1024 * 1024
                * conf.getInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
                        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB);
        // MapReduce ApplicationMaster container size (MB to bytes)
        mrAMPhy = (long) conf.getInt("yarn.app.mapreduce.am.resource.mb", 1536) * 1024 * 1024;

    } catch (Exception e) {
        throw new RuntimeException("Unable to analyze yarn cluster ", e);
    }

    /*
     * This is for AppMaster to query available resource in the cluster during heartbeat 
     * 
    AMRMClient<ContainerRequest> rmClient = AMRMClient.createAMRMClient();
    rmClient.init(conf);
    rmClient.start();
    AllocateResponse response = rmClient.allocate(0);
    int nodeCount = response.getNumClusterNodes();
    Resource resource = response.getAvailableResources();
    List<NodeReport> nodeUpdate = response.getUpdatedNodes();
            
    LOG.info("This is a " + nodeCount + " node cluster with totally " +
    resource.getMemory() + " memory and " + resource.getVirtualCores() + " cores");
    LOG.info(nodereport.size() + " updatedNode reports received");
    for (NodeReport node : nodeUpdate) {
       resource = node.getCapability();
       LOG.info(node.getNodeId() + " updated with " + resource.getMemory() + " memory and " + resource.getVirtualCores() + " cores");
    }*/
}

From source file:com.splicemachine.test.SpliceTestYarnPlatform.java

License:Apache License

private void configForTesting() throws URISyntaxException {
    yarnSiteConfigURL = Thread.currentThread().getContextClassLoader().getResource("yarn-site.xml");
    if (yarnSiteConfigURL == null) {
        throw new RuntimeException("Could not find 'yarn-site.xml' file in classpath");
    } else {
        LOG.info("Found 'yarn-site.xml' at " + yarnSiteConfigURL.toURI().toString());
    }

    conf = new YarnConfiguration();
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
    conf.setDouble("yarn.nodemanager.resource.io-spindles", 2.0);
    conf.set("fs.default.name", "file:///");
    conf.set("yarn.nodemanager.container-executor.class",
            "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor");
    System.setProperty("zookeeper.sasl.client", "false");
    System.setProperty("zookeeper.sasl.serverconfig", "fake");

    conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, DEFAULT_HEARTBEAT_INTERVAL);
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 128);
    conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    conf.set("yarn.application.classpath", new File(yarnSiteConfigURL.getPath()).getParent());
}

From source file:com.yahoo.storm.yarn.TestIntegration.java

License:Open Source License

@SuppressWarnings({ "rawtypes", "unchecked" })
@BeforeClass
public static void setup() {
    try {
        zkServer = new EmbeddedZKServer();
        zkServer.start();

        LOG.info("Starting up MiniYARN cluster");
        if (yarnCluster == null) {
            yarnCluster = new MiniYARNCluster(TestIntegration.class.getName(), 2, 1, 1);
            Configuration conf = new YarnConfiguration();
            conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512);
            conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 2 * 1024);
            yarnCluster.init(conf);
            yarnCluster.start();
        }
        sleep(2000);

        Configuration miniyarn_conf = yarnCluster.getConfig();
        yarn_site_xml = testConf.createYarnSiteConfig(miniyarn_conf);

        storm_home = testConf.stormHomePath();
        LOG.info("Will be using storm found on PATH at " + storm_home);

        //create a storm configuration file with zkport 
        final Map storm_conf = Config.readStormConfig();
        storm_conf.put(backtype.storm.Config.STORM_ZOOKEEPER_PORT, zkServer.port());
        storm_conf_file = testConf.createConfigFile(storm_conf);

        List<String> cmd = java.util.Arrays.asList("bin/storm-yarn", "launch", storm_conf_file.toString(),
                "--stormZip", "lib/storm.zip", "--appname", "storm-on-yarn-test", "--output",
                "target/appId.txt");
        execute(cmd);

        //wait for the Storm cluster to be fully launched
        sleep(15000);

        BufferedReader reader = new BufferedReader(new FileReader("target/appId.txt"));
        appId = reader.readLine();
        reader.close();
        if (appId != null)
            appId = appId.trim();
        LOG.info("application ID:" + appId);
    } catch (Exception ex) {
        LOG.error("setup failure", ex);
        Assert.assertEquals(null, ex);
    }
}

From source file:de.huberlin.wbi.hiway.am.WorkflowDriver.java

License:Apache License

/** Parse command line arguments, initialize HDFS, manage environment variables. */
public boolean init(String[] args) throws ParseException, IOException, JSONException {

    DefaultMetricsSystem.initialize("ApplicationMaster");

    Options opts = new Options();
    opts.addOption("app_attempt_id", true, "App Attempt ID. Not to be used unless for testing purposes");
    opts.addOption("u", "summary", true,
            "The name of the json summary file. No file is created if this parameter is not specified.");
    opts.addOption("m", "memory", true,
            "The amount of memory (in MB) to be allocated per worker container. Overrides settings in hiway-site.xml.");
    opts.addOption("c", "custom", true,
            "The name of an (optional) JSON file, in which custom amounts of memory can be specified per task.");
    opts.addOption("s", "scheduler", true,
            "The scheduling policy that is to be employed. Valid arguments: "
                    + Arrays.toString(HiWayConfiguration.HIWAY_SCHEDULERS.values())
                    + ". Overrides settings in hiway-site.xml.");
    opts.addOption("d", "debug", false, "Provide additional logs and information for debugging");
    opts.addOption("v", "verbose", false, "Increase verbosity of output / reporting.");
    opts.addOption("appid", true, "Id of this Application Master.");

    opts.addOption("h", "help", false, "Print usage");
    CommandLine cliParser = new GnuParser().parse(opts, args);

    if (args.length == 0) {
        Logger.printUsage(opts);
        throw new IllegalArgumentException("No args specified for application master to initialize");
    }

    if (cliParser.getArgs().length == 0) {
        Logger.printUsage(opts);
        throw new IllegalArgumentException("No workflow file specified.");
    }

    if (!cliParser.hasOption("appid")) {
        throw new IllegalArgumentException("No id of Application Master specified");
    }

    if (cliParser.hasOption("verbose")) {
        HiWayConfiguration.verbose = true;
    }

    appId = cliParser.getOptionValue("appid");
    try {
        logger.statLog = new BufferedWriter(new FileWriter(appId + ".log"));
    } catch (IOException e) {
        e.printStackTrace(System.out);
        System.exit(-1);
    }

    if (cliParser.hasOption("help")) {
        Logger.printUsage(opts);
        return false;
    }

    if (cliParser.hasOption("debug")) {
        Logger.dumpOutDebugInfo();
        HiWayConfiguration.debug = true;
    }

    if (cliParser.hasOption("summary")) {
        summaryPath = new Path(cliParser.getOptionValue("summary"));
    }

    String hdfsBaseDirectoryName = conf.get(HiWayConfiguration.HIWAY_AM_DIRECTORY_BASE,
            HiWayConfiguration.HIWAY_AM_DIRECTORY_BASE_DEFAULT);
    String hdfsSandboxDirectoryName = conf.get(HiWayConfiguration.HIWAY_AM_DIRECTORY_CACHE,
            HiWayConfiguration.HIWAY_AM_DIRECTORY_CACHE_DEFAULT);
    Path hdfsBaseDirectory = new Path(new Path(hdfs.getUri()), hdfsBaseDirectoryName);
    Data.setHdfsBaseDirectory(hdfsBaseDirectory);
    Path hdfsSandboxDirectory = new Path(hdfsBaseDirectory, hdfsSandboxDirectoryName);
    hdfsApplicationDirectory = new Path(hdfsSandboxDirectory, appId);
    Data.setHdfsApplicationDirectory(hdfsApplicationDirectory);
    Data.setHdfs(hdfs);

    if (cliParser.hasOption("custom")) {
        Data customMemPath = new Data(cliParser.getOptionValue("custom"));
        customMemPath.stageIn();
        StringBuilder sb = new StringBuilder();
        try (BufferedReader in = new BufferedReader(new FileReader(customMemPath.getLocalPath().toString()))) {
            String line;
            while ((line = in.readLine()) != null) {
                sb.append(line);
            }
        }
        JSONObject obj = new JSONObject(sb.toString());
        Iterator<?> keys = obj.keys();
        while (keys.hasNext()) {
            String key = (String) keys.next();
            int minMem = conf.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
                    YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
            int desiredMem = obj.getInt(key);
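            // Round the requested memory up to the next multiple of the scheduler
            // minimum, since the RM will not grant containers below that floor.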
            customMemoryMap.put(key,
                    (desiredMem % minMem) == 0 ? desiredMem : (desiredMem / minMem + 1) * minMem);
        }
    }

    Map<String, String> envs = System.getenv();

    /* this application's attempt id (combination of attemptId and fail count) */
    ApplicationAttemptId appAttemptID;
    if (!envs.containsKey(Environment.CONTAINER_ID.name())) {
        if (cliParser.hasOption("app_attempt_id")) {
            String appIdStr = cliParser.getOptionValue("app_attempt_id", "");
            appAttemptID = ConverterUtils.toApplicationAttemptId(appIdStr);
        } else {
            throw new IllegalArgumentException("Application Attempt Id not set in the environment");
        }
    } else {
        ContainerId containerId = ConverterUtils.toContainerId(envs.get(Environment.CONTAINER_ID.name()));
        appAttemptID = containerId.getApplicationAttemptId();
    }

    if (!envs.containsKey(ApplicationConstants.APP_SUBMIT_TIME_ENV)) {
        throw new RuntimeException(ApplicationConstants.APP_SUBMIT_TIME_ENV + " not set in the environment");
    }
    if (!envs.containsKey(Environment.NM_HOST.name())) {
        throw new RuntimeException(Environment.NM_HOST.name() + " not set in the environment");
    }
    if (!envs.containsKey(Environment.NM_HTTP_PORT.name())) {
        throw new RuntimeException(Environment.NM_HTTP_PORT.name() + " not set in the environment");
    }
    if (!envs.containsKey(Environment.NM_PORT.name())) {
        throw new RuntimeException(Environment.NM_PORT.name() + " not set in the environment");
    }

    Logger.writeToStdout("Application master for app" + ", appId=" + appAttemptID.getApplicationId().getId()
            + ", clustertimestamp=" + appAttemptID.getApplicationId().getClusterTimestamp() + ", attemptId="
            + appAttemptID.getAttemptId());

    String[] shellEnvs = conf.getStrings(HiWayConfiguration.HIWAY_WORKER_SHELL_ENV,
            HiWayConfiguration.HIWAY_WORKER_SHELL_ENV_DEFAULT);
    for (String env : shellEnvs) {
        env = env.trim();
        int index = env.indexOf('=');
        if (index == -1) {
            shellEnv.put(env, "");
            continue;
        }
        String key = env.substring(0, index);
        String val = "";
        if (index < (env.length() - 1)) {
            val = env.substring(index + 1);
        }
        shellEnv.put(key, val);
    }

    String workflowParam = cliParser.getArgs()[0];
    try {
        workflowPath = new Path(new URI(workflowParam).getPath());
    } catch (URISyntaxException e) {
        workflowPath = new Path(workflowParam);
    }

    schedulerEnumValue = HiWayConfiguration.HIWAY_SCHEDULERS.valueOf(conf
            .get(HiWayConfiguration.HIWAY_SCHEDULER, HiWayConfiguration.HIWAY_SCHEDULER_DEFAULT.toString()));
    if (cliParser.hasOption("scheduler")) {
        schedulerEnumValue = HiWayConfiguration.HIWAY_SCHEDULERS.valueOf(cliParser.getOptionValue("scheduler"));
    }

    containerMemory = conf.getInt(HiWayConfiguration.HIWAY_WORKER_MEMORY,
            HiWayConfiguration.HIWAY_WORKER_MEMORY_DEFAULT);
    if (cliParser.hasOption("memory")) {
        containerMemory = Integer.parseInt(cliParser.getOptionValue("memory"));
    }

    containerCores = conf.getInt(HiWayConfiguration.HIWAY_WORKER_VCORES,
            HiWayConfiguration.HIWAY_WORKER_VCORES_DEFAULT);
    requestPriority = conf.getInt(HiWayConfiguration.HIWAY_WORKER_PRIORITY,
            HiWayConfiguration.HIWAY_WORKER_PRIORITY_DEFAULT);

    // Create and start the Timeline timelineClient
    if (conf.getBoolean("yarn.timeline-service.enabled", false)) {
        timelineClient = TimelineClient.createTimelineClient();
        timelineClient.init(conf);
        timelineClient.start();
        Logger.writeToStdout("Started TimeLineClient.");
    } else {
        Logger.writeToStdErr("TimeLineClient disabled.");
    }
    return true;
}

From source file:edu.uci.ics.asterix.aoya.test.YARNCluster.java

License:Apache License

/**
 * Instantiates the (Mini) DFS Cluster with the configured number of datanodes.
 * Post instantiation, data is loaded to HDFS.
 * Called prior to running the Runtime test suite.
 */
public void setup() throws Exception {
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/core-site.xml"));
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/mapred-site.xml"));
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/hdfs-site.xml"));
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64);
    conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "target/integrationts/data");
    cleanupLocal();
    //this constructor is deprecated in hadoop 2x 
    //dfsCluster = new MiniDFSCluster(nameNodePort, conf, numDataNodes, true, true, StartupOption.REGULAR, null);
    miniCluster = new MiniYARNCluster("Asterix_testing", numDataNodes, 1, 1);
    miniCluster.init(conf);
    dfs = FileSystem.get(conf);
}

From source file:io.hops.tensorflow.TestCluster.java

License:Apache License

protected void setupInternal(int numNodeManager) throws Exception {

    LOG.info("Starting up YARN cluster");

    conf = new YarnConfiguration();
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 128);
    conf.set("yarn.log.dir", "target");
    conf.set("yarn.log-aggregation-enable", "true");
    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
    conf.set(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class.getName());
    conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
    conf.setBoolean(YarnConfiguration.NM_GPU_RESOURCE_ENABLED, false);

    if (yarnCluster == null) {
        yarnCluster = new MiniYARNCluster(TestCluster.class.getSimpleName(), 1, numNodeManager, 1, 1);
        yarnCluster.init(conf);

        yarnCluster.start();

        conf.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
                MiniYARNCluster.getHostname() + ":" + yarnCluster.getApplicationHistoryServer().getPort());

        waitForNMsToRegister();

        URL url = Thread.currentThread().getContextClassLoader().getResource("yarn-site.xml");
        if (url == null) {
            throw new RuntimeException("Could not find 'yarn-site.xml' dummy file in classpath");
        }
        Configuration yarnClusterConfig = yarnCluster.getConfig();
        yarnClusterConfig.set("yarn.application.classpath", new File(url.getPath()).getParent());
        //write the document to a buffer (not directly to the file, as that
        //can cause the file being written to get read, which will then fail)
        ByteArrayOutputStream bytesOut = new ByteArrayOutputStream();
        yarnClusterConfig.writeXml(bytesOut);
        bytesOut.close();
        //write the bytes to the file in the classpath
        OutputStream os = new FileOutputStream(new File(url.getPath()));
        os.write(bytesOut.toByteArray());
        os.close();
    }
    FileContext fsContext = FileContext.getLocalFSFileContext();
    fsContext.delete(new Path(conf.get("yarn.timeline-service.leveldb-timeline-store.path")), true);
    try {
        Thread.sleep(2000);
    } catch (InterruptedException e) {
        LOG.info("setup thread sleep interrupted. message=" + e.getMessage());
    }
}