Example usage for org.apache.hadoop.yarn.conf YarnConfiguration RM_SCHEDULER_MINIMUM_ALLOCATION_MB

Introduction

On this page you can find usage examples for org.apache.hadoop.yarn.conf.YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB.

Prototype

public static final String RM_SCHEDULER_MINIMUM_ALLOCATION_MB

Document

Minimum request grant-able by the RM scheduler.

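Before the examples, a minimal self-contained sketch of typical usage (the class name MinAllocationExample is illustrative; the constant holds the property key yarn.scheduler.minimum-allocation-mb, and DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB is 1024 MB):

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class MinAllocationExample {
    public static void main(String[] args) {
        YarnConfiguration conf = new YarnConfiguration();

        // Lower the scheduler's minimum container size to 64 MB,
        // as the miniature test clusters below commonly do.
        conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64);

        // Read the value back, falling back to the shipped default.
        int minMb = conf.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
                YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
        System.out.println("Minimum allocation: " + minMb + " MB");
    }
}

Requests below this value are rounded up to it by the ResourceManager scheduler, which is why the test clusters below lower it: many small containers can then fit on a single test host.
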
Usage

From source file:org.apache.asterix.aoya.test.YARNCluster.java

License:Apache License

/**
 * Instantiates the (Mini) DFS Cluster with the configured number of datanodes.
 * Post instantiation, data is loaded to HDFS.
 * Called prior to running the Runtime test suite.
 */
public void setup() throws Exception {
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/core-site.xml"));
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/mapred-site.xml"));
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/hdfs-site.xml"));
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64);
    conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "target/integrationts/data");
    cleanupLocal();
    //this constructor is deprecated in hadoop 2x
    //dfsCluster = new MiniDFSCluster(nameNodePort, conf, numDataNodes, true, true, StartupOption.REGULAR, null);
    miniCluster = new MiniYARNCluster("Asterix_testing", numDataNodes, 1, 1);
    miniCluster.init(conf);
}

From source file:org.apache.flink.yarn.YARNSessionFIFOITCase.java

License:Apache License

@BeforeClass
public static void setup() {
    yarnConfiguration.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    yarnConfiguration.setInt(YarnConfiguration.NM_PMEM_MB, 768);
    yarnConfiguration.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512);
    yarnConfiguration.set(YarnTestBase.TEST_CLUSTER_NAME_KEY, "flink-yarn-tests-fifo");
    startYARNWithConfig(yarnConfiguration);
}

From source file:org.apache.flink.yarn.YARNSessionFIFOSecuredITCase.java

License:Apache License

@BeforeClass
public static void setup() {

    LOG.info("starting secure cluster environment for testing");

    yarnConfiguration.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    yarnConfiguration.setInt(YarnConfiguration.NM_PMEM_MB, 768);
    yarnConfiguration.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512);
    yarnConfiguration.set(YarnTestBase.TEST_CLUSTER_NAME_KEY, "flink-yarn-tests-fifo-secured");

    SecureTestEnvironment.prepare(tmp);

    populateYarnSecureConfigurations(yarnConfiguration, SecureTestEnvironment.getHadoopServicePrincipal(),
            SecureTestEnvironment.getTestKeytab());

    Configuration flinkConfig = new Configuration();
    flinkConfig.setString(SecurityOptions.KERBEROS_LOGIN_KEYTAB, SecureTestEnvironment.getTestKeytab());
    flinkConfig.setString(SecurityOptions.KERBEROS_LOGIN_PRINCIPAL,
            SecureTestEnvironment.getHadoopServicePrincipal());

    SecurityUtils.SecurityConfiguration ctx = new SecurityUtils.SecurityConfiguration(flinkConfig,
            yarnConfiguration);
    try {
        TestingSecurityContext.install(ctx, SecureTestEnvironment.getClientSecurityConfigurationMap());

        SecurityUtils.getInstalledContext().runSecured(new Callable<Object>() {
            @Override
            public Integer call() {
                startYARNSecureMode(yarnConfiguration, SecureTestEnvironment.getHadoopServicePrincipal(),
                        SecureTestEnvironment.getTestKeytab());
                return null;
            }
        });

    } catch (Exception e) {
        throw new RuntimeException("Exception occurred while setting up secure test context. Reason: {}", e);
    }

}

From source file:org.apache.metron.integration.components.YarnComponent.java

License:Apache License

@Override
public void start() throws UnableToStartException {
    conf = new YarnConfiguration();
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 128);
    conf.set("yarn.log.dir", "target");
    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
    conf.set(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class.getName());
    conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);

    try {
        yarnCluster = new MiniYARNCluster(testName, 1, NUM_NMS, 1, 1, true);
        yarnCluster.init(conf);

        yarnCluster.start();

        waitForNMsToRegister();

        URL url = Thread.currentThread().getContextClassLoader().getResource("yarn-site.xml");
        if (url == null) {
            throw new RuntimeException("Could not find 'yarn-site.xml' dummy file in classpath");
        }
        Configuration yarnClusterConfig = yarnCluster.getConfig();
        yarnClusterConfig.set("yarn.application.classpath", new File(url.getPath()).getParent());
        // write the document to a buffer first (not directly to the file,
        // as the file could otherwise be read while it is still being written, which would fail)
        ByteArrayOutputStream bytesOut = new ByteArrayOutputStream();
        yarnClusterConfig.writeXml(bytesOut);
        bytesOut.close();
        //write the bytes to the file in the classpath
        OutputStream os = new FileOutputStream(new File(url.getPath()));
        os.write(bytesOut.toByteArray());
        os.close();
        FileContext fsContext = FileContext.getLocalFSFileContext();
        fsContext.delete(new Path(conf.get("yarn.timeline-service.leveldb-timeline-store.path")), true);
        try {
            Thread.sleep(2000);
        } catch (InterruptedException e) {
            // best-effort pause while the cluster settles; the interruption is safe to ignore
        }
    } catch (Exception e) {
        throw new UnableToStartException("Exception setting up yarn cluster", e);
    }
}

From source file:org.apache.sysml.yarn.ropt.YarnClusterAnalyzer.java

License:Apache License

/**
 * Analyzes properties of Yarn cluster and Hadoop configurations.
 *
 * @param yarnClient hadoop yarn client
 * @param conf hadoop yarn configuration
 * @param verbose output info to standard output
 */
public static void analyzeYarnCluster(YarnClient yarnClient, YarnConfiguration conf, boolean verbose) {
    try {
        List<NodeReport> nodesReport = yarnClient.getNodeReports();
        if (verbose)
            System.out.println("There are " + nodesReport.size() + " nodes in the cluster");
        if (nodesReport.isEmpty())
            throw new YarnException("There are zero available nodes in the yarn cluster");

        nodesMaxPhySorted = new ArrayList<>(nodesReport.size());
        clusterTotalMem = 0;
        clusterTotalCores = 0;
        clusterTotalNodes = 0;
        minimumMRContainerPhyMB = -1;
        for (NodeReport node : nodesReport) {
            Resource resource = node.getCapability();
            Resource used = node.getUsed();
            if (used == null)
                used = Resource.newInstance(0, 0);
            int mb = resource.getMemory();
            int cores = resource.getVirtualCores();
            if (mb <= 0 || cores <= 0)
                throw new YarnException("A node has non-positive memory (" + mb + " MB) or cores (" + cores + ")");

            int myMinMRPhyMB = mb / cores / CPU_HYPER_FACTOR;
            if (minimumMRContainerPhyMB < myMinMRPhyMB)
                minimumMRContainerPhyMB = myMinMRPhyMB; // minimumMRContainerPhyMB needs to be the largest among the mins

            clusterTotalMem += (long) mb * 1024 * 1024;
            nodesMaxPhySorted.add((long) mb * 1024 * 1024);
            clusterTotalCores += cores;
            clusterTotalNodes++;
            if (verbose)
                System.out.println("\t" + node.getNodeId() + " has " + mb + " MB (" + used.getMemory()
                        + " MB used) memory and " + resource.getVirtualCores() + " (" + used.getVirtualCores()
                        + " used) cores");

        }
        Collections.sort(nodesMaxPhySorted, Collections.reverseOrder());

        nodesMaxBudgetSorted = new ArrayList<>(nodesMaxPhySorted.size());
        for (int i = 0; i < nodesMaxPhySorted.size(); i++)
            nodesMaxBudgetSorted.add(ResourceOptimizer.phyToBudget(nodesMaxPhySorted.get(i)));

        _remotePar = nodesReport.size();
        if (_remotePar == 0)
            throw new YarnException("There are no available nodes in the yarn cluster");

        // Now get the default cluster settings
        _remoteMRSortMem = (1024 * 1024) * conf.getLong(MRConfigurationNames.MR_TASK_IO_SORT_MB, 100); //100MB

        //handle jvm max mem (map mem budget is relevant for map-side distcache and parfor)
        //(for robustness we probe both: child and map configuration parameters)
        String javaOpts1 = conf.get(MRConfigurationNames.MR_CHILD_JAVA_OPTS); //internally mapred/mapreduce synonym
        String javaOpts2 = conf.get(MRConfigurationNames.MR_MAP_JAVA_OPTS, null); //internally mapred/mapreduce synonym
        String javaOpts3 = conf.get(MRConfigurationNames.MR_REDUCE_JAVA_OPTS, null); //internally mapred/mapreduce synonym
        if (javaOpts2 != null) //specific value overrides generic
            _remoteJVMMaxMemMap = extractMaxMemoryOpt(javaOpts2);
        else
            _remoteJVMMaxMemMap = extractMaxMemoryOpt(javaOpts1);
        if (javaOpts3 != null) //specific value overrides generic
            _remoteJVMMaxMemReduce = extractMaxMemoryOpt(javaOpts3);
        else
            _remoteJVMMaxMemReduce = extractMaxMemoryOpt(javaOpts1);

        //HDFS blocksize
        String blocksize = conf.get(MRConfigurationNames.DFS_BLOCKSIZE, "134217728");
        _blocksize = Long.parseLong(blocksize);

        minimalPhyAllocate = (long) 1024 * 1024
                * conf.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
                        YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
        maximumPhyAllocate = (long) 1024 * 1024
                * conf.getInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
                        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB);
        mrAMPhy = (long) conf.getInt(MRConfigurationNames.YARN_APP_MR_AM_RESOURCE_MB, 1536) * 1024 * 1024;

    } catch (Exception e) {
        throw new RuntimeException("Unable to analyze yarn cluster ", e);
    }

    /*
     * This is for AppMaster to query available resource in the cluster during heartbeat 
     * 
    AMRMClient<ContainerRequest> rmClient = AMRMClient.createAMRMClient();
    rmClient.init(conf);
    rmClient.start();
    AllocateResponse response = rmClient.allocate(0);
    int nodeCount = response.getNumClusterNodes();
    Resource resource = response.getAvailableResources();
    List<NodeReport> nodeUpdate = response.getUpdatedNodes();
            
    LOG.info("This is a " + nodeCount + " node cluster with totally " +
    resource.getMemory() + " memory and " + resource.getVirtualCores() + " cores");
    LOG.info(nodereport.size() + " updatedNode reports received");
    for (NodeReport node : nodeUpdate) {
       resource = node.getCapability();
       LOG.info(node.getNodeId() + " updated with " + resource.getMemory() + " memory and " + resource.getVirtualCores() + " cores");
    }*/
}

From source file:org.springframework.yarn.test.TestCluster.java

License:Apache License

@BeforeClass
public static void setup() throws InterruptedException, IOException, URISyntaxException {

    LOG.info("Starting up YARN cluster");
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 128);
    conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    if (yarnCluster == null) {
        yarnCluster = new MiniYARNCluster(TestCluster.class.getSimpleName(), 1, 1, 1);
        yarnCluster.init(conf);
        yarnCluster.start();
    }
    try {
        Thread.sleep(2000);
    } catch (InterruptedException e) {
        LOG.info("setup thread sleep interrupted. message=" + e.getMessage());
    }
}

From source file:org.testifyproject.resource.yarn.MiniYarnResource.java

License:Apache License

@Override
public YarnConfiguration configure(TestContext testContext, LocalResource localResource,
        PropertiesReader configReader) {
    String testName = testContext.getName();
    String logDirectory = fileSystemUtil.createPath("target", "yarn", testName);

    YarnConfiguration configuration = new YarnConfiguration();
    configuration.set(YarnConfiguration.YARN_APP_CONTAINER_LOG_DIR, logDirectory);
    configuration.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64);
    configuration.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);

    return configuration;
}

From source file:org.zuinnote.hadoop.office.example.MapReduceExcelInputIntegrationTest.java

License:Apache License

@BeforeAll
public static void oneTimeSetUp() throws IOException {
    // create a temporary directory for the HDFS base and register a shutdown hook to clean it up
    tmpPath = Files.createTempDirectory(tmpPrefix);
    // create shutdown hook to remove temp files (= HDFS MiniCluster data) after shutdown; may need to rethink this to avoid creating too many threads
    Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                Files.walkFileTree(tmpPath, new SimpleFileVisitor<java.nio.file.Path>() {

                    @Override
                    public FileVisitResult visitFile(java.nio.file.Path file, BasicFileAttributes attrs)
                            throws IOException {
                        Files.delete(file);
                        return FileVisitResult.CONTINUE;
                    }

                    @Override
                    public FileVisitResult postVisitDirectory(java.nio.file.Path dir, IOException e)
                            throws IOException {
                        if (e == null) {
                            Files.delete(dir);
                            return FileVisitResult.CONTINUE;
                        }
                        throw e;
                    }
                });
            } catch (IOException e) {
                throw new RuntimeException(
                        "Temporary files under the following path could not be deleted: " + tmpPath, e);
            }
        }
    }));
    // Create Configuration
    Configuration conf = new Configuration();
    // create HDFS cluster
    File baseDir = new File(tmpPath.toString()).getAbsoluteFile();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    dfsCluster = builder.numDataNodes(NOOFDATANODES).build();
    // create Yarn cluster
    YarnConfiguration clusterConf = new YarnConfiguration(conf);
    conf.set("fs.defaultFS", dfsCluster.getFileSystem().getUri().toString());
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64);
    conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    miniCluster = new MiniMRYarnCluster(CLUSTERNAME, NOOFNODEMANAGERS, STARTTIMELINESERVER);
    miniCluster.init(conf);
    miniCluster.start();
}

From source file:probos.TestEndToEnd.java

License:Open Source License

@Before
public void setupCluster() throws Exception {
    System.setProperty("probos.home", System.getProperty("user.dir"));
    (probosJobDir = new File(System.getProperty("user.dir"), "probos")).mkdir();
    String name = "mycluster";
    int noOfNodeManagers = 1;
    int numLocalDirs = 1;
    int numLogDirs = 1;
    YarnConfiguration conf = new YarnConfiguration();
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64);
    conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    miniCluster = new MiniYARNCluster(name, noOfNodeManagers, numLocalDirs, numLogDirs);
    miniCluster.init(conf);
    miniCluster.start();

    // once the cluster is created, you can get its configuration,
    // with the binding details to the cluster added by the minicluster
    YarnConfiguration appConf = new YarnConfiguration(miniCluster.getConfig());
    cs = new ControllerServer(appConf);
    cs.startAndWait();
    Thread.sleep(1000);
    new pbsnodes().run(new String[0]);
}

From source file:probos.TestProblem.java

License:Open Source License

@Before
public void setupCluster() throws Exception {
    String name = "mycluster";
    int noOfNodeManagers = 1;
    int numLocalDirs = 1;
    int numLogDirs = 1;
    YarnConfiguration conf = new YarnConfiguration();
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64);
    conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    miniCluster = new MiniYARNCluster(name, noOfNodeManagers, numLocalDirs, numLogDirs);
    miniCluster.init(conf);
    miniCluster.start();

    // once the cluster is created, you can get its configuration,
    // with the binding details to the cluster added by the minicluster
    yConf = new YarnConfiguration(miniCluster.getConfig());

}