Example usage for org.apache.hadoop.yarn.conf YarnConfiguration YarnConfiguration

List of usage examples for org.apache.hadoop.yarn.conf YarnConfiguration YarnConfiguration

Introduction

On this page you can find example usage for the org.apache.hadoop.yarn.conf.YarnConfiguration constructor YarnConfiguration().

Prototype

public YarnConfiguration() 

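The no-argument constructor returns a Hadoop Configuration whose defaults also include yarn-default.xml and, when present on the classpath, yarn-site.xml. A minimal sketch of typical use (the override value below is illustrative, not taken from the examples that follow):

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public static YarnConfiguration newYarnConfiguration() {
    // Picks up yarn-default.xml plus any yarn-site.xml found on the classpath
    YarnConfiguration conf = new YarnConfiguration();
    // Programmatic overrides take precedence over file-based values
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 256);
    return conf;
}
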
Usage

From source file:com.sogou.dockeronyarn.client.DockerApplicationMaster_23.java

License:Apache License

public DockerApplicationMaster_23() {
    // Set up the configuration
    conf = new YarnConfiguration();
}

From source file:com.sogou.dockeronyarn.client.DockerClient.java

License:Apache License

public DockerClient() throws Exception {
    this(new YarnConfiguration());
}
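Delegating the no-argument constructor to one that accepts a Configuration keeps the default form convenient while still letting tests and callers inject a preconfigured YarnConfiguration.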

From source file:com.sogou.dockeronyarn.service.DockerApplicationMaster_24.java

License:Apache License

public DockerApplicationMaster_24() {
    // Set up the configuration
    conf = new YarnConfiguration();
}

From source file:com.splicemachine.test.SpliceTestYarnPlatform.java

License:Apache License

private void configForTesting() throws URISyntaxException {
    yarnSiteConfigURL = Thread.currentThread().getContextClassLoader().getResource("yarn-site.xml");
    if (yarnSiteConfigURL == null) {
        throw new RuntimeException("Could not find 'yarn-site.xml' file in classpath");
    } else {
        LOG.info("Found 'yarn-site.xml' at " + yarnSiteConfigURL.toURI().toString());
    }

    conf = new YarnConfiguration();
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
    conf.setDouble("yarn.nodemanager.resource.io-spindles", 2.0);
    conf.set("fs.default.name", "file:///");
    conf.set("yarn.nodemanager.container-executor.class",
            "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor");
    System.setProperty("zookeeper.sasl.client", "false");
    System.setProperty("zookeeper.sasl.serverconfig", "fake");

    conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, DEFAULT_HEARTBEAT_INTERVAL);
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 128);
    conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    conf.set("yarn.application.classpath", new File(yarnSiteConfigURL.getPath()).getParent());
}
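A configuration prepared this way is typically handed to an in-process YARN cluster for tests. A minimal sketch, assuming hadoop-yarn-server-tests is on the test classpath (cluster name and node counts are illustrative):

import org.apache.hadoop.yarn.server.MiniYARNCluster;

// One NodeManager, one local dir, one log dir
MiniYARNCluster yarnCluster = new MiniYARNCluster("SpliceTestYarn", 1, 1, 1);
yarnCluster.init(conf); // conf is the YarnConfiguration built above
yarnCluster.start();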

From source file:com.splicemachine.yarn.test.BareYarnTest.java

License:Apache License

@BeforeClass
public static void beforeClass() throws Exception {
    // start yarn server
    yarnPlatform = new SpliceTestYarnPlatform();
    yarnPlatform.start(SpliceTestYarnPlatform.DEFAULT_NODE_COUNT);

    URL configURL = Thread.currentThread().getContextClassLoader().getResource("yarn-site.xml");
    if (configURL == null) {
        throw new RuntimeException("Could not find 'yarn-site.xml' file in classpath");
    }

    Configuration conf = new YarnConfiguration();
    conf.set("yarn.application.classpath", new File(configURL.getPath()).getParent());

    // start rm client
    yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();
}
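Once started, the client can query the miniature cluster; for example (a sketch, not part of the original test):

import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;

// List the NodeManagers the ResourceManager currently knows about
for (NodeReport node : yarnClient.getNodeReports(NodeState.RUNNING)) {
    System.out.println(node.getNodeId() + " -> " + node.getNodeState());
}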

From source file:com.streamsets.datacollector.base.PipelineOperationsClusterIT.java

License:Apache License

public static void beforeClass(String pipelineJson, String testName) throws Exception {
    ClusterUtil.setupCluster(testName, pipelineJson, new YarnConfiguration());
    serverURI = ClusterUtil.getServerURI();
    miniSDC = ClusterUtil.getMiniSDC();
}

From source file:com.streamsets.datacollector.flume.cluster.KafkaToFlumeIT.java

License:Apache License

@BeforeClass
public static void beforeClass() throws Exception {
    //setup kafka to read from
    KafkaTestUtil.startZookeeper();
    KafkaTestUtil.startKafkaBrokers(1);
    KafkaTestUtil.createTopic(TOPIC, 1, 1);
    producer = KafkaTestUtil.createProducer(KafkaTestUtil.getMetadataBrokerURI(), true);
    produceRecords(RECORDS_PRODUCED);

    //setup flume to write to
    source = new AvroSource();
    ch = new MemoryChannel();
    Configurables.configure(ch, new Context());

    Context context = new Context();
    // This should match what's present in the pipeline.json file
    flumePort = TestUtil.getFreePort();
    context.put("port", String.valueOf(flumePort));
    context.put("bind", "localhost");
    Configurables.configure(source, context);

    List<Channel> channels = new ArrayList<>();
    channels.add(ch);
    ChannelSelector rcs = new ReplicatingChannelSelector();
    rcs.setChannels(channels);
    source.setChannelProcessor(new ChannelProcessor(rcs));
    source.start();

    //setup Cluster and start pipeline
    ClusterUtil.setupCluster(TEST_NAME, getPipelineJson(), new YarnConfiguration());
    serverURI = ClusterUtil.getServerURI();
    miniSDC = ClusterUtil.getMiniSDC();
}

From source file:com.streamsets.datacollector.hdfs.cluster.KafkaToHDFSIT.java

License:Apache License

@BeforeClass
public static void beforeClass() throws Exception {
    //setup kafka to read from
    KafkaTestUtil.startZookeeper();
    KafkaTestUtil.startKafkaBrokers(1);
    KafkaTestUtil.createTopic(TOPIC, 1, 1);
    producer = KafkaTestUtil.createProducer(KafkaTestUtil.getMetadataBrokerURI(), true);
    produceRecords(RECORDS_PRODUCED);

    // set dummy Kerberos settings so that a misconfiguration can be exercised
    System.setProperty("java.security.krb5.realm", "foo");
    System.setProperty("java.security.krb5.kdc", "localhost:0");

    File minidfsDir = new File("target/minidfs").getAbsoluteFile();
    if (!minidfsDir.exists()) {
        Assert.assertTrue(minidfsDir.mkdirs());
    }
    System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
    Configuration conf = new HdfsConfiguration();
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
    UserGroupInformation.createUserForTesting("foo", new String[] { "all", "supergroup" });
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
    miniDFS = new MiniDFSCluster.Builder(conf).build();

    //setup Cluster and start pipeline
    YarnConfiguration entries = new YarnConfiguration();
    //TODO: Investigate why this is required for test to pass. Is yarn messing with the miniDFS cluster configuration?
    entries.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
    entries.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
    ClusterUtil.setupCluster(TEST_NAME, getPipelineJson(), entries);
    serverURI = ClusterUtil.getServerURI();
    miniSDC = ClusterUtil.getMiniSDC();
}

From source file:com.streamsets.datacollector.spark.SparkOnYarnIT.java

License:Apache License

@BeforeClass
public static void setup() throws Exception {
    System.setProperty(MiniSDCTestingUtility.PRESERVE_TEST_DIR, "true");
    miniSDCTestingUtility = new MiniSDCTestingUtility();
    File dataTestDir = miniSDCTestingUtility.getDataTestDir();
    File sparkHome = ClusterUtil.createSparkHome(dataTestDir);

    YarnConfiguration entries = new YarnConfiguration();
    miniYarnCluster = miniSDCTestingUtility.startMiniYarnCluster(TEST_NAME, 1, 1, 1, entries);

    Configuration config = miniYarnCluster.getConfig();
    long deadline = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(10);
    // The RM address port stays "0" until the ResourceManager has bound its address
    while ("0".equals(config.get(YarnConfiguration.RM_ADDRESS).split(":")[1])) {
        if (System.currentTimeMillis() > deadline) {
            throw new IllegalStateException("Timed out waiting for RM to come up.");
        }
        LOG.debug("RM address still not set in configuration, waiting...");
        TimeUnit.MILLISECONDS.sleep(100);
    }
    LOG.debug("RM at " + config.get(YarnConfiguration.RM_ADDRESS));

    Properties sparkHadoopProps = new Properties();

    for (Map.Entry<String, String> entry : config) {
        sparkHadoopProps.setProperty("spark.hadoop." + entry.getKey(), entry.getValue());
    }

    LOG.debug("Creating spark properties file at " + dataTestDir);
    File propertiesFile = new File(dataTestDir, "spark.properties");
    propertiesFile.createNewFile();
    FileOutputStream sdcOutStream = new FileOutputStream(propertiesFile);
    sparkHadoopProps.store(sdcOutStream, null);
    sdcOutStream.flush();
    sdcOutStream.close();
    // Pass this properties file to spark-submit so that it picks up the YARN configuration
    System.setProperty(SPARK_PROPERTY_FILE, propertiesFile.getAbsolutePath());

    URI uri = Resources.getResource("cluster_pipeline.json").toURI();
    pipelineJson = new String(Files.readAllBytes(Paths.get(uri)), StandardCharsets.UTF_8);
    // TODO - Move setup of Kafka in separate class
    setupKafka();

    File sparkBin = new File(sparkHome, "bin");
    for (File file : sparkBin.listFiles()) {
        MiniSDCTestingUtility.setExecutePermission(file.toPath());
    }
}
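Each Hadoop key is written under the spark.hadoop. prefix because spark-submit copies properties with that prefix into the Hadoop Configuration of the launched application; this is how the miniature cluster's ResourceManager address reaches the Spark driver and executors.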

From source file:com.toy.Client.java

License:Apache License

static Configuration getDefaultConfiguration() throws Exception {
    return new YarnConfiguration();
}
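
Callers that need to target a specific ResourceManager can override the returned defaults; a sketch (host and port are placeholders):

Configuration conf = getDefaultConfiguration();
// Target an explicit ResourceManager instead of whatever yarn-site.xml provides
conf.set(YarnConfiguration.RM_ADDRESS, "resourcemanager.example.com:8032");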