Example usage for org.apache.hadoop.conf Configuration addDefaultResource

Introduction

On this page you can find example usages of org.apache.hadoop.conf.Configuration.addDefaultResource.

Prototype

public static synchronized void addDefaultResource(String name) 

Document

Add a default resource. Resources are loaded in the order in which they are added.
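
For orientation, here is a minimal usage sketch. Note that addDefaultResource is static: the resource is registered globally and picked up by every Configuration instance (already-created instances are reloaded). The resource and property names below are hypothetical.

// A minimal sketch; "my-site.xml" stands for a hypothetical resource on the classpath.
Configuration.addDefaultResource("my-site.xml");

// Every Configuration loads the default resources in the order they were added;
// later resources override earlier ones unless a property is marked final.
Configuration conf = new Configuration();
String value = conf.get("my.property"); // hypothetical property defined in my-site.xml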

Usage

From source file:azkaban.jobtype.HadoopConfigurationInjector.java

License:Apache License

public static void injectResources(Props props) {
    // Add mapred, yarn and hdfs site configs (in addition to core-site, which
    // is automatically added) as default resources before we add the injected
    // configuration. This will cause the injected properties to override the
    // default site properties (instead of vice-versa). This is safe to do,
    // even when these site files don't exist for your Hadoop installation.
    if (props.getBoolean("azkaban.inject.hadoop-site.configs", true)) {
        Configuration.addDefaultResource("mapred-default.xml");
        Configuration.addDefaultResource("mapred-site.xml");
        Configuration.addDefaultResource("yarn-default.xml");
        Configuration.addDefaultResource("yarn-site.xml");
        Configuration.addDefaultResource("hdfs-default.xml");
        Configuration.addDefaultResource("hdfs-site.xml");
    }
    Configuration.addDefaultResource(INJECT_FILE);
}
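
The ordering in this snippet matters: default resources are loaded in the order they were added, so a property defined in a later resource overrides the value from an earlier one (final properties excepted). A small illustration with hypothetical resource names:

// Suppose base.xml sets my.prop=A and override.xml sets my.prop=B (both hypothetical).
Configuration.addDefaultResource("base.xml");
Configuration.addDefaultResource("override.xml");
new Configuration().get("my.prop"); // returns "B": the later-added resource wins

This is also why INJECT_FILE is added last: its injected properties take precedence over the site files registered before it.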

From source file:azkaban.jobtype.TestHadoopSecureSparkWrapper.java

License:Apache License

@Test
public void testAutoLabeling() {
    // When both spark.node.labeling.enforced and spark.auto.node.labeling are set to true,
    // the job type plugin should ignore both the queue and label expression configurations
    // passed by the user. In addition, when the user-requested memory-to-vcore ratio exceeds
    // the configured minimum ratio, the plugin should also add configurations to use the configured
    // desired label.
    Map<String, String> envs = new TreeMap<String, String>(String.CASE_INSENSITIVE_ORDER);
    envs.put(HadoopSparkJob.SPARK_NODE_LABELING_ENV_VAR, Boolean.TRUE.toString());
    envs.put(HadoopSparkJob.SPARK_AUTO_NODE_LABELING_ENV_VAR, Boolean.TRUE.toString());
    envs.put(HadoopSparkJob.SPARK_DESIRED_NODE_LABEL_ENV_VAR, "test2");
    envs.put(HadoopSparkJob.SPARK_MIN_MEM_VCORE_RATIO_ENV_VAR, "3");
    envs.put(HadoopSparkJob.SPARK_MIN_MEM_SIZE_ENV_VAR, "8");
    setEnv(envs);
    Configuration.addDefaultResource("yarn-site.xml");
    String[] argArray = new String[] { "--conf", "spark.yarn.queue=test", "--conf",
            "spark.yarn.executor.nodeLabelExpression=test", "--executor-cores", "2", "--executor-memory",
            "7G" };
    argArray = HadoopSecureSparkWrapper.handleNodeLabeling(argArray);
    argArray = HadoopSecureSparkWrapper.removeNullsFromArgArray(argArray);
    Assert.assertTrue(argArray.length == 6);
    Assert.assertTrue(argArray[1].contains("test2"));
}
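
Note the expected result shape: the two arguments carrying the user's queue setting are nulled out and removed, and the user's label expression is rewritten to the configured desired label test2, which leaves six arguments, as the assertions verify.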

From source file:azkaban.jobtype.TestHadoopSecureSparkWrapper.java

License:Apache License

@Test
public void testDisableAutoLabeling() {
    // When spark.auto.node.labeling is set to false, the plugin should not modify
    // the user-provided label expressions.
    Map<String, String> envs = new TreeMap<String, String>(String.CASE_INSENSITIVE_ORDER);
    envs.put(HadoopSparkJob.SPARK_NODE_LABELING_ENV_VAR, Boolean.TRUE.toString());
    envs.put(HadoopSparkJob.SPARK_DESIRED_NODE_LABEL_ENV_VAR, "test2");
    envs.put(HadoopSparkJob.SPARK_MIN_MEM_VCORE_RATIO_ENV_VAR, "3");
    setEnv(envs);
    Configuration.addDefaultResource("yarn-site.xml");
    String[] argArray = new String[] { "--conf", "spark.yarn.queue=test", "--conf",
            "spark.yarn.executor.nodeLabelExpression=test", "--executor-cores", "2", "--executor-memory",
            "7G" };
    argArray = HadoopSecureSparkWrapper.handleNodeLabeling(argArray);
    argArray = HadoopSecureSparkWrapper.removeNullsFromArgArray(argArray);
    Assert.assertTrue(argArray.length == 6);
    Assert.assertTrue(argArray[1].contains("test"));
}

From source file:azkaban.jobtype.TestHadoopSecureSparkWrapper.java

License:Apache License

@Test
public void testLoadConfigFromPropertyFile() {
    // Tests that when the user does not provide the resource configuration, the one in
    // the default config file is loaded and checked for whether the ratio is exceeded.
    Map<String, String> envs = new TreeMap<String, String>(String.CASE_INSENSITIVE_ORDER);
    envs.put(HadoopSparkJob.SPARK_NODE_LABELING_ENV_VAR, Boolean.TRUE.toString());
    envs.put(HadoopSparkJob.SPARK_AUTO_NODE_LABELING_ENV_VAR, Boolean.TRUE.toString());
    envs.put(HadoopSparkJob.SPARK_DESIRED_NODE_LABEL_ENV_VAR, "test2");
    envs.put(HadoopSparkJob.SPARK_MIN_MEM_VCORE_RATIO_ENV_VAR, "3");
    envs.put(HadoopSparkJob.SPARK_MIN_MEM_SIZE_ENV_VAR, "6");
    setEnv(envs);
    Configuration.addDefaultResource("yarn-site.xml");
    String[] argArray = new String[] { "--conf", "spark.yarn.queue=test", "--conf",
            "spark.yarn.executor.nodeLabelExpression=test" };
    argArray = HadoopSecureSparkWrapper.handleNodeLabeling(argArray);
    argArray = HadoopSecureSparkWrapper.removeNullsFromArgArray(argArray);
    Assert.assertTrue(argArray.length == 2);
    Assert.assertTrue(argArray[1].contains("test2"));
}

From source file:azkaban.jobtype.TestHadoopSecureSparkWrapper.java

License:Apache License

@Test
public void testAutoLabelingWithMemSizeExceedingLimit() {
    // When the user-requested executor container memory size exceeds spark.min.memory-gb.size,
    // even if the ratio is still below the threshold, the job should still be submitted
    // with the desired node label expression.
    Map<String, String> envs = new TreeMap<String, String>(String.CASE_INSENSITIVE_ORDER);
    envs.put(HadoopSparkJob.SPARK_NODE_LABELING_ENV_VAR, Boolean.TRUE.toString());
    envs.put(HadoopSparkJob.SPARK_AUTO_NODE_LABELING_ENV_VAR, Boolean.TRUE.toString());
    envs.put(HadoopSparkJob.SPARK_DESIRED_NODE_LABEL_ENV_VAR, "test2");
    envs.put(HadoopSparkJob.SPARK_MIN_MEM_VCORE_RATIO_ENV_VAR, "4");
    envs.put(HadoopSparkJob.SPARK_MIN_MEM_SIZE_ENV_VAR, "5");
    setEnv(envs);
    Configuration.addDefaultResource("yarn-site.xml");
    String[] argArray = new String[] { "--conf", "spark.yarn.queue=test", "--conf",
            "spark.yarn.executor.nodeLabelExpression=test", "--executor-cores", "3", "--executor-memory",
            "7G" };
    argArray = HadoopSecureSparkWrapper.handleNodeLabeling(argArray);
    argArray = HadoopSecureSparkWrapper.removeNullsFromArgArray(argArray);
    Assert.assertTrue(argArray.length == 6);
    Assert.assertTrue(argArray[1].contains("test2"));
}

From source file:com.bah.culvert.CulvertHiveIT.java

License:Apache License

@Before
public void prepareEnvironment() throws Throwable {
    Configuration.addDefaultResource("hive-site.xml");
    tempTestDir = mkDirP("target/hive-test/");
    tempLog = mkDirP("target/hive-log/");
    tempHadoopLogs = mkDirP("target/hive-hadoop-log/");
    System.setProperty("hadoop.log.dir", tempHadoopLogs);
    System.setProperty("test.tmp.dir", tempTestDir);
    System.setProperty("test.output.overwrite", "true");
    // System.setProperty("javax.jdo.PersistenceManagerFactoryClass", );
    util = new QTestUtil("src/test/resources/clientresults/", tempLog, true, "0.20");
}

From source file:com.flaptor.hounder.crawler.Nutch9Fetcher.java

License:Apache License

/**
 * Initialize the fetcher.
 */
public Nutch9Fetcher() {
    Config config = Config.getConfig("nutchfetcher.properties");
    segmentsDir = config.getString("fetchlist.dir");
    keepUrl = config.getBoolean("keep.original.url.on.redirect");
    fetcher = new Fetcher();
    Configuration conf = new Configuration();
    // conf.addDefaultResource("crawl-tool.xml");
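    // Note: addDefaultResource is static, so although it is invoked through the conf
    // instance here, the resources become defaults for every Configuration, not just this one.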
    conf.addDefaultResource("nutch-default.xml");
    conf.addDefaultResource("nutch-site.xml");
    JobConf job = new NutchJob(conf);
    threads = job.getInt("fetcher.threads.fetch", 10);
    fetcher.setConf(conf);
}

From source file:com.hdfs.concat.crush.Crush.java

License:Apache License

public static void main(String[] args) throws Exception {

    Configuration.addDefaultResource("hdfs-default.xml");
    Configuration.addDefaultResource("hdfs-site.xml");

    Crush crusher = new Crush();

    int exitCode = ToolRunner.run(crusher, args);

    System.exit(exitCode);
}

From source file:com.inmobi.messaging.consumer.databus.AbstractMessagingDatabusConsumer.java

License:Apache License

protected void initializeConfig(ClientConfig config) throws IOException {
    String hadoopConfFileName = config.getString(hadoopConfigFileKey);
    if (hadoopConfFileName != null) {
        Configuration.addDefaultResource(hadoopConfFileName);
    }
    conf = new Configuration();
    super.init(config);
    // verify authentication
    if (UserGroupInformation.isSecurityEnabled()) {
        String principal = config.getString(consumerPrincipal);
        String keytab = config.getString(consumerKeytab);
        if (principal != null && keytab != null) {
            Configuration conf = new Configuration();
            conf.set(consumerPrincipal, principal);
            conf.set(consumerKeytab, keytab);
            SecurityUtil.login(conf, consumerKeytab, consumerPrincipal);
            UserGroupInformation ugi = UserGroupInformation.getLoginUser();
            LOG.info("User logged in :" + ugi);
        } else {
            LOG.info(
                    "There is no principal or key tab file passed. Using the" + " commandline authentication.");
        }
    }
    // Read consumer id
    String consumerIdStr = config.getString(consumerIdInGroupConfig, DEFAULT_CONSUMER_ID);
    String[] id = consumerIdStr.split("/");
    try {
        consumerNumber = Integer.parseInt(id[0]);
        totalConsumers = Integer.parseInt(id[1]);
        partitionMinList = new HashSet<Integer>();
        if (isValidConfiguration()) {
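            // Distribute the 60 minute slots round-robin: this consumer takes every
            // minute i where i % totalConsumers == consumerNumber - 1.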
            for (int i = 0; i < 60; i++) {
                if ((i % totalConsumers) == (consumerNumber - 1)) {
                    partitionMinList.add(i);
                }
            }
        } else {
            throw new IllegalArgumentException("Invalid consumer group membership");
        }
    } catch (NumberFormatException nfe) {
        throw new IllegalArgumentException("Invalid consumer group membership", nfe);
    }
    // Create checkpoint provider and initialize checkpoint
    String chkpointProviderClassName = config.getString(chkProviderConfig, DEFAULT_CHK_PROVIDER);
    String databusCheckpointDir = config.getString(checkpointDirConfig, DEFAULT_CHECKPOINT_DIR);
    this.checkpointProvider = createCheckpointProvider(chkpointProviderClassName, databusCheckpointDir);

    createCheckpoint();
    currentCheckpoint.read(checkpointProvider, getChkpointKey());

    //create buffer
    bufferSize = config.getInteger(queueSizeConfig, DEFAULT_QUEUE_SIZE);
    buffer = new LinkedBlockingQueue<QueueEntry>(bufferSize);

    // initialize other common configuration
    waitTimeForFileCreate = config.getLong(waitTimeForFileCreateConfig, DEFAULT_WAIT_TIME_FOR_FILE_CREATE);

    // get the retention period of the topic
    retentionInHours = config.getString(retentionConfig);

    relativeStartTimeStr = config.getString(relativeStartTimeConfig);

    if (relativeStartTimeStr == null && retentionInHours != null) {
        LOG.warn(retentionConfig + " is deprecated." + " Use " + relativeStartTimeConfig + " instead");
        int minutes = (Integer.parseInt(retentionInHours)) * 60;
        relativeStartTimeStr = String.valueOf(minutes);
    }

    String stopTimeStr = config.getString(stopDateConfig);
    stopTime = getDateFromString(stopTimeStr);

    startOfStream = config.getBoolean(startOfStreamConfig, DEFAULT_START_OF_STREAM);
    closedReadercount = 0;
}

From source file:com.yahoo.omid.OmidConfiguration.java

License:Open Source License

public static Configuration create() {
    Configuration conf = new Configuration();
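    // addDefaultResource is static: omid-site.xml becomes a default resource for all
    // Configuration instances, including conf (already-created instances are reloaded).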
    conf.addDefaultResource("omid-site.xml");
    return conf;
}