Example usage for org.apache.hadoop.conf.Configuration#getBoolean

Introduction

This page collects usage examples of org.apache.hadoop.conf.Configuration#getBoolean drawn from open-source projects.

Prototype

public boolean getBoolean(String name, boolean defaultValue) 

Document

Get the value of the name property as a boolean. If no such property is specified, or if the specified value is not a valid boolean, then defaultValue is returned.
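
Before the project examples, a minimal, self-contained sketch of the method's behavior (the property names here are invented for illustration): parsing is case-insensitive, and anything other than "true"/"false" falls back to the default.

import org.apache.hadoop.conf.Configuration;

public class GetBooleanDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration(false); // don't load default resources

        conf.set("demo.flag.enabled", "TRUE"); // case-insensitive: parsed as true
        conf.set("demo.flag.garbage", "yes");  // not a valid boolean literal

        System.out.println(conf.getBoolean("demo.flag.enabled", false)); // true
        System.out.println(conf.getBoolean("demo.flag.garbage", false)); // false (default)
        System.out.println(conf.getBoolean("demo.flag.missing", true));  // true  (unset)
    }
}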

Usage

From source file: com.cloudera.recordservice.mr.WorkerUtil.java

License: Apache License

/**
 * Creates a builder for RecordService worker client from the configuration and
 * the delegation token.
 * @param jobConf the hadoop configuration
 * @param delegationToken the delegation token that the worker client should use to
 *                        talk to the RS worker process.
 * @throws IOException
 */
public static Builder getBuilder(Configuration jobConf, DelegationToken delegationToken) {
    // Try to get the delegation token from the credentials. If it is there, use it.
    RecordServiceWorkerClient.Builder builder = new RecordServiceWorkerClient.Builder();
    int fetchSize = jobConf.getInt(ConfVars.FETCH_SIZE_CONF.name, DEFAULT_FETCH_SIZE);
    long memLimit = jobConf.getLong(ConfVars.MEM_LIMIT_CONF.name, -1);
    long limit = jobConf.getLong(ConfVars.RECORDS_LIMIT_CONF.name, -1);
    int maxAttempts = jobConf.getInt(ConfVars.WORKER_RETRY_ATTEMPTS_CONF.name, -1);
    int taskSleepMs = jobConf.getInt(ConfVars.WORKER_RETRY_SLEEP_MS_CONF.name, -1);
    int connectionTimeoutMs = jobConf.getInt(ConfVars.WORKER_CONNECTION_TIMEOUT_MS_CONF.name, -1);
    int rpcTimeoutMs = jobConf.getInt(ConfVars.WORKER_RPC_TIMEOUT_MS_CONF.name, -1);
    boolean enableLogging = jobConf.getBoolean(ConfVars.WORKER_ENABLE_SERVER_LOGGING_CONF.name, false);

    if (fetchSize != -1)
        builder.setFetchSize(fetchSize);
    if (memLimit != -1)
        builder.setMemLimit(memLimit);
    if (limit != -1)
        builder.setLimit(limit);
    if (maxAttempts != -1)
        builder.setMaxAttempts(maxAttempts);
    if (taskSleepMs != -1)
        builder.setSleepDurationMs(taskSleepMs);
    if (connectionTimeoutMs != -1)
        builder.setConnectionTimeoutMs(connectionTimeoutMs);
    if (rpcTimeoutMs != -1)
        builder.setRpcTimeoutMs(rpcTimeoutMs);
    if (enableLogging)
        builder.setLoggingLevel(LOG);
    if (delegationToken != null)
        builder.setDelegationToken(delegationToken);

    return builder;
}
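
Note the sentinel convention in this method: each numeric property defaults to -1, meaning "not configured", so the builder's own defaults are overridden only when a value was set explicitly. The getBoolean call needs no sentinel, since false is a safe default for server-side logging.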

From source file: com.cloudera.spark.bulkload.TotalOrderPartitioner.java

License: Apache License

/**
   * Read in the partition file and build indexing data structures.
   * If the keytype is {@link BinaryComparable} and
   * <tt>total.order.partitioner.natural.order</tt> is not false, a trie
   * of the first <tt>total.order.partitioner.max.trie.depth</tt>(2) + 1 bytes
   * will be built. Otherwise, keys will be located using a binary search of
   * the partition keyset using the {@link RawComparator}
   * defined for this job. The input file must be sorted with the same
   * comparator and contain {@link Job#getNumReduceTasks()} - 1 keys.
   */
  @SuppressWarnings("unchecked") // keytype from conf not static
  public void setConf(Configuration conf) {
      try {
          this.conf = conf;
          String parts = getPartitionFile(conf);
          final Path partFile = new Path(parts);
          final FileSystem fs = (DEFAULT_PATH.equals(parts)) ? FileSystem.getLocal(conf) // assume in DistributedCache
                  : partFile.getFileSystem(conf);

          Job job = new Job(conf);
          Class<K> keyClass = (Class<K>) job.getMapOutputKeyClass();
          K[] splitPoints = readPartitions(fs, partFile, keyClass, conf);
          if (splitPoints.length != job.getNumReduceTasks() - 1) {
              throw new IOException("Wrong number of partitions in keyset");
          }
          RawComparator<K> comparator = (RawComparator<K>) job.getSortComparator();
          for (int i = 0; i < splitPoints.length - 1; ++i) {
              if (comparator.compare(splitPoints[i], splitPoints[i + 1]) >= 0) {
                  throw new IOException("Split points are out of order");
              }
          }
          boolean natOrder = conf.getBoolean(NATURAL_ORDER, true);
          if (natOrder && BinaryComparable.class.isAssignableFrom(keyClass)) {
              partitions = buildTrie((BinaryComparable[]) splitPoints, 0, splitPoints.length, new byte[0],
                      // Now that blocks of identical splitless trie nodes are 
                      // represented reentrantly, and we develop a leaf for any trie
                      // node with only one split point, the only reason for a depth
                      // limit is to refute stack overflow or bloat in the pathological
                      // case where the split points are long and mostly look like bytes 
                      // iii...iixii...iii   .  Therefore, we make the default depth
                      // limit large but not huge.
                      conf.getInt(MAX_TRIE_DEPTH, 200));
          } else {
              partitions = new BinarySearchNode(splitPoints, comparator);
          }
      } catch (IOException e) {
          throw new IllegalArgumentException("Can't read partitions file", e);
      }
  }
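
Because getBoolean(NATURAL_ORDER, true) defaults to true, the trie fast path is taken for BinaryComparable keys unless the job explicitly sets the natural-order property to false; in that case, or for non-binary key types, lookup falls back to a binary search over the split points using the job's sort comparator.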

From source file: com.cloudera.sqoop.metastore.hsqldb.AutoHsqldbStorage.java

License: Apache License

@Override
/** {@inheritDoc} */
public boolean canAccept(Map<String, String> descriptor) {
    Configuration conf = this.getConf();
    return conf.getBoolean(AUTO_STORAGE_IS_ACTIVE_KEY, true);
}

From source file: com.datasalt.pangool.tuplemr.serialization.TupleSerialization.java

License: Apache License

/**
 * see {@link #CONF_SCHEMA_VALIDATION}
 */
public static boolean getSchemaValidation(Configuration conf) {
    return conf.getBoolean(CONF_SCHEMA_VALIDATION, false);
}
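
The flag is read here but set elsewhere; as a hedged sketch of the writing side (assuming CONF_SCHEMA_VALIDATION is a publicly accessible String constant, which this excerpt does not show):

Configuration conf = new Configuration();
// Enable validation; assumes the constant is visible to callers.
conf.setBoolean(TupleSerialization.CONF_SCHEMA_VALIDATION, true);
boolean validate = TupleSerialization.getSchemaValidation(conf); // -> true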

From source file: com.datascience.hadoop.CsvInputFormat.java

License: Apache License

/**
 * Creates a CSV format from a Hadoop configuration.
 */
private static CSVFormat createFormat(Configuration conf) {
    CSVFormat format = CSVFormat
            .newFormat(conf.get(CSV_READER_DELIMITER, DEFAULT_CSV_READER_DELIMITER).charAt(0))
            .withSkipHeaderRecord(conf.getBoolean(CSV_READER_SKIP_HEADER, DEFAULT_CSV_READER_SKIP_HEADER))
            .withRecordSeparator(conf.get(CSV_READER_RECORD_SEPARATOR, DEFAULT_CSV_READER_RECORD_SEPARATOR))
            .withIgnoreEmptyLines(
                    conf.getBoolean(CSV_READER_IGNORE_EMPTY_LINES, DEFAULT_CSV_READER_IGNORE_EMPTY_LINES))
            .withIgnoreSurroundingSpaces(conf.getBoolean(CSV_READER_IGNORE_SURROUNDING_SPACES,
                    DEFAULT_CSV_READER_IGNORE_SURROUNDING_SPACES))
            .withNullString(conf.get(CSV_READER_NULL_STRING, DEFAULT_CSV_READER_NULL_STRING));

    String[] header = conf.getStrings(CSV_READER_COLUMNS);
    if (header != null && header.length > 0)
        format = format.withHeader(header);

    String escape = conf.get(CSV_READER_ESCAPE_CHARACTER, DEFAULT_CSV_READER_ESCAPE_CHARACTER);
    if (escape != null)
        format = format.withEscape(escape.charAt(0));

    String quote = conf.get(CSV_READER_QUOTE_CHARACTER, DEFAULT_CSV_READER_QUOTE_CHARACTER);
    if (quote != null)
        format = format.withQuote(quote.charAt(0));

    String quoteMode = conf.get(CSV_READER_QUOTE_MODE, DEFAULT_CSV_READER_QUOTE_MODE);
    if (quoteMode != null)
        format = format.withQuoteMode(QuoteMode.valueOf(quoteMode));
    return format;
}

From source file: com.datascience.hadoop.CsvOutputFormat.java

License: Apache License

/**
 * Creates a CSV format from a Hadoop configuration.
 */
private static CSVFormat createFormat(Configuration conf) {
    CSVFormat format = CSVFormat
            .newFormat(conf.get(CSV_WRITER_DELIMITER, DEFAULT_CSV_WRITER_DELIMITER).charAt(0))
            .withSkipHeaderRecord(conf.getBoolean(CSV_WRITER_SKIP_HEADER, DEFAULT_CSV_WRITER_SKIP_HEADER))
            .withRecordSeparator(conf.get(CSV_WRITER_RECORD_SEPARATOR, DEFAULT_CSV_WRITER_RECORD_SEPARATOR))
            .withIgnoreEmptyLines(
                    conf.getBoolean(CSV_WRITER_IGNORE_EMPTY_LINES, DEFAULT_CSV_WRITER_IGNORE_EMPTY_LINES))
            .withIgnoreSurroundingSpaces(conf.getBoolean(CSV_WRITER_IGNORE_SURROUNDING_SPACES,
                    DEFAULT_CSV_WRITER_IGNORE_SURROUNDING_SPACES))
            .withNullString(conf.get(CSV_WRITER_NULL_STRING, DEFAULT_CSV_WRITER_NULL_STRING));

    String[] header = conf.getStrings(CSV_WRITER_COLUMNS);
    if (header != null && header.length > 0)
        format = format.withHeader(header);

    String escape = conf.get(CSV_WRITER_ESCAPE_CHARACTER, DEFAULT_CSV_WRITER_ESCAPE_CHARACTER);
    if (escape != null)
        format = format.withEscape(escape.charAt(0));

    String quote = conf.get(CSV_WRITER_QUOTE_CHARACTER, DEFAULT_CSV_WRITER_QUOTE_CHARACTER);
    if (quote != null)
        format = format.withQuote(quote.charAt(0));

    String quoteMode = conf.get(CSV_WRITER_QUOTE_MODE, DEFAULT_CSV_WRITER_QUOTE_MODE);
    if (quoteMode != null)
        format = format.withQuoteMode(QuoteMode.valueOf(quoteMode));
    return format;
}
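
CsvOutputFormat.createFormat mirrors CsvInputFormat.createFormat line for line; only the key and default constants change from CSV_READER_* to CSV_WRITER_*. A hedged sketch of steering one of these booleans from a driver (assuming the CSV_WRITER_* keys are public constants, which the excerpt does not show):

// 'job' is an org.apache.hadoop.mapreduce.Job being configured for submission.
Configuration conf = job.getConfiguration();
// Suppress the header record in the output; key constant assumed public.
conf.setBoolean(CsvOutputFormat.CSV_WRITER_SKIP_HEADER, true);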

From source file: com.datatorrent.benchmark.state.ManagedStateBenchmarkApp.java

License: Apache License

@Override
public void populateDAG(DAG dag, Configuration conf) {
    TestStatsListener sl = new TestStatsListener();
    sl.adjustRate = conf.getBoolean("dt.ManagedStateBenchmark.adjustRate", false);
    TestGenerator gen = dag.addOperator("Generator", new TestGenerator());
    dag.setAttribute(gen, OperatorContext.STATS_LISTENERS, Lists.newArrayList((StatsListener) sl));

    StoreOperator storeOperator = new StoreOperator();
    storeOperator.setStore(createStore(conf));
    StoreOperator store = dag.addOperator("Store", storeOperator);

    dag.setAttribute(store, OperatorContext.STATS_LISTENERS, Lists.newArrayList((StatsListener) sl));

    dag.addStream("Events", gen.data, store.input).setLocality(Locality.CONTAINER_LOCAL);
}
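
A minimal sketch of flipping the benchmark flag programmatically, e.g. from a test (the property name comes from the code above; everything else is illustrative):

Configuration conf = new Configuration(false);
// Enable rate adjustment in TestStatsListener before populateDAG reads the flag.
conf.setBoolean("dt.ManagedStateBenchmark.adjustRate", true);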

From source file: com.datatorrent.benchmark.window.AbstractWindowedOperatorBenchmarkApp.java

License: Apache License

@Override
public void populateDAG(DAG dag, Configuration conf) {
    TestStatsListener sl = new TestStatsListener();
    sl.adjustRate = conf.getBoolean("dt.ManagedStateBenchmark.adjustRate", false);

    G generator = createGenerator();
    dag.addOperator("Generator", generator);
    //generator.setRange(timeRange);
    dag.setAttribute(generator, OperatorContext.STATS_LISTENERS, Lists.newArrayList((StatsListener) sl));

    O windowedOperator = createWindowedOperator(conf);
    dag.addOperator("windowedOperator", windowedOperator);
    dag.setAttribute(windowedOperator, OperatorContext.STATS_LISTENERS, Lists.newArrayList((StatsListener) sl));
    //dag.addStream("Data", generator.data, windowedOperator.input).setLocality(Locality.CONTAINER_LOCAL);
    connectGeneratorToWindowedOperator(dag, generator, windowedOperator);

    //    WatermarkGenerator watermarkGenerator = new WatermarkGenerator();
    //    dag.addOperator("WatermarkGenerator", watermarkGenerator);
    //    dag.addStream("Control", watermarkGenerator.control, windowedOperator.controlInput)
    //      .setLocality(Locality.CONTAINER_LOCAL);

    DevNull output = dag.addOperator("output", new DevNull());
    dag.addStream("output", windowedOperator.output, output.data).setLocality(Locality.CONTAINER_LOCAL);
}

From source file: com.datatorrent.contrib.hdht.HDHTBenchmarkTest.java

License: Open Source License

@Override
public void populateDAG(DAG dag, Configuration conf) {
    TestStatsListener sl = new TestStatsListener();
    sl.adjustRate = conf.getBoolean("dt.hdsbench.adjustRate", false);
    TestGenerator gen = dag.addOperator("Generator", new TestGenerator());
    dag.setAttribute(gen, OperatorContext.STATS_LISTENERS, Lists.newArrayList((StatsListener) sl));
    TestStoreOperator store = dag.addOperator("Store", new TestStoreOperator());
    dag.setAttribute(store, OperatorContext.STATS_LISTENERS, Lists.newArrayList((StatsListener) sl));
    HDHTFileAccessFSImpl hfa = new HFileImpl();
    hfa.setBasePath(this.getClass().getSimpleName());
    store.setFileStore(hfa);
    dag.setInputPortAttribute(store.input, PortContext.PARTITION_PARALLEL, true);
    dag.getOperatorMeta("Store").getAttributes().put(Context.OperatorContext.COUNTERS_AGGREGATOR,
            new HDHTWriter.BucketIOStatAggregator());
    dag.addStream("Events", gen.data, store.input).setLocality(Locality.THREAD_LOCAL);
}

From source file: com.datatorrent.contrib.machinedata.Application.java

License: Open Source License

/**
 * This method returns new SmtpOutputOperator Operator
 * @param name the name of the operator in DAG
 * @param dag the DAG instance
 * @param conf the configuration object
 * @return SmtpOutputOperator
 */
private SmtpOutputOperator getSmtpOutputOperator(String name, DAG dag, Configuration conf) {
    SmtpOutputOperator mailOper = new SmtpOutputOperator();

    String from = conf.get("machinedata.smtp.from", "admin@datatorrent.com");
    String recipient = conf.get("machinedata.smtp.recipient", "atul@datatorrent.com");
    String subject = conf.get("machinedata.smtp.subject", "Alert!!!");
    String content = conf.get("machinedata.smtp.content", "{}");
    String host = conf.get("machinedata.smtp.host", "localhost");
    int port = conf.getInt("machinedata.smtp.port", 25);
    boolean useSsl = conf.getBoolean("machinedata.smtp.ssl", false);

    mailOper.setFrom(from);
    mailOper.addRecipient(SmtpOutputOperator.RecipientType.TO, recipient);
    mailOper.setSubject(subject);
    mailOper.setContent(content);
    mailOper.setSmtpHost(host);
    mailOper.setSmtpPort(port);
    // mailOper.setSmtpUserName(userName);
    // mailOper.setSmtpPassword(password);
    mailOper.setUseSsl(useSsl);

    dag.addOperator(name, mailOper);

    return mailOper;
}
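
Every property read here has a hard-coded fallback, so the operator works against an empty Configuration: mail goes from admin@datatorrent.com to atul@datatorrent.com via localhost:25 with SSL disabled, and getBoolean("machinedata.smtp.ssl", false) returns true only when the property is set explicitly.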