Example usage for org.apache.hadoop.conf.Configuration.setEnum

Introduction

This page collects example usages of org.apache.hadoop.conf.Configuration.setEnum from open source projects.

Prototype

public <T extends Enum<T>> void setEnum(String name, T value) 

Document

Sets the value of the name property to the given enum value. The value is stored in its string form (value.toString()) and can be read back with getEnum(String name, T defaultValue).
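
Below is a minimal, self-contained sketch of the round trip; the property name "example.compression" and the Compression enum are invented here purely for illustration.

import org.apache.hadoop.conf.Configuration;

public class SetEnumExample {

    // Hypothetical enum, defined only for this example.
    enum Compression { NONE, GZIP, SNAPPY }

    public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        // setEnum stores the constant under the property name as "SNAPPY".
        conf.setEnum("example.compression", Compression.SNAPPY);
        // getEnum reads it back, returning the default if the property is unset.
        Compression c = conf.getEnum("example.compression", Compression.NONE);
        System.out.println(c); // prints SNAPPY
    }
}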

Usage

From source file:com.datatorrent.stram.client.StramAppLauncher.java

License:Apache License

/**
 * Submit application to the cluster and return the app id.
 * Sets the context class loader for application dependencies.
 *
 * @param appConfig the factory used to create the application
 * @return ApplicationId
 * @throws Exception
 */
public ApplicationId launchApp(AppFactory appConfig) throws Exception {
    loadDependencies();
    Configuration conf = propertiesBuilder.conf;
    conf.setEnum(StreamingApplication.ENVIRONMENT, StreamingApplication.Environment.CLUSTER);
    LogicalPlan dag = appConfig.createApp(propertiesBuilder);
    String hdfsTokenMaxLifeTime = conf.get(StramClientUtils.HDFS_TOKEN_MAX_LIFE_TIME);
    if (hdfsTokenMaxLifeTime != null && hdfsTokenMaxLifeTime.trim().length() > 0) {
        dag.setAttribute(LogicalPlan.HDFS_TOKEN_LIFE_TIME, Long.parseLong(hdfsTokenMaxLifeTime));
    }
    String rmTokenMaxLifeTime = conf.get(StramClientUtils.RM_TOKEN_MAX_LIFE_TIME);
    if (rmTokenMaxLifeTime != null && rmTokenMaxLifeTime.trim().length() > 0) {
        dag.setAttribute(LogicalPlan.RM_TOKEN_LIFE_TIME, Long.parseLong(rmTokenMaxLifeTime));
    }
    if (conf.get(StramClientUtils.KEY_TAB_FILE) != null) {
        dag.setAttribute(LogicalPlan.KEY_TAB_FILE, conf.get(StramClientUtils.KEY_TAB_FILE));
    } else if (conf.get(StramUserLogin.DT_AUTH_KEYTAB) != null) {
        Path localKeyTabPath = new Path(conf.get(StramUserLogin.DT_AUTH_KEYTAB));
        FileSystem fs = StramClientUtils.newFileSystemInstance(conf);
        try {
            Path destPath = new Path(StramClientUtils.getDTDFSRootDir(fs, conf), localKeyTabPath.getName());
            if (!fs.exists(destPath)) {
                fs.copyFromLocalFile(false, false, localKeyTabPath, destPath);
            }
            dag.setAttribute(LogicalPlan.KEY_TAB_FILE, destPath.toString());
        } finally {
            fs.close();
        }
    }
    String tokenRefreshFactor = conf.get(StramClientUtils.TOKEN_ANTICIPATORY_REFRESH_FACTOR);
    if (tokenRefreshFactor != null && tokenRefreshFactor.trim().length() > 0) {
        dag.setAttribute(LogicalPlan.TOKEN_REFRESH_ANTICIPATORY_FACTOR, Double.parseDouble(tokenRefreshFactor));
    }
    StramClient client = new StramClient(conf, dag);
    try {
        client.start();
        LinkedHashSet<String> libjars = Sets.newLinkedHashSet();
        String libjarsCsv = conf.get(LIBJARS_CONF_KEY_NAME);
        if (libjarsCsv != null) {
            String[] jars = StringUtils.splitByWholeSeparator(libjarsCsv, StramClient.LIB_JARS_SEP);
            libjars.addAll(Arrays.asList(jars));
        }
        if (deployJars != null) {
            for (File deployJar : deployJars) {
                libjars.add(deployJar.getAbsolutePath());
            }
        }

        client.setResources(libjars);
        client.setFiles(conf.get(FILES_CONF_KEY_NAME));
        client.setArchives(conf.get(ARCHIVES_CONF_KEY_NAME));
        client.setOriginalAppId(conf.get(ORIGINAL_APP_ID));
        client.setQueueName(conf.get(QUEUE_NAME));
        client.startApplication();
        return client.getApplicationReport().getApplicationId();
    } finally {
        client.stop();
    }
}

From source file:com.yahoo.glimmer.indexing.RDFDocumentFactory.java

License:Open Source License

protected static void setupConf(Configuration conf, IndexType type, boolean withContexts, String resourcesHash,
        String resourceIdPrefix, String... fields) {
    conf.setEnum(CONF_INDEX_TYPE_KEY, type);
    conf.setBoolean(CONF_WITH_CONTEXTS_KEY, withContexts);
    if (resourcesHash != null) {
        conf.set(CONF_RESOURCES_HASH_KEY, resourcesHash);
    }
    conf.set(CONF_RESOURCE_ID_PREFIX_KEY, resourceIdPrefix);
    conf.setStrings(CONF_FIELDNAMES_KEY, fields);
}

From source file:org.apache.apex.examples.kafka.kafka2hdfs.ApplicationTest.java

License:Apache License

private Configuration getConfig() {
    Configuration conf = new Configuration(false);
    String pre = "dt.operator.kafkaIn.prop.";
    conf.setEnum(pre + "initialOffset", AbstractKafkaInputOperator.InitialOffset.EARLIEST);
    conf.setInt(pre + "initialPartitionCount", 1);
    conf.set(pre + "topics", TOPIC);
    conf.set(pre + "clusters", BROKER);

    pre = "dt.operator.fileOut.prop.";
    conf.set(pre + "filePath", FILE_DIR);
    conf.set(pre + "baseName", FILE_NAME);
    conf.setInt(pre + "maxLength", 40);
    conf.setInt(pre + "rotationWindows", 3);

    return conf;
}

From source file:org.apache.crunch.types.avro.AvroMode.java

License:Apache License

public void configureShuffle(Configuration conf) {
    // AvroMode is itself an enum, so the current instance can be stored directly.
    conf.setEnum(AVRO_SHUFFLE_MODE_PROPERTY, this);
    configureFactory(conf);
}

From source file:org.apache.nutch.mapreduce.FetchJob.java

License:Apache License

@Override
protected void setup(Map<String, Object> args) throws Exception {
    super.setup(args);

    Params params = new Params(args);
    Configuration conf = getConf();

    checkConfiguration(conf);

    String crawlId = params.get(ARG_CRAWL, conf.get(Nutch.PARAM_CRAWL_ID));
    FetchMode fetchMode = params.getEnum(ARG_FETCH_MODE, conf.getEnum(PARAM_FETCH_MODE, FetchMode.NATIVE));
    batchId = params.get(ARG_BATCH, ALL_BATCH_ID_STR);
    int threads = params.getInt(ARG_THREADS, 5);
    boolean resume = params.getBoolean(ARG_RESUME, false);
    int limit = params.getInt(ARG_LIMIT, -1);
    numTasks = params.getInt(ARG_NUMTASKS, conf.getInt(PARAM_MAPREDUCE_JOB_REDUCES, 2));
    boolean index = params.getBoolean(ARG_INDEX, false);

    /** Solr */
    String solrUrl = params.get(ARG_SOLR_URL, conf.get(PARAM_SOLR_SERVER_URL));
    String zkHostString = params.get(ARG_ZK, conf.get(PARAM_SOLR_ZK));
    String solrCollection = params.get(ARG_COLLECTION, conf.get(PARAM_SOLR_COLLECTION));

    /** Set re-computed config variables */
    NutchConfiguration.setIfNotNull(conf, PARAM_CRAWL_ID, crawlId);
    conf.setEnum(PARAM_FETCH_MODE, fetchMode);
    NutchConfiguration.setIfNotNull(conf, PARAM_BATCH_ID, batchId);

    conf.setInt(PARAM_THREADS, threads);
    conf.setBoolean(PARAM_RESUME, resume);
    conf.setInt(PARAM_MAPPER_LIMIT, limit);
    conf.setInt(PARAM_MAPREDUCE_JOB_REDUCES, numTasks);

    conf.setBoolean(PARAM_INDEX_JUST_IN_TIME, index);
    NutchConfiguration.setIfNotNull(conf, PARAM_SOLR_SERVER_URL, solrUrl);
    NutchConfiguration.setIfNotNull(conf, PARAM_SOLR_ZK, zkHostString);
    NutchConfiguration.setIfNotNull(conf, PARAM_SOLR_COLLECTION, solrCollection);

    LOG.info(Params.format("className", this.getClass().getSimpleName(), "crawlId", crawlId, "batchId", batchId,
            "fetchMode", fetchMode, "numTasks", numTasks, "threads", threads, "resume", resume, "limit", limit,
            "index", index, "solrUrl", solrUrl, "zkHostString", zkHostString, "solrCollection",
            solrCollection));
}

From source file:org.apache.nutch.mapreduce.GenerateJob.java

License:Apache License

@Override
protected void setup(Map<String, Object> args) throws Exception {
    super.setup(args);

    Params params = new Params(args);
    Configuration conf = getConf();

    String crawlId = params.get(ARG_CRAWL, conf.get(PARAM_CRAWL_ID));
    String batchId = params.get(ARG_BATCH, NutchUtil.generateBatchId());
    boolean reGenerate = params.getBoolean(ARG_REGENERATE, false);
    long topN = params.getLong(ARG_TOPN, Long.MAX_VALUE);
    boolean filter = params.getBoolean(ARG_FILTER, true);
    boolean norm = params.getBoolean(ARG_NORMALIZE, true);
    long pseudoCurrTime = params.getLong(ARG_CURTIME, startTime);

    String nutchTmpDir = conf.get(PARAM_NUTCH_TMP_DIR, PATH_NUTCH_TMP_DIR);

    conf.set(PARAM_CRAWL_ID, crawlId);
    conf.set(PARAM_BATCH_ID, batchId);
    conf.setLong(GENERATE_TIME_KEY, startTime); // seemingly unused (or is pseudoCurrTime used instead?)
    conf.setLong(PARAM_GENERATOR_CUR_TIME, pseudoCurrTime);
    conf.setBoolean(PARAM_GENERATE_REGENERATE, reGenerate);
    conf.setLong(PARAM_GENERATOR_TOP_N, topN);
    conf.setBoolean(PARAM_GENERATE_FILTER, filter);
    conf.setBoolean(PARAM_GENERATE_NORMALISE, norm);

    URLUtil.HostGroupMode hostGroupMode = conf.getEnum(PARAM_GENERATOR_COUNT_MODE,
            URLUtil.HostGroupMode.BY_HOST);
    conf.setEnum(PARTITION_MODE_KEY, hostGroupMode);

    LOG.info(Params.format("className", this.getClass().getSimpleName(), "crawlId", crawlId, "batchId", batchId,
            "filter", filter, "norm", norm, "pseudoCurrTime", DateTimeUtil.format(pseudoCurrTime), "topN", topN,
            "reGenerate", reGenerate, PARAM_GENERATOR_COUNT_MODE, hostGroupMode, PARTITION_MODE_KEY,
            hostGroupMode, "nutchTmpDir", nutchTmpDir));

    Files.write(Paths.get(PATH_LAST_BATCH_ID), (batchId + "\n").getBytes(), StandardOpenOption.CREATE,
            StandardOpenOption.WRITE);
}

From source file:org.apache.rya.indexing.geoExamples.GeowaveDirectExample.java

License:Apache License

public static void main(final String[] args) throws Exception {
    final Configuration conf = getConf();
    conf.set(PrecomputedJoinIndexerConfig.PCJ_STORAGE_TYPE, PrecomputedJoinStorageType.ACCUMULO.name());
    conf.setBoolean(ConfigUtils.DISPLAY_QUERY_PLAN, PRINT_QUERIES);
    conf.setBoolean(OptionalConfigUtils.USE_GEO, true);
    conf.setEnum(OptionalConfigUtils.GEO_INDEXER_TYPE, GeoIndexerType.GEO_WAVE);

    log.info("Creating the tables as root.");

    SailRepository repository = null;
    SailRepositoryConnection conn = null;

    try {
        log.info("Connecting to Geo Sail Repository.");
        final Sail extSail = GeoRyaSailFactory.getInstance(conf);
        repository = new SailRepository(extSail);
        conn = repository.getConnection();

        final long start = System.currentTimeMillis();
        log.info("Running SPARQL Example: Add Point and Geo Search with PCJ");
        testAddPointAndWithinSearchWithPCJ(conn);
        log.info("Running SPARQL Example: Temporal, Freetext, and Geo Search");
        testTemporalFreeGeoSearch(conn);
        log.info("Running SPARQL Example: Geo, Freetext, and PCJ Search");
        testGeoFreetextWithPCJSearch(conn);
        log.info("Running SPARQL Example: Delete Geo Data");
        testDeleteGeoData(conn);

        log.info("TIME: " + (System.currentTimeMillis() - start) / 1000.);
    } finally {
        log.info("Shutting down");
        closeQuietly(conn);
        closeQuietly(repository);
    }
}

From source file:org.apache.tez.dag.history.TestHistoryEventHandler.java

License:Apache License

private void testLogLevel(HistoryLogLevel defaultLogLevel, HistoryLogLevel dagLogLevel, int expectedCount) {
    HistoryEventHandler handler = createHandler(defaultLogLevel);
    InMemoryHistoryLoggingService.events.clear();
    TezDAGID dagId1 = TezDAGID.getInstance(appId, 1);
    for (DAGHistoryEvent event : makeHistoryEvents(dagId1, handler.getConfig())) {
        handler.handle(event);
    }
    TezDAGID dagId2 = TezDAGID.getInstance(appId, 2);
    Configuration conf = new Configuration(handler.getConfig());
    conf.setEnum(TezConfiguration.TEZ_HISTORY_LOGGING_LOGLEVEL, dagLogLevel);
    for (DAGHistoryEvent event : makeHistoryEvents(dagId2, conf)) {
        handler.handle(event);
    }

    assertEquals(expectedCount, InMemoryHistoryLoggingService.events.size());
    handler.stop();
}

From source file:org.apache.tez.dag.history.TestHistoryEventHandler.java

License:Apache License

private HistoryEventHandler createHandler(HistoryLogLevel logLevel) {
    Configuration conf = new Configuration(baseConfig);
    conf.setBoolean(TezConfiguration.DAG_RECOVERY_ENABLED, false);
    conf.set(TezConfiguration.TEZ_HISTORY_LOGGING_SERVICE_CLASS, InMemoryHistoryLoggingService.class.getName());
    if (logLevel != null) {
        conf.setEnum(TezConfiguration.TEZ_HISTORY_LOGGING_LOGLEVEL, logLevel);
    }

    DAG dag = mock(DAG.class);
    when(dag.getConf()).thenReturn(conf);

    AppContext appContext = mock(AppContext.class);
    when(appContext.getApplicationID()).thenReturn(appId);
    when(appContext.getHadoopShim()).thenReturn(new HadoopShim() {
    });
    when(appContext.getAMConf()).thenReturn(conf);
    when(appContext.getCurrentDAG()).thenReturn(dag);

    HistoryEventHandler handler = new HistoryEventHandler(appContext);
    handler.init(conf);

    return handler;
}