Example usage for org.apache.hadoop.conf Configuration setBooleanIfUnset

List of usage examples for org.apache.hadoop.conf Configuration setBooleanIfUnset

Introduction

On this page you can find example usages of org.apache.hadoop.conf.Configuration#setBooleanIfUnset.

Prototype

public void setBooleanIfUnset(String name, boolean value) 

Document

Set the given property, if it is currently unset.
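
A minimal sketch of the method's semantics; the property key example.feature.enabled is invented for illustration. The first call on an unset key writes the value, and any later call on the same key is a no-op.

import org.apache.hadoop.conf.Configuration;

public class SetBooleanIfUnsetDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Key is unset, so this call writes the value.
        conf.setBooleanIfUnset("example.feature.enabled", true);
        System.out.println(conf.getBoolean("example.feature.enabled", false)); // true

        // Key is now set; a call with a different value changes nothing.
        conf.setBooleanIfUnset("example.feature.enabled", false);
        System.out.println(conf.getBoolean("example.feature.enabled", false)); // still true
    }
}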

Usage

From source file: com.facebook.hiveio.conf.BooleanConfOption.java

License: Apache License

/**
 * Set value in configuration if it hasn't been set already
 * @param conf Configuration
 * @param value to set
 */
public void setIfUnset(Configuration conf, boolean value) {
    conf.setBooleanIfUnset(getKey(), value);
}
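
A sketch of how this wrapper might be invoked. Only setIfUnset appears in the excerpt, so the (key, default) constructor and the hiveio.use.compression key below are assumptions made for illustration.

// Hypothetical key and constructor; the excerpt above shows neither.
BooleanConfOption useCompression =
        new BooleanConfOption("hiveio.use.compression", false);

Configuration conf = new Configuration();
useCompression.setIfUnset(conf, true);   // writes hiveio.use.compression=true
useCompression.setIfUnset(conf, false);  // no-op: the key was just set above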

From source file: com.facebook.presto.hive.HdfsConfiguration.java

License: Apache License

protected Configuration createConfiguration() {
    Configuration config = new Configuration();

    if (resourcePaths != null) {
        for (String resourcePath : resourcePaths) {
            config.addResource(new Path(resourcePath));
        }
    }

    // this is to prevent dfs client from doing reverse DNS lookups to determine whether nodes are rack local
    config.setClass("topology.node.switch.mapping.impl", NoOpDNSToSwitchMapping.class,
            DNSToSwitchMapping.class);

    if (socksProxy != null) {
        config.setClass("hadoop.rpc.socket.factory.class.default", SocksSocketFactory.class,
                SocketFactory.class);
        config.set("hadoop.socks.server", socksProxy.toString());
    }

    if (domainSocketPath != null) {
        config.setStrings("dfs.domain.socket.path", domainSocketPath);
    }

    // only enable short circuit reads if domain socket path is properly configured
    if (!config.get("dfs.domain.socket.path", "").trim().isEmpty()) {
        config.setBooleanIfUnset("dfs.client.read.shortcircuit", true);
    }

    config.setInt("dfs.socket.timeout", Ints.checkedCast(dfsTimeout.toMillis()));
    config.setInt("ipc.ping.interval", Ints.checkedCast(dfsTimeout.toMillis()));
    config.setInt("ipc.client.connect.timeout", Ints.checkedCast(dfsConnectTimeout.toMillis()));
    config.setInt("ipc.client.connect.max.retries", dfsConnectMaxRetries);

    // re-map filesystem schemes to match Amazon Elastic MapReduce
    config.set("fs.s3.impl", PrestoS3FileSystem.class.getName());
    config.set("fs.s3n.impl", PrestoS3FileSystem.class.getName());
    config.set("fs.s3bfs.impl", "org.apache.hadoop.fs.s3.S3FileSystem");

    // set AWS credentials for S3
    for (String scheme : ImmutableList.of("s3", "s3bfs", "s3n")) {
        if (s3AwsAccessKey != null) {
            config.set(format("fs.%s.awsAccessKeyId", scheme), s3AwsAccessKey);
        }
        if (s3AwsSecretKey != null) {
            config.set(format("fs.%s.awsSecretAccessKey", scheme), s3AwsSecretKey);
        }
    }

    // set config for S3
    config.setBoolean(PrestoS3FileSystem.S3_SSL_ENABLED, s3SslEnabled);
    config.setInt(PrestoS3FileSystem.S3_MAX_CLIENT_RETRIES, s3MaxClientRetries);
    config.setInt(PrestoS3FileSystem.S3_MAX_ERROR_RETRIES, s3MaxErrorRetries);
    config.set(PrestoS3FileSystem.S3_CONNECT_TIMEOUT, s3ConnectTimeout.toString());
    config.set(PrestoS3FileSystem.S3_STAGING_DIRECTORY, s3StagingDirectory.toString());

    updateConfiguration(config);

    return config;
}
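
Note the asymmetry in this method: every property except dfs.client.read.shortcircuit is written unconditionally, overriding whatever the resource files loaded at the top may contain, while the short-circuit flag uses setBooleanIfUnset so that an operator who explicitly disabled it in those resources keeps that choice.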

From source file: com.facebook.presto.hive.HdfsConfigurationUpdater.java

License: Apache License

public void updateConfiguration(Configuration config) {
    if (resourcePaths != null) {
        for (String resourcePath : resourcePaths) {
            config.addResource(new Path(resourcePath));
        }
    }

    // this is to prevent dfs client from doing reverse DNS lookups to determine whether nodes are rack local
    config.setClass("topology.node.switch.mapping.impl", NoOpDNSToSwitchMapping.class,
            DNSToSwitchMapping.class);

    if (socksProxy != null) {
        config.setClass("hadoop.rpc.socket.factory.class.default", SocksSocketFactory.class,
                SocketFactory.class);
        config.set("hadoop.socks.server", socksProxy.toString());
    }

    if (domainSocketPath != null) {
        config.setStrings("dfs.domain.socket.path", domainSocketPath);
    }

    // only enable short circuit reads if domain socket path is properly configured
    if (!config.get("dfs.domain.socket.path", "").trim().isEmpty()) {
        config.setBooleanIfUnset("dfs.client.read.shortcircuit", true);
    }

    config.setInt("dfs.socket.timeout", toIntExact(dfsTimeout.toMillis()));
    config.setInt("ipc.ping.interval", toIntExact(ipcPingInterval.toMillis()));
    config.setInt("ipc.client.connect.timeout", toIntExact(dfsConnectTimeout.toMillis()));
    config.setInt("ipc.client.connect.max.retries", dfsConnectMaxRetries);

    // re-map filesystem schemes to match Amazon Elastic MapReduce
    config.set("fs.s3.impl", PrestoS3FileSystem.class.getName());
    config.set("fs.s3a.impl", PrestoS3FileSystem.class.getName());
    config.set("fs.s3n.impl", PrestoS3FileSystem.class.getName());
    config.set("fs.s3bfs.impl", "org.apache.hadoop.fs.s3.S3FileSystem");

    // set AWS credentials for S3
    if (s3AwsAccessKey != null) {
        config.set(PrestoS3FileSystem.S3_ACCESS_KEY, s3AwsAccessKey);
        config.set("fs.s3bfs.awsAccessKeyId", s3AwsAccessKey);
    }
    if (s3AwsSecretKey != null) {
        config.set(PrestoS3FileSystem.S3_SECRET_KEY, s3AwsSecretKey);
        config.set("fs.s3bfs.awsSecretAccessKey", s3AwsSecretKey);
    }
    if (s3Endpoint != null) {
        config.set(PrestoS3FileSystem.S3_ENDPOINT, s3Endpoint);
        config.set("fs.s3bfs.Endpoint", s3Endpoint);
    }
    if (s3SignerType != null) {
        config.set(PrestoS3FileSystem.S3_SIGNER_TYPE, s3SignerType.getSignerType());
    }

    config.setInt("fs.cache.max-size", fileSystemMaxCacheSize);

    configureCompression(config, compressionCodec);

    // set config for S3
    config.setBoolean(PrestoS3FileSystem.S3_USE_INSTANCE_CREDENTIALS, s3UseInstanceCredentials);
    config.setBoolean(PrestoS3FileSystem.S3_SSL_ENABLED, s3SslEnabled);
    config.setBoolean(PrestoS3FileSystem.S3_SSE_ENABLED, s3SseEnabled);
    if (s3EncryptionMaterialsProvider != null) {
        config.set(PrestoS3FileSystem.S3_ENCRYPTION_MATERIALS_PROVIDER, s3EncryptionMaterialsProvider);
    }
    if (s3KmsKeyId != null) {
        config.set(PrestoS3FileSystem.S3_KMS_KEY_ID, s3KmsKeyId);
    }
    config.setInt(PrestoS3FileSystem.S3_MAX_CLIENT_RETRIES, s3MaxClientRetries);
    config.setInt(PrestoS3FileSystem.S3_MAX_ERROR_RETRIES, s3MaxErrorRetries);
    config.set(PrestoS3FileSystem.S3_MAX_BACKOFF_TIME, s3MaxBackoffTime.toString());
    config.set(PrestoS3FileSystem.S3_MAX_RETRY_TIME, s3MaxRetryTime.toString());
    config.set(PrestoS3FileSystem.S3_CONNECT_TIMEOUT, s3ConnectTimeout.toString());
    config.set(PrestoS3FileSystem.S3_SOCKET_TIMEOUT, s3SocketTimeout.toString());
    config.set(PrestoS3FileSystem.S3_STAGING_DIRECTORY, s3StagingDirectory.toString());
    config.setInt(PrestoS3FileSystem.S3_MAX_CONNECTIONS, s3MaxConnections);
    config.setLong(PrestoS3FileSystem.S3_MULTIPART_MIN_FILE_SIZE, s3MultipartMinFileSize.toBytes());
    config.setLong(PrestoS3FileSystem.S3_MULTIPART_MIN_PART_SIZE, s3MultipartMinPartSize.toBytes());
    config.setBoolean(PrestoS3FileSystem.S3_PIN_CLIENT_TO_CURRENT_REGION, pinS3ClientToCurrentRegion);
    config.set(PrestoS3FileSystem.S3_USER_AGENT_PREFIX, s3UserAgentPrefix);
}

From source file: com.moz.fiji.schema.tools.FijiToolLauncher.java

License: Apache License

/**
 * Programmatic entry point to the tool launcher if a tool is already selected.
 * Hadoop property-based arguments will be parsed by FijiToolLauncher.run()
 * in a manner similar to Hadoop's ToolRunner.
 *
 * @param tool The FijiTool to run.
 * @param args The command-line arguments, excluding the name of the tool to run.
 * @throws Exception If there is an error.
 * @return 0 on program success, non-zero on error.
 */
public int run(FijiTool tool, String[] args) throws Exception {
    Configuration conf = getConf();
    if (conf == null) {
        conf = new Configuration();
        setConf(conf);
    }

    // Mimic behavior of Hadoop's ToolRunner.run().
    GenericOptionsParser parser = new GenericOptionsParser(conf, args);
    conf = HBaseConfiguration.addHbaseResources(conf);

    tool.setConf(conf);

    // Get remaining arguments and invoke the tool with them.
    String[] toolArgs = parser.getRemainingArgs();

    // Workaround for CDH4 and Hadoop 1 setting different "GenericOptionsParser used" flags.
    conf.setBooleanIfUnset("mapred.used.genericoptionsparser", true);
    conf.setBooleanIfUnset("mapreduce.client.genericoptionsparser.used", true);
    return tool.toolMain(Arrays.asList(toolArgs));
}
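
setBooleanIfUnset matters here: GenericOptionsParser records its "used" flag under one of two property names depending on the Hadoop generation (mapred.* on Hadoop 1/CDH4, mapreduce.* on newer releases), so the launcher fills in whichever flag was left unset without clobbering the one the parser actually wrote.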

From source file: io.prestosql.plugin.hive.HdfsConfigurationInitializer.java

License: Apache License

public void initializeConfiguration(Configuration config) {
    copy(resourcesConfiguration, config);

    // this is to prevent dfs client from doing reverse DNS lookups to determine whether nodes are rack local
    config.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, NoOpDNSToSwitchMapping.class,
            DNSToSwitchMapping.class);

    if (socksProxy != null) {
        config.setClass(HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY, SocksSocketFactory.class,
                SocketFactory.class);
        config.set(HADOOP_SOCKS_SERVER_KEY, socksProxy.toString());
    }

    if (domainSocketPath != null) {
        config.setStrings(DFS_DOMAIN_SOCKET_PATH_KEY, domainSocketPath);
    }

    // only enable short circuit reads if domain socket path is properly configured
    if (!config.get(DFS_DOMAIN_SOCKET_PATH_KEY, "").trim().isEmpty()) {
        config.setBooleanIfUnset(DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
    }

    config.setInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, toIntExact(dfsTimeout.toMillis()));
    config.setInt(IPC_PING_INTERVAL_KEY, toIntExact(ipcPingInterval.toMillis()));
    config.setInt(IPC_CLIENT_CONNECT_TIMEOUT_KEY, toIntExact(dfsConnectTimeout.toMillis()));
    config.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, dfsConnectMaxRetries);

    if (isHdfsWireEncryptionEnabled) {
        config.set(HADOOP_RPC_PROTECTION, "privacy");
        config.setBoolean("dfs.encrypt.data.transfer", true);
    }

    config.setInt("fs.cache.max-size", fileSystemMaxCacheSize);

    config.setInt(LineRecordReader.MAX_LINE_LENGTH, textMaxLineLength);

    configureCompression(config, compressionCodec);

    s3ConfigurationUpdater.updateConfiguration(config);
    gcsConfigurationInitialize.updateConfiguration(config);
}
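
This io.prestosql initializer is the later evolution of the two Presto examples above: the sequence is the same, but the property names now come from named constants (presumably imported from Hadoop's *ConfigKeys classes, e.g. DFS_DOMAIN_SOCKET_PATH_KEY and DFS_CLIENT_READ_SHORTCIRCUIT_KEY) instead of string literals, and the setBooleanIfUnset guard on short-circuit reads is unchanged.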

From source file: org.apache.tez.mapreduce.hadoop.MRHelpers.java

License: Apache License

/**
 * Update the provided configuration to use the new API (mapreduce) or the old API (mapred) based
 * on the configured InputFormat, OutputFormat, Partitioner etc. Also ensures that keys not
 * required by a particular mode are not present. </p>
 *
 * This method should be invoked after completely setting up the configuration. </p>
 *
 * Defaults to using the new API if relevant keys are not present.
 *
 */
public static void configureMRApiUsage(Configuration conf) {
    String oldMapperClass = "mapred.mapper.class";
    conf.setBooleanIfUnset("mapred.mapper.new-api", conf.get(oldMapperClass) == null);
    try {
        if (conf.getBoolean("mapred.mapper.new-api", false)) {
            String mode = "new map API";
            ensureNotSet(conf, "mapred.input.format.class", mode);
            ensureNotSet(conf, oldMapperClass, mode);
        } else {
            String mode = "map compatability";
            ensureNotSet(conf, MRJobConfig.INPUT_FORMAT_CLASS_ATTR, mode);
            ensureNotSet(conf, MRJobConfig.MAP_CLASS_ATTR, mode);
        }
    } catch (IOException e) {
        throw new TezUncheckedException(e);
    }
}
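
A small driver sketching the effect of configureMRApiUsage; the class name com.example.LegacyMapper is a placeholder and is never loaded, since the method only inspects configuration keys.

import org.apache.hadoop.conf.Configuration;
import org.apache.tez.mapreduce.hadoop.MRHelpers;

public class MRApiUsageDemo {
    public static void main(String[] args) {
        // No mapper configured: the new (mapreduce) API is assumed.
        Configuration fresh = new Configuration(false);
        MRHelpers.configureMRApiUsage(fresh);
        System.out.println(fresh.getBoolean("mapred.mapper.new-api", false)); // true

        // An old-API mapper class is present: the old (mapred) API is kept.
        Configuration legacy = new Configuration(false);
        legacy.set("mapred.mapper.class", "com.example.LegacyMapper");
        MRHelpers.configureMRApiUsage(legacy);
        System.out.println(legacy.getBoolean("mapred.mapper.new-api", true)); // false
    }
}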

From source file: org.kiji.schema.tools.KijiToolLauncher.java

License: Apache License

/**
 * Programmatic entry point to the tool launcher if a tool is already selected.
 * Hadoop property-based arguments will be parsed by KijiToolLauncher.run()
 * in a manner similar to Hadoop's ToolRunner.
 *
 * @param tool The KijiTool to run.
 * @param args The command-line arguments, excluding the name of the tool to run.
 * @throws Exception If there is an error.
 * @return 0 on program success, non-zero on error.
 */
public int run(KijiTool tool, String[] args) throws Exception {
    Configuration conf = getConf();
    if (conf == null) {
        conf = new Configuration();
        setConf(conf);
    }

    // Mimic behavior of Hadoop's ToolRunner.run().
    GenericOptionsParser parser = new GenericOptionsParser(conf, args);
    conf = HBaseConfiguration.addHbaseResources(conf);

    tool.setConf(conf);

    // Get remaining arguments and invoke the tool with them.
    String[] toolArgs = parser.getRemainingArgs();

    // Workaround for CDH4 and Hadoop 1 setting different "GenericOptionsParser used" flags.
    conf.setBooleanIfUnset("mapred.used.genericoptionsparser", true);
    conf.setBooleanIfUnset("mapreduce.client.genericoptionsparser.used", true);
    return tool.toolMain(Arrays.asList(toolArgs));
}