Example usage for org.apache.hadoop.conf Configuration getRaw

List of usage examples for org.apache.hadoop.conf Configuration getRaw

Introduction

On this page you can find example usages of the org.apache.hadoop.conf Configuration getRaw method.

Prototype

public String getRaw(String name) 

Document

Get the value of the name property, without doing variable expansion. If the key is deprecated, it returns the value of the first key which replaces the deprecated key and is not null.
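
In other words, unlike get(String), getRaw returns the stored value verbatim, leaving ${...} references unexpanded. A minimal sketch of that difference (the GetRawExample class and the app.* property names below are made up for illustration):

import org.apache.hadoop.conf.Configuration;

public class GetRawExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // "app.base.dir" and "app.input.dir" are hypothetical property names
        conf.set("app.base.dir", "/data/app");
        conf.set("app.input.dir", "${app.base.dir}/input");

        // get() expands ${...} references against other properties
        System.out.println(conf.get("app.input.dir"));    // /data/app/input

        // getRaw() returns the stored value without expansion
        System.out.println(conf.getRaw("app.input.dir")); // ${app.base.dir}/input
    }
}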

Usage

From source file:com.asakusafw.runtime.stage.input.StageInputDriver.java

License:Apache License

/**
 * Returns the configured stage inputs.
 * @param conf the current configuration
 * @return the configured inputs, or an empty list if they have not been configured
 * @throws IOException if failed to restore the stage inputs
 * @throws IllegalArgumentException if the parameter is {@code null}
 */
static List<StageInput> getInputs(Configuration conf) throws IOException {
    if (conf == null) {
        throw new IllegalArgumentException("conf must not be null"); //$NON-NLS-1$
    }
    String encoded = conf.getRaw(KEY);
    if (encoded == null) {
        return Collections.emptyList();
    }
    try {
        if (LOG.isDebugEnabled()) {
            LOG.debug(MessageFormat.format("Decoding inputs ({0} bytes)", //$NON-NLS-1$
                    encoded.length()));
        }
        List<StageInput> inputList = decode(conf, encoded);
        if (LOG.isDebugEnabled()) {
            LOG.debug(MessageFormat.format("Decoded inputs ({0} entries)", //$NON-NLS-1$
                    inputList.size()));
        }
        return inputList;
    } catch (IOException e) {
        throw new IOException(MessageFormat.format("Failed to extract input information: {0}", KEY), e);
    } catch (ClassNotFoundException e) {
        throw new IOException(MessageFormat.format("Failed to extract input information: {0}", KEY), e);
    }
}

From source file:com.cloudera.sqoop.metastore.hsqldb.HsqldbJobStorage.java

License:Apache License

/**
 * Actually insert/update the resources for this job.
 */
private void createInternal(String jobName, JobData data) throws IOException {
    try {
        LOG.debug("Creating job: " + jobName);

        // Save the name of the Sqoop tool.
        setV0Property(jobName, PROPERTY_CLASS_SCHEMA, SQOOP_TOOL_KEY, data.getSqoopTool().getToolName());

        // Save the property set id.
        setV0Property(jobName, PROPERTY_CLASS_SCHEMA, PROPERTY_SET_KEY, CUR_PROPERTY_SET_ID);

        // Save all properties of the SqoopOptions.
        Properties props = data.getSqoopOptions().writeProperties();
        setV0Properties(jobName, PROPERTY_CLASS_SQOOP_OPTIONS, props);

        // And save all unique properties of the configuration.
        Configuration saveConf = data.getSqoopOptions().getConf();
        Configuration baseConf = new Configuration();

        for (Map.Entry<String, String> entry : saveConf) {
            String key = entry.getKey();
            String rawVal = saveConf.getRaw(key);
            String baseVal = baseConf.getRaw(key);
            if (baseVal != null && rawVal.equals(baseVal)) {
                continue; // Don't save this; it's set in the base configuration.
            }

            LOG.debug("Saving " + key + " => " + rawVal + " / " + baseVal);
            setV0Property(jobName, PROPERTY_CLASS_CONFIG, key, rawVal);
        }

        connection.commit();
    } catch (SQLException sqlE) {
        try {
            connection.rollback();
        } catch (SQLException sqlE2) {
            LOG.warn("Exception rolling back transaction during error handling: " + sqlE2);
        }
        throw new IOException("Error communicating with database", sqlE);
    }
}

From source file:com.linkedin.cubert.utils.ExecutionConfig.java

License:Open Source License

/**
 * APIs for Hadoop Config serialization and deserialization
 */
public static void readConf(final Configuration conf) throws IOException {
    final String raw = conf.getRaw(HADOOP_CONF_KEY);
    if (raw == null) {
        return;
    }
    try {
        Loader.instance = (ExecutionConfig) SerializerUtils.deserializeFromString(raw);
    } catch (ClassNotFoundException e) {
        e.printStackTrace();
    }
}

From source file:com.taobao.adfs.util.Utilities.java

License:Apache License

public static String setConfDefaultValue(Configuration conf, String key, Object value,
        boolean setIfOnlyWhitespace, boolean relativePathToAbsolutePath) {
    String valueString = conf.getRaw(key);
    if (value != null && (valueString == null || (setIfOnlyWhitespace && valueString.trim().isEmpty())))
        valueString = value.toString();
    if (relativePathToAbsolutePath)
        valueString = new File(valueString).getAbsolutePath();
    conf.set(key, valueString);
    return conf.getRaw(key);
}

From source file:com.uber.hoodie.common.util.FSUtils.java

License:Apache License

public static FileSystem getFs(String path, Configuration conf) {
    FileSystem fs;
    conf = prepareHadoopConf(conf);
    try {
        fs = new Path(path).getFileSystem(conf);
    } catch (IOException e) {
        throw new HoodieIOException("Failed to get instance of " + FileSystem.class.getName(), e);
    }
    LOG.info(String.format("Hadoop Configuration: fs.defaultFS: [%s], Config:[%s], FileSystem: [%s]",
            conf.getRaw("fs.defaultFS"), conf.toString(), fs.toString()));
    return fs;
}

From source file:de.bitocean.mm.MMAppRunner.java

public static void main(String[] args) throws Exception {

    Configuration cfg = new Configuration();

    File cfgFile = EtoshaContextLogger.getCFGFile();

    System.out.println(">>> CFG:   " + cfgFile.getAbsolutePath());
    System.out.println(">>> exists : " + cfgFile.exists());

    /**
     * according to:
     *
     * http://stackoverflow.com/questions/11478036/hadoop-configuration-property-returns-null
     *
     * we add the resource as a URI
     */
    cfg.addResource(cfgFile.getAbsoluteFile().toURI().toURL());
    cfg.reloadConfiguration();
    System.out.println(cfg);

    System.out.println(cfg.getRaw("smw.url"));
    System.out.println(cfg.get("smw.pw"));
    System.out.println(cfg.get("smw.user")); // for SMW account
    System.out.println(cfg.get("smw.env"));

    SemanticContextBridge.overWriteEnvForLocaltest = false;

    int exitCode = ToolRunner.run(cfg, new MMAppRunner(), args);

}

From source file:org.apache.hcatalog.mapreduce.MultiOutputFormat.java

License:Apache License

/**
 * Compare the aliasContext with userJob and add the differing configuration
 * as mapreduce.multiout.alias.<aliasname>.conf to the userJob.
 * <p>
 * Merge config like tmpjars, tmpfile, tmparchives,
 * mapreduce.job.hdfs-servers that are directly handled by JobClient and add
 * them to userJob.
 * <p>
 * Add mapred.output.dir config to userJob.
 *
 * @param alias alias name associated with an OutputFormat
 * @param userJob reference to Job that the user is going to submit
 * @param aliasContext JobContext populated with OutputFormat related
 *            configuration.
 */
private static void setAliasConf(String alias, JobContext userJob, JobContext aliasContext) {
    Configuration userConf = userJob.getConfiguration();
    StringBuilder builder = new StringBuilder();
    for (Entry<String, String> conf : aliasContext.getConfiguration()) {
        String key = conf.getKey();
        String value = conf.getValue();
        String jobValue = userConf.getRaw(key);
        if (jobValue == null || !jobValue.equals(value)) {
            if (configsToMerge.containsKey(key)) {
                String mergedValue = getMergedConfValue(jobValue, value, configsToMerge.get(key));
                userConf.set(key, mergedValue);
            } else {
                if (configsToOverride.contains(key)) {
                    userConf.set(key, value);
                }
                builder.append(key).append(CONF_KEY_DELIM).append(value).append(CONF_VALUE_DELIM);
            }
        }
    }
    if (builder.length() > CONF_VALUE_DELIM.length()) {
        builder.delete(builder.length() - CONF_VALUE_DELIM.length(), builder.length());
        userConf.set(getAliasConfName(alias), builder.toString());
    }
}

From source file:org.godhuli.rhipe.RHMRHelper.java

License:Apache License

void addJobConfToEnvironment(Configuration conf, Properties env) {
    Iterator it = conf.iterator();
    while (it.hasNext()) {
        Map.Entry en = (Map.Entry) it.next();
        String name = (String) en.getKey();
        if (name.equals("mapred.input.dir") || name.equals("rhipe_input_folder"))
            continue;
        String value = null;
        if (!(name.equals("LD_LIBRARY_PATH") || name.equals("PATH"))) {
            value = conf.get(name); // does variable expansion
        } else {
            value = conf.getRaw(name);
        }
        env.put(name, value);
    }
}

From source file:org.trustedanalytics.auth.gateway.hdfs.fs.FileSystemProvider.java

License:Apache License

default FileSystem getFileSystem(String user, Configuration configuration) throws IOException {
    try {
        return FileSystem.get(new URI(configuration.getRaw("fs.defaultFS")), configuration, user);
    } catch (InterruptedException | URISyntaxException | IOException e) {
        Throwables.propagateIfPossible(e, IOException.class);
        throw new IOException("Cannot create file system", e);
    }
}

From source file:org.trustedanalytics.servicebroker.hdfs.config.HdfsConfiguration.java

License:Apache License

private FileSystem getFileSystemForUser(Configuration config, String user)
        throws URISyntaxException, IOException, InterruptedException {
    LOGGER.info("Creating filesytem with for brokerUser: " + user);
    return FileSystem.get(new URI(config.getRaw(HdfsConstants.HADOOP_DEFAULT_FS)), config, user);
}