Example usage for org.apache.hadoop.conf Configuration size

Introduction

On this page you can find example usages of the org.apache.hadoop.conf.Configuration#size() method.

Prototype

public int size() 

Document

Return the number of keys in the configuration.
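
For orientation before the project examples, here is a minimal, hypothetical sketch (the class name ConfigurationSizeExample and the property names are invented for illustration) of what size() reports. Note that resources added through addResource() are parsed lazily, and several of the examples below call size() precisely to force that load.

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;

public class ConfigurationSizeExample {
    public static void main(String[] args) {
        // false = do not load the default resources (core-default.xml etc.),
        // so the Configuration starts out with no keys.
        Configuration conf = new Configuration(false);
        System.out.println(conf.size()); // 0

        conf.set("foo", "bar");
        System.out.println(conf.size()); // 1

        // Resources added via addResource() are read lazily. Calling size()
        // (or any other property accessor) forces the stream to be parsed;
        // the SubmittedJob and JobHistoryRawService examples below use
        // exactly this trick.
        String xml = "<configuration><property>"
                + "<name>baz</name><value>qux</value>"
                + "</property></configuration>";
        InputStream in = new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8));
        conf.addResource(in);
        System.out.println(conf.size()); // 2
    }
}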

Usage

From source file:co.cask.cdap.explore.service.ExploreServiceUtilsTest.java

License:Apache License

@Test
public void hijackConfFileTest() throws Exception {
    Configuration conf = new Configuration(false);
    conf.set("foo", "bar");
    Assert.assertEquals(1, conf.size());

    File tempDir = tmpFolder.newFolder();

    File confFile = tmpFolder.newFile("hive-site.xml");

    try (FileOutputStream os = new FileOutputStream(confFile)) {
        conf.writeXml(os);
    }

    File newConfFile = ExploreServiceUtils.updateConfFileForExplore(confFile, tempDir);

    conf = new Configuration(false);
    conf.addResource(newConfFile.toURI().toURL());

    Assert.assertEquals(3, conf.size());
    Assert.assertEquals("false", conf.get(Job.MAPREDUCE_JOB_USER_CLASSPATH_FIRST));
    Assert.assertEquals("false", conf.get(Job.MAPREDUCE_JOB_CLASSLOADER));
    Assert.assertEquals("bar", conf.get("foo"));

    // check yarn-site changes
    confFile = tmpFolder.newFile("yarn-site.xml");
    conf = new YarnConfiguration();

    try (FileOutputStream os = new FileOutputStream(confFile)) {
        conf.writeXml(os);
    }

    String yarnApplicationClassPath = "$PWD/*," + conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            Joiner.on(",").join(YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH));

    newConfFile = ExploreServiceUtils.updateConfFileForExplore(confFile, tempDir);

    conf = new Configuration(false);
    conf.addResource(newConfFile.toURI().toURL());

    Assert.assertEquals(yarnApplicationClassPath, conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH));

    // check mapred-site changes
    confFile = tmpFolder.newFile("mapred-site.xml");
    conf = new YarnConfiguration();

    try (FileOutputStream os = new FileOutputStream(confFile)) {
        conf.writeXml(os);
    }

    String mapredApplicationClassPath = "$PWD/*," + conf.get(MRJobConfig.MAPREDUCE_APPLICATION_CLASSPATH,
            MRJobConfig.DEFAULT_MAPREDUCE_APPLICATION_CLASSPATH);

    newConfFile = ExploreServiceUtils.updateConfFileForExplore(confFile, tempDir);

    conf = new Configuration(false);
    conf.addResource(newConfFile.toURI().toURL());

    Assert.assertEquals(mapredApplicationClassPath, conf.get(MRJobConfig.MAPREDUCE_APPLICATION_CLASSPATH));

    // Ensure conf files that are not hive-site.xml/mapred-site.xml/yarn-site.xml are unchanged
    confFile = tmpFolder.newFile("core-site.xml");
    Assert.assertEquals(confFile, ExploreServiceUtils.updateConfFileForExplore(confFile, tempDir));
}

From source file:com.scaleoutsoftware.soss.hserver.hadoop.SubmittedJob.java

License:Apache License

SubmittedJob(JobID jobID, String jobSubmitDirectory, Credentials credentials, Configuration configuration)
        throws IOException, InterruptedException {
    this.jobID = jobID;
    this.configuration = configuration;
    this.jobSubmitDirectoryPath = new Path(jobSubmitDirectory);
    this.fileSystem = FileSystem.get(configuration);

    JobSplit.TaskSplitMetaInfo[] splitInfo = SplitMetaInfoReader.readSplitMetaInfo(jobID, fileSystem,
            configuration, jobSubmitDirectoryPath);

    Path jobSplitFile = JobSubmissionFiles.getJobSplitFile(jobSubmitDirectoryPath);
    FSDataInputStream stream = fileSystem.open(jobSplitFile);

    for (JobSplit.TaskSplitMetaInfo info : splitInfo) {
        Object split = getSplitDetails(stream, info.getStartOffset(), configuration);
        inputSplits.add(split);
        splitLocations.put(split, info.getLocations());
        LOG.info("Adding split for execution. Split = " + split + " Locations: "
                + Arrays.toString(splitLocations.get(split)));
    }

    stream.close();

    jobConfPath = JobSubmissionFiles.getJobConfPath(jobSubmitDirectoryPath);

    if (!fileSystem.exists(jobConfPath)) {
        throw new IOException("Cannot find job.xml. Path = " + jobConfPath);
    }

    //We cannot just use the JobConf(Path) constructor,
    //because it does not work for HDFS locations.
    //The comment in Configuration#loadResource() states,
    //for the case when the Path to the resource is provided:
    //"Can't use FileSystem API or we get an infinite loop
    //since FileSystem uses Configuration API.  Use java.io.File instead."
    //
    //Workaround: construct empty Configuration, provide it with
    //input stream and give it to JobConf constructor.
    FSDataInputStream jobConfStream = fileSystem.open(jobConfPath);
    Configuration jobXML = new Configuration(false);
    jobXML.addResource(jobConfStream);

    //The configuration does not actually get read until we attempt to
    //access some property. A call to #size() forces the Configuration to
    //read the input stream.
    jobXML.size();

    //We are done with the input stream, so we can close it now.
    jobConfStream.close();

    jobConf = new JobConf(jobXML);

    newApi = jobConf.getUseNewMapper();

    jobStatus = new JobStatus(jobID, 0f, 0f, 0f, 0f, JobStatus.State.RUNNING, JobPriority.NORMAL,
            UserGroupInformation.getCurrentUser().getUserName(), jobID.toString(), jobConfPath.toString(), "");
}

From source file:com.thinkbiganalytics.kylo.util.HadoopClassLoaderTest.java

License:Apache License

/**
 * Test Hadoop class loader.
 */
@Test
@SuppressWarnings({ "squid:S2095", "unchecked" })
public void test() {
    final Configuration conf = new Configuration(false);
    final HadoopClassLoader classLoader = new HadoopClassLoader(conf);

    // Test null paths
    Assert.assertFalse("Expected null jar to be ignored", classLoader.addJar(null));
    Assert.assertFalse("Expected null jars to be ignored", classLoader.addJars(null));
    Assert.assertArrayEquals(new URL[0], classLoader.getURLs());
    Assert.assertEquals(0, conf.size());

    // Test invalid path
    Assert.assertFalse("Expected invalid jar to be ignored",
            classLoader.addJar("file:/tmp/" + UUID.randomUUID()));
    Assert.assertArrayEquals(new URL[0], classLoader.getURLs());
    Assert.assertEquals(0, conf.size());

    // Test Hadoop path
    Assert.assertTrue("Expected Hadoop jar to be added", classLoader.addJar("mock:/tmp/file.ext"));
    Matcher matcher1 = withToString(CoreMatchers.equalTo("hadoop:mock:/tmp/file.ext"));
    Assert.assertThat(Arrays.asList(classLoader.getURLs()), CoreMatchers.hasItems(matcher1));

    // Test path without FileSystem services
    final String classUrl = getClass().getResource("./").toString();
    Assert.assertTrue("Expected class directory to be added", classLoader.addJar(classUrl));
    Matcher matcher2 = withToString(CoreMatchers.equalTo(classUrl));
    Assert.assertThat(Arrays.asList(classLoader.getURLs()), CoreMatchers.hasItems(matcher2));
    Assert.assertEquals(0, conf.size());

    // Test path with FileSystem services
    final String resourceUrl = getClass().getResource("/").toString();
    Assert.assertTrue("Expected resource directory to be added", classLoader.addJar(resourceUrl));
    Matcher matcher3 = withToString(CoreMatchers.equalTo(resourceUrl));
    Assert.assertThat(Arrays.asList(classLoader.getURLs()), CoreMatchers.hasItems(matcher3));
    Assert.assertEquals(MockFileSystem.class, conf.getClass("fs.mock.impl", null));

    // Test existing jar
    final int existingSize = classLoader.getURLs().length;
    Assert.assertFalse("Expected existing jar to be ignored", classLoader.addJar(resourceUrl));
    Assert.assertEquals(existingSize, classLoader.getURLs().length);
}

From source file:com.twitter.hraven.datasource.JobHistoryRawService.java

License:Apache License

/**
 * @param result
 *          from the {@link Scan} from
 *          {@link #getHistoryRawTableScan(String, String, String, boolean, boolean, boolean)}
 * @return the configuration part.
 * @throws MissingColumnInResultException
 *           when the result does not contain {@link Constants#RAW_FAM},
 *           {@link Constants#JOBCONF_COL}.
 */
public Configuration createConfigurationFromResult(Result result) throws MissingColumnInResultException {

    if (result == null) {
        throw new IllegalArgumentException("Cannot create InputStream from null");
    }

    KeyValue keyValue = result.getColumnLatest(Constants.RAW_FAM_BYTES, Constants.JOBCONF_COL_BYTES);

    // Create a jobConf from the raw input
    Configuration jobConf = new Configuration(false);

    byte[] jobConfRawBytes = null;
    if (keyValue != null) {
        jobConfRawBytes = keyValue.getValue();
    }
    if (jobConfRawBytes == null || jobConfRawBytes.length == 0) {
        throw new MissingColumnInResultException(Constants.RAW_FAM_BYTES, Constants.JOBCONF_COL_BYTES);
    }

    InputStream in = new ByteArrayInputStream(jobConfRawBytes);
    jobConf.addResource(in);

    // Configuration property loading is lazy, so we need to force a load from the input stream
    try {
        int size = jobConf.size();
        if (LOG.isDebugEnabled()) {
            LOG.info("Loaded " + size + " job configuration properties from result");
        }
    } catch (Exception e) {
        throw new ProcessingException(
                "Invalid configuration from result " + Bytes.toStringBinary(result.getRow()), e);
    }

    return jobConf;
}

From source file:org.apache.accumulo.core.conf.SiteConfiguration.java

License:Apache License

/**
 * Not for consumers. Call {@link SiteConfiguration#getInstance(AccumuloConfiguration)} instead
 */
SiteConfiguration(AccumuloConfiguration parent) {
    this.parent = parent;
    /*
     * Make a read-only copy of static configs so we can avoid lock contention on the Hadoop Configuration object
     */
    final Configuration conf = getXmlConfig();
    Map<String, String> temp = new HashMap<>((int) (Math.ceil(conf.size() / 0.75f)), 0.75f);
    for (Entry<String, String> entry : conf) {
        temp.put(entry.getKey(), entry.getValue());
    }
    /*
     * If any of the configs used in hot codepaths are unset here, set a null so that we'll default to the parent config without contending for the Hadoop
     * Configuration object
     */
    for (Property hotConfig : Property.HOT_PATH_PROPERTIES) {
        if (!(temp.containsKey(hotConfig.getKey()))) {
            temp.put(hotConfig.getKey(), null);
        }
    }
    staticConfigs = Collections.unmodifiableMap(temp);
}

From source file:org.apache.accumulo.harness.MiniClusterHarness.java

License:Apache License

public MiniAccumuloClusterImpl create(String testClassName, String testMethodName, AuthenticationToken token,
        MiniClusterConfigurationCallback configCallback, TestingKdc kdc) throws Exception {
    requireNonNull(token);
    checkArgument(token instanceof PasswordToken || token instanceof KerberosToken,
            "A PasswordToken or KerberosToken is required");

    String rootPasswd;
    if (token instanceof PasswordToken) {
        rootPasswd = new String(((PasswordToken) token).getPassword(), UTF_8);
    } else {
        rootPasswd = UUID.randomUUID().toString();
    }

    File baseDir = AccumuloClusterHarness.createTestDir(testClassName + "_" + testMethodName);
    MiniAccumuloConfigImpl cfg = new MiniAccumuloConfigImpl(baseDir, rootPasswd);

    // Enable native maps by default
    cfg.setNativeLibPaths(NativeMapIT.nativeMapLocation().getAbsolutePath());
    cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, Boolean.TRUE.toString());

    Configuration coreSite = new Configuration(false);

    // Setup SSL and credential providers if the properties request such
    configureForEnvironment(cfg, getClass(), AccumuloClusterHarness.getSslDir(baseDir), coreSite, kdc);

    // Invoke the callback for tests to configure MAC before it starts
    configCallback.configureMiniCluster(cfg, coreSite);

    MiniAccumuloClusterImpl miniCluster = new MiniAccumuloClusterImpl(cfg);

    // Write out any configuration items to a file so HDFS will pick them up automatically (from the classpath)
    if (coreSite.size() > 0) {
        File csFile = new File(miniCluster.getConfig().getConfDir(), "core-site.xml");
        if (csFile.exists())
            throw new RuntimeException(csFile + " already exists");

        OutputStream out = new BufferedOutputStream(new FileOutputStream(csFile));
        coreSite.writeXml(out);
        out.close();
    }

    return miniCluster;
}

From source file:org.apache.accumulo.test.functional.ConfigurableMacBase.java

License:Apache License

private void createMiniAccumulo() throws Exception {
    // createTestDir will give us an empty directory; we don't need to clean it up ourselves
    File baseDir = createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName());
    MiniAccumuloConfigImpl cfg = new MiniAccumuloConfigImpl(baseDir, ROOT_PASSWORD);
    String nativePathInDevTree = NativeMapIT.nativeMapLocation().getAbsolutePath();
    String nativePathInMapReduce = new File(System.getProperty("user.dir")).toString();
    cfg.setNativeLibPaths(nativePathInDevTree, nativePathInMapReduce);
    cfg.setProperty(Property.GC_FILE_ARCHIVE, Boolean.TRUE.toString());
    Configuration coreSite = new Configuration(false);
    configure(cfg, coreSite);
    cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, Boolean.TRUE.toString());
    configureForEnvironment(cfg, getClass(), getSslDir(baseDir));
    cluster = new MiniAccumuloClusterImpl(cfg);
    if (coreSite.size() > 0) {
        File csFile = new File(cluster.getConfig().getConfDir(), "core-site.xml");
        if (csFile.exists()) {
            coreSite.addResource(new Path(csFile.getAbsolutePath()));
        }
        File tmp = new File(csFile.getAbsolutePath() + ".tmp");
        OutputStream out = new BufferedOutputStream(new FileOutputStream(tmp));
        coreSite.writeXml(out);
        out.close();
        assertTrue(tmp.renameTo(csFile));
    }
    beforeClusterStart(cfg);
}

From source file:org.apache.accumulo.test.functional.ConfigurableMacIT.java

License:Apache License

@Before
public void setUp() throws Exception {
    MiniAccumuloConfigImpl cfg = new MiniAccumuloConfigImpl(
            createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName()), ROOT_PASSWORD);
    cfg.setNativeLibPaths(NativeMapIT.nativeMapLocation().getAbsolutePath());
    Configuration coreSite = new Configuration(false);
    configure(cfg, coreSite);
    cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, Boolean.TRUE.toString());
    configureForEnvironment(cfg, getClass(), createSharedTestDir(this.getClass().getName() + "-ssl"));
    cluster = new MiniAccumuloClusterImpl(cfg);
    if (coreSite.size() > 0) {
        File csFile = new File(cluster.getConfig().getConfDir(), "core-site.xml");
        if (csFile.exists())
            throw new RuntimeException(csFile + " already exists");

        OutputStream out = new BufferedOutputStream(new FileOutputStream(csFile));
        coreSite.writeXml(out);
        out.close();
    }
    beforeClusterStart(cfg);
    cluster.start();
}

From source file:org.apache.falcon.workflow.engine.OozieDAGEngine.java

License:Apache License

private InstancesResult.KeyValuePair[] getWFParams(WorkflowJob jobInfo) {
    Configuration conf = new Configuration(false);
    conf.addResource(new ByteArrayInputStream(jobInfo.getConf().getBytes()));
    InstancesResult.KeyValuePair[] wfParams = new InstancesResult.KeyValuePair[conf.size()];
    int i = 0;
    for (Map.Entry<String, String> entry : conf) {
        wfParams[i++] = new InstancesResult.KeyValuePair(entry.getKey(), entry.getValue());
    }
    return wfParams;
}

From source file:org.apache.hoya.tools.ConfigHelperTest.java

License:Apache License

private static Configuration loadConf(String s) {
    Configuration conf = new Configuration(false);
    conf.addResource(s);
    assertTrue("loaded no properties from " + s, conf.size() > 0);
    return conf;
}