List of usage examples for org.apache.hadoop.fs.FileContext#makeQualified
public Path makeQualified(final Path path)
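Before the project examples below, here is a minimal self-contained sketch of what makeQualified does (the configuration defaults, relative path, and printed URIs are illustrative assumptions, not taken from any project on this page): the method resolves a scheme-less, possibly relative Path against the FileContext's default file system and working directory, returning a Path whose URI carries an explicit scheme and authority.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class MakeQualifiedSketch {
    public static void main(String[] args) throws Exception {
        // FileContext bound to whatever fs.defaultFS names in the configuration
        // (the local file system if nothing else is configured)
        FileContext fc = FileContext.getFileContext(new Configuration());

        // A relative, scheme-less path...
        Path relative = new Path("tmp/example.txt");

        // ...comes back absolute, with scheme and authority filled in, e.g.
        // file:/home/alice/tmp/example.txt or hdfs://namenode:8020/user/alice/tmp/example.txt
        Path qualified = fc.makeQualified(relative);
        System.out.println(qualified);
    }
}

Every example below follows the same pattern: build a Path from a string, qualify it, and only then hand it to an API that expects a fully resolved location (a job output directory, a copy target, a delete).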
From source file: com.github.seqware.queryengine.plugins.hbasemr.MRHBasePluginRunner.java
License: Open Source License
public MRHBasePluginRunner(MapReducePlugin mapReducePlugin, FeatureSet inputSet, Object... parameters) {
    this.mapReducePlugin = mapReducePlugin;
    try {
        CreateUpdateManager manager = SWQEFactory.getModelManager();
        // outputSet should attach to the original reference
        this.outputSet = manager.buildFeatureSet().setReferenceID(inputSet.getReferenceID()).build();
        manager.close();
        // do setup for Map/Reduce from the HBase API
        String tableName = generateTableName(inputSet);
        String destTableName = generateTableName(outputSet);
        Configuration conf = new Configuration();
        HBaseStorage.configureHBaseConfig(conf);
        HBaseConfiguration.addHbaseResources(conf);
        // we need to pass the parameters for a featureset, maybe we can take advantage of our serializers
        byte[] sSet = SWQEFactory.getSerialization().serialize(inputSet);
        byte[] dSet = SWQEFactory.getSerialization().serialize(outputSet);
        String[] str_params = serializeParametersToString(parameters, mapReducePlugin, sSet, dSet);
        File file = new File(new URI(Constants.Term.DEVELOPMENT_DEPENDENCY.getTermValue(String.class)));
        if (file.exists()) {
            conf.setStrings("tmpjars", Constants.Term.DEVELOPMENT_DEPENDENCY.getTermValue(String.class));
        }
        conf.setStrings(EXT_PARAMETERS, str_params);
        conf.set("mapreduce.map.java.opts", "-Xmx4096m -verbose:gc");
        conf.set("mapreduce.reduce.java.opts", "-Xmx4096m -verbose:gc");
        conf.set("mapreduce.map.ulimit", "4194304");
        conf.set("mapreduce.reduce.ulimit", "4194304");
        conf.set("mapreduce.map.memory.mb", "4096");
        conf.set("mapreduce.reduce.memory.mb", "4096");
        conf.set("mapreduce.map.memory.physical.mb", "4096");
        conf.set("mapreduce.reduce.memory.physical.mb", "4096");
        // the above settings all seem to be ignored by hboot
        // TODO: only this one works, but as far as I know, we're using mapreduce not mapred. Strange
        conf.set("mapred.child.java.opts", "-Xmx2048m -verbose:gc");
        this.job = new Job(conf, mapReducePlugin.getClass().getSimpleName());
        Scan scan = new Scan();
        scan.setMaxVersions(); // we need all version data
        scan.setCaching(500); // 1 is the default in Scan, which will be bad for MapReduce jobs
        scan.setCacheBlocks(false); // don't set to true for MR jobs
        byte[] qualiferBytes = Bytes.toBytes(inputSet.getSGID().getUuid().toString());
        scan.addColumn(HBaseStorage.getTEST_FAMILY_INBYTES(), qualiferBytes);
        scan.setFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(qualiferBytes)));
        // handle the part that changes from job to job
        // pluginInterface.performVariableInit(tableName, destTableName, scan);
        TableMapReduceUtil.initTableMapperJob(tableName, // input HBase table name
                scan, // Scan instance to control CF and attribute selection
                PluginRunnerMapper.class, // mapper
                mapReducePlugin.getMapOutputKeyClass(), // mapper output key
                mapReducePlugin.getMapOutputValueClass(), // mapper output value
                job);
        job.setOutputFormatClass(mapReducePlugin.getOutputClass()); // because we aren't emitting anything from mapper
        job.setReducerClass(MRHBasePluginRunner.PluginRunnerReducer.class); // reducer class
        job.setNumReduceTasks(mapReducePlugin.getNumReduceTasks());
        if (mapReducePlugin.getResultMechanism() == PluginInterface.ResultMechanism.FILE) {
            FileContext fileContext = FileContext.getFileContext(this.job.getConfiguration());
            Path path = new Path(
                    "/tmp/" + new BigInteger(20, new SecureRandom()).toString(32) + mapReducePlugin.toString());
            path = fileContext.makeQualified(path);
            TextOutputFormat.setOutputPath(job, path); // adjust directories as required
        }
        TableMapReduceUtil.addDependencyJars(job);
        job.setJarByClass(MRHBasePluginRunner.class);
        // submit the job, but do not block
        job.submit();
    } catch (URISyntaxException ex) {
        Logger.getLogger(MRHBasePluginRunner.class.getName()).fatal(null, ex);
    } catch (InterruptedException ex) {
        Logger.getLogger(MRHBasePluginRunner.class.getName()).fatal(null, ex);
    } catch (ClassNotFoundException ex) {
        Logger.getLogger(MRHBasePluginRunner.class.getName()).fatal(null, ex);
    } catch (IOException ex) {
        Logger.getLogger(MRHBasePluginRunner.class.getName()).fatal(null, ex);
    }
}
From source file: com.github.seqware.queryengine.plugins.runners.hbasemr.MRHBasePluginRunner.java
License: Open Source License
/**
 * @param mapReducePlugin the particular plugin to instantiate and run
 * @param reference a reference (has to be provided in lieu of a feature set)
 * @param inputSet a set of feature sets to operate on
 * @param parameters an arbitrary number of external parameters for plugin developers to provide to their plugins
 */
public MRHBasePluginRunner(MapReducePlugin mapReducePlugin, Reference reference, List<FeatureSet> inputSet,
        Object... parameters) {
    // handle null inputSet
    if (inputSet == null) {
        inputSet = new ArrayList<FeatureSet>();
    }
    // we should either have a reference or more than one input set
    assert (reference != null || inputSet.size() > 0);
    // all feature sets should have the same reference
    if (inputSet.size() > 0) {
        SGID ref = inputSet.iterator().next().getReference().getSGID();
        for (FeatureSet set : inputSet) {
            assert (set.getReferenceID().equals(ref));
        }
    }
    SGID referenceSGID = reference != null ? reference.getSGID() : inputSet.iterator().next().getReferenceID();
    this.mapReducePlugin = mapReducePlugin;
    try {
        CreateUpdateManager manager = SWQEFactory.getModelManager();
        // outputSet should attach to the original reference
        this.outputSet = manager.buildFeatureSet().setReferenceID(referenceSGID).build();
        manager.close();
        // do setup for Map/Reduce from the HBase API
        String tableName = generateTableName(outputSet);
        String destTableName = generateTableName(outputSet);
        Configuration conf = new Configuration();
        HBaseStorage.configureHBaseConfig(conf);
        HBaseConfiguration.addHbaseResources(conf);
        // we need to pass the parameters for a featureset, maybe we can take advantage of our serializers
        byte[][] sSet = new byte[inputSet.size()][]; //SWQEFactory.getSerialization().serialize(inputSet);
        for (int i = 0; i < sSet.length; i++) {
            sSet[i] = SWQEFactory.getSerialization().serialize(inputSet.get(i));
        }
        byte[] dSet = SWQEFactory.getSerialization().serialize(outputSet);
        String[] str_params = serializeParametersToString(parameters, mapReducePlugin, sSet, dSet);
        File file = new File(new URI(Constants.Term.DEVELOPMENT_DEPENDENCY.getTermValue(String.class)));
        if (file.exists()) {
            conf.setStrings("tmpjars", Constants.Term.DEVELOPMENT_DEPENDENCY.getTermValue(String.class));
        }
        conf.setStrings(EXT_PARAMETERS, str_params);
        conf.set("mapreduce.map.java.opts", "-Xmx4096m -verbose:gc");
        conf.set("mapreduce.reduce.java.opts", "-Xmx4096m -verbose:gc");
        conf.set("mapreduce.map.ulimit", "4194304");
        conf.set("mapreduce.reduce.ulimit", "4194304");
        conf.set("mapreduce.map.memory.mb", "4096");
        conf.set("mapreduce.reduce.memory.mb", "4096");
        conf.set("mapreduce.map.memory.physical.mb", "4096");
        conf.set("mapreduce.reduce.memory.physical.mb", "4096");
        conf.set("mapred.job.map.memory.mb", "4096");
        conf.set("mapred.job.reduce.memory.mb", "4096");
        // the above settings all seem to be ignored by hboot
        // TODO: only this one works, but as far as I know, we're using mapreduce not mapred. Strange
        conf.set("mapred.child.java.opts", "-Xmx2048m -verbose:gc");
        this.job = new Job(conf, mapReducePlugin.getClass().getSimpleName());
        Scan scan = new Scan();
        scan.setMaxVersions(); // we need all version data
        scan.setCaching(500); // 1 is the default in Scan, which will be bad for MapReduce jobs
        scan.setCacheBlocks(false); // don't set to true for MR jobs
        for (FeatureSet set : inputSet) {
            byte[] qualiferBytes = Bytes.toBytes(set.getSGID().getUuid().toString());
            scan.addColumn(HBaseStorage.getTEST_FAMILY_INBYTES(), qualiferBytes);
        }
        // this might be redundant, check this!!!!
        // scan.setFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(qualiferBytes)));
        // handle the part that changes from job to job
        // pluginInterface.performVariableInit(tableName, destTableName, scan);
        TableMapReduceUtil.initTableMapperJob(tableName, // input HBase table name
                scan, // Scan instance to control CF and attribute selection
                PluginRunnerMapper.class, // mapper
                mapReducePlugin.getMapOutputKeyClass(), // mapper output key
                mapReducePlugin.getMapOutputValueClass(), // mapper output value
                job);
        TableMapReduceUtil.initTableReducerJob(tableName, PluginRunnerReducer.class, job);
        if (mapReducePlugin.getOutputClass() != null) {
            job.setOutputFormatClass(mapReducePlugin.getOutputClass());
        }
        job.setReducerClass(MRHBasePluginRunner.PluginRunnerReducer.class); // reducer class
        if (mapReducePlugin.getResultMechanism() == PluginInterface.ResultMechanism.FILE) {
            FileContext fileContext = FileContext.getFileContext(this.job.getConfiguration());
            FileSystem fs = FileSystem.get(job.getConfiguration());
            Path path = new Path(fs.getHomeDirectory(),
                    new BigInteger(20, new SecureRandom()).toString(32) + mapReducePlugin.toString());
            path = fileContext.makeQualified(path);
            TextOutputFormat.setOutputPath(job, path); // adjust directories as required
        }
        job.setJarByClass(MRHBasePluginRunner.class);
        TableMapReduceUtil.addDependencyJars(job);
        TableMapReduceUtil.addDependencyJars(conf, MRHBasePluginRunner.class,
                MRHBasePluginRunner.PluginRunnerMapper.class, MRHBasePluginRunner.PluginRunnerReducer.class);
        // submit the job, but do not block
        job.submit();
    } catch (URISyntaxException ex) {
        Logger.getLogger(MRHBasePluginRunner.class.getName()).fatal(null, ex);
    } catch (InterruptedException ex) {
        Logger.getLogger(MRHBasePluginRunner.class.getName()).fatal(null, ex);
    } catch (ClassNotFoundException ex) {
        Logger.getLogger(MRHBasePluginRunner.class.getName()).fatal(null, ex);
    } catch (IOException ex) {
        Logger.getLogger(MRHBasePluginRunner.class.getName()).fatal(null, ex);
    }
}
From source file: com.ikanow.aleph2.core.shared.utils.JarCacheUtils.java
License: Apache License
/**
 * Moves a shared JAR into a local spot (if required)
 * @param library_bean
 * @param fs
 * @return either a basic message bean containing an error, or the fully qualified path of the cached JAR
 */
public static <M> CompletableFuture<Validation<BasicMessageBean, String>> getCachedJar(
        final String local_cached_jar_dir, final SharedLibraryBean library_bean, final IStorageService fs,
        final String handler_for_errors, final M msg_for_errors) {
    try {
        final FileContext dfs = fs.getUnderlyingPlatformDriver(FileContext.class, Optional.empty()).get();
        final FileContext lfs = fs.getUnderlyingPlatformDriver(FileContext.class, IStorageService.LOCAL_FS)
                .get();
        final Path cached_jar_file = lfs
                .makeQualified(new Path(local_cached_jar_dir + "/" + buildCachedJarName(library_bean)));
        final Path original_jar_file = dfs.makeQualified(new Path(library_bean.path_name()));
        // (this will exception out if it doesn't exist, as it should)
        final FileStatus file_status = dfs.getFileStatus(original_jar_file);
        try {
            // (this will fall through to case 2 if it doesn't exist)
            final FileStatus local_file_status = lfs.getFileStatus(cached_jar_file);
            // 1) if the local version exists but is stale then overwrite it
            if (file_status.getModificationTime() > local_file_status.getModificationTime()) {
                // (it gets kinda complicated here so just invalidate the entire classloader cache..)
                // TODO (ALEPH-12): add a coverage test for this
                ClassloaderUtils.clearCache();
                lfs.util().copy(original_jar_file, cached_jar_file, false, true);
            }
        } catch (FileNotFoundException f) {
            // 2) if the local version doesn't exist then just copy the distributed file across
            // (note: don't need to do anything with the classloader cache here since the file doesn't exist,
            // so it can't have a cache key)
            lfs.util().copy(original_jar_file, cached_jar_file);
        }
        return CompletableFuture.completedFuture(Validation.success(cached_jar_file.toString()));
    } catch (Throwable e) {
        return CompletableFuture.completedFuture(
                Validation.fail(SharedErrorUtils.buildErrorMessage(handler_for_errors, msg_for_errors,
                        SharedErrorUtils.getLongForm(SharedErrorUtils.SHARED_LIBRARY_NAME_NOT_FOUND, e,
                                library_bean.path_name()))));
    }
}
From source file: com.ikanow.aleph2.core.shared.utils.TestJarCacheUtils.java
License: Apache License
@Test
public void test_localFileNotPresent()
        throws InterruptedException, ExecutionException, UnsupportedFileSystemException {
    final FileContext localfs = FileContext.getLocalFSFileContext(new Configuration());

    final String local_cached_dir = Optional.of(_globals.local_cached_jar_dir())
            .map(dir -> dir.replace(File.separator, "/")).map(dir -> dir.endsWith("/") ? dir : (dir + "/"))
            .get();
    final String expected_cache_name = local_cached_dir + "test1.cache.jar";
    final Path expected_cache_path = localfs.makeQualified(new Path(expected_cache_name));

    // Just make sure we've deleted the old file
    try {
        new File(expected_cache_name).delete();
    } catch (Exception e) {
    }

    assertTrue("Remote file exists", new File(_test_file_path).exists());
    assertFalse("Local file doesn't exist", new File(expected_cache_name).exists());

    final SharedLibraryBean library_bean = BeanTemplateUtils.build(SharedLibraryBean.class)
            .with(SharedLibraryBean::path_name, _test_file_path).with(SharedLibraryBean::_id, "test1").done()
            .get();

    final Validation<BasicMessageBean, String> ret_val_1 = JarCacheUtils.getCachedJar(
            _globals.local_cached_jar_dir(), library_bean, _mock_hdfs, "test1", new TestMessageBean()).get();

    assertEquals(expected_cache_path.toString(), ret_val_1.success());
    assertTrue("Local file now exists", new File(expected_cache_name).exists());
}
From source file: com.ikanow.aleph2.core.shared.utils.TestJarCacheUtils.java
License: Apache License
@Test
public void test_localFilePresentButOld()
        throws InterruptedException, ExecutionException, AccessControlException, FileAlreadyExistsException,
        FileNotFoundException, ParentNotDirectoryException, IOException {
    final FileContext localfs = FileContext.getLocalFSFileContext(new Configuration());

    String java_name = _globals.local_cached_jar_dir() + File.separator + "testX.cache.jar";
    final String expected_cache_name = java_name.replace(File.separator, "/");
    final Path expected_cache_path = localfs.makeQualified(new Path(expected_cache_name));

    // Just make sure we've deleted the old file
    try {
        System.out.println("Deleted: " + new File(java_name).delete());
    } catch (Exception e) {
        fail("Misc Error: " + e);
    }

    assertTrue("Remote file exists", new File(_test_file_path).exists());
    assertFalse("Local file doesn't exist: " + java_name, new File(java_name).exists());

    // Now create the file
    localfs.create(expected_cache_path, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE));
    localfs.setTimes(expected_cache_path, 0L, 0L);

    // check something has happened:
    assertEquals(0L, localfs.getFileStatus(expected_cache_path).getModificationTime());
    assertNotEquals(0L, _test_file_time);

    // Now run the test routine
    final SharedLibraryBean library_bean = BeanTemplateUtils.build(SharedLibraryBean.class)
            .with(SharedLibraryBean::path_name, _test_file_path).with(SharedLibraryBean::_id, "testX").done()
            .get();

    final Validation<BasicMessageBean, String> ret_val_1 = JarCacheUtils.getCachedJar(
            _globals.local_cached_jar_dir(), library_bean, _mock_hdfs, "testX", new TestMessageBean()).get();

    assertEquals(expected_cache_path.toString(), ret_val_1.success());
    assertTrue("Local file still exists", new File(expected_cache_name).exists());
    assertTrue("File time should have been updated",
            localfs.getFileStatus(expected_cache_path).getModificationTime() >= _test_file_time);
}
From source file: com.ikanow.aleph2.core.shared.utils.TestJarCacheUtils.java
License: Apache License
@Test
public void test_localFilePresentAndNew()
        throws InterruptedException, ExecutionException, AccessControlException, FileAlreadyExistsException,
        FileNotFoundException, ParentNotDirectoryException, IOException {
    final FileContext localfs = FileContext.getLocalFSFileContext(new Configuration());

    final String expected_cache_name = _globals.local_cached_jar_dir().replace(File.separator, "/")
            + "test1.cache.jar";
    final Path expected_cache_path = localfs.makeQualified(new Path(expected_cache_name));

    // Just make sure we've deleted the old file
    try {
        new File(expected_cache_name).delete();
    } catch (Exception e) {
    }

    assertTrue("Remote file exists", new File(_test_file_path).exists());
    assertFalse("Local file doesn't exist", new File(expected_cache_name).exists());

    // Now create the file
    localfs.create(expected_cache_path, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE));
    localfs.setTimes(expected_cache_path, _test_file_time + 10000, _test_file_time + 10000);

    // check something has happened:
    assertEquals(_test_file_time + 10000, localfs.getFileStatus(expected_cache_path).getModificationTime());

    // Now run the test routine
    final SharedLibraryBean library_bean = BeanTemplateUtils.build(SharedLibraryBean.class)
            .with(SharedLibraryBean::path_name, _test_file_path).with(SharedLibraryBean::_id, "test1").done()
            .get();

    final Validation<BasicMessageBean, String> ret_val_1 = JarCacheUtils.getCachedJar(
            _globals.local_cached_jar_dir(), library_bean, _mock_hdfs, "test1", new TestMessageBean()).get();

    assertEquals(expected_cache_path.toString(), ret_val_1.success());
    assertTrue("Local file still exists", new File(expected_cache_name).exists());
    assertEquals(localfs.getFileStatus(expected_cache_path).getModificationTime(), _test_file_time + 10000);
}
From source file: com.ikanow.aleph2.management_db.mongodb.services.IkanowV1SyncService_LibraryJars.java
License: Apache License
protected static void copyFile(final String binary_id, final String path, final IStorageService aleph2_fs,
        final GridFS share_fs) throws IOException {
    try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
        final GridFSDBFile file = share_fs.find(new ObjectId(binary_id));
        file.writeTo(out);
        final FileContext fs = aleph2_fs.getUnderlyingPlatformDriver(FileContext.class, Optional.empty()).get();
        final Path file_path = fs.makeQualified(new Path(path));
        try (FSDataOutputStream outer = fs.create(file_path,
                EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
                org.apache.hadoop.fs.Options.CreateOpts.createParent())) {
            outer.write(out.toByteArray());
        }
    }
}
From source file: com.ikanow.aleph2.management_db.services.SharedLibraryCrudService.java
License: Apache License
@Override
public ManagementFuture<Boolean> deleteObjectBySpec(QueryComponent<SharedLibraryBean> unique_spec) {
    return FutureUtils.createManagementFuture(
            _underlying_library_db.get().getObjectBySpec(unique_spec).thenCompose(lib -> {
                if (lib.isPresent()) {
                    try {
                        final FileContext fs = _storage_service.get()
                                .getUnderlyingPlatformDriver(FileContext.class, Optional.empty()).get();
                        fs.delete(fs.makeQualified(new Path(lib.get().path_name())), false);
                    } catch (Exception e) {
                        // i suppose we don't really care if it fails..
                        // (maybe add a message?)
                        //DEBUG
                        //e.printStackTrace();
                    }
                    return _underlying_library_db.get().deleteObjectBySpec(unique_spec);
                } else {
                    return CompletableFuture.completedFuture(false);
                }
            }));
}
From source file: com.ikanow.aleph2.remote.hdfs_test.SimpleHdfsTest.java
License: Apache License
public void runTest() throws AccessControlException, FileNotFoundException, UnsupportedFileSystemException,
        IllegalArgumentException, IOException {
    final String temp_dir = System.getProperty("java.io.tmpdir") + File.separator;

    final IStorageService storage = _service_context.getStorageService();
    final FileContext fc = (FileContext) storage
            .getUnderlyingPlatformDriver(FileContext.class, Optional.empty()).get();
    final FileContext lfc = (FileContext) storage
            .getUnderlyingPlatformDriver(FileContext.class, IStorageService.LOCAL_FS).get();

    System.out.println("FILES IN BUCKET ROOT");
    final RemoteIterator<LocatedFileStatus> it = fc.util().listFiles(new Path(storage.getBucketRootPath()),
            true);
    boolean first = true;
    while (it.hasNext()) {
        final LocatedFileStatus lfs = it.next();
        if (first) {
            first = false;
            lfc.util().copy(lfs.getPath(), lfc.makeQualified(new Path(temp_dir + "ALEX.txt")));
        }
        System.out.println(lfs);
    }

    System.out.println("FILES/DIRS IN BUCKET ROOT");
    Stream<FileStatus> dirstream = Arrays.stream(fc.util().listStatus(new Path(storage.getBucketRootPath())));
    dirstream.forEach(fs -> System.out.println(fs));
}
From source file: org.apache.solr.hadoop.hack.MiniMRYarnCluster.java
License: Apache License
@Override
public void serviceInit(Configuration conf) throws Exception {
    conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
    if (conf.get(MRJobConfig.MR_AM_STAGING_DIR) == null) {
        conf.set(MRJobConfig.MR_AM_STAGING_DIR,
                new File(getTestWorkDir(), "apps_staging_dir/").getAbsolutePath());
    }

    // By default, VMEM monitoring disabled, PMEM monitoring enabled.
    if (!conf.getBoolean(MRConfig.MAPREDUCE_MINICLUSTER_CONTROL_RESOURCE_MONITORING,
            MRConfig.DEFAULT_MAPREDUCE_MINICLUSTER_CONTROL_RESOURCE_MONITORING)) {
        conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
        conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false);
    }

    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "000");

    try {
        Path stagingPath = FileContext.getFileContext(conf)
                .makeQualified(new Path(conf.get(MRJobConfig.MR_AM_STAGING_DIR)));
        /*
         * Re-configure the staging path on Windows if the file system is localFs.
         * We need to use an absolute path that contains the drive letter. The unit
         * test could run on a different drive than the AM. We can run into the
         * issue that job files are localized to the drive where the test runs on,
         * while the AM starts on a different drive and fails to find the job
         * metafiles. Using an absolute path can avoid this ambiguity.
         */
        if (Path.WINDOWS) {
            if (LocalFileSystem.class.isInstance(stagingPath.getFileSystem(conf))) {
                conf.set(MRJobConfig.MR_AM_STAGING_DIR,
                        new File(conf.get(MRJobConfig.MR_AM_STAGING_DIR)).getAbsolutePath());
            }
        }
        FileContext fc = FileContext.getFileContext(stagingPath.toUri(), conf);
        if (fc.util().exists(stagingPath)) {
            LOG.info(stagingPath + " exists! deleting...");
            fc.delete(stagingPath, true);
        }
        LOG.info("mkdir: " + stagingPath);
        // mkdir the staging directory so that right permissions are set while running as proxy user
        fc.mkdir(stagingPath, null, true);
        // mkdir done directory as well
        String doneDir = JobHistoryUtils.getConfiguredHistoryServerDoneDirPrefix(conf);
        Path doneDirPath = fc.makeQualified(new Path(doneDir));
        fc.mkdir(doneDirPath, null, true);
    } catch (IOException e) {
        throw new YarnRuntimeException("Could not create staging directory. ", e);
    }

    conf.set(MRConfig.MASTER_ADDRESS, "test"); // The default is local, because of which shuffle doesn't happen

    // configure the shuffle service in NM
    conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,
            new String[] { ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID });
    conf.setClass(String.format(Locale.ENGLISH, YarnConfiguration.NM_AUX_SERVICE_FMT,
            ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID), ShuffleHandler.class, Service.class);

    // Non-standard shuffle port
    conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);

    conf.setClass(YarnConfiguration.NM_CONTAINER_EXECUTOR, DefaultContainerExecutor.class,
            ContainerExecutor.class);

    // TestMRJobs is for testing non-uberized operation only; see TestUberAM
    // for corresponding uberized tests.
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);

    super.serviceInit(conf);
}