List of usage examples for org.apache.hadoop.fs FileSystem getHomeDirectory
public Path getHomeDirectory()
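getHomeDirectory() returns the current user's home directory in this filesystem, qualified with the filesystem's scheme and authority (on HDFS this is typically /user/&lt;username&gt;). A minimal sketch of the staging pattern the examples below build on; the method name resolveInHome and the "myapp/data.txt" suffix are hypothetical, chosen only for illustration:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public static Path resolveInHome(Configuration conf) throws IOException {
    // FileSystem.get() picks up fs.defaultFS from the configuration
    FileSystem fs = FileSystem.get(conf);
    // e.g. hdfs://namenode:8020/user/alice when running against HDFS
    Path home = fs.getHomeDirectory();
    // resolve an application-specific path relative to the home directory
    return new Path(home, "myapp/data.txt"); // hypothetical suffix
}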
From source file:org.hdl.caffe.yarn.app.Client.java
License:Apache License
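Stages a local file under the submitting user's home directory, under a path keyed by application name and id: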
private String copyLocalFileToDfs(FileSystem fs, String appId, String srcFilePath, String dstFileName)
        throws IOException {
    String suffix = CaffeYarnConstants.APP_NAME + "/" + appId + "/" + dstFileName;
    Path dst = new Path(fs.getHomeDirectory(), suffix);
    if (srcFilePath != null) {
        fs.copyFromLocalFile(new Path(srcFilePath), dst);
    }
    LOG.info("Copy " + srcFilePath + " to " + dst.toString());
    return dst.toString();
}
From source file:org.hdl.tensorflow.yarn.util.Utils.java
License:Apache License
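The same staging pattern as a static utility, joining the path components with Path.SEPARATOR: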
public static Path copyLocalFileToDfs(FileSystem fs, String appId, Path srcPath, String dstFileName)
        throws IOException {
    Path dstPath = new Path(fs.getHomeDirectory(),
            Constants.DEFAULT_APP_NAME + Path.SEPARATOR + appId + Path.SEPARATOR + dstFileName);
    LOG.info("Copying " + srcPath + " to " + dstPath);
    fs.copyFromLocalFile(srcPath, dstPath);
    return dstPath;
}
From source file:org.janusgraph.hadoop.config.job.AbstractDistCacheConfigurer.java
License:Apache License
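Uploads a file into a temporary lib directory under the destination filesystem's home directory, skipping the copy when the local filesystem is the destination or the remote copy is already up to date: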
protected Path uploadFileIfNecessary(FileSystem localFS, Path localPath, FileSystem destFS) throws IOException {
    // Fast path for local FS -- DistributedCache + local JobRunner seems to copy/link files automatically
    if (destFS.equals(localFS)) {
        log.debug("Skipping file upload for {} (destination filesystem {} equals local filesystem)",
                localPath, destFS);
        return localPath;
    }

    Path destPath = new Path(destFS.getHomeDirectory() + "/" + HDFS_TMP_LIB_DIR + "/" + localPath.getName());

    Stats fileStats = null;
    try {
        fileStats = compareModtimes(localFS, localPath, destFS, destPath);
    } catch (IOException e) {
        log.warn("Unable to read or stat file: localPath={}, destPath={}, destFS={}",
                localPath, destPath, destFS);
    }

    if (fileStats != null && !fileStats.isRemoteCopyCurrent()) {
        log.debug("Copying {} to {}", localPath, destPath);
        destFS.copyFromLocalFile(localPath, destPath);
        if (null != fileStats.local) {
            final long mtime = fileStats.local.getModificationTime();
            log.debug("Setting modtime on {} to {}", destPath, mtime);
            destFS.setTimes(destPath, mtime, -1); // -1 means leave atime alone
        }
    }

    return destPath;
}
From source file:org.jwebsocket.plugins.filesystem.FileSystemPlugIn.java
License:Open Source License
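A jWebSocket plug-in handler that saves an uploaded file both to a local directory and, for binary payloads, into the user's HDFS home directory: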
private void saveToHDFS(WebSocketConnector aConnector, Token aToken) throws IOException, InterruptedException {
    if (mLog.isDebugEnabled()) {
        mLog.debug("Processing 'save to HDFS'...");
    }
    Configuration conf = new Configuration();
    conf.setClassLoader(JWebSocketXmlConfigInitializer.getClassLoader());
    conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
    FileSystem fs = FileSystem.get(URI.create("hdfs://211.189.127.45:8020"), conf, "yarn");
    mLog.debug("setup HDFS complete.");

    TokenServer lServer = getServer();
    String lMsg;

    // check if user is allowed to run 'save' command
    if (!SecurityFactory.hasRight(lServer.getUsername(aConnector), NS_FILESYSTEM + ".save")) {
        if (mLog.isDebugEnabled()) {
            mLog.debug("Returning 'Access denied'...");
        }
        lServer.sendToken(aConnector, lServer.createAccessDenied(aToken));
        return;
    }

    // instantiate response token
    Token lResponse = lServer.createResponse(aToken);

    // obtain required parameters for file load operation
    String lFilename = aToken.getString("filename");
    String lScope = aToken.getString("scope", JWebSocketCommonConstants.SCOPE_PRIVATE);

    // scope may be "private" or "public"
    String lBaseDir;
    if (JWebSocketCommonConstants.SCOPE_PRIVATE.equals(lScope)) {
        String lUsername = getUsername(aConnector);
        lBaseDir = getString(PRIVATE_DIR_KEY, PRIVATE_DIR_DEF);
        if (lUsername != null) {
            lBaseDir = JWebSocketConfig.expandEnvAndJWebSocketVars(lBaseDir).replace("{username}", lUsername);
        } else {
            lMsg = "not authenticated to save private file";
            if (mLog.isDebugEnabled()) {
                mLog.debug(lMsg);
            }
            lResponse.setInteger("code", -1);
            lResponse.setString("msg", lMsg);
            // send error response to requester
            lServer.sendToken(aConnector, lResponse);
            return;
        }
    } else if (JWebSocketCommonConstants.SCOPE_PUBLIC.equals(lScope)) {
        lBaseDir = JWebSocketConfig.expandEnvAndJWebSocketVars(getString(PUBLIC_DIR_KEY, PUBLIC_DIR_DEF));
    } else {
        lMsg = "invalid scope";
        if (mLog.isDebugEnabled()) {
            mLog.debug(lMsg);
        }
        lResponse.setInteger("code", -1);
        lResponse.setString("msg", lMsg);
        // send error response to requester
        lServer.sendToken(aConnector, lResponse);
        return;
    }

    Boolean lNotify = aToken.getBoolean("notify", false);
    String lData = aToken.getString("data");
    String lEncoding = aToken.getString("encoding", "base64");
    byte[] lBA = null;
    try {
        if ("base64".equals(lEncoding)) {
            int lIdx = lData.indexOf(',');
            if (lIdx >= 0) {
                lData = lData.substring(lIdx + 1);
            }
            lBA = Base64.decodeBase64(lData);
        } else {
            lBA = lData.getBytes("UTF-8");
        }
    } catch (Exception lEx) {
        mLog.error(Logging.getSimpleExceptionMessage(lEx, "saving file"));
    }

    // complete the response token
    String lFullPath = lBaseDir + lFilename;
    File lFile = new File(lFullPath);
    try {
        // prevent two threads at a time writing to the same file
        synchronized (this) {
            // force create folder if not yet exists
            File lDir = new File(FilenameUtils.getFullPath(lFullPath));
            FileUtils.forceMkdir(lDir);
            if (lBA != null) {
                FileUtils.writeByteArrayToFile(lFile, lBA);
                // mirror the payload into the user's HDFS home directory;
                // fs.create() returns an FSDataOutputStream, so write to it directly
                FSDataOutputStream lOut = fs.create(new Path(fs.getHomeDirectory(), lFilename), true);
                try {
                    lOut.write(lBA);
                } finally {
                    lOut.close();
                }
            } else {
                FileUtils.writeStringToFile(lFile, lData, "UTF-8");
            }
        }
    } catch (Exception lEx) {
        lResponse.setInteger("code", -1);
        lMsg = lEx.getClass().getSimpleName() + " on save: " + lEx.getMessage();
        lResponse.setString("msg", lMsg);
        mLog.error(lMsg);
    }

    // send response to requester
    lServer.sendToken(aConnector, lResponse);

    // send notification event to other affected clients
    // to allow them to update their content (if desired)
    if (lNotify) {
        // create token of type "event"
        Token lEvent = TokenFactory.createToken(BaseToken.TT_EVENT);
        // include name space of this plug-in
        lEvent.setNS(NS_FILESYSTEM);
        lEvent.setString("name", "filesaved");
        lEvent.setString("filename", lFilename);
        lEvent.setString("sourceId", aConnector.getId());
        lEvent.setString("url", getString(WEB_ROOT_KEY, WEB_ROOT_DEF) + lFilename);
        // TODO: Limit notification to desired scope
        lServer.broadcastToken(lEvent);
    }
}
From source file:org.kitesdk.minicluster.HBaseService.java
License:Apache License
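Places a mini HBase cluster's root directory in an "hbase" folder under the qualified home directory while configuring ports and bind addresses: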
/**
 * Configure the HBase cluster before launching it.
 *
 * @param config already created Hadoop configuration we'll further configure for HDFS
 * @param zkClientPort the client port zookeeper is listening on
 * @param hdfsFs the HDFS FileSystem this HBase cluster will run on top of
 * @param bindIP the IP address to force bind all sockets on; if null, defaults are used
 * @param masterPort the port the master listens on
 * @param regionserverPort the port the regionserver listens on
 * @return the updated Configuration object
 * @throws IOException
 */
private static Configuration configureHBaseCluster(Configuration config, int zkClientPort, FileSystem hdfsFs,
        String bindIP, int masterPort, int regionserverPort) throws IOException {
    // Configure the zookeeper port
    config.set(HConstants.ZOOKEEPER_CLIENT_PORT, Integer.toString(zkClientPort));

    // Initialize HDFS path configurations required by HBase
    Path hbaseDir = new Path(hdfsFs.makeQualified(hdfsFs.getHomeDirectory()), "hbase");
    FSUtils.setRootDir(config, hbaseDir);
    hdfsFs.mkdirs(hbaseDir);
    config.set("fs.defaultFS", hdfsFs.getUri().toString());
    config.set("fs.default.name", hdfsFs.getUri().toString());
    FSUtils.setVersion(hdfsFs, hbaseDir);

    // Configure the bind addresses and ports. If running in Openshift, we only
    // have permission to bind to the private IP address, accessible through an
    // environment variable.
    logger.info("HBase force binding to ip: " + bindIP);
    config.set("hbase.master.ipc.address", bindIP);
    config.set(HConstants.MASTER_PORT, Integer.toString(masterPort));
    config.set("hbase.regionserver.ipc.address", bindIP);
    config.set(HConstants.REGIONSERVER_PORT, Integer.toString(regionserverPort));
    config.set(HConstants.ZOOKEEPER_QUORUM, bindIP);

    // By default, the HBase master and regionservers will report to zookeeper
    // that their hostname is what they determine by reverse DNS lookup, and not
    // what we use as the bind address. This means when we set the bind
    // address, daemons won't actually be able to connect to each other if they
    // are different. Here, we do something that's illegal in 48 states - use
    // reflection to override the private static final cachedHostname field in
    // the DNS class. This way, we are forcing the hostname that reverse dns
    // finds. This may not be compatible with newer versions of Hadoop.
    try {
        Field cachedHostname = DNS.class.getDeclaredField("cachedHostname");
        cachedHostname.setAccessible(true);
        Field modifiersField = Field.class.getDeclaredField("modifiers");
        modifiersField.setAccessible(true);
        modifiersField.setInt(cachedHostname, cachedHostname.getModifiers() & ~Modifier.FINAL);
        cachedHostname.set(null, bindIP);
    } catch (Exception e) {
        // Reflection can throw so many checked exceptions. Let's wrap in an IOException.
        throw new IOException(e);
    }

    // By setting the info ports to -1, we won't launch the master or
    // regionserver info web interfaces
    config.set(HConstants.MASTER_INFO_PORT, "-1");
    config.set(HConstants.REGIONSERVER_INFO_PORT, "-1");
    return config;
}
From source file:org.lilyproject.hadooptestfw.fork.HBaseTestingUtility.java
License:Apache License
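Creates the HBase root dir as an "hbase" subdirectory of the home directory, so that other files in the home directory are not mistaken for HBase tables: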
/**
 * Creates an hbase rootdir in user home directory. Also creates hbase
 * version file. Normally you won't make use of this method. Root hbasedir
 * is created for you as part of mini cluster startup. You'd only use this
 * method if you were doing manual operation.
 *
 * @return Fully qualified path to hbase root dir
 */
public Path createRootDir() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    // Lily change: create "hbase" subdirectory under home directory
    // to serve as hbaseRootdir. The home directory can contain other
    // directories and files, which are not necessarily hbase tables.
    // For instance a 'target' dir created by MiniMRCluster.
    // Cfr. HBASE-5317 and HBASE-4025
    Path hbaseRootdir = fs.makeQualified(new Path(fs.getHomeDirectory(), "hbase"));
    this.conf.set(HConstants.HBASE_DIR, hbaseRootdir.toString());
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
}
From source file:org.lilyproject.testfw.HadoopLauncher.java
License:Apache License
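Uses the qualified home directory itself as the hbase.rootdir when bringing up a mini DFS, ZooKeeper, and HBase stack: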
public MiniHBaseCluster startMiniCluster(final int servers) throws Exception {
    // Make a new random dir to home everything in. Set it as system property.
    // minidfs reads home from system property.
    this.clusterTestBuildDir = setupClusterTestBuildDir();
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestBuildDir.getPath());
    // Bring up mini dfs cluster. This spews a bunch of warnings about missing
    // scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
    startMiniDFSCluster(servers, this.clusterTestBuildDir);

    // Mangle conf so fs parameter points to minidfs we just started up
    FileSystem fs = this.dfsCluster.getFileSystem();
    this.conf.set("fs.defaultFS", fs.getUri().toString());
    // Do old style too just to be safe.
    this.conf.set("fs.default.name", fs.getUri().toString());
    this.dfsCluster.waitClusterUp();

    // Start up a zk cluster.
    if (this.zkCluster == null) {
        startMiniZKCluster(this.clusterTestBuildDir);
    }

    // Now do the mini hbase cluster. Set the hbase.rootdir in config.
    Path hbaseRootdir = fs.makeQualified(fs.getHomeDirectory());
    this.conf.set(HConstants.HBASE_DIR, hbaseRootdir.toString());
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    Configuration c = new Configuration(this.conf);
    this.hbaseCluster = new MiniHBaseCluster(c, servers);

    // Don't leave here till we've done a successful scan of the .META.
    HTable t = new HTable(c, HConstants.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
        continue;
    }
    return this.hbaseCluster;
}
From source file:org.moya.core.yarn.Client.java
License:Apache License
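A complete YARN client run() that stages the application master jar, a runnable lib jar, and optional log4j properties under the home directory before submitting the application: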
/**
 * Main run function for the client
 *
 * @return true if application completed successfully
 * @throws IOException
 * @throws YarnException
 */
public boolean run() throws IOException, YarnException {
    LOG.info("Running Client");
    yarnClient.start();

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());

    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports();
    LOG.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress"
                + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers"
                + node.getNumContainers());
    }

    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity="
            + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
            + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount="
            + queueInfo.getChildQueues().size());

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                    + userAcl.name());
        }
    }

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    // TODO get min/max resource capabilities from RM and change memory ask if needed
    // If we do not have min/max, we may not be able to correctly request
    // the required resources from the RM for the app master
    // Memory ask has to be a multiple of min and less than max.
    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capability of resources in this cluster " + maxMem);

    // A resource ask cannot exceed the max.
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified="
                + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();
    appContext.setApplicationName(appName);

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem
    // Create a local resource to point to the destination jar path
    FileSystem fs = FileSystem.get(conf);
    Path src = new Path(appMasterJar);
    String pathSuffix = appName + "/" + appId.getId() + "/AppMaster.jar";
    Path dst = new Path(fs.getHomeDirectory(), pathSuffix);
    fs.copyFromLocalFile(false, true, src, dst);
    FileStatus destStatus = fs.getFileStatus(dst);
    LocalResource amJarRsrc = Records.newRecord(LocalResource.class);

    // Set the type of resource - file or archive
    // archives are untarred at destination
    // we don't need the jar file to be untarred
    amJarRsrc.setType(LocalResourceType.FILE);
    // Set visibility of the resource
    // Setting to most private option
    amJarRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
    // Set the resource to be copied over
    amJarRsrc.setResource(ConverterUtils.getYarnUrlFromPath(dst));
    // Set timestamp and length of file so that the framework
    // can do basic sanity checks for the local resource
    // after it has been copied over to ensure it is the same
    // resource the client intended to use with the application
    amJarRsrc.setTimestamp(destStatus.getModificationTime());
    amJarRsrc.setSize(destStatus.getLen());
    localResources.put("AppMaster.jar", amJarRsrc);

    // Setup App Master Constants
    String amJarLocation = "";
    long amJarLen = 0;
    long amJarTimestamp = 0;

    // adding info so we can add the jar to the App master container path
    amJarLocation = dst.toUri().toString();
    FileStatus shellFileStatus = fs.getFileStatus(dst);
    amJarLen = shellFileStatus.getLen();
    amJarTimestamp = shellFileStatus.getModificationTime();

    // ADD libs needed that will be untared
    // Keep it all archived for now so add it as a file...
    src = new Path(localLibJar);
    pathSuffix = appName + "/" + appId.getId() + "/Runnable.jar";
    dst = new Path(fs.getHomeDirectory(), pathSuffix);
    fs.copyFromLocalFile(false, true, src, dst);
    destStatus = fs.getFileStatus(dst);
    LocalResource libsJarRsrc = Records.newRecord(LocalResource.class);
    libsJarRsrc.setType(LocalResourceType.FILE);
    libsJarRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
    libsJarRsrc.setResource(ConverterUtils.getYarnUrlFromPath(dst));
    libsJarRsrc.setTimestamp(destStatus.getModificationTime());
    // size must be set as well so the framework can verify the copied resource
    libsJarRsrc.setSize(destStatus.getLen());
    localResources.put("Runnable.jar", libsJarRsrc);

    // Setup Libs Constants
    String libsLocation = "";
    long libsLen = 0;
    long libsTimestamp = 0;

    // adding info so we can add the jar to the App master container path
    libsLocation = dst.toUri().toString();
    FileStatus libsFileStatus = fs.getFileStatus(dst);
    libsLen = libsFileStatus.getLen();
    libsTimestamp = libsFileStatus.getModificationTime();

    // Set the log4j properties if needed
    if (!log4jPropFile.isEmpty()) {
        Path log4jSrc = new Path(log4jPropFile);
        Path log4jDst = new Path(fs.getHomeDirectory(), "log4j.props");
        fs.copyFromLocalFile(false, true, log4jSrc, log4jDst);
        FileStatus log4jFileStatus = fs.getFileStatus(log4jDst);
        LocalResource log4jRsrc = Records.newRecord(LocalResource.class);
        log4jRsrc.setType(LocalResourceType.FILE);
        log4jRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
        log4jRsrc.setResource(ConverterUtils.getYarnUrlFromURI(log4jDst.toUri()));
        log4jRsrc.setTimestamp(log4jFileStatus.getModificationTime());
        log4jRsrc.setSize(log4jFileStatus.getLen());
        localResources.put("log4j.properties", log4jRsrc);
    }

    // Set local resource info into app master container launch context
    amContainer.setLocalResources(localResources);

    // Set the env variables to be setup in the env where the application master will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();

    // put the AM jar into env and MOYA Runnable
    // using the env info, the application master will create the correct
    // local resource for the eventual containers that will be launched
    // to execute the shell scripts
    env.put(MConstants.APPLICATIONMASTERJARLOCATION, amJarLocation);
    env.put(MConstants.APPLICATIONMASTERJARTIMESTAMP, Long.toString(amJarTimestamp));
    env.put(MConstants.APPLICATIONMASTERJARLEN, Long.toString(amJarLen));
    env.put(MConstants.LIBSLOCATION, libsLocation);
    env.put(MConstants.LIBSTIMESTAMP, Long.toString(libsTimestamp));
    env.put(MConstants.LIBSLEN, Long.toString(libsLen));
    env.put(MConstants.ZOOKEEPERHOSTS, ZKHosts);

    // Add AppMaster.jar location to classpath
    // At some point we should not be required to add
    // the hadoop specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$()).append(File.pathSeparatorChar)
            .append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");

    // add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }
    env.put("CLASSPATH", classPathEnv.toString());
    amContainer.setEnvironment(env);

    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);

    // Set java executable command
    LOG.info("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    // Set class name
    vargs.add(appMasterMainClass);
    // Set params for Application Master
    vargs.add("--container_memory " + String.valueOf(containerMemory));
    vargs.add("--num_containers " + String.valueOf(numContainers));
    vargs.add("--priority " + String.valueOf(moyaPriority));
    if (!localLibJar.isEmpty()) {
        vargs.add("--lib " + localLibJar + "");
    }
    if (debugFlag) {
        vargs.add("--debug");
    }

    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    amContainer.setCommands(commands);

    // Set up resource type requirements
    // For now, only memory is supported so we set memory requirements
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMemory);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application
    // Not needed in this scenario
    // amContainer.setServiceData(serviceData);

    // The following are not required for launching an application master
    // amContainer.setContainerId(containerId);

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    // TODO - what is the range for priority? how to decide?
    pri.setPriority(amPriority);
    appContext.setPriority(pri);

    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);

    // Submit the application to the applications manager
    // SubmitApplicationResponse submitResp = applicationsManager.submitApplication(appRequest);
    // Ignore the response as either a valid response object is returned on success
    // or an exception is thrown to denote some form of a failure
    LOG.info("Submitting application to ASM");

    yarnClient.submitApplication(appContext);

    // TODO
    // Try submitting the same request again
    // app submission failure?

    // Monitor the application
    return monitorApplication(appId);
}
From source file:org.mrgeo.hdfs.utils.HadoopFileUtils.java
License:Apache License
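Derives a per-user "tmp" directory under the home directory, creating it on first use: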
/**
 * Returns a tmp directory; if the tmp directory doesn't exist, it is created.
 *
 * @return the tmp directory path
 * @throws IOException
 */
public static Path getTempDir(final Configuration conf) throws IOException {
    final FileSystem fs = getFileSystem(conf);
    final Path parent = fs.getHomeDirectory();
    final Path tmp = new Path(parent, "tmp");
    if (!fs.exists(tmp)) {
        fs.mkdirs(tmp);
    }
    return tmp;
}
From source file:org.opencb.opencga.storage.hadoop.variant.HadoopVariantStorageTest.java
License:Apache License
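Resolves an "opencga_test/" intermediate HDFS directory against the home directory URI while configuring a test storage engine: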
static StorageConfiguration updateStorageConfiguration(StorageConfiguration storageConfiguration,
        Configuration conf) throws IOException {
    storageConfiguration.setDefaultStorageEngineId(HadoopVariantStorageEngine.STORAGE_ENGINE_ID);
    StorageEtlConfiguration variantConfiguration = storageConfiguration
            .getStorageEngine(HadoopVariantStorageEngine.STORAGE_ENGINE_ID).getVariant();
    ObjectMap options = variantConfiguration.getOptions();
    options.put(HadoopVariantStorageEngine.EXTERNAL_MR_EXECUTOR, TestMRExecutor.class);
    TestMRExecutor.setStaticConfiguration(conf);

    options.put(GenomeHelper.CONFIG_HBASE_ADD_DEPENDENCY_JARS, false);
    EnumSet<Compression.Algorithm> supportedAlgorithms = EnumSet.of(Compression.Algorithm.NONE,
            HBaseTestingUtility.getSupportedCompressionAlgorithms());
    options.put(ArchiveDriver.CONFIG_ARCHIVE_TABLE_COMPRESSION,
            supportedAlgorithms.contains(Compression.Algorithm.GZ) ? Compression.Algorithm.GZ.getName()
                    : Compression.Algorithm.NONE.getName());
    options.put(VariantTableDriver.CONFIG_VARIANT_TABLE_COMPRESSION,
            supportedAlgorithms.contains(Compression.Algorithm.SNAPPY) ? Compression.Algorithm.SNAPPY.getName()
                    : Compression.Algorithm.NONE.getName());

    FileSystem fs = FileSystem.get(HadoopVariantStorageTest.configuration.get());
    String intermediateDirectory = fs.getHomeDirectory().toUri().resolve("opencga_test/").toString();
    System.out.println(HadoopVariantStorageEngine.OPENCGA_STORAGE_HADOOP_INTERMEDIATE_HDFS_DIRECTORY + " = "
            + intermediateDirectory);
    options.put(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
            conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
    options.put(HadoopVariantStorageEngine.OPENCGA_STORAGE_HADOOP_INTERMEDIATE_HDFS_DIRECTORY,
            intermediateDirectory);

    options.put(ArchiveDriver.CONFIG_ARCHIVE_TABLE_PRESPLIT_SIZE, 5);
    options.put(AbstractVariantTableDriver.CONFIG_VARIANT_TABLE_PRESPLIT_SIZE, 5);

    variantConfiguration.getDatabase().setHosts(Collections.singletonList(
            "hbase://" + HadoopVariantStorageTest.configuration.get().get(HConstants.ZOOKEEPER_QUORUM)));
    return storageConfiguration;
}