List of usage examples for org.apache.hadoop.fs CommonConfigurationKeysPublic FS_DEFAULT_NAME_KEY
String FS_DEFAULT_NAME_KEY
To view the source code for org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, click the Source link.
From source file:co.cask.cdap.master.startup.FileSystemCheck.java
License:Apache License
@Override public void run() { String user = cConf.get(Constants.CFG_HDFS_USER); String rootPath = cConf.get(Constants.CFG_HDFS_NAMESPACE); LOG.info("Checking FileSystem availability."); Location rootLocation = locationFactory.create(rootPath); boolean rootExists; try {//www. ja v a2 s . co m rootExists = rootLocation.exists(); LOG.info(" FileSystem availability successfully verified."); if (rootExists) { if (!rootLocation.isDirectory()) { throw new RuntimeException(String.format( "%s is not a directory. Change it to a directory, or update %s to point to a different location.", rootPath, Constants.CFG_HDFS_NAMESPACE)); } } } catch (IOException e) { throw new RuntimeException(String.format( "Unable to connect to the FileSystem with %s set to %s. " + "Please check that the FileSystem is running and that the correct " + "Hadoop configuration (e.g. core-site.xml, hdfs-site.xml) " + "and Hadoop libraries are included in the CDAP Master classpath.", CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, hConf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)), e); } LOG.info("Checking that user {} has permission to write to {} on the FileSystem.", user, rootPath); if (rootExists) { // try creating a tmp file to check permissions try { Location tmpFile = rootLocation.getTempFile("tmp"); if (!tmpFile.createNew()) { throw new RuntimeException(String.format( "Could not make a temp file in directory %s on the FileSystem. " + "Please check that user %s has permission to write to %s, " + "or create the directory manually with write permissions.", rootPath, user, rootPath)); } else { tmpFile.delete(); } } catch (IOException e) { throw new RuntimeException(String.format( "Could not make a temp file in directory %s on the FileSystem. 
" + "Please check that user %s has permission to write to %s, " + "or create the directory manually with write permissions.", rootPath, user, rootPath), e); } } else { // try creating the directory to check permissions try { if (!rootLocation.mkdirs()) { throw new RuntimeException(String.format( "Could not make directory %s on the FileSystem. " + "Please check that user %s has permission to write to %s, " + "or create the directory manually with write permissions.", rootPath, user, rootPath)); } } catch (IOException e) { throw new RuntimeException(String.format( "Could not make directory %s on the FileSystem. " + "Please check that user %s has permission to write to %s, " + "or create the directory manually with write permissions.", rootPath, user, rootPath), e); } } LOG.info(" FileSystem permissions successfully verified."); }
From source file:com.cloudera.impala.service.JniFrontend.java
License:Apache License
/** * Return an empty string if the FileSystem configured in CONF refers to a * DistributedFileSystem (the only one supported by Impala) and Impala can list the root * directory "/". Otherwise, return an error string describing the issues. *//*from w w w . j a v a2 s . com*/ private String checkFileSystem(Configuration conf) { try { FileSystem fs = FileSystem.get(CONF); if (!(fs instanceof DistributedFileSystem)) { return "Unsupported file system. Impala only supports DistributedFileSystem " + "but the configured filesystem is: " + fs.getClass().getSimpleName() + "." + CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY + "(" + CONF.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY) + ")" + " might be set incorrectly"; } } catch (IOException e) { return "couldn't retrieve FileSystem:\n" + e.getMessage(); } try { FileSystemUtil.getTotalNumVisibleFiles(new Path("/")); } catch (IOException e) { return "Could not read the HDFS root directory at " + CONF.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY) + ". Error was: \n" + e.getMessage(); } return ""; }
From source file:com.cloudera.oryx.common.servcomp.OryxConfiguration.java
License:Open Source License
private static void configure(Configuration conf) { if (!Namespaces.isLocalComputation() || !Namespaces.isLocalData()) { File hadoopConfDir = findHadoopConfDir(); addResource(hadoopConfDir, "core-site.xml", conf); addResource(hadoopConfDir, "hdfs-site.xml", conf); addResource(hadoopConfDir, "mapred-site.xml", conf); addResource(hadoopConfDir, "yarn-site.xml", conf); String fsDefaultFS = conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY); if (fsDefaultFS == null || fsDefaultFS.equals(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT)) { // Standard config generated by Hadoop 2.0.x seemed to set fs.default.name instead of fs.defaultFS? conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, conf.get("fs.default.name")); }/* ww w.j a v a2s.co m*/ fixLzoCodecIssue(conf); } }
From source file:org.apache.hoya.core.build.InstanceBuilder.java
License:Apache License
public void propagateFilename() { String fsDefaultName = conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY); instanceDescription.getAppConfOperations() .set(OptionKeys.SITE_XML_PREFIX + CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName); instanceDescription.getAppConfOperations() .set(OptionKeys.SITE_XML_PREFIX + HoyaXmlConfKeys.FS_DEFAULT_NAME_CLASSIC, fsDefaultName); }
From source file:org.apache.hoya.providers.accumulo.AccumuloClientProvider.java
License:Apache License
public void propagateClientFSBinding(Map<String, String> sitexml) throws BadConfigException { String fsDefaultName = getConf().get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY); if (fsDefaultName == null) { throw new BadConfigException("Key not found in conf: {}", CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY); }/* w w w . j a va2s . c om*/ sitexml.put(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName); sitexml.put(HoyaXmlConfKeys.FS_DEFAULT_NAME_CLASSIC, fsDefaultName); }
From source file:org.apache.impala.service.JniFrontend.java
License:Apache License
/** * Return an empty string if the default FileSystem configured in CONF refers to a * DistributedFileSystem and Impala can list the root directory "/". Otherwise, * return an error string describing the issues. *//*from w ww . j ava 2s. com*/ private String checkFileSystem(Configuration conf) { try { FileSystem fs = FileSystem.get(CONF); if (!(fs instanceof DistributedFileSystem || fs instanceof S3AFileSystem)) { return "Currently configured default filesystem: " + fs.getClass().getSimpleName() + ". " + CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY + " (" + CONF.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY) + ")" + " is not supported."; } } catch (IOException e) { return "couldn't retrieve FileSystem:\n" + e.getMessage(); } try { FileSystemUtil.getTotalNumVisibleFiles(new Path("/")); } catch (IOException e) { return "Could not read the root directory at " + CONF.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY) + ". Error was: \n" + e.getMessage(); } return ""; }
From source file:org.apache.slider.core.build.InstanceBuilder.java
License:Apache License
public void propagateFilename() { String fsDefaultName = conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY); instanceDescription.getAppConfOperations() .set(OptionKeys.SITE_XML_PREFIX + CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName); instanceDescription.getAppConfOperations() .set(OptionKeys.SITE_XML_PREFIX + SliderXmlConfKeys.FS_DEFAULT_NAME_CLASSIC, fsDefaultName); }
From source file:org.apache.slider.providers.accumulo.AccumuloClientProvider.java
License:Apache License
public void propagateClientFSBinding(Map<String, String> sitexml) throws BadConfigException { String fsDefaultName = getConf().get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY); if (fsDefaultName == null) { throw new BadConfigException("Key not found in conf: {}", CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY); }//from w w w .ja v a 2 s. c o m sitexml.put(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName); sitexml.put(SliderXmlConfKeys.FS_DEFAULT_NAME_CLASSIC, fsDefaultName); }
From source file:org.apache.tajo.master.TajoMaster.java
License:Apache License
private void checkAndInitializeSystemDirectories() throws IOException { // Get Tajo root dir this.tajoRootPath = TajoConf.getTajoRootDir(systemConf); LOG.info("Tajo Root Directory: " + tajoRootPath); // Check and Create Tajo root dir this.defaultFS = tajoRootPath.getFileSystem(systemConf); systemConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, defaultFS.getUri().toString()); LOG.info("FileSystem (" + this.defaultFS.getUri() + ") is initialized."); if (!defaultFS.exists(tajoRootPath)) { defaultFS.mkdirs(tajoRootPath, new FsPermission(TAJO_ROOT_DIR_PERMISSION)); LOG.info("Tajo Root Directory '" + tajoRootPath + "' is created."); }/*from w w w .j a va 2 s.com*/ // Check and Create system and system resource dir Path systemPath = TajoConf.getSystemDir(systemConf); if (!defaultFS.exists(systemPath)) { defaultFS.mkdirs(systemPath, new FsPermission(SYSTEM_DIR_PERMISSION)); LOG.info("System dir '" + systemPath + "' is created"); } Path systemResourcePath = TajoConf.getSystemResourceDir(systemConf); if (!defaultFS.exists(systemResourcePath)) { defaultFS.mkdirs(systemResourcePath, new FsPermission(SYSTEM_RESOURCE_DIR_PERMISSION)); LOG.info("System resource dir '" + systemResourcePath + "' is created"); } // Get Warehouse dir this.wareHousePath = TajoConf.getWarehouseDir(systemConf); LOG.info("Tajo Warehouse dir: " + wareHousePath); // Check and Create Warehouse dir if (!defaultFS.exists(wareHousePath)) { defaultFS.mkdirs(wareHousePath, new FsPermission(WAREHOUSE_DIR_PERMISSION)); LOG.info("Warehouse dir '" + wareHousePath + "' is created"); } Path stagingPath = TajoConf.getDefaultRootStagingDir(systemConf); LOG.info("Staging dir: " + wareHousePath); if (!defaultFS.exists(stagingPath)) { defaultFS.mkdirs(stagingPath, new FsPermission(STAGING_ROOTDIR_PERMISSION)); LOG.info("Staging dir '" + stagingPath + "' is created"); } }
From source file:org.apache.tajo.master.YarnContainerProxy.java
License:Apache License
public static ContainerLaunchContext createCommonContainerLaunchContext(Configuration config, String queryId, boolean isMaster) { TajoConf conf = (TajoConf) config;//from w w w.j a v a 2 s. c o m ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class); try { ByteBuffer userToken = ByteBuffer .wrap(UserGroupInformation.getCurrentUser().getShortUserName().getBytes()); ctx.setTokens(userToken); } catch (IOException e) { e.printStackTrace(); } //////////////////////////////////////////////////////////////////////////// // Set the env variables to be setup //////////////////////////////////////////////////////////////////////////// LOG.info("Set the environment for the application master"); Map<String, String> environment = new HashMap<String, String>(); //String initialClassPath = getInitialClasspath(conf); environment.put(ApplicationConstants.Environment.SHELL.name(), "/bin/bash"); if (System.getenv(ApplicationConstants.Environment.JAVA_HOME.name()) != null) { environment.put(ApplicationConstants.Environment.JAVA_HOME.name(), System.getenv(ApplicationConstants.Environment.JAVA_HOME.name())); } // TODO - to be improved with org.apache.tajo.sh shell script Properties prop = System.getProperties(); if (prop.getProperty("tajo.test", "FALSE").equalsIgnoreCase("TRUE") || (System.getenv("tajo.test") != null && System.getenv("tajo.test").equalsIgnoreCase("TRUE"))) { LOG.info("tajo.test is TRUE"); environment.put(ApplicationConstants.Environment.CLASSPATH.name(), prop.getProperty("java.class.path", null)); environment.put("tajo.test", "TRUE"); } else { // Add AppMaster.jar location to classpath // At some point we should not be required to add // the hadoop specific classpaths to the env. // It should be provided out of the box. // For now setting all required classpaths including // the classpath to "." 
for the application jar StringBuilder classPathEnv = new StringBuilder("./"); //for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH)) { for (String c : YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH) { classPathEnv.append(':'); classPathEnv.append(c.trim()); } classPathEnv.append(":" + System.getenv("TAJO_BASE_CLASSPATH")); classPathEnv.append(":./log4j.properties:./*"); if (System.getenv("HADOOP_HOME") != null) { environment.put("HADOOP_HOME", System.getenv("HADOOP_HOME")); environment.put(ApplicationConstants.Environment.HADOOP_COMMON_HOME.name(), System.getenv("HADOOP_HOME")); environment.put(ApplicationConstants.Environment.HADOOP_HDFS_HOME.name(), System.getenv("HADOOP_HOME")); environment.put(ApplicationConstants.Environment.HADOOP_YARN_HOME.name(), System.getenv("HADOOP_HOME")); } if (System.getenv("TAJO_BASE_CLASSPATH") != null) { environment.put("TAJO_BASE_CLASSPATH", System.getenv("TAJO_BASE_CLASSPATH")); } environment.put(ApplicationConstants.Environment.CLASSPATH.name(), classPathEnv.toString()); } ctx.setEnvironment(environment); if (LOG.isDebugEnabled()) { LOG.debug("================================================="); for (Map.Entry<String, String> entry : environment.entrySet()) { LOG.debug(entry.getKey() + "=" + entry.getValue()); } LOG.debug("================================================="); } //////////////////////////////////////////////////////////////////////////// // Set the local resources //////////////////////////////////////////////////////////////////////////// Map<String, LocalResource> localResources = new HashMap<String, LocalResource>(); LOG.info("defaultFS: " + conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)); try { FileSystem fs = FileSystem.get(conf); FileContext fsCtx = FileContext.getFileContext(conf); Path systemConfPath = TajoConf.getSystemConfPath(conf); if (!fs.exists(systemConfPath)) { LOG.error("system_conf.xml (" + systemConfPath.toString() + ") Not Found"); } LocalResource 
systemConfResource = createApplicationResource(fsCtx, systemConfPath, LocalResourceType.FILE); localResources.put(TajoConstants.SYSTEM_CONF_FILENAME, systemConfResource); ctx.setLocalResources(localResources); } catch (IOException e) { LOG.error(e.getMessage(), e); } Map<String, ByteBuffer> serviceData = new HashMap<String, ByteBuffer>(); try { serviceData.put(PullServerAuxService.PULLSERVER_SERVICEID, PullServerAuxService.serializeMetaData(0)); } catch (IOException ioe) { LOG.error(ioe); } ctx.setServiceData(serviceData); return ctx; }