List of usage examples for the org.apache.hadoop.hdfs HdfsConfiguration constructor:
public HdfsConfiguration(Configuration conf)
From source file: co.cask.cdap.operations.hdfs.HDFSInfo.java
License: Apache License
@Nullable private URL getHAWebURL() throws IOException { String activeNamenode = null; String nameService = getNameService(); HdfsConfiguration hdfsConf = new HdfsConfiguration(conf); String nameNodePrincipal = conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, ""); hdfsConf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, nameNodePrincipal); for (String nnId : DFSUtil.getNameNodeIds(conf, nameService)) { HAServiceTarget haServiceTarget = new NNHAServiceTarget(hdfsConf, nameService, nnId); HAServiceProtocol proxy = haServiceTarget.getProxy(hdfsConf, 10000); HAServiceStatus serviceStatus = proxy.getServiceStatus(); if (HAServiceProtocol.HAServiceState.ACTIVE != serviceStatus.getState()) { continue; }//from w w w .ja v a2 s . com activeNamenode = DFSUtil.getNamenodeServiceAddr(hdfsConf, nameService, nnId); } if (activeNamenode == null) { throw new IllegalStateException("Could not find an active namenode"); } return rpcToHttpAddress(URI.create(activeNamenode)); }
From source file: com.mellanox.r4h.MiniDFSCluster.java
License: Apache License
/**
 * Modify the config and start up additional DataNodes. The info port for
 * DataNodes is guaranteed to use a free port.
 *
 * Data nodes can run with the name node in the mini cluster or
 * a real name node. For example, running with a real name node is useful
 * when running simulated data nodes with a real name node.
 * If minicluster's name node is null assume that the conf has been
 * set with the right address:port of the name node.
 *
 * @param conf
 *            the base configuration to use in starting the DataNodes. This
 *            will be modified as necessary.
 * @param numDataNodes
 *            Number of DataNodes to start; may be zero
 * @param storageType
 *            storage type used when creating the DataNode data directories
 * @param manageDfsDirs
 *            if true, the data directories for DataNodes will be
 *            created and {@link #DFS_DATANODE_DATA_DIR_KEY} will be
 *            set in the conf
 * @param operation
 *            the operation with which to start the DataNodes. If null
 *            or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
 * @param racks
 *            array of strings indicating the rack that each DataNode is on
 * @param hosts
 *            array of strings indicating the hostnames for each DataNode
 * @param simulatedCapacities
 *            array of capacities of the simulated data nodes
 * @param setupHostsFile
 *            add new nodes to dfs hosts files
 * @param checkDataNodeAddrConfig
 *            if true, only set DataNode port addresses if not already set in config
 * @param checkDataNodeHostConfig
 *            if true, only set DataNode hostname key if not already set in config
 * @param dnConfOverlays
 *            An array of {@link Configuration} objects that will overlay the
 *            global MiniDFSCluster Configuration for the corresponding DataNode.
 * @throws IllegalStateException
 *             if NameNode has been shutdown
 */
public synchronized void startDataNodes(Configuration conf, int numDataNodes, StorageType storageType,
        boolean manageDfsDirs, StartupOption operation, String[] racks, String[] hosts,
        long[] simulatedCapacities, boolean setupHostsFile, boolean checkDataNodeAddrConfig,
        boolean checkDataNodeHostConfig, Configuration[] dnConfOverlays) throws IOException {
    // RECOVER mode starts no DataNodes.
    if (operation == StartupOption.RECOVER) {
        return;
    }
    // Pin DataNodes to loopback; honor a pre-set hostname only when asked to check first.
    if (checkDataNodeHostConfig) {
        conf.setIfUnset(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
    } else {
        conf.set(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
    }
    int curDatanodesNum = dataNodes.size();
    // For the minicluster, the default initial delay for block reports is 0.
    if (conf.get(DFS_BLOCKREPORT_INITIAL_DELAY_KEY) == null) {
        conf.setLong(DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 0);
    }
    // If minicluster's name node is null assume that the conf has been
    // set with the right address:port of the name node.
    //
    // Validate that each per-node argument array covers all requested DataNodes.
    if (racks != null && numDataNodes > racks.length) {
        throw new IllegalArgumentException("The length of racks [" + racks.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    if (hosts != null && numDataNodes > hosts.length) {
        throw new IllegalArgumentException("The length of hosts [" + hosts.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    // Generate some hostnames if required
    if (racks != null && hosts == null) {
        hosts = new String[numDataNodes];
        for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++) {
            hosts[i - curDatanodesNum] = "host" + i + ".foo.com";
        }
    }
    if (simulatedCapacities != null && numDataNodes > simulatedCapacities.length) {
        throw new IllegalArgumentException("The length of simulatedCapacities [" + simulatedCapacities.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    if (dnConfOverlays != null && numDataNodes > dnConfOverlays.length) {
        throw new IllegalArgumentException("The length of dnConfOverlays [" + dnConfOverlays.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    // Only ROLLBACK is forwarded as a DataNode startup argument.
    String[] dnArgs = (operation == null || operation != StartupOption.ROLLBACK) ? null
            : new String[] { operation.getName() };

    for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++) {
        // Each DataNode gets its own copy of the config, with optional per-node overlay.
        Configuration dnConf = new HdfsConfiguration(conf);
        if (dnConfOverlays != null) {
            dnConf.addResource(dnConfOverlays[i]);
        }
        // Set up datanode address
        setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
        if (manageDfsDirs) {
            String dirs = makeDataNodeDirs(i, storageType);
            dnConf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
            conf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
        }
        if (simulatedCapacities != null) {
            // Use the simulated (in-memory) dataset with a fixed per-node capacity.
            SimulatedFSDataset.setFactory(dnConf);
            dnConf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY,
                    simulatedCapacities[i - curDatanodesNum]);
        }
        LOG.info("Starting DataNode " + i + " with " + DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY + ": "
                + dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
        if (hosts != null) {
            dnConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, hosts[i - curDatanodesNum]);
            LOG.info("Starting DataNode " + i + " with hostname set to: "
                    + dnConf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY));
        }
        if (racks != null) {
            String name = hosts[i - curDatanodesNum];
            LOG.info("Adding node with hostname : " + name + " to rack " + racks[i - curDatanodesNum]);
            StaticMapping.addNodeToRack(name, racks[i - curDatanodesNum]);
        }
        Configuration newconf = new HdfsConfiguration(dnConf); // save config
        if (hosts != null) {
            NetUtils.addStaticResolution(hosts[i - curDatanodesNum], "localhost");
        }

        SecureResources secureResources = null;
        if (UserGroupInformation.isSecurityEnabled() && conf.get(DFS_DATA_TRANSFER_PROTECTION_KEY) == null) {
            try {
                secureResources = SecureDataNodeStarter.getSecureResources(dnConf);
            } catch (Exception ex) {
                // NOTE(review): failure to obtain secure resources is only printed, not
                // rethrown — instantiation below proceeds with null resources.
                ex.printStackTrace();
            }
        }
        final int maxRetriesOnSasl = conf.getInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY,
                IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_DEFAULT);
        int numRetries = 0;
        DataNode dn = null;
        while (true) {
            try {
                dn = DataNode.instantiateDataNode(dnArgs, dnConf, secureResources);
                break;
            } catch (IOException e) {
                // Work around issue testing security where rapidly starting multiple
                // DataNodes using the same principal gets rejected by the KDC as a
                // replay attack.
                if (UserGroupInformation.isSecurityEnabled() && numRetries < maxRetriesOnSasl) {
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();
                        break;
                    }
                    ++numRetries;
                    continue;
                }
                throw e;
            }
        }
        if (dn == null)
            throw new IOException("Cannot start DataNode in " + dnConf.get(DFS_DATANODE_DATA_DIR_KEY));
        // since the HDFS does things based on host|ip:port, we need to add the
        // mapping for the service to rackId
        String service = SecurityUtil.buildTokenService(dn.getXferAddress()).toString();
        if (racks != null) {
            LOG.info("Adding node with service : " + service + " to rack " + racks[i - curDatanodesNum]);
            StaticMapping.addNodeToRack(service, racks[i - curDatanodesNum]);
        }
        dn.runDatanodeDaemon();
        dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs, secureResources, dn.getIpcPort()));
    }
    curDatanodesNum += numDataNodes;
    this.numDataNodes += numDataNodes;
    waitActive();
}
From source file: com.mellanox.r4h.MiniDFSCluster.java
License: Apache License
/** * Restart a datanode, on the same port if requested * //from w w w . j av a 2 s . co m * @param dnprop * the datanode to restart * @param keepPort * whether to use the same port * @return true if restarting is successful * @throws IOException */ public synchronized boolean restartDataNode(DataNodeProperties dnprop, boolean keepPort) throws IOException { Configuration conf = dnprop.conf; String[] args = dnprop.dnArgs; SecureResources secureResources = dnprop.secureResources; Configuration newconf = new HdfsConfiguration(conf); // save cloned config if (keepPort) { InetSocketAddress addr = dnprop.datanode.getXferAddress(); conf.set(DFS_DATANODE_ADDRESS_KEY, addr.getAddress().getHostAddress() + ":" + addr.getPort()); conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, addr.getAddress().getHostAddress() + ":" + dnprop.ipcPort); } DataNode newDn = DataNode.createDataNode(args, conf, secureResources); dataNodes.add(new DataNodeProperties(newDn, newconf, args, secureResources, newDn.getIpcPort())); numDataNodes++; return true; }
From source file: com.wandisco.s3hdfs.proxy.S3HdfsProxy.java
License: Apache License
/**
 * Entry point: reads proxy settings from the S3HDFS-backed configuration and
 * starts a proxy instance.
 *
 * @param args ignored
 * @throws IOException if the proxy fails to start
 */
public static void main(String[] args) throws IOException {
    Configuration conf = new HdfsConfiguration(new S3HdfsConfiguration());
    // Pull each setting into a named value before starting the instance.
    Integer proxyPort = Integer.decode(conf.get(S3_PROXY_PORT_KEY, S3_PROXY_PORT_DEFAULT));
    String nameNodeHttpPort = conf.get(DFS_NAMENODE_HTTP_PORT_KEY, String.valueOf(DFS_NAMENODE_HTTP_PORT_DEFAULT));
    String serviceHostName = conf.get(S3_SERVICE_HOSTNAME_KEY, S3_SERVICE_HOSTNAME_DEFAULT);
    Integer maxConnections = Integer.decode(conf.get(S3_MAX_CONNECTIONS_KEY, S3_MAX_CONNECTIONS_DEFAULT));
    startInstance(proxyPort, nameNodeHttpPort, serviceHostName, maxConnections);
}
From source file: com.wandisco.s3hdfs.rewrite.filter.TestBase.java
License: Apache License
/** * @throws java.lang.Exception/*from w w w. j av a 2 s .co m*/ */ @Before public void setUp() throws Exception { Configuration conf = new HdfsConfiguration(new S3HdfsConfiguration()); conf.setInt(S3_PROXY_PORT_KEY, PROXY_PORT); conf.setBoolean(DFS_WEBHDFS_ENABLED_KEY, true); conf.setInt(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 100); conf.setLong(DFS_BLOCK_SIZE_KEY, 1024); conf.setLong(DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 512); // ^ has to be a multiple of 512 FsPermission.setUMask(conf, FsPermission.createImmutable((short) 0)); // ^ eliminate the UMask in HDFS to remove perm denied exceptions in s3Dir hostName = conf.get(S3_SERVICE_HOSTNAME_KEY); System.out.println("S3HDFS ServiceHostName: " + hostName); s3Directory = conf.get(S3_DIRECTORY_KEY); cluster = new MiniDFSCluster.Builder(conf).nameNodeHttpPort(HTTP_PORT).numDataNodes(3).build(); cluster.waitActive(); hdfs = cluster.getFileSystem(); //initialize s3 directory Path s3Path = new Path(s3Directory); assertTrue(hdfs.mkdirs(s3Path)); testUtil = new S3HdfsTestUtil(hdfs, s3Directory); s3Service = testUtil.configureS3Service(hostName, PROXY_PORT); }
From source file: com.wandisco.s3hdfs.rewrite.filter.TestConcurrency.java
License: Apache License
/**
 * Standalone driver: wires a TestConcurrency instance against a running
 * S3HDFS filesystem and runs the random-concurrency scenario.
 *
 * @param args ignored
 * @throws Exception if setup or the test scenario fails
 */
public static void main(String[] args) throws Exception {
    Configuration config = new HdfsConfiguration(new S3HdfsConfiguration());
    DistributedFileSystem fileSystem = (DistributedFileSystem) DistributedFileSystem.get(config);
    PROXY_PORT = Integer.decode(config.get(S3_PROXY_PORT_KEY, S3_PROXY_PORT_DEFAULT));

    TestConcurrency runner = new TestConcurrency();
    runner.hdfs = fileSystem;
    runner.s3Directory = config.get(S3_DIRECTORY_KEY);
    runner.hostName = config.get(S3_SERVICE_HOSTNAME_KEY);
    runner.testUtil = new S3HdfsTestUtil(runner.hdfs, runner.s3Directory);
    runner.s3Service = runner.testUtil.configureS3Service(runner.hostName, PROXY_PORT);

    runner.testFiveRandom();
    fileSystem.close();
}
From source file: org.apache.slider.providers.slideram.SliderAMProviderService.java
License: Apache License
/**
 * Publishes the AM's effective configuration documents (complete app settings
 * plus yarn/core/hdfs site files) and registers the AM's web and REST
 * endpoints in the given service record.
 *
 * @param amWebURI base URL of the AM web UI; REST endpoint URLs are derived from it
 * @param agentOpsURI agent operations URI (passed through to the superclass)
 * @param agentStatusURI agent status URI (passed through to the superclass)
 * @param serviceRecord record to which external endpoints are added
 * @throws IOException on failure to load a resource or build an endpoint URI
 */
@Override
public void applyInitialRegistryDefinitions(URL amWebURI, URL agentOpsURI, URL agentStatusURI,
        ServiceRecord serviceRecord) throws IOException {
    super.applyInitialRegistryDefinitions(amWebURI, agentOpsURI, agentStatusURI, serviceRecord);

    // now publish site.xml files
    YarnConfiguration defaultYarnConfig = new YarnConfiguration();
    amState.getPublishedSliderConfigurations().put(PublishedArtifacts.COMPLETE_CONFIG,
            new PublishedConfiguration("Complete slider application settings", getConfig(), getConfig()));
    amState.getPublishedSliderConfigurations().put(PublishedArtifacts.YARN_SITE_CONFIG,
            new PublishedConfiguration("YARN site settings", ConfigHelper.loadFromResource("yarn-site.xml"),
                    defaultYarnConfig));
    amState.getPublishedSliderConfigurations().put(PublishedArtifacts.CORE_SITE_CONFIG,
            new PublishedConfiguration("Core site settings", ConfigHelper.loadFromResource("core-site.xml"),
                    defaultYarnConfig));
    amState.getPublishedSliderConfigurations().put(PublishedArtifacts.HDFS_SITE_CONFIG,
            new PublishedConfiguration("HDFS site settings", ConfigHelper.loadFromResource("hdfs-site.xml"),
                    new HdfsConfiguration(true)));
    try {
        // Derive the REST API endpoints from the AM web URI.
        URL managementAPI = new URL(amWebURI, SLIDER_PATH_MANAGEMENT);
        URL registryREST = new URL(amWebURI, SLIDER_PATH_REGISTRY);
        URL publisherURL = new URL(amWebURI, SLIDER_PATH_PUBLISHER);
        // Set the configurations URL.
        String configurationsURL = SliderUtils.appendToURL(publisherURL.toExternalForm(),
                RestPaths.SLIDER_CONFIGSET);
        String exportsURL = SliderUtils.appendToURL(publisherURL.toExternalForm(), RestPaths.SLIDER_EXPORTS);
        serviceRecord.addExternalEndpoint(
                RegistryTypeUtils.webEndpoint(CustomRegistryConstants.WEB_UI, amWebURI.toURI()));
        serviceRecord.addExternalEndpoint(RegistryTypeUtils
                .restEndpoint(CustomRegistryConstants.MANAGEMENT_REST_API, managementAPI.toURI()));
        serviceRecord.addExternalEndpoint(RegistryTypeUtils
                .restEndpoint(CustomRegistryConstants.PUBLISHER_REST_API, publisherURL.toURI()));
        serviceRecord.addExternalEndpoint(RegistryTypeUtils
                .restEndpoint(CustomRegistryConstants.REGISTRY_REST_API, registryREST.toURI()));
        serviceRecord.addExternalEndpoint(RegistryTypeUtils.restEndpoint(
                CustomRegistryConstants.PUBLISHER_CONFIGURATIONS_API, new URI(configurationsURL)));
        serviceRecord.addExternalEndpoint(RegistryTypeUtils
                .restEndpoint(CustomRegistryConstants.PUBLISHER_EXPORTS_API, new URI(exportsURL)));
    } catch (URISyntaxException e) {
        // Endpoint components came from configuration; surface as IOException to callers.
        throw new IOException(e);
    }
}
From source file: org.apache.tajo.TajoTestingCluster.java
License: Apache License
/** * Start a minidfscluster./*from www .j a va 2s . com*/ * Can only create one. * @param servers How many DNs to start. * @param dir Where to home your dfs cluster. * @param hosts hostnames DNs to run on. * @throws Exception * @see {@link #shutdownMiniDFSCluster()} * @return The mini dfs cluster created. * @throws java.io.IOException */ public MiniDFSCluster startMiniDFSCluster(int servers, File dir, final String hosts[]) throws IOException { conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dir.toString()); conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1); conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, false); MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(new HdfsConfiguration(conf)); builder.hosts(hosts); builder.numDataNodes(servers); builder.format(true); builder.manageNameDfsDirs(true); builder.manageDataDfsDirs(true); builder.waitSafeMode(true); this.dfsCluster = builder.build(); // Set this just-started cluser as our filesystem. this.defaultFS = this.dfsCluster.getFileSystem(); this.conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, defaultFS.getUri().toString()); this.conf.setVar(TajoConf.ConfVars.ROOT_DIR, defaultFS.getUri() + "/tajo"); isDFSRunning = true; return this.dfsCluster; }
From source file: org.apache.twill.internal.appmaster.ApplicationMasterMain.java
License: Apache License
/**
 * Starts the application master: wires up the ZooKeeper client, YARN AM
 * client, tracker, and (optionally) Kafka log collection, then runs the
 * {@link ApplicationMasterService}.
 *
 * @param args ignored; all inputs come from environment variables and localized files
 * @throws Exception on any startup failure
 */
public static void main(String[] args) throws Exception {
    // All runtime identity comes from the environment set up by the launcher.
    String zkConnect = System.getenv(EnvKeys.TWILL_ZK_CONNECT);
    File twillSpec = new File(Constants.Files.TWILL_SPEC);
    RunId runId = RunIds.fromString(System.getenv(EnvKeys.TWILL_RUN_ID));

    ZKClientService zkClientService = createZKClient(zkConnect, System.getenv(EnvKeys.TWILL_APP_NAME));
    // Layer YARN settings on top of HDFS settings so both site configs are loaded.
    Configuration conf = new YarnConfiguration(new HdfsConfiguration(new Configuration()));
    setRMSchedulerAddress(conf);

    final YarnAMClient amClient = new VersionDetectYarnAMClientFactory(conf).create();
    ApplicationMasterService service = new ApplicationMasterService(runId, zkClientService, twillSpec, amClient,
            createAppLocation(conf));
    TrackerService trackerService = new TrackerService(service);

    // Services that must be running before the AM service itself starts.
    List<Service> prerequisites = Lists.newArrayList(new YarnAMClientService(amClient, trackerService),
            zkClientService, new AppMasterTwillZKPathService(zkClientService, runId));

    // TODO: Temp fix for Kafka issue in MapR. Will be removed when fixing TWILL-147
    if (Boolean.parseBoolean(System.getProperty("twill.disable.kafka"))) {
        LOG.info("Log collection through kafka disabled");
    } else {
        prerequisites.add(new ApplicationKafkaService(zkClientService, runId));
    }

    new ApplicationMasterMain(String.format("%s/%s/kafka", zkConnect, runId.getId())).doMain(service,
            prerequisites.toArray(new Service[prerequisites.size()]));
}
From source file: org.apache.twill.internal.container.TwillContainerMain.java
License: Apache License
/**
 * Main method for launching a {@link TwillContainerService} which runs
 * a {@link org.apache.twill.api.TwillRunnable}.
 *
 * @param args ignored; all inputs come from environment variables and localized files
 * @throws Exception on any startup failure
 */
public static void main(final String[] args) throws Exception {
    // Try to load the secure store from localized file, which AM requested RM to localize it for this container.
    loadSecureStore();
    // Container identity and sizing come from the environment set by the AM.
    String zkConnectStr = System.getenv(EnvKeys.TWILL_ZK_CONNECT);
    File twillSpecFile = new File(Constants.Files.TWILL_SPEC);
    RunId appRunId = RunIds.fromString(System.getenv(EnvKeys.TWILL_APP_RUN_ID));
    RunId runId = RunIds.fromString(System.getenv(EnvKeys.TWILL_RUN_ID));
    String runnableName = System.getenv(EnvKeys.TWILL_RUNNABLE_NAME);
    int instanceId = Integer.parseInt(System.getenv(EnvKeys.TWILL_INSTANCE_ID));
    int instanceCount = Integer.parseInt(System.getenv(EnvKeys.TWILL_INSTANCE_COUNT));

    ZKClientService zkClientService = createZKClient(zkConnectStr, System.getenv(EnvKeys.TWILL_APP_NAME));
    ZKDiscoveryService discoveryService = new ZKDiscoveryService(zkClientService);
    ZKClient appRunZkClient = getAppRunZKClient(zkClientService, appRunId);

    TwillSpecification twillSpec = loadTwillSpec(twillSpecFile);
    TwillRunnableSpecification runnableSpec = twillSpec.getRunnables().get(runnableName)
            .getRunnableSpecification();
    ContainerInfo containerInfo = new EnvContainerInfo();
    Arguments arguments = decodeArgs();
    // Context exposing this runnable's arguments, instance info, and discovery services.
    BasicTwillContext context = new BasicTwillContext(runId, appRunId, containerInfo.getHost(),
            arguments.getRunnableArguments().get(runnableName).toArray(new String[0]),
            arguments.getArguments().toArray(new String[0]), runnableSpec, instanceId, discoveryService,
            discoveryService, appRunZkClient, instanceCount, containerInfo.getMemoryMB(),
            containerInfo.getVirtualCores());

    ZKClient containerZKClient = getContainerZKClient(zkClientService, appRunId, runnableName);
    // Layer YARN settings on top of HDFS settings so both site configs are loaded.
    Configuration conf = new YarnConfiguration(new HdfsConfiguration(new Configuration()));
    Service service = new TwillContainerService(context, containerInfo, containerZKClient, runId, runnableSpec,
            getClassLoader(), createAppLocation(conf));
    new TwillContainerMain().doMain(service, new LogFlushService(), zkClientService,
            new TwillZKPathService(containerZKClient, runId));
}