List of usage examples for the constructor org.apache.hadoop.yarn.conf.YarnConfiguration#YarnConfiguration()
public YarnConfiguration()
From source file:io.fluo.cluster.WorkerApp.java
License:Apache License
public static void main(String[] args) throws ConfigurationException, Exception { AppOptions options = new AppOptions(); JCommander jcommand = new JCommander(options, args); if (options.displayHelp()) { jcommand.usage();/*from w w w . j a v a 2 s . co m*/ System.exit(-1); } Logging.init("worker", options.getFluoHome() + "/conf", "STDOUT"); File configFile = new File(options.getFluoHome() + "/conf/fluo.properties"); FluoConfiguration config = new FluoConfiguration(configFile); if (!config.hasRequiredWorkerProps()) { log.error("fluo.properties is missing required properties for worker"); System.exit(-1); } Environment env = new Environment(config); YarnConfiguration yarnConfig = new YarnConfiguration(); yarnConfig.addResource(new Path(options.getHadoopPrefix() + "/etc/hadoop/core-site.xml")); yarnConfig.addResource(new Path(options.getHadoopPrefix() + "/etc/hadoop/yarn-site.xml")); TwillRunnerService twillRunner = new YarnTwillRunnerService(yarnConfig, env.getZookeepers()); twillRunner.startAndWait(); TwillPreparer preparer = twillRunner.prepare(new WorkerApp(options, config)); // Add any observer jars found in lib observers File observerDir = new File(options.getFluoHome() + "/lib/observers"); for (File f : observerDir.listFiles()) { String jarPath = "file:" + f.getCanonicalPath(); log.debug("Adding observer jar " + jarPath + " to YARN app"); preparer.withResources(new URI(jarPath)); } TwillController controller = preparer.start(); controller.start(); while (controller.isRunning() == false) { Thread.sleep(2000); } env.close(); System.exit(0); }
From source file:io.hops.ha.common.TestDBLimites.java
License:Apache License
/**
 * Per-test setup: points the Yarn API and RM storage factories at a fresh
 * YARN configuration (4 s NM heartbeat) and formats the backing storage so
 * each test starts from a clean database.
 *
 * Storage failures are logged rather than rethrown (best-effort setup).
 */
@Before
public void setup() throws IOException {
    try {
        LOG.info("Setting up Factories");
        Configuration conf = new YarnConfiguration();
        conf.set(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, "4000");
        YarnAPIStorageFactory.setConfiguration(conf);
        RMStorageFactory.setConfiguration(conf);
        RMStorageFactory.getConnector().formatStorage();
    } catch (StorageInitializtionException | StorageException ex) {
        // Deliberately swallowed after logging — same handling for both kinds.
        LOG.error(ex);
    }
}
From source file:io.hops.metadata.util.DistributedRTClientEvaluation.java
License:Apache License
public DistributedRTClientEvaluation(String rtAddress, int nbSimulatedNM, int hbPeriod, long duration, String output, int startingPort, int nbNMTotal) throws IOException, YarnException, InterruptedException { this.nbNM = nbSimulatedNM; this.hbPeriod = hbPeriod; this.duration = duration; this.output = output; this.nbNMTotal = nbNMTotal; conf = new YarnConfiguration(); conf.setStrings(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, rtAddress); conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class); //Create NMs/* w w w. jav a 2s. c o m*/ for (int i = 0; i < nbSimulatedNM; i++) { nmMap.put(i, NodeId.newInstance(InetAddress.getLocalHost().getHostName(), startingPort + i)); } start(); }
From source file:io.hops.metadata.util.DistributedRTRMEvaluation.java
License:Apache License
/**
 * Builds the RM-side evaluation configuration: HA mode plus the Hops
 * distributed resource tracker, scheduled by the FIFO scheduler, and wires
 * both storage factories to it.
 */
public DistributedRTRMEvaluation() throws IOException {
    YarnConfiguration yarnConf = new YarnConfiguration();
    yarnConf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
    yarnConf.setBoolean(YarnConfiguration.HOPS_DISTRIBUTED_RT_ENABLED, true);
    yarnConf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    conf = yarnConf;
    YarnAPIStorageFactory.setConfiguration(conf);
    RMStorageFactory.setConfiguration(conf);
}
From source file:io.hops.metadata.util.TestFairSchedulerUtilities.java
License:Apache License
@Before public void setup() throws IOException { try {//from w w w . j ava 2 s.co m LOG.info("Setting up Factories"); conf = new YarnConfiguration(); conf.set(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, "4000"); YarnAPIStorageFactory.setConfiguration(conf); RMStorageFactory.setConfiguration(conf); RMStorageFactory.getConnector().formatStorage(); conf.setClass(YarnConfiguration.RM_SCHEDULER, FairScheduler.class, ResourceScheduler.class); // All tests assume only one assignment per node update } catch (StorageInitializtionException ex) { LOG.error(ex); } catch (StorageException ex) { LOG.error(ex); } }
From source file:io.hops.metadata.util.TestHopYarnAPIUtilities.java
License:Apache License
@Before public void setup() throws StorageInitializtionException, StorageException, IOException { LOG.info("Setting up Factories"); conf = new YarnConfiguration(); conf.set(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, "4000"); YarnAPIStorageFactory.setConfiguration(conf); RMStorageFactory.setConfiguration(conf); RMUtilities.InitializeDB();// www . j a v a 2s .co m conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class); }
From source file:io.hops.tensorflow.TestCluster.java
License:Apache License
protected void setupInternal(int numNodeManager) throws Exception { LOG.info("Starting up YARN cluster"); conf = new YarnConfiguration(); conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 128); conf.set("yarn.log.dir", "target"); conf.set("yarn.log-aggregation-enable", "true"); conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true); conf.set(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class.getName()); conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true); conf.setBoolean(YarnConfiguration.NM_GPU_RESOURCE_ENABLED, false); if (yarnCluster == null) { yarnCluster = new MiniYARNCluster(TestCluster.class.getSimpleName(), 1, numNodeManager, 1, 1); yarnCluster.init(conf);//from w w w.j av a 2 s . c om yarnCluster.start(); conf.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS, MiniYARNCluster.getHostname() + ":" + yarnCluster.getApplicationHistoryServer().getPort()); waitForNMsToRegister(); URL url = Thread.currentThread().getContextClassLoader().getResource("yarn-site.xml"); if (url == null) { throw new RuntimeException("Could not find 'yarn-site.xml' dummy file in classpath"); } Configuration yarnClusterConfig = yarnCluster.getConfig(); yarnClusterConfig.set("yarn.application.classpath", new File(url.getPath()).getParent()); //write the document to a buffer (not directly to the file, as that //can cause the file being written to get read -which will then fail. ByteArrayOutputStream bytesOut = new ByteArrayOutputStream(); yarnClusterConfig.writeXml(bytesOut); bytesOut.close(); //write the bytes to the file in the classpath OutputStream os = new FileOutputStream(new File(url.getPath())); os.write(bytesOut.toByteArray()); os.close(); } FileContext fsContext = FileContext.getLocalFSFileContext(); fsContext.delete(new Path(conf.get("yarn.timeline-service.leveldb-timeline-store.path")), true); try { Thread.sleep(2000); } catch (InterruptedException e) { LOG.info("setup thread sleep interrupted. message=" + e.getMessage()); } }
From source file:io.hops.TestStreaming.java
License:Apache License
@BeforeClass public static void setUp() throws Exception { conf = new YarnConfiguration(); // Set configuration options conf.setBoolean(YarnConfiguration.DISTRIBUTED_RM, true); RMStorageFactory.setConfiguration(conf); YarnAPIStorageFactory.setConfiguration(conf); DBUtility.InitializeDB();/*from w w w.j a v a2s . c om*/ }
From source file:MasteringYarn.DistributedShellClient.java
public void run(String[] args) throws YarnException, IOException, InterruptedException { YarnConfiguration yarnConfiguration = new YarnConfiguration(); YarnClient yarnClient = YarnClient.createYarnClient(); yarnClient.init(yarnConfiguration);//from www . java 2s. co m yarnClient.start(); YarnClientApplication yarnClientApplication = yarnClient.createApplication(); //container launch context for application master ContainerLaunchContext applicationMasterContainer = Records.newRecord(ContainerLaunchContext.class); applicationMasterContainer.setCommands( Collections.singletonList("$JAVA_HOME/bin/java MasteringYarn.DistributedShellApplicationMaster " + args[2] + " " + args[3] + " " + "1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout " + "2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr")); LocalResource applicationMasterJar = Records.newRecord(LocalResource.class); setupJarFileForApplicationMaster(new Path(args[1]), applicationMasterJar); applicationMasterContainer .setLocalResources(Collections.singletonMap("MasteringYarn.jar", applicationMasterJar)); Map<String, String> appMasterEnv = new HashMap<>(); setupEnvironmentForApplicationMaster(appMasterEnv); applicationMasterContainer.setEnvironment(appMasterEnv); Resource resources = Records.newRecord(Resource.class); resources.setVirtualCores(1); resources.setMemory(100); ApplicationSubmissionContext submissionContext = yarnClientApplication.getApplicationSubmissionContext(); submissionContext.setAMContainerSpec(applicationMasterContainer); submissionContext.setQueue("default"); submissionContext.setApplicationName("MasteringYarn"); submissionContext.setResource(resources); ApplicationId applicationId = submissionContext.getApplicationId(); System.out.println("Submitting " + applicationId); yarnClient.submitApplication(submissionContext); System.out.println("Post submission " + applicationId); ApplicationReport applicationReport; YarnApplicationState applicationState; do { Thread.sleep(1000); 
applicationReport = yarnClient.getApplicationReport(applicationId); applicationState = applicationReport.getYarnApplicationState(); System.out.println("Diagnostics " + applicationReport.getDiagnostics()); } while (applicationState != YarnApplicationState.FAILED && applicationState != YarnApplicationState.FINISHED && applicationState != YarnApplicationState.KILLED); System.out.println("Application finished with " + applicationState + " state and id " + applicationId); }
From source file:ml.shifu.guagua.yarn.GuaguaAppMaster.java
License:Apache License
/**
 * Application entry point.
 *
 * Recovers the container/attempt identity from the YARN-provided
 * environment, sets up security credentials, and runs the GuaguaAppMaster
 * under the submitting user's identity. Exits 0 on success, 1 on a
 * top-level exception, 2 when the master reports failure.
 *
 * @param args
 *            command-line args (set by GuaguaYarnClient, if any)
 */
public static void main(final String[] args) {
    LOG.info("Starting GuaguaAppMaster. ");
    String containerIdString = System.getenv().get(Environment.CONTAINER_ID.name());
    if (containerIdString == null) {
        // container id should always be set in the env by the framework
        throw new IllegalArgumentException("ContainerId not found in env vars.");
    }
    ContainerId containerId = ConverterUtils.toContainerId(containerIdString);
    ApplicationAttemptId appAttemptId = containerId.getApplicationAttemptId();
    Configuration conf = new YarnConfiguration();
    // Propagate the submitting user so Hadoop security acts on their behalf.
    String jobUserName = System.getenv(ApplicationConstants.Environment.USER.name());
    conf.set(MRJobConfig.USER_NAME, jobUserName);
    try {
        UserGroupInformation.setConfiguration(conf);
        // Security framework already loaded the tokens into current UGI, just use them
        Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
        LOG.info("Executing with tokens:");
        for (Token<?> token : credentials.getAllTokens()) {
            LOG.info(token.toString());
        }
        UserGroupInformation appMasterUgi = UserGroupInformation.createRemoteUser(jobUserName);
        appMasterUgi.addCredentials(credentials);
        // Now remove the AM->RM token so tasks don't have it
        Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
        while (iter.hasNext()) {
            Token<?> token = iter.next();
            if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
                iter.remove();
            }
        }
        final GuaguaAppMaster appMaster = new GuaguaAppMaster(containerId, appAttemptId, conf);
        // Run the master as the remote user so its FS/RPC calls carry the
        // submitting user's credentials.
        appMasterUgi.doAs(new PrivilegedAction<Void>() {
            @Override
            public Void run() {
                boolean result = false;
                try {
                    result = appMaster.run();
                } catch (Throwable t) {
                    LOG.error("GuaguaAppMaster caught a top-level exception in main.", t);
                    System.exit(1);
                }
                if (result) {
                    // NOTE(review): this log text was split across a line break in the
                    // extracted source; rejoined here — confirm original wording.
                    LOG.info("Guagua Application Master completed successfully. exiting");
                    System.exit(0);
                } else {
                    LOG.info("Guagua Application Master failed. exiting");
                    System.exit(2);
                }
                return null;
            }
        });
    } catch (Throwable t) {
        LOG.error("GuaguaAppMaster caught a top-level exception in main.", t);
        System.exit(1);
    }
}