List of usage examples for org.apache.hadoop.security UserGroupInformation isSecurityEnabled
public static boolean isSecurityEnabled()
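Before the examples from real projects below, here is a minimal sketch of the typical call pattern: isSecurityEnabled() reports whether the Hadoop configuration handed to UserGroupInformation enables Kerberos authentication, and callers usually gate a keytab login on it. This is an illustrative sketch, not taken from any of the projects below; the principal and keytab path are placeholders.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public class SecurityCheckExample {
    public static void main(String[] args) throws IOException {
        // Load the cluster configuration (core-site.xml etc. on the classpath)
        // and hand it to UGI; isSecurityEnabled() reads this static configuration.
        Configuration conf = new Configuration();
        UserGroupInformation.setConfiguration(conf);

        if (UserGroupInformation.isSecurityEnabled()) {
            // Kerberos is enabled: log in from a keytab before touching HDFS/YARN.
            // Principal and keytab path below are hypothetical placeholders.
            UserGroupInformation.loginUserFromKeytab(
                    "service/host@EXAMPLE.COM", "/etc/security/keytabs/service.keytab");
        }
        // Either way, there is now a usable login user.
        System.out.println("Login user: " + UserGroupInformation.getLoginUser());
    }
}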
From source file: com.huayu.metis.flume.sink.hdfs.HDFSEventSink.java
License: Apache License
private boolean authenticate() {

    // logic for kerberos login
    boolean useSecurity = UserGroupInformation.isSecurityEnabled();

    LOG.info("Hadoop Security enabled: " + useSecurity);

    if (useSecurity) {

        // sanity checking
        if (kerbConfPrincipal.isEmpty()) {
            LOG.error("Hadoop running in secure mode, but Flume config doesn't "
                    + "specify a principal to use for Kerberos auth.");
            return false;
        }
        if (kerbKeytab.isEmpty()) {
            LOG.error("Hadoop running in secure mode, but Flume config doesn't "
                    + "specify a keytab to use for Kerberos auth.");
            return false;
        } else {
            // If a keytab is specified, the user wants it to take effect.
            // HDFSEventSink will halt when the keytab file is nonexistent or unreadable.
            File kfile = new File(kerbKeytab);
            if (!(kfile.isFile() && kfile.canRead())) {
                throw new IllegalArgumentException("The keyTab file: " + kerbKeytab
                        + " is nonexistent or can't be read. "
                        + "Please specify a readable keytab file for Kerberos auth.");
            }
        }

        String principal;
        try {
            // resolves _HOST pattern using standard Hadoop search/replace
            // via DNS lookup when 2nd argument is empty
            principal = SecurityUtil.getServerPrincipal(kerbConfPrincipal, "");
        } catch (IOException e) {
            LOG.error("Host lookup error resolving kerberos principal (" + kerbConfPrincipal
                    + "). Exception follows.", e);
            return false;
        }

        Preconditions.checkNotNull(principal, "Principal must not be null");
        KerberosUser prevUser = staticLogin.get();
        KerberosUser newUser = new KerberosUser(principal, kerbKeytab);

        // be cruel and unusual when user tries to login as multiple principals
        // this isn't really valid with a reconfigure but this should be rare
        // enough to warrant a restart of the agent JVM
        // TODO: find a way to interrogate the entire current config state,
        // since we don't have to be unnecessarily protective if they switch all
        // HDFS sinks to use a different principal all at once.
        Preconditions.checkState(prevUser == null || prevUser.equals(newUser),
                "Cannot use multiple kerberos principals in the same agent. "
                        + " Must restart agent to use new principal or keytab. "
                        + "Previous = %s, New = %s", prevUser, newUser);

        // attempt to use cached credential if the user is the same
        // this is polite and should avoid flooding the KDC with auth requests
        UserGroupInformation curUser = null;
        if (prevUser != null && prevUser.equals(newUser)) {
            try {
                curUser = UserGroupInformation.getLoginUser();
            } catch (IOException e) {
                LOG.warn("User unexpectedly had no active login. Continuing with "
                        + "authentication", e);
            }
        }

        if (curUser == null || !curUser.getUserName().equals(principal)) {
            try {
                // static login
                kerberosLogin(this, principal, kerbKeytab);
            } catch (IOException e) {
                LOG.error("Authentication or file read error while attempting to "
                        + "login as kerberos principal (" + principal + ") using "
                        + "keytab (" + kerbKeytab + "). Exception follows.", e);
                return false;
            }
        } else {
            LOG.debug("{}: Using existing principal login: {}", this, curUser);
        }

        // we supposedly got through this unscathed... so store the static user
        staticLogin.set(newUser);
    }

    // hadoop impersonation works with or without kerberos security
    proxyTicket = null;
    if (!proxyUserName.isEmpty()) {
        try {
            proxyTicket = UserGroupInformation.createProxyUser(proxyUserName,
                    UserGroupInformation.getLoginUser());
        } catch (IOException e) {
            LOG.error("Unable to login as proxy user. Exception follows.", e);
            return false;
        }
    }

    UserGroupInformation ugi = null;
    if (proxyTicket != null) {
        ugi = proxyTicket;
    } else if (useSecurity) {
        try {
            ugi = UserGroupInformation.getLoginUser();
        } catch (IOException e) {
            LOG.error("Unexpected error: Unable to get authenticated user after "
                    + "apparent successful login! Exception follows.", e);
            return false;
        }
    }

    if (ugi != null) {
        // dump login information
        AuthenticationMethod authMethod = ugi.getAuthenticationMethod();
        LOG.info("Auth method: {}", authMethod);
        LOG.info(" User name: {}", ugi.getUserName());
        LOG.info(" Using keytab: {}", ugi.isFromKeytab());
        if (authMethod == AuthenticationMethod.PROXY) {
            UserGroupInformation superUser;
            try {
                superUser = UserGroupInformation.getLoginUser();
                LOG.info(" Superuser auth: {}", superUser.getAuthenticationMethod());
                LOG.info(" Superuser name: {}", superUser.getUserName());
                LOG.info(" Superuser using keytab: {}", superUser.isFromKeytab());
            } catch (IOException e) {
                LOG.error("Unexpected error: unknown superuser impersonating proxy.", e);
                return false;
            }
        }

        LOG.info("Logged in as user {}", ugi.getUserName());
        return true;
    }

    return true;
}
From source file: com.inforefiner.hdata.SubmitClient.java
License: Apache License
/**
 * Main run function for the client
 *
 * @return true if application completed successfully
 * @throws IOException
 * @throws YarnException
 */
public boolean run() throws IOException, YarnException {

    LOG.info("Running Client");
    yarnClient.start();

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers="
            + clusterMetrics.getNumNodeManagers());

    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    LOG.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress"
                + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers"
                + node.getNumContainers());
    }

    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity="
            + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
            + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount="
            + queueInfo.getChildQueues().size());

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                    + userAcl.name());
        }
    }

    if (domainId != null && domainId.length() > 0 && toCreateDomain) {
        prepareTimelineDomain();
    }

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    // TODO get min/max resource capabilities from RM and change memory ask if needed
    // If we do not have min/max, we may not be able to correctly request
    // the required resources from the RM for the app master
    // Memory ask has to be a multiple of min and less than max.
    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capability of resources in this cluster " + maxMem);

    // A resource ask cannot exceed the max.
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified="
                + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    int maxVCores = appResponse.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max virtual cores capability of resources in this cluster " + maxVCores);

    if (amVCores > maxVCores) {
        LOG.info("AM virtual cores specified above max threshold of cluster. " + "Using max value."
                + ", specified=" + amVCores + ", max=" + maxVCores);
        amVCores = maxVCores;
    }

    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();

    appContext.setKeepContainersAcrossApplicationAttempts(keepContainers);
    appContext.setApplicationName(appName);

    if (attemptFailuresValidityInterval >= 0) {
        appContext.setAttemptFailuresValidityInterval(attemptFailuresValidityInterval);
    }

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem
    // Create a local resource to point to the destination jar path
    FileSystem fs = FileSystem.get(conf);
    addToLocalResources(fs, appMasterJar, appMasterJarPath, appId.toString(), localResources, null);

    // Set the log4j properties if needed
    if (!log4jPropFile.isEmpty()) {
        addToLocalResources(fs, log4jPropFile, log4jPath, appId.toString(), localResources, null);
    }

    // The shell script has to be made available on the final container(s)
    // where it will be executed.
    // To do this, we need to first copy into the filesystem that is visible
    // to the yarn framework.
    // We do not need to set this as a local resource for the application
    // master as the application master does not need it.
    String hdfsShellScriptLocation = "";
    long hdfsShellScriptLen = 0;
    long hdfsShellScriptTimestamp = 0;
    if (!shellScriptPath.isEmpty()) {
        Path shellSrc = new Path(shellScriptPath);
        String shellPathSuffix = appName + "/" + appId.toString() + "/" + SCRIPT_PATH;
        Path shellDst = new Path(fs.getHomeDirectory(), shellPathSuffix);
        fs.copyFromLocalFile(false, true, shellSrc, shellDst);
        hdfsShellScriptLocation = shellDst.toUri().toString();
        FileStatus shellFileStatus = fs.getFileStatus(shellDst);
        hdfsShellScriptLen = shellFileStatus.getLen();
        hdfsShellScriptTimestamp = shellFileStatus.getModificationTime();
    }

    if (!shellCommand.isEmpty()) {
        addToLocalResources(fs, null, shellCommandPath, appId.toString(), localResources, shellCommand);
    }

    if (shellArgs.length > 0) {
        addToLocalResources(fs, null, shellArgsPath, appId.toString(), localResources,
                StringUtils.join(shellArgs, " "));
    }

    // Set the necessary security tokens as needed
    //amContainer.setContainerTokens(containerToken);

    // Set the env variables to be setup in the env where the application master will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();

    // put location of shell script into env
    // using the env info, the application master will create the correct local resource for the
    // eventual containers that will be launched to execute the shell scripts
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTLOCATION, hdfsShellScriptLocation);
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTTIMESTAMP, Long.toString(hdfsShellScriptTimestamp));
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTLEN, Long.toString(hdfsShellScriptLen));
    if (domainId != null && domainId.length() > 0) {
        env.put(DSConstants.DISTRIBUTEDSHELLTIMELINEDOMAIN, domainId);
    }

    // Add AppMaster.jar location to classpath
    // At some point we should not be required to add
    // the hadoop specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$$())
            .append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH)) {
        classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./log4j.properties");

    // add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }

    env.put("CLASSPATH", classPathEnv.toString());

    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);

    // Set java executable command
    LOG.info("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    // Set class name
    vargs.add(appMasterMainClass);
    // Set params for Application Master
    vargs.add("--container_memory " + String.valueOf(containerMemory));
    vargs.add("--container_vcores " + String.valueOf(containerVirtualCores));
    vargs.add("--num_containers " + String.valueOf(numContainers));
    if (null != nodeLabelExpression) {
        appContext.setNodeLabelExpression(nodeLabelExpression);
    }
    vargs.add("--priority " + String.valueOf(shellCmdPriority));

    for (Map.Entry<String, String> entry : shellEnv.entrySet()) {
        vargs.add("--shell_env " + entry.getKey() + "=" + entry.getValue());
    }
    if (debugFlag) {
        vargs.add("--debug");
    }

    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = ContainerLaunchContext.newInstance(localResources, env, commands,
            null, null, null);

    // Set up resource type requirements
    // For now, both memory and vcores are supported, so we set memory and
    // vcores requirements
    Resource capability = Resource.newInstance(amMemory, amVCores);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application
    // Not needed in this scenario
    // amContainer.setServiceData(serviceData);

    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        // Note: Credentials class is marked as LimitedPrivate for HDFS and MapReduce
        Credentials credentials = new Credentials();
        String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    // TODO - what is the range for priority? how to decide?
    Priority pri = Priority.newInstance(amPriority);
    appContext.setPriority(pri);

    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);

    // Submit the application to the applications manager
    // SubmitApplicationResponse submitResp = applicationsManager.submitApplication(appRequest);
    // Ignore the response as either a valid response object is returned on success
    // or an exception thrown to denote some form of a failure
    LOG.info("Submitting application to ASM");

    yarnClient.submitApplication(appContext);

    // TODO
    // Try submitting the same request again
    // app submission failure?
    Thread t = new Thread(new LogReceiver());
    t.start();

    // Monitor the application
    return monitorApplication(appId);
}
From source file: com.inmobi.conduit.Conduit.java
License: Apache License
public static void main(String[] args) throws Exception {
    try {
        if (args.length != 1) {
            LOG.error("Usage: com.inmobi.conduit.Conduit <conduit.cfg>");
            throw new RuntimeException("Usage: com.inmobi.conduit.Conduit " + "<conduit.cfg>");
        }
        String cfgFile = args[0].trim();
        Properties prop = new Properties();
        prop.load(new FileReader(cfgFile));

        String purgerEnabled = prop.getProperty(PERGER_ENABLED);
        if (purgerEnabled != null)
            isPurgerEnabled = Boolean.parseBoolean(purgerEnabled);

        String streamperLocal = prop.getProperty(STREAMS_PER_LOCALSERVICE);
        if (streamperLocal != null) {
            numStreamsLocalService = Integer.parseInt(streamperLocal);
        }
        String streamperMerge = prop.getProperty(STREAMS_PER_MERGE);
        if (streamperMerge != null) {
            numStreamsMergeService = Integer.parseInt(streamperMerge);
        }
        String streamperMirror = prop.getProperty(STREAMS_PER_MIRROR);
        if (streamperMirror != null) {
            numStreamsMirrorService = Integer.parseInt(streamperMirror);
        }
        String numOfDirPerDistcpService = prop.getProperty(DIR_PER_DISTCP_PER_STREAM);
        if (numOfDirPerDistcpService != null) {
            System.setProperty(DIR_PER_DISTCP_PER_STREAM, numOfDirPerDistcpService);
        }
        String log4jFile = getProperty(prop, LOG4J_FILE);
        if (log4jFile == null) {
            LOG.error("log4j.properties incorrectly defined");
            throw new RuntimeException("Log4j.properties not defined");
        }
        PropertyConfigurator.configureAndWatch(log4jFile);
        LOG.info("Log4j Property File [" + log4jFile + "]");

        String clustersStr = prop.getProperty(CLUSTERS_TO_PROCESS);
        if (clustersStr == null || clustersStr.length() == 0) {
            LOG.error("Please provide " + CLUSTERS_TO_PROCESS + " in [" + cfgFile + "]");
            throw new RuntimeException("Insufficient information on cluster name");
        }
        String[] clusters = clustersStr.split(",");
        String conduitConfigFile = getProperty(prop, CONDUIT_XML);
        if (conduitConfigFile == null) {
            LOG.error("Conduit configuration file doesn't exist; can't proceed");
            throw new RuntimeException("Specified conduit config file doesn't " + "exist");
        }
        String zkConnectString = prop.getProperty(ZK_ADDR);
        if (zkConnectString == null || zkConnectString.length() == 0) {
            LOG.error("Zookeeper connection string not specified");
            throw new RuntimeException("Zookeeper connection string not " + "specified");
        }
        String enableZK = prop.getProperty(ENABLE_ZOOKEEPER);
        boolean enableZookeeper;
        if (enableZK != null && enableZK.length() != 0)
            enableZookeeper = Boolean.parseBoolean(enableZK);
        else
            enableZookeeper = true;
        String currentCluster = prop.getProperty(CLUSTER_NAME);
        String principal = prop.getProperty(KRB_PRINCIPAL);
        String keytab = getProperty(prop, KEY_TAB_FILE);
        String mbPerMapper = prop.getProperty(MB_PER_MAPPER);
        if (mbPerMapper != null) {
            System.setProperty(MB_PER_MAPPER, mbPerMapper);
        }
        String numRetries = prop.getProperty(NUM_RETRIES);
        if (numRetries != null) {
            System.setProperty(NUM_RETRIES, numRetries);
        }
        String numFilesPerLocalStream = prop.getProperty(FILES_PER_COLLECETOR_PER_LOCAL_STREAM);
        if (numFilesPerLocalStream != null) {
            System.setProperty(FILES_PER_COLLECETOR_PER_LOCAL_STREAM, numFilesPerLocalStream);
        }
        String timeoutToProcessLastCollectorFile = prop.getProperty(TIMEOUT_TO_PROCESS_LAST_COLLECTOR_FILE);
        if (timeoutToProcessLastCollectorFile != null) {
            System.setProperty(TIMEOUT_TO_PROCESS_LAST_COLLECTOR_FILE, timeoutToProcessLastCollectorFile);
        }

        // Init Conduit metrics
        try {
            ConduitMetrics.init(prop);
            ConduitMetrics.startAll();
        } catch (IOException e) {
            LOG.error("Exception during initialization of metrics" + e.getMessage());
        }

        if (UserGroupInformation.isSecurityEnabled()) {
            LOG.info("Security enabled, trying kerberos login principal [" + principal + "] keytab ["
                    + keytab + "]");
            // krb enabled
            if (principal != null && keytab != null) {
                SecureLoginUtil.login(KRB_PRINCIPAL, principal, KEY_TAB_FILE, keytab);
            } else {
                LOG.error("Kerberos principal/keytab not defined properly in " + "conduit.cfg");
                throw new RuntimeException(
                        "Kerberos principal/keytab not defined " + "properly in conduit.cfg");
            }
        }

        // parse hcat properties
        parseHCatProperties(prop);

        ConduitConfigParser configParser = new ConduitConfigParser(conduitConfigFile);
        ConduitConfig config = configParser.getConfig();
        StringBuffer conduitClusterId = new StringBuffer();
        Set<String> clustersToProcess = new HashSet<String>();
        if (clusters.length == 1 && "ALL".equalsIgnoreCase(clusters[0])) {
            for (Cluster c : config.getClusters().values()) {
                clustersToProcess.add(c.getName());
            }
        } else {
            for (String c : clusters) {
                if (config.getClusters().get(c) == null) {
                    LOG.warn("Cluster name is not found in the config - " + c);
                    return;
                }
                clustersToProcess.add(c);
                conduitClusterId.append(c);
                conduitClusterId.append("_");
            }
        }
        final Conduit conduit = new Conduit(config, clustersToProcess, currentCluster);
        MessagePublisher msgPublisher = createMessagePublisher(prop);
        if (msgPublisher != null) {
            LOG.info("Audit feature is enabled for worker ");
            System.setProperty(AUDIT_ENABLED_KEY, "true");
        } else {
            /*
             * Disable the audit feature for worker in case we are not able to create
             * a publisher from the given publisher configuration file
             */
            System.setProperty(AUDIT_ENABLED_KEY, "false");
        }
        conduit.setPublisher(msgPublisher);
        Signal.handle(new Signal("TERM"), new SignalHandler() {
            @Override
            public void handle(Signal signal) {
                try {
                    LOG.info("Starting to stop conduit...");
                    conduit.stop();
                    ConduitMetrics.stopAll();
                } catch (Exception e) {
                    LOG.warn("Error in shutting down conduit", e);
                }
            }
        });
        if (enableZookeeper) {
            LOG.info("Starting CuratorLeaderManager for leader election ");
            conduit.startCuratorLeaderManager(zkConnectString, conduitClusterId, conduit);
        } else {
            conduit.start();
        }
    } catch (Exception e) {
        LOG.warn("Error in starting Conduit daemon", e);
        throw new Exception(e);
    }
}
From source file: com.inmobi.databus.Databus.java
License: Apache License
public static void main(String[] args) throws Exception {
    try {
        if (args.length != 1) {
            LOG.error("Usage: com.inmobi.databus.Databus <databus.cfg>");
            throw new RuntimeException("Usage: com.inmobi.databus.Databus " + "<databus.cfg>");
        }
        String cfgFile = args[0].trim();
        Properties prop = new Properties();
        prop.load(new FileReader(cfgFile));

        String log4jFile = getProperty(prop, LOG4J_FILE);
        if (log4jFile == null) {
            LOG.error("log4j.properties incorrectly defined");
            throw new RuntimeException("Log4j.properties not defined");
        }
        PropertyConfigurator.configureAndWatch(log4jFile);
        LOG.info("Log4j Property File [" + log4jFile + "]");

        String clustersStr = prop.getProperty(CLUSTERS_TO_PROCESS);
        if (clustersStr == null || clustersStr.length() == 0) {
            LOG.error("Please provide " + CLUSTERS_TO_PROCESS + " in [" + cfgFile + "]");
            throw new RuntimeException("Insufficient information on cluster name");
        }
        String[] clusters = clustersStr.split(",");
        String databusConfigFile = getProperty(prop, DATABUS_XML);
        if (databusConfigFile == null) {
            LOG.error("Databus configuration file doesn't exist; can't proceed");
            throw new RuntimeException("Specified databus config file doesn't " + "exist");
        }
        String zkConnectString = prop.getProperty(ZK_ADDR);
        if (zkConnectString == null || zkConnectString.length() == 0) {
            LOG.error("Zookeeper connection string not specified");
            throw new RuntimeException("Zookeeper connection string not " + "specified");
        }
        String principal = prop.getProperty(KRB_PRINCIPAL);
        String keytab = getProperty(prop, KEY_TAB_FILE);
        prop = null;

        if (UserGroupInformation.isSecurityEnabled()) {
            LOG.info("Security enabled, trying kerberos login principal [" + principal + "] keytab ["
                    + keytab + "]");
            // krb enabled
            if (principal != null && keytab != null) {
                SecureLoginUtil.login(KRB_PRINCIPAL, principal, KEY_TAB_FILE, keytab);
            } else {
                LOG.error("Kerberos principal/keytab not defined properly in " + "databus.cfg");
                throw new RuntimeException(
                        "Kerberos principal/keytab not defined " + "properly in databus.cfg");
            }
        }

        DatabusConfigParser configParser = new DatabusConfigParser(databusConfigFile);
        DatabusConfig config = configParser.getConfig();
        StringBuffer databusClusterId = new StringBuffer();
        Set<String> clustersToProcess = new HashSet<String>();
        if (clusters.length == 1 && "ALL".equalsIgnoreCase(clusters[0])) {
            for (Cluster c : config.getClusters().values()) {
                clustersToProcess.add(c.getName());
            }
        } else {
            for (String c : clusters) {
                if (config.getClusters().get(c) == null) {
                    LOG.warn("Cluster name is not found in the config - " + c);
                    return;
                }
                clustersToProcess.add(c);
                databusClusterId.append(c);
                databusClusterId.append("_");
            }
        }
        final Databus databus = new Databus(config, clustersToProcess);
        LOG.info("Starting CuratorLeaderManager for leader election ");
        CuratorLeaderManager curatorLeaderManager = new CuratorLeaderManager(databus,
                databusClusterId.toString(), zkConnectString);
        curatorLeaderManager.start();
        Signal.handle(new Signal("INT"), new SignalHandler() {
            @Override
            public void handle(Signal signal) {
                try {
                    LOG.info("Starting to stop databus...");
                    databus.stop();
                } catch (Exception e) {
                    LOG.warn("Error in shutting down databus", e);
                }
            }
        });
    } catch (Exception e) {
        LOG.warn("Error in starting Databus daemon", e);
        throw new Exception(e);
    }
}
From source file: com.inmobi.messaging.consumer.databus.AbstractMessagingDatabusConsumer.java
License: Apache License
protected void initializeConfig(ClientConfig config) throws IOException {
    String hadoopConfFileName = config.getString(hadoopConfigFileKey);
    if (hadoopConfFileName != null) {
        Configuration.addDefaultResource(hadoopConfFileName);
    }
    conf = new Configuration();
    super.init(config);

    // verify authentication
    if (UserGroupInformation.isSecurityEnabled()) {
        String principal = config.getString(consumerPrincipal);
        String keytab = config.getString(consumerKeytab);
        if (principal != null && keytab != null) {
            Configuration conf = new Configuration();
            conf.set(consumerPrincipal, principal);
            conf.set(consumerKeytab, keytab);
            SecurityUtil.login(conf, consumerKeytab, consumerPrincipal);
            UserGroupInformation ugi = UserGroupInformation.getLoginUser();
            LOG.info("User logged in :" + ugi);
        } else {
            LOG.info("There is no principal or keytab file passed. Using the"
                    + " command-line authentication.");
        }
    }
    // Read consumer id
    String consumerIdStr = config.getString(consumerIdInGroupConfig, DEFAULT_CONSUMER_ID);
    String[] id = consumerIdStr.split("/");
    try {
        consumerNumber = Integer.parseInt(id[0]);
        totalConsumers = Integer.parseInt(id[1]);
        partitionMinList = new HashSet<Integer>();
        if (isValidConfiguration()) {
            for (int i = 0; i < 60; i++) {
                if ((i % totalConsumers) == (consumerNumber - 1)) {
                    partitionMinList.add(i);
                }
            }
        } else {
            throw new IllegalArgumentException("Invalid consumer group membership");
        }
    } catch (NumberFormatException nfe) {
        throw new IllegalArgumentException("Invalid consumer group membership", nfe);
    }
    // Create checkpoint provider and initialize checkpoint
    String chkpointProviderClassName = config.getString(chkProviderConfig, DEFAULT_CHK_PROVIDER);
    String databusCheckpointDir = config.getString(checkpointDirConfig, DEFAULT_CHECKPOINT_DIR);
    this.checkpointProvider = createCheckpointProvider(chkpointProviderClassName, databusCheckpointDir);
    createCheckpoint();
    currentCheckpoint.read(checkpointProvider, getChkpointKey());

    // create buffer
    bufferSize = config.getInteger(queueSizeConfig, DEFAULT_QUEUE_SIZE);
    buffer = new LinkedBlockingQueue<QueueEntry>(bufferSize);

    // initialize other common configuration
    waitTimeForFileCreate = config.getLong(waitTimeForFileCreateConfig, DEFAULT_WAIT_TIME_FOR_FILE_CREATE);

    // get the retention period of the topic
    retentionInHours = config.getString(retentionConfig);

    relativeStartTimeStr = config.getString(relativeStartTimeConfig);
    if (relativeStartTimeStr == null && retentionInHours != null) {
        LOG.warn(retentionConfig + " is deprecated." + " Use " + relativeStartTimeConfig + " instead");
        int minutes = (Integer.parseInt(retentionInHours)) * 60;
        relativeStartTimeStr = String.valueOf(minutes);
    }
    String stopTimeStr = config.getString(stopDateConfig);
    stopTime = getDateFromString(stopTimeStr);
    startOfStream = config.getBoolean(startOfStreamConfig, DEFAULT_START_OF_STREAM);
    closedReadercount = 0;
}
From source file: com.kappaware.hbtools.common.Utils.java
License: Apache License
public static Configuration buildHBaseConfiguration(HBaseParameters parameters)
        throws ConfigurationException, IOException {
    Configuration config = HBaseConfiguration.create();
    for (String cf : parameters.getConfigFiles()) {
        File f = new File(cf);
        if (!f.canRead()) {
            throw new ConfigurationException(String.format("Unable to read file '%s'", cf));
        }
        log.debug(String.format("Will load '%s'", cf));
        config.addResource(new Path(cf));
    }
    config.set("hbase.client.retries.number", Integer.toString(parameters.getClientRetries()));
    //config.reloadConfiguration();
    if (Utils.hasText(parameters.getDumpConfigFile())) {
        Utils.dumpConfiguration(config, parameters.getDumpConfigFile());
    }
    if (Utils.hasText(parameters.getKeytab()) && Utils.hasText(parameters.getPrincipal())) {
        // Check if keytab file exists and is readable
        File f = new File(parameters.getKeytab());
        if (!f.canRead()) {
            throw new ConfigurationException(
                    String.format("Unable to read keytab file: '%s'", parameters.getKeytab()));
        }
        UserGroupInformation.setConfiguration(config);
        if (!UserGroupInformation.isSecurityEnabled()) {
            throw new ConfigurationException(
                    "Security is not enabled in core-site.xml while Kerberos principal and keytab are provided.");
        }
        try {
            UserGroupInformation userGroupInformation = UserGroupInformation
                    .loginUserFromKeytabAndReturnUGI(parameters.getPrincipal(), parameters.getKeytab());
            UserGroupInformation.setLoginUser(userGroupInformation);
        } catch (Exception e) {
            throw new ConfigurationException(
                    String.format("Kerberos: Unable to authenticate with principal='%s' and keytab='%s': %s.",
                            parameters.getPrincipal(), parameters.getKeytab(), e.getMessage()));
        }
    }
    return config;
}
From source file: com.linkedin.drelephant.security.HadoopSecurity.java
License: Apache License
public HadoopSecurity() throws IOException {
    Configuration conf = new Configuration();
    UserGroupInformation.setConfiguration(conf);
    _securityEnabled = UserGroupInformation.isSecurityEnabled();
    if (_securityEnabled) {
        logger.info("This cluster is Kerberos enabled.");
        boolean login = true;

        _keytabUser = System.getProperty("keytab.user");
        if (_keytabUser == null) {
            logger.error("Keytab user not set. Please set keytab_user in the configuration file");
            login = false;
        }

        _keytabLocation = System.getProperty("keytab.location");
        if (_keytabLocation == null) {
            logger.error("Keytab location not set. Please set keytab_location in the configuration file");
            login = false;
        } else if (!new File(_keytabLocation).exists()) {
            logger.error("The keytab file at location [" + _keytabLocation + "] does not exist.");
            login = false;
        }

        if (!login) {
            throw new IOException("Cannot login. This cluster is security enabled.");
        }

        checkLogin();
    }
}
From source file: com.linkedin.pinot.common.segment.fetcher.HdfsSegmentFetcher.java
License: Apache License
private void authenticate(Configuration hadoopConf, org.apache.commons.configuration.Configuration configs) {
    String principal = configs.getString(PRINCIPLE);
    String keytab = configs.getString(KEYTAB);
    if (!Strings.isNullOrEmpty(principal) && !Strings.isNullOrEmpty(keytab)) {
        UserGroupInformation.setConfiguration(hadoopConf);
        if (UserGroupInformation.isSecurityEnabled()) {
            try {
                if (!UserGroupInformation.getCurrentUser().hasKerberosCredentials()
                        || !UserGroupInformation.getCurrentUser().getUserName().equals(principal)) {
                    LOGGER.info("Trying to authenticate user [{}] with keytab [{}]..", principal, keytab);
                    UserGroupInformation.loginUserFromKeytab(principal, keytab);
                }
            } catch (IOException e) {
                throw new RuntimeException(String.format(
                        "Failed to authenticate user principal [%s] with keytab [%s]", principal, keytab), e);
            }
        }
    }
}
From source file: com.linkedin.pinot.filesystem.HadoopPinotFS.java
License: Apache License
private void authenticate(org.apache.hadoop.conf.Configuration hadoopConf,
        org.apache.commons.configuration.Configuration configs) {
    String principal = configs.getString(PRINCIPAL);
    String keytab = configs.getString(KEYTAB);
    if (!Strings.isNullOrEmpty(principal) && !Strings.isNullOrEmpty(keytab)) {
        UserGroupInformation.setConfiguration(hadoopConf);
        if (UserGroupInformation.isSecurityEnabled()) {
            try {
                if (!UserGroupInformation.getCurrentUser().hasKerberosCredentials()
                        || !UserGroupInformation.getCurrentUser().getUserName().equals(principal)) {
                    LOGGER.info("Trying to authenticate user [{}] with keytab [{}]..", principal, keytab);
                    UserGroupInformation.loginUserFromKeytab(principal, keytab);
                }
            } catch (IOException e) {
                throw new RuntimeException(String.format(
                        "Failed to authenticate user principal [%s] with keytab [%s]", principal, keytab), e);
            }
        }
    }
}
From source file: com.mellanox.r4h.MiniDFSCluster.java
License: Apache License
/**
 * Modify the config and start up additional DataNodes. The info port for
 * DataNodes is guaranteed to use a free port.
 *
 * Data nodes can run with the name node in the mini cluster or
 * a real name node. For example, running with a real name node is useful
 * when running simulated data nodes with a real name node.
 * If minicluster's name node is null assume that the conf has been
 * set with the right address:port of the name node.
 *
 * @param conf
 *            the base configuration to use in starting the DataNodes. This
 *            will be modified as necessary.
 * @param numDataNodes
 *            Number of DataNodes to start; may be zero
 * @param manageDfsDirs
 *            if true, the data directories for DataNodes will be
 *            created and {@link #DFS_DATANODE_DATA_DIR_KEY} will be
 *            set in the conf
 * @param operation
 *            the operation with which to start the DataNodes. If null
 *            or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
 * @param racks
 *            array of strings indicating the rack that each DataNode is on
 * @param hosts
 *            array of strings indicating the hostnames for each DataNode
 * @param simulatedCapacities
 *            array of capacities of the simulated data nodes
 * @param setupHostsFile
 *            add new nodes to dfs hosts files
 * @param checkDataNodeAddrConfig
 *            if true, only set DataNode port addresses if not already set in config
 * @param checkDataNodeHostConfig
 *            if true, only set DataNode hostname key if not already set in config
 * @param dnConfOverlays
 *            An array of {@link Configuration} objects that will overlay the
 *            global MiniDFSCluster Configuration for the corresponding DataNode.
 * @throws IllegalStateException
 *             if NameNode has been shutdown
 */
public synchronized void startDataNodes(Configuration conf, int numDataNodes, StorageType storageType,
        boolean manageDfsDirs, StartupOption operation, String[] racks, String[] hosts,
        long[] simulatedCapacities, boolean setupHostsFile, boolean checkDataNodeAddrConfig,
        boolean checkDataNodeHostConfig, Configuration[] dnConfOverlays) throws IOException {
    if (operation == StartupOption.RECOVER) {
        return;
    }
    if (checkDataNodeHostConfig) {
        conf.setIfUnset(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
    } else {
        conf.set(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
    }

    int curDatanodesNum = dataNodes.size();
    // for mincluster's the default initialDelay for BRs is 0
    if (conf.get(DFS_BLOCKREPORT_INITIAL_DELAY_KEY) == null) {
        conf.setLong(DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 0);
    }
    // If minicluster's name node is null assume that the conf has been
    // set with the right address:port of the name node.
    //
    if (racks != null && numDataNodes > racks.length) {
        throw new IllegalArgumentException("The length of racks [" + racks.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    if (hosts != null && numDataNodes > hosts.length) {
        throw new IllegalArgumentException("The length of hosts [" + hosts.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    // Generate some hostnames if required
    if (racks != null && hosts == null) {
        hosts = new String[numDataNodes];
        for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++) {
            hosts[i - curDatanodesNum] = "host" + i + ".foo.com";
        }
    }
    if (simulatedCapacities != null && numDataNodes > simulatedCapacities.length) {
        throw new IllegalArgumentException("The length of simulatedCapacities [" + simulatedCapacities.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    if (dnConfOverlays != null && numDataNodes > dnConfOverlays.length) {
        throw new IllegalArgumentException("The length of dnConfOverlays [" + dnConfOverlays.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }

    String[] dnArgs = (operation == null || operation != StartupOption.ROLLBACK) ? null
            : new String[] { operation.getName() };

    for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++) {
        Configuration dnConf = new HdfsConfiguration(conf);
        if (dnConfOverlays != null) {
            dnConf.addResource(dnConfOverlays[i]);
        }
        // Set up datanode address
        setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
        if (manageDfsDirs) {
            String dirs = makeDataNodeDirs(i, storageType);
            dnConf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
            conf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
        }
        if (simulatedCapacities != null) {
            SimulatedFSDataset.setFactory(dnConf);
            dnConf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY,
                    simulatedCapacities[i - curDatanodesNum]);
        }
        LOG.info("Starting DataNode " + i + " with " + DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY + ": "
                + dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
        if (hosts != null) {
            dnConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, hosts[i - curDatanodesNum]);
            LOG.info("Starting DataNode " + i + " with hostname set to: "
                    + dnConf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY));
        }
        if (racks != null) {
            String name = hosts[i - curDatanodesNum];
            LOG.info("Adding node with hostname : " + name + " to rack " + racks[i - curDatanodesNum]);
            StaticMapping.addNodeToRack(name, racks[i - curDatanodesNum]);
        }
        Configuration newconf = new HdfsConfiguration(dnConf); // save config
        if (hosts != null) {
            NetUtils.addStaticResolution(hosts[i - curDatanodesNum], "localhost");
        }

        SecureResources secureResources = null;
        if (UserGroupInformation.isSecurityEnabled() && conf.get(DFS_DATA_TRANSFER_PROTECTION_KEY) == null) {
            try {
                secureResources = SecureDataNodeStarter.getSecureResources(dnConf);
            } catch (Exception ex) {
                ex.printStackTrace();
            }
        }
        final int maxRetriesOnSasl = conf.getInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY,
                IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_DEFAULT);
        int numRetries = 0;
        DataNode dn = null;
        while (true) {
            try {
                dn = DataNode.instantiateDataNode(dnArgs, dnConf, secureResources);
                break;
            } catch (IOException e) {
                // Work around issue testing security where rapidly starting multiple
                // DataNodes using the same principal gets rejected by the KDC as a
                // replay attack.
                if (UserGroupInformation.isSecurityEnabled() && numRetries < maxRetriesOnSasl) {
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();
                        break;
                    }
                    ++numRetries;
                    continue;
                }
                throw e;
            }
        }
        if (dn == null)
            throw new IOException("Cannot start DataNode in " + dnConf.get(DFS_DATANODE_DATA_DIR_KEY));
        // since the HDFS does things based on host|ip:port, we need to add the
        // mapping for the service to rackId
        String service = SecurityUtil.buildTokenService(dn.getXferAddress()).toString();
        if (racks != null) {
            LOG.info("Adding node with service : " + service + " to rack " + racks[i - curDatanodesNum]);
            StaticMapping.addNodeToRack(service, racks[i - curDatanodesNum]);
        }
        dn.runDatanodeDaemon();
        dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs, secureResources, dn.getIpcPort()));
    }
    curDatanodesNum += numDataNodes;
    this.numDataNodes += numDataNodes;
    waitActive();
}