List of usage examples for org.apache.commons.logging.Log#debug
void debug(Object message);
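Before the examples below, here is a minimal, self-contained sketch of the typical pattern (the class name and messages are illustrative, not taken from the examples): obtain a Log from LogFactory, and guard expensive message construction with isDebugEnabled().

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class DebugExample {
    // Illustrative class, not from the examples below
    private static final Log LOG = LogFactory.getLog(DebugExample.class);

    public static void main(String[] args) {
        // debug(Object) defers toString() to the underlying logger
        LOG.debug("starting up");

        // Guard concatenation so the String is never built when debug is off
        if (LOG.isDebugEnabled()) {
            LOG.debug("processing " + args.length + " arguments");
        }

        // Companion overload debug(Object, Throwable) also logs the stack trace
        LOG.debug("caught during startup", new IllegalStateException("example"));
    }
}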
From source file:org.apache.hadoop.dfs.ReplicationTargetChooser.java
private boolean isGoodTarget(DatanodeDescriptor node, long blockSize, int maxTargetPerLoc,
        boolean considerLoad, List<DatanodeDescriptor> results) {
    Log logr = FSNamesystem.LOG;
    // check if the node is (being) decommissioned
    if (node.isDecommissionInProgress() || node.isDecommissioned()) {
        logr.debug("Node " + NodeBase.getPath(node)
                + " is not chosen because the node is (being) decommissioned");
        return false;
    }

    long remaining = node.getRemaining() - (node.getBlocksScheduled() * blockSize);
    // check the remaining capacity of the target machine
    if (blockSize * FSConstants.MIN_BLOCKS_FOR_WRITE > remaining) {
        logr.debug("Node " + NodeBase.getPath(node)
                + " is not chosen because the node does not have enough space");
        return false;
    }

    // check the communication traffic of the target machine
    if (considerLoad) {
        double avgLoad = 0;
        int size = clusterMap.getNumOfLeaves();
        if (size != 0) {
            avgLoad = (double) fs.getTotalLoad() / size;
        }
        if (node.getXceiverCount() > (2.0 * avgLoad)) {
            logr.debug("Node " + NodeBase.getPath(node)
                    + " is not chosen because the node is too busy");
            return false;
        }
    }

    // check if the target rack has chosen too many nodes
    String rackname = node.getNetworkLocation();
    int counter = 1;
    for (Iterator<DatanodeDescriptor> iter = results.iterator(); iter.hasNext();) {
        Node result = iter.next();
        if (rackname.equals(result.getNetworkLocation())) {
            counter++;
        }
    }
    if (counter > maxTargetPerLoc) {
        logr.debug("Node " + NodeBase.getPath(node)
                + " is not chosen because the rack has too many chosen nodes");
        return false;
    }
    return true;
}
From source file:org.apache.hadoop.fs.swift.util.SwiftUtils.java
public static void debug(Log log, String text, Object... args) {
    if (log.isDebugEnabled()) {
        log.debug(String.format(text, args));
    }
}
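A hypothetical call to this helper, with LOG, path, and len standing in for whatever the caller has in scope:

    // Formats lazily: String.format only runs when debug is enabled
    SwiftUtils.debug(LOG, "Opening %s (len %d)", path, len);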
From source file:org.apache.hadoop.hbase.hbql.impl.Utils.java
public static void logException(final Log log, final Exception e) {
    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
    final PrintWriter oos = new PrintWriter(baos);
    e.printStackTrace(oos);
    oos.flush();
    oos.close();
    log.debug(baos.toString());
}
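For comparison, commons-logging can also log a throwable directly; the built-in overload below hands the exception to the underlying logger, which formats the stack trace itself. The helper above is mainly useful when the trace must first be captured as a plain String:

    log.debug("operation failed", e); // built-in debug(Object message, Throwable t)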
From source file:org.apache.hadoop.hbase.regionserver.MutableCellSetSegment.java
@Override
public void dump(Log log) {
    for (Cell cell : getCellSet()) {
        log.debug(cell);
    }
}
From source file:org.apache.hadoop.hbase.regionserver.Segment.java
/**
 * Dumps all cells of the segment into the given log
 */
void dump(Log log) {
    for (Cell cell : getCellSet()) {
        log.debug(cell);
    }
}
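Note on the two dump variants above: both pass the Cell object itself to debug rather than formatting a String. Log.debug(Object) leaves the toString() conversion to the underlying logger, so the formatting cost is paid only when debug output is actually enabled.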
From source file:org.apache.hadoop.hbase.rest.Main.java
/**
 * The main method for the HBase rest server.
 * @param args command-line arguments
 * @throws Exception exception
 */
public static void main(String[] args) throws Exception {
    Log LOG = LogFactory.getLog("RESTServer");
    VersionInfo.logVersion();
    Configuration conf = HBaseConfiguration.create();
    RESTServlet servlet = RESTServlet.getInstance(conf);

    Options options = new Options();
    options.addOption("p", "port", true, "Port to bind to [default: 8080]");
    options.addOption("ro", "readonly", false,
            "Respond only to GET HTTP method requests [default: false]");

    CommandLine commandLine = null;
    try {
        commandLine = new PosixParser().parse(options, args);
    } catch (ParseException e) {
        LOG.error("Could not parse: ", e);
        printUsageAndExit(options, -1);
    }

    // check for user-defined port setting, if so override the conf
    if (commandLine != null && commandLine.hasOption("port")) {
        String val = commandLine.getOptionValue("port");
        servlet.getConfiguration().setInt("hbase.rest.port", Integer.valueOf(val));
        LOG.debug("port set to " + val);
    }

    // check if server should only process GET requests, if so override the conf
    if (commandLine != null && commandLine.hasOption("readonly")) {
        servlet.getConfiguration().setBoolean("hbase.rest.readonly", true);
        LOG.debug("readonly set to true");
    }

    @SuppressWarnings("unchecked")
    List<String> remainingArgs = commandLine != null ? commandLine.getArgList() : new ArrayList<String>();
    if (remainingArgs.size() != 1) {
        printUsageAndExit(options, 1);
    }

    String command = remainingArgs.get(0);
    if ("start".equals(command)) {
        // continue and start container
    } else if ("stop".equals(command)) {
        System.exit(1);
    } else {
        printUsageAndExit(options, 1);
    }

    // set up the Jersey servlet container for Jetty
    ServletHolder sh = new ServletHolder(ServletContainer.class);
    sh.setInitParameter("com.sun.jersey.config.property.resourceConfigClass",
            ResourceConfig.class.getCanonicalName());
    sh.setInitParameter("com.sun.jersey.config.property.packages", "jetty");

    // set up Jetty and run the embedded server
    Server server = new Server();
    Connector connector = new SelectChannelConnector();
    connector.setPort(servlet.getConfiguration().getInt("hbase.rest.port", 8080));
    connector.setHost(servlet.getConfiguration().get("hbase.rest.host", "0.0.0.0"));
    server.addConnector(connector);

    // Set the default max thread number to 100 to limit
    // the number of concurrent requests so that the REST server doesn't OOM easily.
    // Jetty sets the default max thread number to 250 if we don't set it.
    //
    // Our default min thread number 2 is the same as that used by Jetty.
    int maxThreads = servlet.getConfiguration().getInt("hbase.rest.threads.max", 100);
    int minThreads = servlet.getConfiguration().getInt("hbase.rest.threads.min", 2);
    QueuedThreadPool threadPool = new QueuedThreadPool(maxThreads);
    threadPool.setMinThreads(minThreads);
    server.setThreadPool(threadPool);

    server.setSendServerVersion(false);
    server.setSendDateHeader(false);
    server.setStopAtShutdown(true);

    // set up context
    Context context = new Context(server, "/", Context.SESSIONS);
    context.addServlet(sh, "/*");
    context.addFilter(GzipFilter.class, "/*", 0);

    // login the server principal (if using secure Hadoop)
    if (User.isSecurityEnabled() && User.isHBaseSecurityEnabled(conf)) {
        String machineName = Strings.domainNamePointerToHostName(
                DNS.getDefaultHost(conf.get("hbase.rest.dns.interface", "default"),
                        conf.get("hbase.rest.dns.nameserver", "default")));
        User.login(conf, "hbase.rest.keytab.file", "hbase.rest.kerberos.principal", machineName);
    }

    // start server
    server.start();
    server.join();
}
From source file:org.apache.hadoop.hbase.rest.RESTServer.java
/**
 * The main method for the HBase rest server.
 * @param args command-line arguments
 * @throws Exception exception
 */
public static void main(String[] args) throws Exception {
    Log LOG = LogFactory.getLog("RESTServer");
    VersionInfo.logVersion();
    FilterHolder authFilter = null;
    Configuration conf = HBaseConfiguration.create();
    Class<? extends ServletContainer> containerClass = ServletContainer.class;
    UserProvider userProvider = UserProvider.instantiate(conf);

    // login the server principal (if using secure Hadoop)
    if (userProvider.isHadoopSecurityEnabled() && userProvider.isHBaseSecurityEnabled()) {
        String machineName = Strings.domainNamePointerToHostName(
                DNS.getDefaultHost(conf.get(REST_DNS_INTERFACE, "default"),
                        conf.get(REST_DNS_NAMESERVER, "default")));
        String keytabFilename = conf.get(REST_KEYTAB_FILE);
        Preconditions.checkArgument(keytabFilename != null && !keytabFilename.isEmpty(),
                REST_KEYTAB_FILE + " should be set if security is enabled");
        String principalConfig = conf.get(REST_KERBEROS_PRINCIPAL);
        Preconditions.checkArgument(principalConfig != null && !principalConfig.isEmpty(),
                REST_KERBEROS_PRINCIPAL + " should be set if security is enabled");
        userProvider.login(REST_KEYTAB_FILE, REST_KERBEROS_PRINCIPAL, machineName);
        if (conf.get(REST_AUTHENTICATION_TYPE) != null) {
            containerClass = RESTServletContainer.class;
            authFilter = new FilterHolder();
            authFilter.setClassName(AuthFilter.class.getName());
            authFilter.setName("AuthenticationFilter");
        }
    }

    UserGroupInformation realUser = userProvider.getCurrent().getUGI();
    RESTServlet servlet = RESTServlet.getInstance(conf, realUser);

    Options options = new Options();
    options.addOption("p", "port", true, "Port to bind to [default: 8080]");
    options.addOption("ro", "readonly", false,
            "Respond only to GET HTTP method requests [default: false]");
    options.addOption(null, "infoport", true, "Port for web UI");

    CommandLine commandLine = null;
    try {
        commandLine = new PosixParser().parse(options, args);
    } catch (ParseException e) {
        LOG.error("Could not parse: ", e);
        printUsageAndExit(options, -1);
    }

    // check for user-defined port setting, if so override the conf
    if (commandLine != null && commandLine.hasOption("port")) {
        String val = commandLine.getOptionValue("port");
        servlet.getConfiguration().setInt("hbase.rest.port", Integer.valueOf(val));
        LOG.debug("port set to " + val);
    }

    // check if server should only process GET requests, if so override the conf
    if (commandLine != null && commandLine.hasOption("readonly")) {
        servlet.getConfiguration().setBoolean("hbase.rest.readonly", true);
        LOG.debug("readonly set to true");
    }

    // check for user-defined info server port setting, if so override the conf
    if (commandLine != null && commandLine.hasOption("infoport")) {
        String val = commandLine.getOptionValue("infoport");
        servlet.getConfiguration().setInt("hbase.rest.info.port", Integer.valueOf(val));
        LOG.debug("Web UI port set to " + val);
    }

    @SuppressWarnings("unchecked")
    List<String> remainingArgs = commandLine != null ? commandLine.getArgList() : new ArrayList<String>();
    if (remainingArgs.size() != 1) {
        printUsageAndExit(options, 1);
    }

    String command = remainingArgs.get(0);
    if ("start".equals(command)) {
        // continue and start container
    } else if ("stop".equals(command)) {
        System.exit(1);
    } else {
        printUsageAndExit(options, 1);
    }

    // set up the Jersey servlet container for Jetty
    ServletHolder sh = new ServletHolder(containerClass);
    sh.setInitParameter("com.sun.jersey.config.property.resourceConfigClass",
            ResourceConfig.class.getCanonicalName());
    sh.setInitParameter("com.sun.jersey.config.property.packages", "jetty");

    // The servlet holder below is instantiated to only handle the case
    // of the /status/cluster returning arrays of nodes (live/dead). Without
    // this servlet holder, the problem is that the node arrays in the response
    // are collapsed to single nodes. We want to be able to treat the
    // node lists as POJO in the response to /status/cluster servlet call,
    // but not change the behavior for any of the other servlets.
    // Hence we don't use the servlet holder for all servlets / paths.
    ServletHolder shPojoMap = new ServletHolder(containerClass);
    @SuppressWarnings("unchecked")
    Map<String, String> shInitMap = sh.getInitParameters();
    for (Entry<String, String> e : shInitMap.entrySet()) {
        shPojoMap.setInitParameter(e.getKey(), e.getValue());
    }
    shPojoMap.setInitParameter(JSONConfiguration.FEATURE_POJO_MAPPING, "true");

    // set up Jetty and run the embedded server
    Server server = new Server();
    Connector connector = new SelectChannelConnector();
    if (conf.getBoolean(REST_SSL_ENABLED, false)) {
        SslSelectChannelConnector sslConnector = new SslSelectChannelConnector();
        String keystore = conf.get(REST_SSL_KEYSTORE_STORE);
        String password = conf.get(REST_SSL_KEYSTORE_PASSWORD);
        String keyPassword = conf.get(REST_SSL_KEYSTORE_KEYPASSWORD, password);
        sslConnector.setKeystore(keystore);
        sslConnector.setPassword(password);
        sslConnector.setKeyPassword(keyPassword);
        connector = sslConnector;
    }
    connector.setPort(servlet.getConfiguration().getInt("hbase.rest.port", 8080));
    connector.setHost(servlet.getConfiguration().get("hbase.rest.host", "0.0.0.0"));
    server.addConnector(connector);

    // Set the default max thread number to 100 to limit
    // the number of concurrent requests so that the REST server doesn't OOM easily.
    // Jetty sets the default max thread number to 250 if we don't set it.
    //
    // Our default min thread number 2 is the same as that used by Jetty.
    int maxThreads = servlet.getConfiguration().getInt("hbase.rest.threads.max", 100);
    int minThreads = servlet.getConfiguration().getInt("hbase.rest.threads.min", 2);
    QueuedThreadPool threadPool = new QueuedThreadPool(maxThreads);
    threadPool.setMinThreads(minThreads);
    server.setThreadPool(threadPool);

    server.setSendServerVersion(false);
    server.setSendDateHeader(false);
    server.setStopAtShutdown(true);

    // set up context
    Context context = new Context(server, "/", Context.SESSIONS);
    context.addServlet(shPojoMap, "/status/cluster");
    context.addServlet(sh, "/*");
    if (authFilter != null) {
        context.addFilter(authFilter, "/*", 1);
    }

    // Load filters from configuration.
    String[] filterClasses = servlet.getConfiguration().getStrings(FILTER_CLASSES,
            ArrayUtils.EMPTY_STRING_ARRAY);
    for (String filter : filterClasses) {
        filter = filter.trim();
        context.addFilter(Class.forName(filter), "/*", 0);
    }
    HttpServerUtil.constrainHttpMethods(context);

    // Put up info server.
    int port = conf.getInt("hbase.rest.info.port", 8085);
    if (port >= 0) {
        conf.setLong("startcode", System.currentTimeMillis());
        String a = conf.get("hbase.rest.info.bindAddress", "0.0.0.0");
        InfoServer infoServer = new InfoServer("rest", a, port, false, conf);
        infoServer.setAttribute("hbase.conf", conf);
        infoServer.start();
    }

    // start server
    server.start();
    server.join();
}
From source file:org.apache.hadoop.hbase.util.CommonFSUtils.java
/**
 * Log the current state of the filesystem from a certain root directory
 * @param fs filesystem to investigate
 * @param root root file/directory to start logging from
 * @param LOG log to output information
 * @throws IOException if an unexpected exception occurs
 */
public static void logFileSystemState(final FileSystem fs, final Path root, Log LOG) throws IOException {
    LOG.debug("Current file system:");
    logFSTree(LOG, fs, root, "|-");
}
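A hypothetical call, assuming an open FileSystem, a root Path, and a Log are already in scope:

    // Dump the directory tree under rootDir at debug level (names illustrative)
    CommonFSUtils.logFileSystemState(fs, rootDir, LOG);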
From source file:org.apache.hadoop.hbase.util.CommonFSUtils.java
/**
 * Recursive helper to log the state of the FS
 *
 * @see #logFileSystemState(FileSystem, Path, Log)
 */
private static void logFSTree(Log LOG, final FileSystem fs, final Path root, String prefix)
        throws IOException {
    FileStatus[] files = listStatus(fs, root, null);
    if (files == null) {
        return;
    }
    for (FileStatus file : files) {
        if (file.isDirectory()) {
            LOG.debug(prefix + file.getPath().getName() + "/");
            logFSTree(LOG, fs, file.getPath(), prefix + "---");
        } else {
            LOG.debug(prefix + file.getPath().getName());
        }
    }
}
From source file:org.apache.hadoop.hbase.util.FSUtils.java
/**
 * Recursive helper to log the state of the FS
 *
 * @see #logFileSystemState(FileSystem, Path, Log)
 */
private static void logFSTree(Log LOG, final FileSystem fs, final Path root, String prefix)
        throws IOException {
    FileStatus[] files = FSUtils.listStatus(fs, root, null);
    if (files == null) {
        return;
    }
    for (FileStatus file : files) {
        if (file.isDirectory()) {
            LOG.debug(prefix + file.getPath().getName() + "/");
            logFSTree(LOG, fs, file.getPath(), prefix + "---");
        } else {
            LOG.debug(prefix + file.getPath().getName());
        }
    }
}