Example usage for java.util.logging Logger getLevel

List of usage examples for java.util.logging Logger getLevel

Introduction

On this page you can find example usages of java.util.logging Logger getLevel.

Prototype

public Level getLevel() 

Source Link

Document

Get the log Level that has been specified for this Logger.

Usage

From source file:Main.java

/**
 * Resolves the effective log Level for the given logger.
 * <p>
 * A logger whose own level is null inherits its level from the nearest
 * ancestor that has one configured, so this walks up the parent chain
 * until an explicit level is found.
 *
 * @param logger the logger to inspect; must not be null
 * @return the first non-null Level found on the logger or its ancestors,
 *         or null if no logger in the chain has an explicit level
 */
public static Level getLevel(Logger logger) {
    Logger current = logger;
    Level resolved = current.getLevel();
    while (resolved == null && current.getParent() != null) {
        current = current.getParent();
        resolved = current.getLevel();
    }
    return resolved;
}

From source file:com.twitter.heron.common.utils.logging.LoggingHelper.java

/**
 * Init java util logging/*  www  . ja  v  a 2  s . c  om*/
 *
 * @param level the Level of message to log
 * @param isRedirectStdOutErr whether we redirect std out&err
 * @param format the format to log
 */
public static void loggerInit(Level level, boolean isRedirectStdOutErr, String format) throws IOException {
    // Set the java util logging format
    setLoggingFormat(format);

    // Configure the root logger and its handlers so that all the
    // derived loggers will inherit the properties
    Logger rootLogger = Logger.getLogger("");
    for (Handler handler : rootLogger.getHandlers()) {
        handler.setLevel(level);
    }

    rootLogger.setLevel(level);

    if (rootLogger.getLevel().intValue() < Level.WARNING.intValue()) {
        // zookeeper logging scares me. if people want this, we can patch to config-drive this
        Logger.getLogger("org.apache.zookeeper").setLevel(Level.WARNING);
    }

    // setting logging for http client to be error level
    System.setProperty("org.apache.commons.logging.Log", "org.apache.commons.logging.impl.SimpleLog");
    System.setProperty("org.apache.commons.logging.simplelog.log.httpclient.wire", "ERROR");
    System.setProperty("org.apache.commons.logging.simplelog.log.org.apache.http", "ERROR");
    System.setProperty("org.apache.commons.logging.simplelog.log.org.apache.http.headers", "ERROR");

    if (isRedirectStdOutErr) {

        // Remove ConsoleHandler if present, to avoid StackOverflowError.
        // ConsoleHandler writes to System.err and since we are redirecting
        // System.err to Logger, it results in an infinite loop.
        for (Handler handler : rootLogger.getHandlers()) {
            if (handler instanceof ConsoleHandler) {
                rootLogger.removeHandler(handler);
            }
        }

        // now rebind stdout/stderr to logger
        Logger logger;
        LoggingOutputStream los;

        logger = Logger.getLogger("stdout");
        los = new LoggingOutputStream(logger, StdOutErrLevel.STDOUT);
        System.setOut(new PrintStream(los, true));

        logger = Logger.getLogger("stderr");
        los = new LoggingOutputStream(logger, StdOutErrLevel.STDERR);
        System.setErr(new PrintStream(los, true));
    }
}

From source file:alma.acs.logging.AcsLogger.java

/**
 * Client applications that use ACS class <code>ComponentClient</code> may need to turn their own JDK Logger into
 * an <code>AcsLogger</code>.
 * <p>/*www.j  a v a  2s.c o  m*/
 * If <code>logger</code> is itself of sub-type <code>AcsLogger</code> then it is returned directly without being wrapped.
 * The wrapping logger shares the parent logger and the log level with the provided logger.
 * 
 * @param logger
 *            the JDK logger
 * @param wrapperName
 *            Name of the returned AcsLogger. May be <code>null</code> in which case the delegate's name plus
 *            "wrapper" is taken.
 * @return an AcsLogger that delegates to the given <code>logger</code>.
 * @since ACS 8.0
 */
public static AcsLogger fromJdkLogger(Logger logger, String wrapLoggerName) {
    if (logger instanceof AcsLogger) {
        return (AcsLogger) logger;
    }
    String acsLoggerName = (wrapLoggerName != null ? wrapLoggerName.trim() : logger.getName() + "wrapper");
    AcsLogger ret = new AcsLogger(acsLoggerName, logger.getResourceBundleName(), null, true, logger);
    ret.setLevel(logger.getLevel());
    ret.setParent(logger.getParent());
    return ret;
}

From source file:org.osiam.addons.selfadministration.exception.OsiamExceptionHandler.java

/**
 * Resolves the effective log Level for the given logger by walking up the
 * parent chain until an explicitly configured level is found.
 * <p>
 * Fix: the original recursed with {@code logger.getParent()} even when
 * {@code logger} was null (either passed in as null, or after exhausting the
 * parent chain without finding a level), throwing a NullPointerException.
 * Now a null logger simply yields null.
 *
 * @param logger the logger to inspect; may be null
 * @return the first non-null Level on the logger or its ancestors, or null
 *         if the chain has no explicit level
 */
private Level getLogLevel(Logger logger) {
    if (logger == null) {
        // Top of the hierarchy reached (or null input): no explicit level found.
        return null;
    }
    Level level = logger.getLevel();
    if (level == null) {
        // No explicit level here; inherit from the parent chain.
        level = getLogLevel(logger.getParent());
    }
    return level;
}

From source file:com.cyberway.issue.io.Arc2Warc.java

/**
 * Converts the content of an ARC file into a new WARC file.
 * <p>
 * The first ARC record's body becomes the "Filedesc" value of an ANVL
 * metadata record attached to the WARC's warcinfo record; every remaining
 * ARC record is written via {@code write(writer, record)}.
 *
 * @param reader source ARC reader; closed in the finally block
 * @param warc destination WARC file to create
 * @throws IOException on read or write failure
 */
protected void transform(final ARCReader reader, final File warc) throws IOException {
    WARCWriter writer = null;
    // No point digesting. Digest is available after reading of ARC which
    // is too late for inclusion in WARC.
    reader.setDigest(false);
    try {
        BufferedOutputStream bos = new BufferedOutputStream(new FileOutputStream(warc));
        // Get the body of the first ARC record as a String so can dump it
        // into first record of WARC.
        final Iterator<ArchiveRecord> i = reader.iterator();
        ARCRecord firstRecord = (ARCRecord) i.next();
        ByteArrayOutputStream baos = new ByteArrayOutputStream((int) firstRecord.getHeader().getLength());
        firstRecord.dump(baos);
        // Add ARC first record content as an ANVLRecord.
        // NOTE(review): baos.toString() uses the platform default charset --
        // confirm that is the intended encoding for the "Filedesc" value.
        ANVLRecord ar = new ANVLRecord(1);
        ar.addLabelValue("Filedesc", baos.toString());
        List<String> metadata = new ArrayList<String>(1);
        metadata.add(ar.toString());
        // Now create the writer.  If reader was compressed, lets write
        // a compressed WARC.
        writer = new WARCWriter(null, bos, warc, reader.isCompressed(), null, metadata);
        // Write a warcinfo record with description about how this WARC
        // was made.
        writer.writeWarcinfoRecord(warc.getName(), "Made from " + reader.getReaderIdentifier() + " by "
                + this.getClass().getName() + "/" + getRevision());
        for (; i.hasNext();) {
            write(writer, (ARCRecord) i.next());
        }
    } finally {
        if (reader != null) {
            reader.close();
        }
        if (writer != null) {
            // I don't want the close being logged -- least, not w/o log of
            // an opening (and that'd be a little silly for simple script
            // like this). Currently, it logs at level INFO so that close
            // of files gets written to log files.  Up the log level just
            // for the close.
            Logger l = Logger.getLogger(writer.getClass().getName());
            Level oldLevel = l.getLevel();
            l.setLevel(Level.WARNING);
            try {
                writer.close();
            } finally {
                // Restore the previous level (a null restores inheritance).
                l.setLevel(oldLevel);
            }
        }
    }
}

From source file:com.cyberway.issue.io.Warc2Arc.java

/**
 * Converts WARC records back into ARC records, writing every WARC record
 * whose mimetype is an ARC type and whose content begins at a non-zero
 * offset into the given ARC writer.
 *
 * @param reader source WARC reader; closed in the finally block
 * @param writer destination ARC writer; closed by the caller -- only its
 *        log level is restored here
 * @throws IOException on read or write failure
 * @throws java.text.ParseException if a record date cannot be parsed
 */
protected void transform(final WARCReader reader, final ARCWriter writer)
        throws IOException, java.text.ParseException {
    // No point digesting. Digest is available after reading of ARC which
    // is too late for inclusion in WARC.
    reader.setDigest(false);
    // I don't want the close being logged -- least, not w/o log of
    // an opening (and that'd be a little silly for simple script
    // like this). Currently, it logs at level INFO so that close
    // of files gets written to log files.  Up the log level just
    // for the close.
    Logger l = Logger.getLogger(writer.getClass().getName());
    Level oldLevel = l.getLevel();
    try {
        l.setLevel(Level.WARNING);
        for (final Iterator i = reader.iterator(); i.hasNext();) {
            WARCRecord r = (WARCRecord) i.next();
            if (!isARCType(r.getHeader().getMimetype())) {
                continue;
            }
            if (r.getHeader().getContentBegin() <= 0) {
                // Otherwise, because length include Header-Line and
                // Named Fields, these will end up in the ARC unless there
                // is a non-zero content begin.
                continue;
            }
            String ip = (String) r.getHeader().getHeaderValue((WARCConstants.HEADER_KEY_IP));
            long length = r.getHeader().getLength();
            int offset = r.getHeader().getContentBegin();
            // This mimetype is not exactly what you'd expect to find in
            // an ARC though technically its 'correct'.  To get right one,
            // need to parse the HTTP Headers.  Thats messy.  Not doing for
            // now.
            String mimetype = r.getHeader().getMimetype();
            // Clean out ISO time string '-', 'T', ':', and 'Z' characters.
            String t = r.getHeader().getDate().replaceAll("[-T:Z]", "");
            long time = ArchiveUtils.getSecondsSinceEpoch(t).getTime();
            writer.write(r.getHeader().getUrl(), mimetype, ip, time, (int) (length - offset), r);
        }
    } finally {
        if (reader != null) {
            reader.close();
        }
        if (writer != null) {
            try {
                writer.close();
            } finally {
                // Restore the previous level (a null restores inheritance).
                l.setLevel(oldLevel);
            }
        }
    }
}

From source file:net.openhft.chronicle.logger.jul.JulTestBase.java

/**
 * Verifies that after loading the log-manager configuration for
 * {@code testId}, the logger carries the expected settings: level INFO,
 * parent handlers disabled, no filter, and exactly one handler of the
 * expected type.
 *
 * @param testId name of the configuration / logger under test
 * @param expectedHandlerType exact handler class the logger must carry
 * @throws IOException if the log-manager setup fails
 */
protected void testChronicleConfiguration(String testId, Class<? extends Handler> expectedHandlerType)
        throws IOException {

    setupLogManager(testId);
    Logger logger = Logger.getLogger(testId);

    assertEquals(Level.INFO, logger.getLevel());
    assertFalse(logger.getUseParentHandlers());
    assertNull(logger.getFilter());

    Handler[] handlers = logger.getHandlers();
    assertNotNull(handlers);
    assertEquals(1, handlers.length);
    // Exact class match is intended here, not instanceof.
    assertTrue(handlers[0].getClass() == expectedHandlerType);
}

From source file:com.googlecode.fightinglayoutbugs.FightingLayoutBugs.java

/**
 * Temporarily raises this package's log level to debug, detecting which
 * backend commons-logging delegates to (JDK 1.4, Log4J, LogKit, or
 * SimpleLog) and registering a Runnable in {@code _runAfterAnalysis} that
 * restores the original level afterwards.
 */
private void setLogLevelToDebug() {
    String name = FightingLayoutBugs.class.getPackage().getName();
    final Log log = LogFactory.getLog(name);
    // Each branch below handles one logging backend, possibly wrapped in an
    // AvalonLogger; the pattern is the same: save level, set debug, queue restore.
    if (log instanceof Jdk14Logger || (log instanceof AvalonLogger
            && ((AvalonLogger) log).getLogger() instanceof org.apache.avalon.framework.logger.Jdk14Logger)) {
        final Logger logger = Logger.getLogger(name);
        final Level originalLevel = logger.getLevel();
        logger.setLevel(Level.FINE);
        _runAfterAnalysis.add(new Runnable() {
            @Override
            public void run() {
                logger.setLevel(originalLevel);
            }
        });
        enableDebugOutputToConsole(logger);
    } else if (log instanceof Log4JLogger || (log instanceof AvalonLogger
            && ((AvalonLogger) log).getLogger() instanceof org.apache.avalon.framework.logger.Log4JLogger)) {
        final org.apache.log4j.Logger logger = LogManager.getLogger(name);
        final org.apache.log4j.Level originalLevel = logger.getLevel();
        logger.setLevel(org.apache.log4j.Level.DEBUG);
        _runAfterAnalysis.add(new Runnable() {
            @Override
            public void run() {
                logger.setLevel(originalLevel);
            }
        });
    } else if (log instanceof LogKitLogger || (log instanceof AvalonLogger
            && ((AvalonLogger) log).getLogger() instanceof org.apache.avalon.framework.logger.LogKitLogger)) {
        final org.apache.log.Logger logger = LogKit.getLoggerFor(name);
        final Priority originalLevel = logger.getPriority();
        logger.setPriority(Priority.DEBUG);
        _runAfterAnalysis.add(new Runnable() {
            @Override
            public void run() {
                logger.setPriority(originalLevel);
            }
        });
    } else if (log instanceof SimpleLog) {
        final SimpleLog simpleLog = (SimpleLog) log;
        final int originalLevel = simpleLog.getLevel();
        simpleLog.setLevel(SimpleLog.LOG_LEVEL_DEBUG);
        _runAfterAnalysis.add(new Runnable() {
            @Override
            public void run() {
                simpleLog.setLevel(originalLevel);
            }
        });
    }
    // NOTE(review): if commons-logging uses a backend not covered above
    // (e.g. SLF4J bridge), the level is silently left unchanged.
}

From source file:com.github.stephenc.mongodb.maven.StartMongoMojo.java

/**
 * Maven mojo entry point: checks that no MongoDB instance already listens
 * on the configured port, prepares the database root directory, builds the
 * mongod command line, launches the process, and waits up to 120 seconds
 * for the server to accept connections.
 *
 * @throws MojoExecutionException on configuration errors or when the port
 *         is already occupied by another MongoDB instance
 * @throws MojoFailureException if the launched mongod process exits
 *         before it becomes reachable
 */
public void execute() throws MojoExecutionException, MojoFailureException {
    if (skip) {
        getLog().info("Skipping mongodb: mongodb.skip==true");
        return;
    }
    if (installation == null) {
        getLog().info("Using mongod from PATH");
    } else {
        getLog().info("Using mongod installed in " + installation);
    }
    getLog().info("Using database root of " + databaseRoot);
    // Silence the driver while we probe the port; restore the level after.
    final Logger mongoLogger = Logger.getLogger("com.mongodb");
    Level mongoLevel = mongoLogger.getLevel();
    try {
        mongoLogger.setLevel(Level.SEVERE);
        // Short timeouts: we only want to know whether something answers.
        MongoOptions opts = new MongoOptions();
        opts.autoConnectRetry = false;
        opts.connectionsPerHost = 1;
        opts.connectTimeout = 50;
        opts.socketTimeout = 50;
        Mongo instance;
        try {
            // NOTE(review): the Mongo instance is never closed; a successful
            // connect here leaks a connection before the exception is thrown.
            instance = new Mongo(new ServerAddress("localhost", port), opts);
            List<String> databaseNames = instance.getDatabaseNames();
            throw new MojoExecutionException("Port " + port
                    + " is already running a MongoDb instance with the following databases " + databaseNames);
        } catch (MongoException.Network e) {
            // fine... no instance running
        } catch (MongoException e) {
            throw new MojoExecutionException("Port " + port + " is already running a MongoDb instance");
        } catch (UnknownHostException e) {
            // ignore... localhost is always known!
        }
    } finally {
        mongoLogger.setLevel(mongoLevel);
    }

    // Locate the mongod executable: explicit installation dir, else PATH.
    CommandLine commandLine = null;
    if (installation != null && installation.isDirectory()) {
        File bin = new File(installation, "bin");
        File exe = new File(bin, Os.isFamily(Os.FAMILY_WINDOWS) ? "mongod.exe" : "mongod");
        if (exe.isFile()) {
            commandLine = new CommandLine(exe);
        } else {
            throw new MojoExecutionException("Could not find mongo executables in specified installation: "
                    + installation + " expected to find " + exe + " but it does not exist.");
        }
    }
    if (commandLine == null) {
        commandLine = new CommandLine(Os.isFamily(Os.FAMILY_WINDOWS) ? "mongod.exe" : "mongod");
    }
    if (databaseRoot.isFile()) {
        throw new MojoExecutionException("Database root " + databaseRoot + " is a file and not a directory");
    }
    if (databaseRoot.isDirectory() && cleanDatabaseRoot) {
        getLog().info("Cleaning database root directory: " + databaseRoot);
        try {
            FileUtils.deleteDirectory(databaseRoot);
        } catch (IOException e) {
            throw new MojoExecutionException("Could not clean database root directory " + databaseRoot, e);
        }
    }
    if (!databaseRoot.isDirectory()) {
        getLog().debug("Creating database root directory: " + databaseRoot);
        if (!databaseRoot.mkdirs()) {
            throw new MojoExecutionException("Could not create database root directory " + databaseRoot);
        }
    }

    // Assemble the mongod argument list from the mojo configuration.
    if (!verbose) {
        commandLine.addArgument("--quiet");
    }

    commandLine.addArgument("--logpath");
    commandLine.addArgument(logPath.getAbsolutePath());
    if (logAppend) {
        commandLine.addArgument("--logappend");
    }

    commandLine.addArgument(auth ? "--auth" : "--noauth");

    commandLine.addArgument("--port");
    commandLine.addArgument(Integer.toString(port));

    commandLine.addArgument("--dbpath");
    commandLine.addArgument(databaseRoot.getAbsolutePath());

    if (additionalArguments != null) {
        for (String aa : additionalArguments) {
            commandLine.addArgument(aa);
        }
    }

    // Launch mongod asynchronously; the ProcessObserver ensures the child
    // is destroyed on JVM shutdown.
    Executor exec = new DefaultExecutor();
    DefaultExecuteResultHandler execHandler = new DefaultExecuteResultHandler();
    exec.setWorkingDirectory(databaseRoot);
    ProcessObserver processObserver = new ProcessObserver(new ShutdownHookProcessDestroyer());
    exec.setProcessDestroyer(processObserver);

    LogOutputStream stdout = new MavenLogOutputStream(getLog());
    LogOutputStream stderr = new MavenLogOutputStream(getLog());

    getLog().info("Executing command line: " + commandLine);
    exec.setStreamHandler(new PumpStreamHandler(stdout, stderr));
    try {
        exec.execute(commandLine, execHandler);
        getLog().info("Waiting for MongoDB to start...");
        long timeout = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(120);
        // Silence the driver again while polling for readiness.
        mongoLevel = mongoLogger.getLevel();
        try {
            mongoLogger.setLevel(Level.SEVERE);
            while (System.currentTimeMillis() < timeout && !execHandler.hasResult()) {
                MongoOptions opts = new MongoOptions();
                opts.autoConnectRetry = false;
                opts.connectionsPerHost = 1;
                opts.connectTimeout = 250;
                opts.socketTimeout = 250;
                Mongo instance;
                try {
                    instance = new Mongo(new ServerAddress("localhost", port), opts);
                    List<String> databaseNames = instance.getDatabaseNames();
                    getLog().info("MongoDb started.");
                    getLog().info("Databases: " + databaseNames);
                } catch (MongoException.Network e) {
                    // ignore, wait and try again
                    try {
                        Thread.sleep(50);
                    } catch (InterruptedException e1) {
                        // ignore
                    }
                    continue;
                } catch (MongoException e) {
                    // Server answered but listing failed (e.g. auth enabled):
                    // still counts as started.
                    getLog().info("MongoDb started.");
                    getLog().info("Unable to list databases due to " + e.getMessage());
                }
                break;
            }
        } finally {
            mongoLogger.setLevel(mongoLevel);
        }
        if (execHandler.hasResult()) {
            // mongod exited before becoming reachable: report why.
            ExecuteException exception = execHandler.getException();
            if (exception != null) {
                throw new MojoFailureException(exception.getMessage(), exception);
            }
            throw new MojoFailureException(
                    "Command " + commandLine + " exited with exit code " + execHandler.getExitValue());
        }
        // Publish the observer so the stop mojo can find this port's process.
        Map pluginContext = session.getPluginContext(getPluginDescriptor(), project);
        pluginContext.put(ProcessObserver.class.getName() + ":" + Integer.toString(port), processObserver);
    } catch (IOException e) {
        throw new MojoExecutionException(e.getMessage(), e);
    }

}

From source file:alma.acs.logging.AcsLogger.java

/**
 * Logs the given <code>LogRecord</code>. 
 * The record can be modified or dropped by the optional filters provided in {@link #addLogRecordFilter(alma.acs.logging.AcsLogger.LogRecordFilter)}. 
 * <p>
 * Adding of context information:
 * <ul>
 * <li> If the LogRecord has a parameter that is a map which contains additional information 
 * about the line of code, thread, etc., the log record will be taken as provided, and no context
 * information will be added. This can be useful if
 *   <ul>
 *   <li> the log record was reconstructed from a remote error by the ACS error handling code
 *        (see <code>AcsJException</code>), or
 *   <li> if in very exceptional cases application code needs to manipulate such information by hand.
 *   </ul>
 * <li> otherwise, context information is inferred, similar to {@link LogRecord#inferCaller()},
 *   but additionally including thread name and line of code.
 * </ul>  
 * Note that by overloading this method, we intercept all logging activities of the base class.
 *  
 * @see java.util.logging.Logger#log(java.util.logging.LogRecord)
 */
public void log(LogRecord record) {
    // Throw exception if level OFF was used to log this record, see http://jira.alma.cl/browse/COMP-1928
    // Both Level.OFF and AcsLogLevel.OFF use the same value INTEGER.max, but we anyway check for both.
    if (record.getLevel().intValue() == Level.OFF.intValue()
            || record.getLevel().intValue() == AcsLogLevel.OFF.intValue()) {
        throw new IllegalArgumentException(
                "Level OFF must not be used for actual logging, but only for level filtering.");
    }

    // Optional profiling: time the whole call when PROFILE is enabled.
    StopWatch sw_all = null;
    if (PROFILE) {
        sw_all = new StopWatch(null);
    }

    // Level could be null and must then be inherited from the ancestor loggers, 
    // e.g. during JDK shutdown when the log level is nulled by the JDK LogManager 
    Logger loggerWithLevel = this;
    while (loggerWithLevel != null && loggerWithLevel.getLevel() == null
            && loggerWithLevel.getParent() != null) {
        loggerWithLevel = loggerWithLevel.getParent();
    }
    int levelValue = -1;
    if (loggerWithLevel.getLevel() == null) {
        // HSO 2007-09-05: With ACS 6.0.4 the OMC uses this class (previously plain JDK logger) and has reported 
        // that no level was found, which yielded a NPE. To be investigated further. 
        // Probably #createUnconfiguredLogger was used without setting parent logger nor log level. 
        // Just to be safe I add the necessary checks and warning message that improve over a NPE.
        if (!noLevelWarningPrinted) {
            System.out.println("Logger configuration error: no log level found for logger " + getLoggerName()
                    + " or its ancestors. Will use Level.ALL instead.");
            noLevelWarningPrinted = true;
        }
        // @TODO: decide if resorting to ALL is desirable, or to use schema defaults, INFO, etc
        levelValue = Level.ALL.intValue();
    } else {
        // level is fine, reset the flag to print the error message again when log level is missing.
        noLevelWarningPrinted = false;
        levelValue = loggerWithLevel.getLevel().intValue();
    }

    // filter by log level to avoid unnecessary retrieval of context data.
    // The same check will be repeated by the base class implementation of this method that gets called afterwards.
    if (record.getLevel().intValue() < levelValue || levelValue == offValue) {
        return;
    }

    // modify the logger name if necessary
    if (loggerName != null) {
        record.setLoggerName(loggerName);
    }

    // check if this record already has the context data attached which ACS needs but the JDK logging API does not provide
    LogParameterUtil paramUtil = new LogParameterUtil(record);
    Map<String, Object> specialProperties = paramUtil.extractSpecialPropertiesMap();

    if (specialProperties == null) {
        // we prepend the special properties map to the other parameters
        specialProperties = LogParameterUtil.createPropertiesMap();
        List<Object> paramList = paramUtil.getNonSpecialPropertiesMapParameters();
        paramList.add(0, specialProperties);
        record.setParameters(paramList.toArray());

        String threadName = Thread.currentThread().getName();
        specialProperties.put(LogParameterUtil.PARAM_THREAD_NAME, threadName);

        specialProperties.put(LogParameterUtil.PARAM_PROCESSNAME, this.processName);
        specialProperties.put(LogParameterUtil.PARAM_SOURCEOBJECT, this.sourceObject);

        // Get the stack trace
        StackTraceElement stack[] = (new Throwable()).getStackTrace();
        // search for the first frame before the "Logger" class.
        int ix = 0;
        boolean foundNonLogFrame = false;
        while (ix < stack.length) {
            StackTraceElement frame = stack[ix];
            String cname = frame.getClassName();
            if (!foundNonLogFrame && !loggerClassNames.contains(cname)) {
                // We've found the relevant frame.
                record.setSourceClassName(cname);
                record.setSourceMethodName(frame.getMethodName());
                int lineNumber = frame.getLineNumber();
                specialProperties.put(LogParameterUtil.PARAM_LINE, Long.valueOf(lineNumber));
                foundNonLogFrame = true;
                if (this.callStacksToBeIgnored.isEmpty()) {
                    break; // performance optimization: avoid checking all "higher" stack frames
                }
            }
            if (foundNonLogFrame) {
                // Drop the record entirely if the caller's stack matches one of
                // the configured "ignore" class/method combinations.
                if (callStacksToBeIgnored.contains(concatenateIgnoreLogData(cname, frame.getMethodName()))) {
                    //System.out.println("Won't log record with message " + record.getMessage());
                    return;
                }
            }
            ix++;
        }
        // We haven't found a suitable frame, so just punt. This is
        // OK as we are only committed to making a "best effort" here.
    }

    // Profiling subtask: time spent after AcsLogger's own processing.
    StopWatch sw_afterAcsLogger = null;
    if (PROFILE) {
        sw_afterAcsLogger = sw_all.createStopWatchForSubtask("afterAcsLogger");
        LogParameterUtil logParamUtil = new LogParameterUtil(record);
        logParamUtil.setStopWatch(sw_afterAcsLogger);
    }

    // Let the delegate or Logger base class handle the rest.
    if (delegate != null) {
        delegate.log(record);
    } else {
        super.log(record);
    }

    // Accumulate profiling statistics and periodically dump a summary,
    // remembering the slowest call seen in the current window.
    if (PROFILE) {
        sw_afterAcsLogger.stop();
        sw_all.stop();
        long elapsedNanos = sw_all.getLapTimeNanos();
        if (profileSlowestCallStopWatch == null
                || profileSlowestCallStopWatch.getLapTimeNanos() < elapsedNanos) {
            profileSlowestCallStopWatch = sw_all;
        }
        profileLogTimeStats.addValue(elapsedNanos);
        if (profileLogTimeStats.getN() >= profileStatSize) {
            String msg = "Local logging times in ms for the last " + profileStatSize + " logs: ";
            msg += "mean=" + profileMillisecFormatter.format(profileLogTimeStats.getMean() * 1E-6);
            msg += ", median=" + profileMillisecFormatter.format(profileLogTimeStats.getPercentile(50) * 1E-6);
            msg += ", stdev="
                    + profileMillisecFormatter.format(profileLogTimeStats.getStandardDeviation() * 1E-6);
            msg += "; details of slowest log (from ";
            msg += IsoDateFormat.formatDate(profileSlowestCallStopWatch.getStartTime()) + "): ";
            msg += profileSlowestCallStopWatch.getSubtaskDetails();
            System.out.println(msg);
            profileSlowestCallStopWatch = null;
            profileLogTimeStats.clear();
        }
    }
}