Example usage for org.apache.commons.logging Log debug

List of usage examples for org.apache.commons.logging Log debug

Introduction

On this page you can find example usage for org.apache.commons.logging Log debug.

Prototype

void debug(Object message);

Document

Logs a message with debug log level.
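
A minimal, self-contained sketch of typical usage is shown below. The ExampleService class and its process method are hypothetical placeholders; LogFactory.getLog, isDebugEnabled and debug are standard commons-logging calls.

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class ExampleService {

    // Obtain a logger for this (hypothetical) class from the commons-logging factory.
    private static final Log LOG = LogFactory.getLog(ExampleService.class);

    public void process(String item) {
        // Guard the call so the message concatenation is skipped
        // when debug logging is disabled.
        if (LOG.isDebugEnabled()) {
            LOG.debug("Processing item " + item);
        }
    }
}

The isDebugEnabled() guard is optional; when building the message is cheap, debug(Object) is often called directly, as in most of the examples below.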

Usage

From source file:org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyDefault.java

/**
 * Determine if a node is a good target. 
 *
 * @param node The target node
 * @param blockSize Size of block
 * @param maxTargetPerLoc Maximum number of targets per rack. The value of 
 *                        this parameter depends on the number of racks in 
 *                        the cluster and total number of replicas for a block
 * @param considerLoad whether or not to consider load of the target node
 * @param results A list containing currently chosen nodes. Used to check if 
 *                too many nodes have been chosen in the target rack.
 * @param avoidStaleNodes Whether or not to avoid choosing stale nodes.               
 * @return Return true if <i>node</i> has enough space, 
 *         does not have too much load, 
 *         and the rack does not have too many nodes.
 */
protected boolean isGoodTarget(DatanodeDescriptor node, long blockSize, int maxTargetPerLoc,
        boolean considerLoad, List<DatanodeDescriptor> results, boolean avoidStaleNodes) {
    Log logr = FSNamesystem.LOG;
    // check if the node is (being) decommissioned
    if (node.isDecommissionInProgress() || node.isDecommissioned()) {
        logr.debug(
                "Node " + NodeBase.getPath(node) + " is not chosen because the node is (being) decommissioned");
        return false;
    }

    if (avoidStaleNodes) {
        if (node.isStale(this.staleInterval)) {
            logr.debug("Node " + NodeBase.getPath(node) + " is not chosen because the node is (being) stale");
            return false;
        }
    }

    long remaining = node.getRemaining() - (node.getBlocksScheduled() * blockSize);
    // check the remaining capacity of the target machine
    if (blockSize * FSConstants.MIN_BLOCKS_FOR_WRITE > remaining) {
        logr.debug("Node " + NodeBase.getPath(node)
                + " is not chosen because the node does not have enough space");
        return false;
    }

    // check the communication traffic of the target machine
    if (considerLoad) {
        double avgLoad = 0;
        int size = clusterMap.getNumOfLeaves();
        if (size != 0 && stats != null) {
            avgLoad = (double) stats.getTotalLoad() / size;
        }
        if (node.getXceiverCount() > (2.0 * avgLoad)) {
            logr.debug("Node " + NodeBase.getPath(node) + " is not chosen because the node is too busy");
            return false;
        }
    }

    // check if the target rack has chosen too many nodes
    String rackname = node.getNetworkLocation();
    int counter = 1;
    for (Iterator<DatanodeDescriptor> iter = results.iterator(); iter.hasNext();) {
        Node result = iter.next();
        if (rackname.equals(result.getNetworkLocation())) {
            counter++;
        }
    }
    if (counter > maxTargetPerLoc) {
        logr.debug(
                "Node " + NodeBase.getPath(node) + " is not chosen because the rack has too many chosen nodes");
        return false;
    }
    return true;
}

From source file:org.apache.hadoop.hive.ql.exec.mr.Throttle.java

/**
 * Fetch http://tracker.om:/gc.jsp?threshold=period.
 */
public static void checkJobTracker(JobConf conf, Log LOG) {

    try {
        byte[] buffer = new byte[1024];
        int threshold = conf.getInt("mapred.throttle.threshold.percent", DEFAULT_MEMORY_GC_PERCENT);
        int retry = conf.getInt("mapred.throttle.retry.period", DEFAULT_RETRY_PERIOD);

        // If the threshold is 100 percent, then there is no throttling
        if (threshold == 100) {
            return;
        }

        // This is the Job Tracker URL
        String tracker = JobTrackerURLResolver.getURL(conf) + "/gc.jsp?threshold=" + threshold;

        while (true) {
            // read in the first 1K characters from the URL
            URL url = new URL(tracker);
            LOG.debug("Throttle: URL " + tracker);
            InputStream in = null;
            try {
                in = url.openStream();
                in.read(buffer);
                in.close();
                in = null;
            } finally {
                IOUtils.closeStream(in);
            }
            String fetchString = new String(buffer);

            // fetch the xml tag <dogc>xxx</dogc>
            Pattern dowait = Pattern.compile("<dogc>",
                    Pattern.CASE_INSENSITIVE | Pattern.DOTALL | Pattern.MULTILINE);
            String[] results = dowait.split(fetchString);
            if (results.length != 2) {
                throw new IOException(
                        "Throttle: Unable to parse response of URL " + url + ". Get retuned " + fetchString);
            }
            dowait = Pattern.compile("</dogc>", Pattern.CASE_INSENSITIVE | Pattern.DOTALL | Pattern.MULTILINE);
            results = dowait.split(results[1]);
            if (results.length < 1) {
                throw new IOException(
                        "Throttle: Unable to parse response of URL " + url + ". Get retuned " + fetchString);
            }

            // if the jobtracker signalled that the threshold is not exceeded,
            // then we return immediately.
            if (results[0].trim().compareToIgnoreCase("false") == 0) {
                return;
            }

            // The JobTracker has exceeded its threshold and is doing a GC.
            // The client has to wait and retry.
            LOG.warn("Job is being throttled because of resource crunch on the " + "JobTracker. Will retry in "
                    + retry + " seconds..");
            Thread.sleep(retry * 1000L);
        }
    } catch (Exception e) {
        LOG.warn("Job is not being throttled. " + e);
    }
}

From source file:org.apache.hadoop.mapred.AssignTasksHelper.java

/** General information used by assignTasks. */
private void logGeneralInfos(Log log, TaskType type) {
    HelperForType helper = this.helper(type);
    StringBuilder builder = new StringBuilder("assignTasks(");
    builder.append(this.ttStatus.getTrackerName()).append(":").append(type).append(") mode: ");
    if (helper.doSizeBasedScheduling) {
        builder.append(helper.doTrainScheduling ? "T+SB" : "SB");
    } else {
        builder.append(helper.doTrainScheduling ? "T" : "None");
    }

    if (helper.doSizeBasedScheduling || helper.doTrainScheduling) {
        builder.append(" maxT: " + helper.maxTrainSlots).append(" maxSB: " + helper.maxSizeBasedSlots)
                .append(" minTTasks: " + helper.numTrainTasksForJob)
                .append(" totAvailT: " + helper.totAvailableTrainSlots())
                .append(" totAvailSB: " + helper.totAvailableSizeBasedSlots());
    }
    log.debug(builder.toString());

}

From source file:org.apache.hadoop.mapred.AssignTasksHelper.java

private void logChanges(Log log, TaskType type) {
    HelperForType helper = this.helper(type);
    StringBuilder builder = new StringBuilder("assignTasks(");
    builder.append(this.ttStatus.getTrackerName()).append(":").append(type).append("):")
            .append(" totRunningTrainTasks: " + helper.startRunningTT + " -> " + helper.runningTrainTasks)
            .append(" totRunningSizeBasedTasks: " + helper.startRunningSBT + " -> "
                    + helper.runningSizeBasedTasks)
            .append(" tt.availableSlots: " + helper.startAvailableSlots + " -> " + helper.currAvailableSlots)
            .append(" actions: " + helper.actions);
    log.debug(builder.toString());
}

From source file:org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerUtils.java

public static boolean isBlacklisted(FiCaSchedulerApp application, FiCaSchedulerNode node, Log LOG) {
    if (application.isBlacklisted(node.getNodeName())) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Skipping 'host' " + node.getNodeName() + " for " + application.getApplicationId()
                    + " since it has been blacklisted");
        }
        return true;
    }

    if (application.isBlacklisted(node.getRackName())) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Skipping 'rack' " + node.getRackName() + " for " + application.getApplicationId()
                    + " since it has been blacklisted");
        }
        return true;
    }

    return false;
}

From source file:org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppUtils.java

public static boolean isPlaceBlacklisted(SchedulerApplicationAttempt application, SchedulerNode node, Log log) {
    if (application.isPlaceBlacklisted(node.getNodeName())) {
        if (log.isDebugEnabled()) {
            log.debug("Skipping 'host' " + node.getNodeName() + " for " + application.getApplicationId()
                    + " since it has been blacklisted");
        }
        return true;
    }

    if (application.isPlaceBlacklisted(node.getRackName())) {
        if (log.isDebugEnabled()) {
            log.debug("Skipping 'rack' " + node.getRackName() + " for " + application.getApplicationId()
                    + " since it has been blacklisted");
        }
        return true;
    }

    return false;
}

From source file:org.apache.hadoop.yarn.util.TestAdHocLogDumper.java

@Test
public void testDumpingSchedulerLogs() throws Exception {

    Map<Appender, Priority> levels = new HashMap<>();
    String logHierarchy = TestAdHocLogDumper.class.getName();
    String logFilename = "test.log";
    Log log = LogFactory.getLog(logHierarchy);
    if (log instanceof Log4JLogger) {
        for (Enumeration appenders = Logger.getRootLogger().getAllAppenders(); appenders.hasMoreElements();) {
            Object obj = appenders.nextElement();
            if (obj instanceof AppenderSkeleton) {
                AppenderSkeleton appender = (AppenderSkeleton) obj;
                levels.put(appender, appender.getThreshold());
            }
        }
    }

    AdHocLogDumper dumper = new AdHocLogDumper(logHierarchy, logFilename);
    dumper.dumpLogs("DEBUG", 1000);
    LOG.debug("test message 1");
    LOG.info("test message 2");
    File logFile = new File(logFilename);
    Assert.assertTrue(logFile.exists());
    Thread.sleep(2000);
    long lastWrite = logFile.lastModified();
    Assert.assertTrue(lastWrite < Time.now());
    Assert.assertTrue(logFile.length() != 0);

    // make sure levels are set back to their original values
    if (log instanceof Log4JLogger) {
        for (Enumeration appenders = Logger.getRootLogger().getAllAppenders(); appenders.hasMoreElements();) {
            Object obj = appenders.nextElement();
            if (obj instanceof AppenderSkeleton) {
                AppenderSkeleton appender = (AppenderSkeleton) obj;
                Assert.assertEquals(levels.get(appender), appender.getThreshold());
            }
        }
    }
    boolean del = logFile.delete();
    if (!del) {
        LOG.info("Couldn't clean up after test");
    }
}

From source file:org.apache.hawq.pxf.plugins.hdfs.HdfsAnalyzer.java

/**
 * Collects a number of basic statistics based on an estimate. Statistics
 * are: number of records, number of hdfs blocks and hdfs block size.
 *
 * @param datapath path is a data source URI that can appear as a file name,
 *            a directory name or a wildcard pattern
 * @return statistics in JSON format
 * @throws Exception if path is wrong, its metadata cannot be retrieved from
 *             file system, or if scanning the first block using the
 *             accessor failed
 */
@Override
public AnalyzerStats getEstimatedStats(String datapath) throws Exception {
    long blockSize = 0;
    long numberOfBlocks;
    long dataSize = 0;
    Path path = new Path(HdfsUtilities.absoluteDataPath(datapath));

    ArrayList<InputSplit> splits = getSplits(path);

    for (InputSplit split : splits) {
        FileSplit fsp = (FileSplit) split;
        dataSize += fsp.getLength();
        if (blockSize == 0) {
            Path filePath = fsp.getPath();
            FileStatus fileStatus = fs.getFileStatus(filePath);
            if (fileStatus.isFile()) {
                blockSize = fileStatus.getBlockSize();
            }
        }
    }

    // if no file is in path (only dirs), get default block size
    if (blockSize == 0) {
        blockSize = fs.getDefaultBlockSize(path);
    }
    numberOfBlocks = splits.size();

    /*
     * The estimate of the number of tuples in table is based on the
     * actual number of tuples in the first block, multiplied by its
     * size compared to the size of the whole data to be read.
     * The calculation:
     * Ratio of tuples to size = number of tuples in first block / first block size.
     * Total of tuples = ratio * number of blocks * total block size.
     */
    long numberOfTuplesInBlock = getNumberOfTuplesInBlock(splits);
    long numberOfTuples = 0;
    if (!splits.isEmpty()) {
        long blockLength = splits.get(0).getLength();
        numberOfTuples = (long) Math.floor((((double) numberOfTuplesInBlock / blockLength) * (dataSize)));
    }
    AnalyzerStats stats = new AnalyzerStats(blockSize, numberOfBlocks, numberOfTuples);

    // print files size to log when in debug level
    Log.debug(AnalyzerStats.dataToString(stats, path.toString()));

    return stats;
}

From source file:org.apache.hawq.pxf.plugins.hdfs.HdfsAnalyzer.java

/**
 * Calculates the number of tuples in a split (block). Reads one block from
 * HDFS. Exceptions during reading filter upwards and are handled in
 * AnalyzerResource.
 */
private long getNumberOfTuplesInBlock(ArrayList<InputSplit> splits) throws Exception {
    long tuples = -1; /* default - if we are not able to read data */
    ReadAccessor accessor;

    if (splits.isEmpty()) {
        return 0;
    }

    /*
     * metadata information includes: file split's start, length and hosts
     * (locations).
     */
    FileSplit firstSplit = (FileSplit) splits.get(0);
    byte[] fragmentMetadata = HdfsUtilities.prepareFragmentMetadata(firstSplit);
    inputData.setFragmentMetadata(fragmentMetadata);
    inputData.setDataSource(firstSplit.getPath().toUri().getPath());
    accessor = ReadBridge.getFileAccessor(inputData);

    if (accessor.openForRead()) {
        tuples = 0;
        while (accessor.readNextObject() != null) {
            tuples++;
        }

        accessor.closeForRead();
    }
    Log.debug("number of tuples in first block: " + tuples);

    return tuples;
}

From source file:org.apache.hawq.pxf.service.rest.AnalyzerResource.java

@GET
@Path("getEstimatedStats")
@Produces("application/json")
public Response getEstimatedStats(@Context ServletContext servletContext, @Context final HttpHeaders headers,
        @QueryParam("path") String path) throws Exception {

    if (Log.isDebugEnabled()) {
        StringBuilder startmsg = new StringBuilder(
                "ANALYZER/getEstimatedStats started for path \"" + path + "\"");
        if (headers != null) {
            for (String header : headers.getRequestHeaders().keySet()) {
                startmsg.append(" Header: ").append(header).append(" Value: ")
                        .append(headers.getRequestHeader(header));
            }
        }
        Log.debug(startmsg);
    }

    /* Convert headers into a regular map */
    Map<String, String> params = convertToCaseInsensitiveMap(headers.getRequestHeaders());

    /* Store protocol level properties and verify */
    final ProtocolData protData = new ProtocolData(params);
    SecuredHDFS.verifyToken(protData, servletContext);

    /*
     * Analyzer is a special case in which it is hard to tell if user didn't
     * specify one, or specified a profile that doesn't include one, or it's
     * an actual protocol violation. Since we can only test protocol level
     * logic, we assume (like before) that it's a user error, which is the
     * case in most likelihood. When analyzer module is removed in the near
     * future, this assumption will go away with it.
     */
    if (protData.getAnalyzer() == null) {
        throw new IllegalArgumentException(
                "PXF 'Analyzer' class was not found. Please supply it in the LOCATION clause or use it in a PXF profile in order to run ANALYZE on this table");
    }

    /* Create an analyzer instance with API level parameters */
    final Analyzer analyzer = AnalyzerFactory.create(protData);

    /*
     * Function queries the pxf Analyzer for the data fragments of the resource.
     * The fragments are returned in a string formatted in JSON.
     */
    String jsonOutput = AnalyzerStats.dataToJSON(analyzer.getEstimatedStats(path));

    return Response.ok(jsonOutput, MediaType.APPLICATION_JSON_TYPE).build();
}