Example usage for org.apache.commons.logging Log info

Introduction

This page lists example usages of the info method of org.apache.commons.logging.Log.

Prototype

void info(Object message);

Document

Logs a message with info log level.
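
Before the real-world examples, here is a minimal, self-contained sketch of the basic call pattern (the class name InfoExample and the log message are illustrative, not taken from the examples below):

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class InfoExample {
    // A logger is typically obtained once per class via LogFactory.
    private static final Log LOG = LogFactory.getLog(InfoExample.class);

    public static void main(String[] args) {
        // info(Object) accepts any object; its toString() value is written at info level.
        LOG.info("Application started with " + args.length + " argument(s)");
    }
}

Whether the message is actually emitted depends on the configuration of the underlying logging implementation; log.isInfoEnabled() can be checked first to avoid building expensive messages.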

Usage

From source file:org.apache.hadoop.hdfs.TestRaidDfs.java

public static void waitForFileRaided(Log logger, FileSystem fileSys, Path file, Path destPath,
        short targetReplication) throws IOException, InterruptedException {
    FileStatus parityStat = null;
    String fileName = file.getName().toString();
    long startTime = System.currentTimeMillis();
    // wait till file is raided
    while (parityStat == null && System.currentTimeMillis() - startTime < 120000) {
        logger.info("Waiting for files to be raided.");
        try {
            FileStatus[] listPaths = fileSys.listStatus(destPath);
            if (listPaths != null) {
                for (FileStatus f : listPaths) {
                    logger.info("File raided so far : " + f.getPath());
                    String found = f.getPath().getName().toString();
                    if (fileName.equals(found)) {
                        parityStat = f;
                        break;
                    }
                }
            }
        } catch (FileNotFoundException e) {
            //ignore
        }
        Thread.sleep(1000); // keep waiting
    }

    while (true) {
        LocatedBlocks locations = null;
        DistributedFileSystem dfs = (DistributedFileSystem) fileSys;
        locations = RaidDFSUtil.getBlockLocations(dfs, file.toUri().getPath(), 0, parityStat.getLen());
        if (!locations.isUnderConstruction()) {
            break;
        }
        Thread.sleep(1000);
    }

    while (true) {
        FileStatus stat = fileSys.getFileStatus(file);
        if (stat.getReplication() == targetReplication)
            break;
        Thread.sleep(1000);
    }
}

From source file:org.apache.hadoop.hdfs.TestRaidDfs.java

public static void waitForDirRaided(Log logger, FileSystem fileSys, Path file, Path destPath,
        short targetReplication, long waitMillis) throws IOException, InterruptedException {
    FileStatus parityStat = null;
    String fileName = file.getName().toString();
    long startTime = System.currentTimeMillis();
    FileStatus srcStat = fileSys.getFileStatus(file);
    // wait till file is raided
    while (parityStat == null && System.currentTimeMillis() - startTime < waitMillis) {
        logger.info("Waiting for files to be raided.");
        try {
            FileStatus[] listPaths = fileSys.listStatus(destPath);
            if (listPaths != null) {
                for (FileStatus f : listPaths) {
                    logger.info("File raided so far : " + f.getPath());
                    String found = f.getPath().getName().toString();
                    if (fileName.equals(found) && srcStat.getModificationTime() == f.getModificationTime()) {
                        parityStat = f;
                        break;
                    }
                }
            }
        } catch (FileNotFoundException e) {
            //ignore
        }
        Thread.sleep(1000); // keep waiting
    }
    assertTrue("Parity file is not generated", parityStat != null);
    assertEquals(srcStat.getModificationTime(), parityStat.getModificationTime());
    for (FileStatus stat : fileSys.listStatus(file)) {
        assertEquals(stat.getReplication(), targetReplication);
    }
}

From source file:org.apache.hadoop.hdfs.web.WebHdfsFileSystem.java

/** Is WebHDFS enabled in conf? */
public static boolean isEnabled(final Configuration conf, final Log log) {
    final boolean b = conf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
            DFSConfigKeys.DFS_WEBHDFS_ENABLED_DEFAULT);
    log.info(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY + " = " + b);
    return b;
}

From source file:org.apache.hadoop.hive.ql.exec.ExecDriver.java

public static void main(String[] args) throws IOException, HiveException {

    String planFileName = null;
    ArrayList<String> jobConfArgs = new ArrayList<String>();
    boolean noLog = false;
    String files = null;
    boolean localtask = false;
    try {
        for (int i = 0; i < args.length; i++) {
            if (args[i].equals("-plan")) {
                planFileName = args[++i];
            } else if (args[i].equals("-jobconf")) {
                jobConfArgs.add(args[++i]);
            } else if (args[i].equals("-nolog")) {
                noLog = true;
            } else if (args[i].equals("-files")) {
                files = args[++i];
            } else if (args[i].equals("-localtask")) {
                localtask = true;
            }
        }
    } catch (IndexOutOfBoundsException e) {
        System.err.println("Missing argument to option");
        printUsage();
    }

    JobConf conf;
    if (localtask) {
        conf = new JobConf(MapredLocalTask.class);
    } else {
        conf = new JobConf(ExecDriver.class);
    }
    StringBuilder sb = new StringBuilder("JobConf:\n");

    for (String one : jobConfArgs) {
        int eqIndex = one.indexOf('=');
        if (eqIndex != -1) {
            try {
                String key = one.substring(0, eqIndex);
                String value = URLDecoder.decode(one.substring(eqIndex + 1), "UTF-8");
                conf.set(key, value);
                sb.append(key).append("=").append(value).append("\n");
            } catch (UnsupportedEncodingException e) {
                System.err.println(
                        "Unexpected error " + e.getMessage() + " while encoding " + one.substring(eqIndex + 1));
                System.exit(3);
            }
        }
    }

    if (files != null) {
        conf.set("tmpfiles", files);
    }

    boolean isSilent = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESESSIONSILENT);

    if (noLog) {
        // If started from main(), and noLog is on, we should not output
        // any logs. To turn the log on, please set -Dtest.silent=false
        BasicConfigurator.resetConfiguration();
        BasicConfigurator.configure(new NullAppender());
    } else {
        setupChildLog4j(conf);
    }

    Log LOG = LogFactory.getLog(ExecDriver.class.getName());
    LogHelper console = new LogHelper(LOG, isSilent);

    if (planFileName == null) {
        console.printError("Must specify Plan File Name");
        printUsage();
    }

    // print out the location of the log file for the user so
    // that it's easy to find reason for local mode execution failures
    for (Appender appender : Collections
            .list((Enumeration<Appender>) LogManager.getRootLogger().getAllAppenders())) {
        if (appender instanceof FileAppender) {
            console.printInfo("Execution log at: " + ((FileAppender) appender).getFile());
        }
    }

    // log the list of job conf parameters for reference
    LOG.info(sb.toString());

    // the plan file should always be in local directory
    Path p = new Path(planFileName);
    FileSystem fs = FileSystem.getLocal(conf);
    InputStream pathData = fs.open(p);

    // this is workaround for hadoop-17 - libjars are not added to classpath of the
    // child process. so we add it here explicitly

    String auxJars = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEAUXJARS);
    String addedJars = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEADDEDJARS);
    try {
        // see also - code in CliDriver.java
        ClassLoader loader = conf.getClassLoader();
        if (StringUtils.isNotBlank(auxJars)) {
            loader = Utilities.addToClassPath(loader, StringUtils.split(auxJars, ","));
        }
        if (StringUtils.isNotBlank(addedJars)) {
            loader = Utilities.addToClassPath(loader, StringUtils.split(addedJars, ","));
        }
        conf.setClassLoader(loader);
        // Also set this to the Thread ContextClassLoader, so new threads will
        // inherit
        // this class loader, and propagate into newly created Configurations by
        // those
        // new threads.
        Thread.currentThread().setContextClassLoader(loader);
    } catch (Exception e) {
        throw new HiveException(e.getMessage(), e);
    }
    int ret;
    if (localtask) {
        memoryMXBean = ManagementFactory.getMemoryMXBean();
        MapredLocalWork plan = Utilities.deserializeMapRedLocalWork(pathData, conf);
        MapredLocalTask ed = new MapredLocalTask(plan, conf, isSilent);
        ret = ed.executeFromChildJVM(new DriverContext());

    } else {
        MapredWork plan = Utilities.deserializeMapRedWork(pathData, conf);
        ExecDriver ed = new ExecDriver(plan, conf, isSilent);
        ret = ed.execute(new DriverContext());
    }

    if (ret != 0) {
        System.exit(2);
    }
}

From source file:org.apache.hadoop.hive.ql.exec.JoinOperator.java

/**
 * This is a similar implementation of FileSinkOperator.moveFileToFinalPath.
 * @param specPath
 * @param hconf
 * @param success
 * @param log
 * @throws IOException
 * @throws HiveException
 */
private void mvFileToFinalPath(Path specPath, Configuration hconf, boolean success, Log log)
        throws IOException, HiveException {

    FileSystem fs = specPath.getFileSystem(hconf);
    Path tmpPath = Utilities.toTempPath(specPath);
    Path intermediatePath = new Path(tmpPath.getParent(), tmpPath.getName() + ".intermediate");
    if (success) {
        if (fs.exists(tmpPath)) {
            // Step1: rename tmp output folder to intermediate path. After this
            // point, updates from speculative tasks still writing to tmpPath
            // will not appear in finalPath.
            log.info("Moving tmp dir: " + tmpPath + " to: " + intermediatePath);
            Utilities.rename(fs, tmpPath, intermediatePath);
            // Step2: remove any tmp file or double-committed output files
            Utilities.removeTempOrDuplicateFiles(fs, intermediatePath);
            // Step3: move to the file destination
            log.info("Moving tmp dir: " + intermediatePath + " to: " + specPath);
            Utilities.renameOrMoveFiles(fs, intermediatePath, specPath);
        }
    } else {
        fs.delete(tmpPath, true);
    }
}

From source file:org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator.java

public void initializeMapredLocalWork(MapJoinDesc mjConf, Configuration hconf, MapredLocalWork localWork,
        Log l4j) throws HiveException {
    if (localWork == null || localWorkInited) {
        return;
    }
    localWorkInited = true;
    this.localWork = localWork;
    aliasToMergeQueue = new HashMap<String, MergeQueue>();

    // create map local operators
    Map<String, FetchWork> aliasToFetchWork = localWork.getAliasToFetchWork();
    Map<String, Operator<? extends OperatorDesc>> aliasToWork = localWork.getAliasToWork();
    Map<String, DummyStoreOperator> aliasToSinkWork = conf.getAliasToSink();

    // The operator tree till the sink operator needs to be processed while
    // fetching the next row to fetch from the priority queue (possibly containing
    // multiple files in the small table given a file in the big table). The remaining
    // tree will be processed while processing the join.
    // Look at comments in DummyStoreOperator for additional explanation.
    for (Map.Entry<String, FetchWork> entry : aliasToFetchWork.entrySet()) {
        String alias = entry.getKey();
        FetchWork fetchWork = entry.getValue();

        JobConf jobClone = new JobConf(hconf);

        TableScanOperator ts = (TableScanOperator) aliasToWork.get(alias);
        // push down projections
        ColumnProjectionUtils.appendReadColumns(jobClone, ts.getNeededColumnIDs(), ts.getNeededColumns());
        // push down filters
        HiveInputFormat.pushFilters(jobClone, ts);

        ts.passExecContext(getExecContext());

        FetchOperator fetchOp = new FetchOperator(fetchWork, jobClone);
        ts.initialize(jobClone, new ObjectInspector[] { fetchOp.getOutputObjectInspector() });
        fetchOp.clearFetchContext();

        DummyStoreOperator sinkOp = aliasToSinkWork.get(alias);

        MergeQueue mergeQueue = new MergeQueue(alias, fetchWork, jobClone, ts, sinkOp);

        aliasToMergeQueue.put(alias, mergeQueue);
        l4j.info("fetch operators for " + alias + " initialized");
    }
}

From source file:org.apache.hadoop.hive.ql.io.rcfile.merge.BlockMergeTask.java

public static void main(String[] args) {

    ArrayList<String> jobConfArgs = new ArrayList<String>();

    String inputPathStr = null;
    String outputDir = null;

    try {
        for (int i = 0; i < args.length; i++) {
            if (args[i].equals("-input")) {
                inputPathStr = args[++i];
            } else if (args[i].equals("-jobconf")) {
                jobConfArgs.add(args[++i]);
            } else if (args[i].equals("-outputDir")) {
                outputDir = args[++i];
            }
        }
    } catch (IndexOutOfBoundsException e) {
        System.err.println("Missing argument to option");
        printUsage();
    }

    if (inputPathStr == null || outputDir == null || outputDir.trim().equals("")) {
        printUsage();
    }

    List<String> inputPaths = new ArrayList<String>();
    String[] paths = inputPathStr.split(INPUT_SEPERATOR);
    if (paths == null || paths.length == 0) {
        printUsage();
    }

    FileSystem fs = null;
    JobConf conf = new JobConf(BlockMergeTask.class);
    for (String path : paths) {
        try {
            Path pathObj = new Path(path);
            if (fs == null) {
                fs = FileSystem.get(pathObj.toUri(), conf);
            }
            FileStatus fstatus = fs.getFileStatus(pathObj);
            if (fstatus.isDir()) {
                FileStatus[] fileStatus = fs.listStatus(pathObj);
                for (FileStatus st : fileStatus) {
                    inputPaths.add(st.getPath().toString());
                }
            } else {
                inputPaths.add(fstatus.getPath().toString());
            }
        } catch (IOException e) {
            e.printStackTrace(System.err);
        }
    }

    StringBuilder sb = new StringBuilder("JobConf:\n");

    for (String one : jobConfArgs) {
        int eqIndex = one.indexOf('=');
        if (eqIndex != -1) {
            try {
                String key = one.substring(0, eqIndex);
                String value = URLDecoder.decode(one.substring(eqIndex + 1), "UTF-8");
                conf.set(key, value);
                sb.append(key).append("=").append(value).append("\n");
            } catch (UnsupportedEncodingException e) {
                System.err.println(
                        "Unexpected error " + e.getMessage() + " while encoding " + one.substring(eqIndex + 1));
                System.exit(3);
            }
        }
    }
    HiveConf hiveConf = new HiveConf(conf, BlockMergeTask.class);

    Log LOG = LogFactory.getLog(BlockMergeTask.class.getName());
    boolean isSilent = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESESSIONSILENT);
    LogHelper console = new LogHelper(LOG, isSilent);

    // print out the location of the log file for the user so
    // that it's easy to find reason for local mode execution failures
    for (Appender appender : Collections
            .list((Enumeration<Appender>) LogManager.getRootLogger().getAllAppenders())) {
        if (appender instanceof FileAppender) {
            console.printInfo("Execution log at: " + ((FileAppender) appender).getFile());
        }
    }

    // log the list of job conf parameters for reference
    LOG.info(sb.toString());

    MergeWork mergeWork = new MergeWork(inputPaths, outputDir);
    DriverContext driverCxt = new DriverContext();
    BlockMergeTask taskExec = new BlockMergeTask();
    taskExec.initialize(hiveConf, null, driverCxt);
    taskExec.setWork(mergeWork);
    int ret = taskExec.execute(driverCxt);

    if (ret != 0) {
        System.exit(2);
    }

}

From source file:org.apache.hadoop.mapred.Counters.java

/**
 * Logs the current counter values.
 * @param log The log to use.
 */
public void log(Log log) {
    log.info("Counters: " + size());
    for (Group group : this) {
        log.info("  " + group.getDisplayName());
        for (Counter counter : group) {
            log.info("    " + counter.getDisplayName() + "=" + counter.getCounter());
        }
    }
}

From source file:org.apache.hadoop.test.TestGenericTestUtils.java

@Test(timeout = 10000)
public void testLogCapturer() {
    final Log log = LogFactory.getLog(TestGenericTestUtils.class);
    LogCapturer logCapturer = LogCapturer.captureLogs(log);
    final String infoMessage = "info message";
    // test get output message
    log.info(infoMessage);
    assertTrue(logCapturer.getOutput().endsWith(String.format(infoMessage + "%n")));
    // test clear output
    logCapturer.clearOutput();
    assertTrue(logCapturer.getOutput().isEmpty());
    // test stop capturing
    logCapturer.stopCapturing();
    log.info(infoMessage);
    assertTrue(logCapturer.getOutput().isEmpty());
}

From source file:org.apache.hadoop.tools.Logalyzer.java

public static void main(String[] args) {

    Log LOG = LogFactory.getLog(Logalyzer.class);

    String version = "Logalyzer.0.0.1";
    String usage = "Usage: Logalyzer [-archive -logs <urlsFile>] " + "-archiveDir <archiveDirectory> "
            + "-grep <pattern> -sort <column1,column2,...> -separator <separator> "
            + "-analysis <outputDirectory>";

    System.out.println(version);
    if (args.length == 0) {
        System.err.println(usage);
        System.exit(-1);
    }

    //Command line arguments
    boolean archive = false;
    boolean grep = false;
    boolean sort = false;

    String archiveDir = "";
    String logListURI = "";
    String grepPattern = ".*";
    String sortColumns = "";
    String columnSeparator = " ";
    String outputDirectory = "";

    for (int i = 0; i < args.length; i++) { // parse command line
        if (args[i].equals("-archive")) {
            archive = true;
        } else if (args[i].equals("-archiveDir")) {
            archiveDir = args[++i];
        } else if (args[i].equals("-grep")) {
            grep = true;
            grepPattern = args[++i];
        } else if (args[i].equals("-logs")) {
            logListURI = args[++i];
        } else if (args[i].equals("-sort")) {
            sort = true;
            sortColumns = args[++i];
        } else if (args[i].equals("-separator")) {
            columnSeparator = args[++i];
        } else if (args[i].equals("-analysis")) {
            outputDirectory = args[++i];
        }
    }

    LOG.info("analysisDir = " + outputDirectory);
    LOG.info("archiveDir = " + archiveDir);
    LOG.info("logListURI = " + logListURI);
    LOG.info("grepPattern = " + grepPattern);
    LOG.info("sortColumns = " + sortColumns);
    LOG.info("separator = " + columnSeparator);

    try {
        Logalyzer logalyzer = new Logalyzer();

        // Archive?
        if (archive) {
            logalyzer.doArchive(logListURI, archiveDir);
        }

        // Analyze?
        if (grep || sort) {
            logalyzer.doAnalyze(archiveDir, outputDirectory, grepPattern, sortColumns, columnSeparator);
        }
    } catch (IOException ioe) {
        ioe.printStackTrace();
        System.exit(-1);
    }

}