Example usage for org.apache.commons.logging Log debug

Introduction

This page collects example usages of org.apache.commons.logging Log.debug from several open-source projects.

Prototype

void debug(Object message);

Document

Logs a message with debug log level.
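
Before the per-project examples below, here is a minimal sketch of the typical call pattern; the class name MyService and the message text are hypothetical and not taken from any of the sources listed on this page. The string concatenation is guarded by isDebugEnabled() so the message is only built when debug output is actually enabled.

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class MyService {

    // Obtain a Log instance from the commons-logging factory.
    private static final Log log = LogFactory.getLog(MyService.class);

    public void handle(String id) {
        // Guard the message construction; debug is usually disabled in production.
        if (log.isDebugEnabled()) {
            log.debug("Handling request with id '" + id + "'.");
        }
    }
}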

Usage

From source file:dk.netarkivet.common.utils.archive.ArchiveBatchJob.java

/**
 * Accepts only arc(.gz) and warc(.gz) files. Runs through all records and calls
 * processRecord() on every record that is allowed by getFilter().
 * Does nothing on a non-(w)arc file.
 *
 * @param archiveFile The arc(.gz) or warc(.gz) file to be processed.
 * @param os the OutputStream to which output is to be written
 * @throws ArgumentNotValid if either argument is null
 * @return true if the file was processed successfully, otherwise false
 */
public final boolean processFile(File archiveFile, OutputStream os) throws ArgumentNotValid {
    ArgumentNotValid.checkNotNull(archiveFile, "archiveFile");
    ArgumentNotValid.checkNotNull(os, "os");
    Log log = LogFactory.getLog(getClass().getName());
    long arcFileIndex = 0;
    boolean success = true;
    log.info("Processing archive file: " + archiveFile.getName());

    try { // This outer try-catch block catches all unexpected exceptions
          //Create an ArchiveReader and retrieve its Iterator:
        ArchiveReader archiveReader = null;

        try {
            archiveReader = ArchiveReaderFactory.get(archiveFile);
        } catch (IOException e) { //Some IOException
            handleException(e, archiveFile, arcFileIndex);

            return false; // Can't process file after exception
        }

        try {
            Iterator<? extends ArchiveRecord> it = archiveReader.iterator();
            /* Process all records from this Iterator: */
            log.debug("Starting processing records in archive file '" + archiveFile.getName() + "'.");
            if (!it.hasNext()) {
                log.debug("No records found in archive file '" + archiveFile.getName() + "'.");
            }
            ArchiveRecord archiveRecord = null;
            ArchiveRecordBase record;
            while (it.hasNext()) {
                log.trace("At begin of processing-loop");
                // Get a record from the file
                archiveRecord = (ArchiveRecord) it.next();
                record = ArchiveRecordBase.wrapArchiveRecord(archiveRecord);
                // Process with the job
                try {
                    if (!getFilter().accept(record)) {
                        continue;
                    }
                    log.debug("Processing record #" + noOfRecordsProcessed + " in archive file '"
                            + archiveFile.getName() + "'.");
                    processRecord(record, os);
                    ++noOfRecordsProcessed;
                } catch (NetarkivetException e) {
                    // Our exceptions don't stop us
                    success = false;

                    // With our exceptions, we assume that just the
                    // processing of this record got stopped, and we can
                    // easily find the next
                    handleOurException(e, archiveFile, arcFileIndex);
                } catch (Exception e) {
                    success = false; // Strange exceptions do stop us

                    handleException(e, archiveFile, arcFileIndex);
                    // With strange exceptions, we don't know
                    // if we've skipped records
                    break;
                }
                // Close the record
                try {
                    /*
                     // FIXME: Don't know  how to compute this for warc-files
                     // computation for arc-files: long arcRecordOffset =
                     //        record.getBodyOffset() + record.getMetaData().getLength();
                     // computation for warc-files (experimental)
                     long arcRecordOffset = record.getHeader().getOffset();
                     */
                    // TODO maybe this works, maybe not...
                    long arcRecordOffset = archiveRecord.getHeader().getContentBegin()
                            + archiveRecord.getHeader().getLength();
                    archiveRecord.close();
                    arcFileIndex = arcRecordOffset;
                } catch (IOException ioe) { // Couldn't close the record
                    success = false;

                    handleException(ioe, archiveFile, arcFileIndex);
                    // If close fails, we don't know if we've skipped
                    // records
                    break;
                }
                log.trace("At end of processing-loop");
            }
        } finally {
            try {
                archiveReader.close();
            } catch (IOException e) { //Some IOException
                // TODO Discuss whether exceptions on close cause
                // filesFailed addition
                handleException(e, archiveFile, arcFileIndex);
            }
        }
    } catch (Exception unexpectedException) {
        handleException(unexpectedException, archiveFile, arcFileIndex);
        return false;
    }
    return success;
}

From source file:com.alibaba.wasp.client.FConnectionManager.java

/**
 * Set the number of retries to use serverside when trying to communicate with
 * another server over {@link com.alibaba.wasp.client.FConnection}. Used when
 * updating catalog tables, etc. Call this method before we create any
 * Connections.
 * 
 * @param c
 *          The Configuration instance to set the retries into.
 * @param log
 *          Used to log what we set in here.
 */
public static void setServerSideFConnectionRetries(final Configuration c, final Log log) {
    int fcRetries = c.getInt(FConstants.WASP_CLIENT_RETRIES_NUMBER,
            FConstants.DEFAULT_WASP_CLIENT_RETRIES_NUMBER);
    // Go big. Multiply by 10. If we can't get to meta after this many retries
    // then something is seriously wrong.
    int serversideMultiplier = c.getInt("wasp.client.serverside.retries.multiplier", 10);
    int retries = fcRetries * serversideMultiplier;
    c.setInt(FConstants.WASP_CLIENT_RETRIES_NUMBER, retries);
    log.debug("Set serverside FConnection retries=" + retries);
}
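
A hypothetical call site for the method above, assuming wasp's Configuration is the standard Hadoop org.apache.hadoop.conf.Configuration; the conf variable and the point at which this runs are assumptions, not taken from the wasp sources.

    // Must run before any FConnection is created, per the javadoc above.
    Configuration conf = new Configuration();
    Log log = LogFactory.getLog(FConnectionManager.class);
    FConnectionManager.setServerSideFConnectionRetries(conf, log);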

From source file:dk.netarkivet.common.utils.cdx.ArchiveBatchJob.java

/**
 * Accepts only ARC and ARCGZ files. Runs through all records and calls processRecord() on every record that is
 * allowed by getFilter(). Does nothing on a non-arc file.
 *
 * @param arcFile The ARC or ARCGZ file to be processed.
 * @param os the OutputStream to which output is to be written
 * @return true if the file was processed successfully, otherwise false
 * @throws ArgumentNotValid if either argument is null
 */
public final boolean processFile(File arcFile, OutputStream os) throws ArgumentNotValid {
    ArgumentNotValid.checkNotNull(arcFile, "arcFile");
    ArgumentNotValid.checkNotNull(os, "os");
    Log log = LogFactory.getLog(getClass().getName());
    long arcFileIndex = 0;
    boolean success = true;
    log.info("Processing file: " + arcFile.getName());

    try { // This outer try-catch block catches all unexpected exceptions
          // Create an ARCReader and retrieve its Iterator:
        ArchiveReader arcReader = null;

        try {
            arcReader = ArchiveReaderFactory.get(arcFile);
        } catch (IOException e) { // Some IOException
            handleException(e, arcFile, arcFileIndex);

            return false; // Can't process file after exception
        }

        try {
            Iterator<? extends ArchiveRecord> it = arcReader.iterator();
            /* Process all records from this Iterator: */
            log.debug("Starting processing records in ARCfile '" + arcFile.getName() + "'.");
            if (!it.hasNext()) {
                log.debug("No ARCRecords found in ARCfile '" + arcFile.getName() + "'.");
            }
            while (it.hasNext()) {
                log.debug("At begin of processing-loop");
                ArchiveRecord record = null;

                // Get a record from the file
                try {
                    record = it.next();
                } catch (Exception unexpectedException) {
                    handleException(unexpectedException, arcFile, arcFileIndex);
                    return false;
                }
                // Process with the job
                try {
                    if (!getFilter().accept(record)) {
                        continue;
                    }
                    log.debug("Processing ArchiveRecord #" + noOfRecordsProcessed + " in file '"
                            + arcFile.getName() + "'.");
                    processRecord(record, os);
                    ++noOfRecordsProcessed;
                } catch (NetarkivetException e) { // Our exceptions don't stop us
                    success = false;

                    // With our exceptions, we assume that just the processing
                    // of this record got stopped, and we can easily find the next
                    handleOurException(e, arcFile, arcFileIndex);
                } catch (Exception e) {
                    success = false; // Strange exceptions do stop us

                    handleException(e, arcFile, arcFileIndex);
                    // With strange exceptions, we don't know if we've skipped records
                    break;
                }
                // Close the record
                try {
                    // FIXME: Don't know how to compute this for warc-files
                    // computation for arc-files: long arcRecordOffset =
                    // record.getBodyOffset() + record.getMetaData().getLength();
                    // computation for warc-files (experimental)
                    long arcRecordOffset = record.getHeader().getOffset();

                    record.close();
                    arcFileIndex = arcRecordOffset;
                } catch (IOException ioe) { // Couldn't close an ARCRecord
                    success = false;

                    handleException(ioe, arcFile, arcFileIndex);
                    // If close fails, we don't know if we've skipped records
                    break;
                }
                log.debug("At end of processing-loop");
            }
        } finally {
            try {
                arcReader.close();
            } catch (IOException e) { // Some IOException
                // TODO: Discuss whether exceptions on close cause filesFailed addition
                handleException(e, arcFile, arcFileIndex);
            }
        }
    } catch (Exception unexpectedException) {
        handleException(unexpectedException, arcFile, arcFileIndex);
        return false;
    }
    return success;
}

From source file:net.fenyo.mail4hotspot.service.AdvancedServicesImpl.java

@Override
public void testLog() {
    final org.apache.commons.logging.Log log = org.apache.commons.logging.LogFactory.getLog(getClass());

    log.trace("testlog: TRACE");
    log.fatal("testlog: FATAL");
    log.error("testlog: ERROR");
    log.info("testlog: INFO");
    log.debug("testlog: DEBUG");
    log.warn("testlog: WARN");
}
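
Whether the TRACE and DEBUG lines from testLog() above actually appear depends on which logging implementation commons-logging binds to at runtime. A minimal sketch, assuming the bundled SimpleLog is used (no Log4j or java.util.logging bridge configured), of forcing all levels on via system properties; the properties must be set before the first LogFactory.getLog() call:

    // Route commons-logging to its built-in SimpleLog and lower the default threshold.
    System.setProperty("org.apache.commons.logging.Log", "org.apache.commons.logging.impl.SimpleLog");
    System.setProperty("org.apache.commons.logging.simplelog.defaultlog", "trace");

    // SimpleLog then writes all six testLog() messages to System.err.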

From source file:dk.netarkivet.common.utils.arc.ARCBatchJob.java

/**
 * Accepts only ARC and ARCGZ files. Runs through all records and calls
 * processRecord() on every record that is allowed by getFilter().
 * Does nothing on a non-arc file.
 *
 * @param arcFile The ARC or ARCGZ file to be processed.
 * @param os the OutputStream to which output is to be written
 * @throws ArgumentNotValid if either argument is null
 * @return true if the file was processed successfully, otherwise false
 */
@Override
public final boolean processFile(File arcFile, OutputStream os) throws ArgumentNotValid {
    ArgumentNotValid.checkNotNull(arcFile, "arcFile");
    ArgumentNotValid.checkNotNull(os, "os");
    Log log = LogFactory.getLog(getClass().getName());
    long arcFileIndex = 0;
    boolean success = true;
    log.info("Processing ARCfile: " + arcFile.getName());

    try { // This outer try-catch block catches all unexpected exceptions
          //Create an ARCReader and retrieve its Iterator:
        ARCReader arcReader = null;

        try {
            arcReader = ARCReaderFactory.get(arcFile);
        } catch (IOException e) { //Some IOException
            handleException(e, arcFile, arcFileIndex);

            return false; // Can't process file after exception
        }

        try {
            Iterator<? extends ArchiveRecord> it = arcReader.iterator();
            /* Process all records from this Iterator: */
            log.debug("Starting processing records in ARCfile '" + arcFile.getName() + "'.");
            if (!it.hasNext()) {
                log.debug("No ARCRecords found in ARCfile '" + arcFile.getName() + "'.");
            }
            ARCRecord record = null;
            while (it.hasNext()) {
                log.trace("At begin of processing-loop");
                // Get a record from the file
                record = (ARCRecord) it.next();
                // Process with the job
                try {
                    if (!getFilter().accept(record)) {
                        continue;
                    }
                    log.debug("Processing ARCRecord #" + noOfRecordsProcessed + " in ARCfile '"
                            + arcFile.getName() + "'.");
                    processRecord(record, os);
                    ++noOfRecordsProcessed;
                } catch (NetarkivetException e) {
                    // Our exceptions don't stop us
                    success = false;

                    // With our exceptions, we assume that just the
                    // processing of this record got stopped, and we can
                    // easily find the next
                    handleOurException(e, arcFile, arcFileIndex);
                } catch (Exception e) {
                    success = false; // Strange exceptions do stop us

                    handleException(e, arcFile, arcFileIndex);
                    // With strange exceptions, we don't know
                    // if we've skipped records
                    break;
                }
                // Close the record
                try {
                    long arcRecordOffset = record.getBodyOffset() + record.getMetaData().getLength();
                    record.close();
                    arcFileIndex = arcRecordOffset;
                } catch (IOException ioe) { // Couldn't close an ARCRecord
                    success = false;

                    handleException(ioe, arcFile, arcFileIndex);
                    // If close fails, we don't know if we've skipped
                    // records
                    break;
                }
                log.trace("At end of processing-loop");
            }
        } finally {
            try {
                arcReader.close();
            } catch (IOException e) { //Some IOException
                // TODO Discuss whether exceptions on close cause
                // filesFailed addition
                handleException(e, arcFile, arcFileIndex);
            }
        }
    } catch (Exception unexpectedException) {
        handleException(unexpectedException, arcFile, arcFileIndex);
        return false;
    }
    return success;
}

From source file:dk.netarkivet.common.utils.warc.WARCBatchJob.java

/**
 * Accepts only WARC and WARCGZ files. Runs through all records and calls
 * processRecord() on every record that is allowed by getFilter().
 * Does nothing on a non-WARC file.
 *
 * @param warcFile The WARC or WARCGZ file to be processed.
 * @param os the OutputStream to which output is to be written
 * @throws ArgumentNotValid if either argument is null
 * @return true if the file was processed successfully, otherwise false
 */
public final boolean processFile(File warcFile, OutputStream os) throws ArgumentNotValid {
    ArgumentNotValid.checkNotNull(warcFile, "warcFile");
    ArgumentNotValid.checkNotNull(os, "os");
    Log log = LogFactory.getLog(getClass().getName());
    long arcFileIndex = 0;
    boolean success = true;
    log.info("Processing WARCfile: " + warcFile.getName());

    try { // This outer try-catch block catches all unexpected exceptions
          // Create a WARCReader and retrieve its Iterator:
        WARCReader warcReader = null;

        try {
            warcReader = WARCReaderFactory.get(warcFile);
        } catch (IOException e) { //Some IOException
            handleException(e, warcFile, arcFileIndex);

            return false; // Can't process file after exception
        }

        try {
            Iterator<? extends ArchiveRecord> it = warcReader.iterator();
            /* Process all records from this Iterator: */
            log.debug("Starting processing records in WARCfile '" + warcFile.getName() + "'.");
            if (!it.hasNext()) {
                log.debug("No WARCRecords found in WARCfile '" + warcFile.getName() + "'.");
            }
            WARCRecord record = null;
            while (it.hasNext()) {
                log.trace("At begin of processing-loop");
                // Get a record from the file
                record = (WARCRecord) it.next();
                // Process with the job
                try {
                    if (!getFilter().accept(record)) {
                        continue;
                    }
                    log.debug("Processing WARCRecord #" + noOfRecordsProcessed + " in WARCfile '"
                            + warcFile.getName() + "'.");
                    processRecord(record, os);
                    ++noOfRecordsProcessed;
                } catch (NetarkivetException e) {
                    // Our exceptions don't stop us
                    success = false;

                    // With our exceptions, we assume that just the
                    // processing of this record got stopped, and we can
                    // easily find the next
                    handleOurException(e, warcFile, arcFileIndex);
                } catch (Exception e) {
                    success = false; // Strange exceptions do stop us

                    handleException(e, warcFile, arcFileIndex);
                    // With strange exceptions, we don't know
                    // if we've skipped records
                    break;
                }
                // Close the record
                try {
                    // TODO maybe this works, maybe not...
                    long arcRecordOffset = record.getHeader().getContentBegin()
                            + record.getHeader().getLength();
                    record.close();
                    arcFileIndex = arcRecordOffset;
                } catch (IOException ioe) { // Couldn't close a WARCRecord
                    success = false;

                    handleException(ioe, warcFile, arcFileIndex);
                    // If close fails, we don't know if we've skipped
                    // records
                    break;
                }
                log.trace("At end of processing-loop");
            }
        } finally {
            try {
                warcReader.close();
            } catch (IOException e) { //Some IOException
                // TODO Discuss whether exceptions on close cause
                // filesFailed addition
                handleException(e, warcFile, arcFileIndex);
            }
        }
    } catch (Exception unexpectedException) {
        handleException(unexpectedException, warcFile, arcFileIndex);
        return false;
    }
    return success;
}

From source file:de.ingrid.iplug.csw.dsc.cache.impl.AbstractUpdateStrategy.java

/**
 * Fetch all records that satisfy the given filter using the GetRecords and
 * return the ids and put them into the cache
 * @note This method is guaranteed to query the server without a constraint if the
 * provided filter set is empty.
 * 
 * @param client The CSWClient to use
 * @param elementSetName The ElementSetName of the records to fetch
 * @param filterSet The filter set used to select the records
 * @param doCache Determines whether to cache the record or not
 * @return A list of ids of the fetched records
 * @throws Exception
 */
protected List<String> fetchRecords(CSWClient client, ElementSetName elementSetName, Set<Document> filterSet,
        boolean doCache) throws Exception {

    CSWFactory factory = client.getFactory();
    Log log = this.getLog();

    // If the filter set is empty, we add at least a null entry.
    // This causes execution of the iteration below, but
    // will not add a constraint definition to the request.
    if (filterSet == null)
        filterSet = new HashSet<Document>();
    if (filterSet.size() == 0)
        filterSet.add(null);

    // variables for complete fetch process
    // int numTotal = 0;
    List<String> fetchedRecordIds = new CopyOnWriteArrayList<String>();

    // iterate over all filters
    int filterIndex = 1;
    for (Document filter : filterSet) {
        if (log.isDebugEnabled())
            log.debug("Processing filter " + filterIndex + ": "
                    + StringUtils.nodeToString(filter).replace("\n", "") + ".");

        // variables for current fetch process (current filter)
        int numRecordsTotal = 0;
        int numRecordsFetched = 0;
        List<String> currentFetchedRecordIds = new ArrayList<String>();

        // create the query
        CSWQuery query = factory.createQuery();
        query.setConstraint(filter);
        query.setResultType(ResultType.RESULTS);
        query.setElementSetName(elementSetName);
        query.setMaxRecords(this.recordsPerCall);
        query.setStartPosition(1);

        // do requests

        // do first request

        CSWSearchResult result = client.getRecords(query);
        numRecordsFetched += result.getNumberOfRecords();
        numRecordsTotal = result.getNumberOfRecordsTotal();
        if (log.isInfoEnabled())
            log.info(numRecordsTotal + " record(s) from filter " + filterIndex + ":");

        if (numRecordsTotal > 0) {

            if (log.isInfoEnabled()) {
                log.info("\nPARAMETERS OF FETCHING PROCESS:" + "\nrecords per chunk (request): "
                        + recordsPerCall + "\ngeneral pause between requesting next chunk (msec): "
                        + requestPause + "\nnum retries per chunk: " + cswConfig.numRetriesPerRequest
                        + "\npause between retries (msec): " + cswConfig.timeBetweenRetries
                        + "\nmax number of lost chunks: " + cswConfig.maxNumSkippedRequests);
            }

            // process
            currentFetchedRecordIds.addAll(processResult(result, doCache));

            int numSkippedRequests = 0;
            String logLostRecordChunks = "";
            int numLostRecords = 0;
            while (numRecordsFetched < numRecordsTotal) {
                if (cswConfig.maxNumSkippedRequests > -1) {
                    // fetching should end when a maximum number of failures (in a row) is reached.
                    if (numSkippedRequests > cswConfig.maxNumSkippedRequests) {
                        log.error("Problems fetching records. Total number of skipped requests reached ("
                                + cswConfig.maxNumSkippedRequests
                                + " requests without results). We end fetching process for this filter.");
                        statusProvider.addState(
                                "ERROR_FETCH", "Error during fetch, since more than "
                                        + cswConfig.maxNumSkippedRequests + " records have been skipped.",
                                Classification.ERROR);
                        break;
                    }
                }

                // generic pause between requests, set via spring
                Thread.sleep(this.requestPause);

                String logCurrRecordChunk = "";
                try {
                    // prepare next request
                    // Just for safety: get number of last fetched records from last result, if we have a result and records.
                    int numLastFetch = query.getMaxRecords();
                    if (result != null && (result.getNumberOfRecords() > 0)) {
                        numLastFetch = result.getNumberOfRecords();
                    }
                    numRecordsFetched += numLastFetch;
                    statusProvider.addState("FETCH",
                            "Fetching record " + (numRecordsFetched - numLastFetch + 1) + "-"
                                    + numRecordsFetched + " / " + numRecordsTotal + " from "
                                    + client.getFactory().getServiceUrl());

                    query.setStartPosition(query.getStartPosition() + numLastFetch);

                    // for logging below
                    logCurrRecordChunk = "" + query.getStartPosition() + " - "
                            + (query.getStartPosition() + query.getMaxRecords());

                    // Do the next request; if problems occur, retry with an increasing pause in between
                    int numRetries = 0;
                    while (true) {
                        try {
                            result = null;
                            result = client.getRecords(query);
                            break;

                        } catch (Exception e) {
                            if (numRetries == cswConfig.numRetriesPerRequest) {
                                log.error("Retried " + numRetries + " times ! We skip records "
                                        + logCurrRecordChunk, e);
                                break;
                            }

                            numRetries++;
                            int timeBetweenRetry = numRetries * cswConfig.timeBetweenRetries;
                            log.error("Error fetching records " + logCurrRecordChunk + ". We retry "
                                    + numRetries + ". time after " + timeBetweenRetry + " msec !", e);
                            Thread.sleep(timeBetweenRetry);
                        }
                    }

                    // process
                    if (result == null || result.getNumberOfRecords() == 0) {
                        // No result from this query; count the failures to check whether the fetching process should be ended.
                        numSkippedRequests++;
                        numLostRecords += query.getMaxRecords();
                        logLostRecordChunks += logCurrRecordChunk + "\n";

                    } else {
                        currentFetchedRecordIds.addAll(processResult(result, doCache));
                    }
                } catch (Exception e) {
                    statusProvider.addState("ERROR_FETCH_PROCESS",
                            "Error during processing record: " + logCurrRecordChunk, Classification.ERROR);
                    log.error("Error processing records " + logCurrRecordChunk);
                    log.error(ExceptionUtils.getStackTrace(e));
                }
            }

            if (numLostRecords > 0) {
                statusProvider.addState("ERROR_FETCH_PROCESS",
                        "Error during fetching of record: " + logLostRecordChunks, Classification.ERROR);
                log.error("\nWe had failed GetRecords requests !!!" + "\nThe following " + numLostRecords
                        + " records were NOT fetched and are \"lost\":" + "\n" + logLostRecordChunks);
            }
        }

        // collect record ids
        fetchedRecordIds.addAll(currentFetchedRecordIds);
        // numTotal += currentFetchedRecordIds.size();
        filterIndex++;
    }
    return fetchedRecordIds;
}

From source file:hotbeans.support.AbstractHotBeanModuleRepository.java

/**
 * Destroys this AbstractHotBeanModuleRepository. Subclasses may override this method, but should call the super
 * class implementation.
 */
public void destroy() throws Exception {
    Log logger = this.getLog();

    synchronized (this.lock) {
        String[] moduleNames = this.getHotBeanModuleNames();
        if (logger.isInfoEnabled())
            logger.info("Destroying " + this.getName() + " (" + ((moduleNames != null) ? moduleNames.length : 0)
                    + " modules).");
        HotBeanModuleType moduleType;
        HotBeanModule[] modules;
        for (int i = 0; i < moduleNames.length; i++) {
            if (logger.isDebugEnabled())
                logger.debug("Unloading revisions of module " + moduleNames[i] + ".");

            moduleType = this.getHotBeanModuleType(moduleNames[i]);
            if (moduleType != null) {
                modules = moduleType.getModules();
                for (int j = 0; j < modules.length; j++) {
                    if (logger.isDebugEnabled())
                        logger.debug("Checking " + modules[j] + ".");

                    if (modules[j].isActive() || modules[j].isInactive()) {
                        if (logger.isDebugEnabled())
                            logger.debug("Unloading " + modules[j] + ".");
                        modules[j].unload();
                    }
                    this.unregisterHotBeanModule(modules[j]);
                }
            }
        }
    }
}

From source file:it.doqui.index.ecmengine.client.engine.AbstractEcmEngineDelegateImpl.java

protected AbstractEcmEngineDelegateImpl(Log inLog) {
    inLog.debug("[" + getClass().getSimpleName() + "::constructor] BEGIN");
    this.log = inLog;
    try {
        initializeManagement();
        initializeSearch();
        initializeSecurity();
        initializeMassive();
    } catch (Throwable ex) {
        log.error("[" + getClass().getSimpleName() + "::constructor] eccezione", ex);
    } finally {
        log.debug("[" + getClass().getSimpleName() + "::constructor] END");
    }
}

From source file:com.curl.orb.servlet.InvokeApplicationContextServlet.java

@Override
public void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException {
    super.doPost(request, response);
    Log log = LogFactory.getLog(getClass());
    InvokeApplicationContextRequest invokeRequest = (InvokeApplicationContextRequest) InstanceManagementUtil
            .getRequest(request);
    try {
        String className = invokeRequest.getClassName(); // context's object name
        String methodName = invokeRequest.getMethodName();
        Object[] arguments = invokeRequest.getArguments();

        AbstractApplicationContext applicationContext = ApplicationContextFactory
                .getInstance(getServletContext()).getApplicationContext();
        Object obj = applicationContext.getObject(className);
        if (obj == null)
            throw new InstanceManagementException("This object does not exist [" + className + "]");
        // security
        RemoteServiceAnnotationChecker.check(applicationContext.getProperType(obj), environment);
        Method method = InstanceManagementUtil.getMethod(obj, methodName, arguments);
        InstanceManagementUtil.setResponse(request, InstanceManagementUtil.invokeMethod(method, obj, arguments),
                InstanceManagementUtil.getSurborinateObject(method));
        // debug
        log.debug("Request invoke method(DI Container)");
    }
    // IOException, SerializerException, ApplicationContextException, InstanceManagementException ...
    catch (Exception e) {
        InstanceManagementUtil.setResponse(request, e, null);
    }
}