List of usage examples for org.apache.commons.logging.Log.info
void info(Object message);
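For orientation, here is a minimal, self-contained sketch of calling info(Object) through Commons Logging. The class name and messages are illustrative (not from the sources below); the isInfoEnabled() guard is optional but avoids building messages that would be discarded.

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class InfoExample {
    private static final Log LOG = LogFactory.getLog(InfoExample.class);

    public static void main(String[] args) {
        // info(Object) accepts any object; a String is the common case.
        LOG.info("Application started");

        // Guard expensive message construction with isInfoEnabled().
        if (LOG.isInfoEnabled()) {
            LOG.info(String.format("Started with %d argument(s)", args.length));
        }
    }
}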
From source file:com.CodeSeance.JSeance2.CodeGenXML.Runtime.java
public String run(File includesDir, File modelsDir, File targetDir, List<File> templateFiles, Logger externalLog) {
    errors = false;
    Log log = LogFactory.getLog("Runtime");
    StringBuffer buffer = new StringBuffer();
    if (!((targetDir.exists() && targetDir.isDirectory()) || targetDir.mkdirs())) {
        String message = ExecutionError.INVALID_TARGET_DIR.getMessage(targetDir);
        externalLog.errorMessage(message);
        log.error(message);
        return null;
    }
    DependencyManager dependencyManager = new DependencyManager(targetDir);
    // access non-option arguments and generate the templates
    for (File templateFile : templateFiles) {
        TemplateDependencies templateDependencies = dependencyManager.getTemplateDependencies(templateFile);
        // Track the processing time
        externalLog.infoMessage(String.format("Processing template file:[%s]", templateFile.toString()));
        long startMillis = System.currentTimeMillis();
        if (!dependencyManager.getTemplateDependencies(templateFile).isUpToDate() || forceRebuild) {
            dependencyManager.clearTemplateDependencies(templateFile);
            try {
                String result = run(templateFile, includesDir, modelsDir, targetDir, templateDependencies);
                buffer.append(result);
                dependencyManager.commit();
            } catch (Exception ex) {
                errors = true;
                externalLog.errorMessage(ex.getMessage());
                log.error(ex.getMessage());
            }
        } else {
            String message = String.format(
                    "File dependencies are up to date, skipping template generation:[%s]", templateFile);
            externalLog.infoMessage(message);
            log.info(message);
        }
        long elapsedMillis = System.currentTimeMillis() - startMillis;
        externalLog.infoMessage(String.format("Completed in :[%s] ms", elapsedMillis));
    }
    return buffer.toString();
}
From source file:com.github.mobius_java_a.my_pub_sub_tutorial.Listener.java
@Override
public void onStart(ConnectedNode connectedNode) {
    final Log log = connectedNode.getLog();
    // Client for the master roscore
    final MasterClient masterClient = new MasterClient(connectedNode.getMasterUri());
    connectedNode.executeCancellableLoop(new CancellableLoop() {
        Response<SystemState> systemState;
        Collection<TopicSystemState> topicSystemState;
        String out = "";

        @Override
        protected void setup() {
        }

        @Override
        protected void loop() throws InterruptedException {
            systemState = masterClient.getSystemState(getDefaultNodeName());
            topicSystemState = systemState.getResult().getTopics();
            // bit of code actually building the topic lists
            // TODO: skip if nothing changed?
            topicPubSub = new HashMap<String, List<Set<String>>>();
            for (TopicSystemState i : topicSystemState) {
                List<Set<String>> temp = new ArrayList<Set<String>>();
                temp.add(i.getPublishers());
                temp.add(i.getSubscribers());
                topicPubSub.put(i.getTopicName(), temp);
            }
            // pretty print the lists to the info log. Can/will comment out.
            // Note: this local variable shadows the 'out' field declared above.
            String out = "";
            for (Map.Entry<String, List<Set<String>>> i : topicPubSub.entrySet()) {
                out = out + "Topic: " + i.getKey() + "\n";
                List<Set<String>> temp = i.getValue();
                out = out + "Publishers: \n";
                for (String j : temp.get(0)) {
                    out = out + j + "\n";
                }
                out = out + "Subscribers: \n";
                for (String j : temp.get(1)) {
                    out = out + j + "\n";
                }
            }
            log.info(out);
            Thread.sleep(1000);
        }
    });
}
From source file:io.smartspaces.system.bootstrap.osgi.GeneralSmartSpacesSupportActivator.java
/**
 * Set up the full ROS environment.
 *
 * @param containerProperties
 *          properties for configuration
 * @param log
 *          logger to use
 */
private void setupRosEnvironment(Map<String, String> containerProperties, Log log) {
    RosLogFactory.setLog(log);
    rosEnvironment = new SimpleRosEnvironment();
    rosEnvironment.setExecutorService(executorService);
    rosEnvironment.setLog(spaceEnvironment.getLog());
    rosEnvironment.setMaster(SmartSpacesEnvironment.CONFIGURATION_VALUE_CONTAINER_TYPE_MASTER
            .equals(containerProperties.get(SmartSpacesEnvironment.CONFIGURATION_NAME_CONTAINER_TYPE)));
    rosEnvironment
            .setNetworkType(containerProperties.get(SmartSpacesEnvironment.CONFIGURATION_NAME_NETWORK_TYPE));

    for (Entry<String, String> entry : containerProperties.entrySet()) {
        rosEnvironment.setProperty(entry.getKey(), entry.getValue());
    }

    configureRosFromSmartspaces(containerProperties);

    // Want to start Smart Spaces with no master URI unless there was
    // one in the config properties.
    rosEnvironment.setMasterUri(null);
    rosEnvironment.startup();

    containerManagedScope.addResource(new ManagedResource() {
        @Override
        public void startup() {
            // Won't be calling startup
        }

        @Override
        public void shutdown() {
            rosEnvironment.shutdown();
        }
    });

    MasterUriProvider baseProvider = null;
    URI masterUri = rosEnvironment.getMasterUri();
    if (masterUri != null) {
        log.info(String.format("Have initial ROS Master URI %s", masterUri));
        baseProvider = new StaticMasterUriProvider(masterUri);
    }

    masterUriProvider = new SwitchableMasterUriProvider(baseProvider);
    rosEnvironment.setMasterUriProvider(masterUriProvider);
}
From source file:com.diversityarrays.kdxplore.trials.TrialExplorerPanel.java
private void initClientLog(DALClient client) {
    if (Boolean.getBoolean("log_dalclient")) {
        Log log = org.apache.commons.logging.LogFactory.getLog("dalclient");
        log.info("==== Starting log_dalclient ====");
        client.setLog(log);
    }
}
From source file:com.CodeSeance.JSeance.CodeGenXML.Runtime.java
public String run(File includesDir, File modelsDir, File targetDir, List<File> templateFiles, Logger externalLog) {
    errors = false;
    Log log = LogFactory.getLog("Runtime");
    StringBuffer buffer = new StringBuffer();
    if (!((targetDir.exists() && targetDir.isDirectory()) || targetDir.mkdirs())) {
        String message = ExecutionError.INVALID_TARGET_DIR.getMessage(targetDir);
        externalLog.errorMessage(message);
        log.error(message);
        return null;
    }
    DependencyManager dependencyManager = new DependencyManager(targetDir);
    // access non-option arguments and generate the templates
    for (File templateFile : templateFiles) {
        TemplateDependencies templateDependencies = dependencyManager.getTemplateDependencies(templateFile);
        // Track the processing time
        externalLog.infoMessage(String.format("Processing template file:[%s]", templateFile.toString()));
        long startMillis = System.currentTimeMillis();
        if (!dependencyManager.getTemplateDependencies(templateFile).isUpToDate() || forceRebuild) {
            dependencyManager.clearTemplateDependencies(templateFile);
            try {
                String result = Template.run(templateFile, includesDir, modelsDir, targetDir,
                        ignoreReadOnlyOuputFiles, templateDependencies);
                buffer.append(result);
                dependencyManager.commit();
            } catch (Exception ex) {
                errors = true;
                externalLog.errorMessage(ex.getMessage());
                log.error(ex.getMessage());
            }
        } else {
            String message = String.format(
                    "File dependencies are up to date, skipping template generation:[%s]", templateFile);
            externalLog.infoMessage(message);
            log.info(message);
        }
        long elapsedMillis = System.currentTimeMillis() - startMillis;
        externalLog.infoMessage(String.format("Completed in :[%s] ms", elapsedMillis));
    }
    return buffer.toString();
}
From source file:com.dhcc.framework.web.context.DhccContextLoader.java
protected void configureAndRefreshWebApplicationContext(ConfigurableWebApplicationContext wac,
        ServletContext sc) {
    Log logger = LogFactory.getLog(DhccContextLoader.class);
    if (ObjectUtils.identityToString(wac).equals(wac.getId())) {
        // The application context id is still set to its original default value
        // -> assign a more useful id based on available information
        String idParam = sc.getInitParameter(CONTEXT_ID_PARAM);
        if (idParam != null) {
            wac.setId(idParam);
        } else {
            // Generate default id...
            if (sc.getMajorVersion() == 2 && sc.getMinorVersion() < 5) {
                // Servlet <= 2.4: resort to name specified in web.xml, if any.
                wac.setId(ConfigurableWebApplicationContext.APPLICATION_CONTEXT_ID_PREFIX
                        + ObjectUtils.getDisplayString(sc.getServletContextName()));
            } else {
                wac.setId(ConfigurableWebApplicationContext.APPLICATION_CONTEXT_ID_PREFIX
                        + ObjectUtils.getDisplayString(sc.getContextPath()));
            }
        }
    }
    wac.setServletContext(sc);
    String initParameter = sc.getInitParameter(CONFIG_LOCATION_PARAM);
    if (isMicrokernelStart(sc)) {
        initParameter = "classpath:codeTemplate/applicationSetupContext.xml";
        logger.error("because can't connect to db or setup flag is 0, so init application as Microkernel");
    } else {
        logger.info("initParameter==" + initParameter);
    }
    if (initParameter != null) {
        wac.setConfigLocation(initParameter);
    }
    customizeContext(sc, wac);
    wac.refresh();
}
From source file:de.ingrid.iplug.csw.dsc.cache.impl.AbstractUpdateStrategy.java
/**
 * Fetch all records that satisfy the given filter using GetRecords,
 * return their ids, and put them into the cache.
 * @note This method guarantees to query the server without a constraint if the
 * provided filter set is empty
 *
 * @param client The CSWClient to use
 * @param elementSetName The ElementSetName of the records to fetch
 * @param filterSet The filter set used to select the records
 * @param doCache Determines whether to cache the record or not
 * @return A list of ids of the fetched records
 * @throws Exception
 */
protected List<String> fetchRecords(CSWClient client, ElementSetName elementSetName, Set<Document> filterSet,
        boolean doCache) throws Exception {
    CSWFactory factory = client.getFactory();
    Log log = this.getLog();

    // if the filter set is empty, we add a null at least;
    // this causes execution of the iteration below, but
    // will not add a constraint definition to the request
    if (filterSet == null)
        filterSet = new HashSet<Document>();
    if (filterSet.size() == 0)
        filterSet.add(null);

    // variables for complete fetch process
    // int numTotal = 0;
    List<String> fetchedRecordIds = new CopyOnWriteArrayList<String>();

    // iterate over all filters
    int filterIndex = 1;
    for (Document filter : filterSet) {
        if (log.isDebugEnabled())
            log.debug("Processing filter " + filterIndex + ": "
                    + StringUtils.nodeToString(filter).replace("\n", "") + ".");

        // variables for current fetch process (current filter)
        int numRecordsTotal = 0;
        int numRecordsFetched = 0;
        List<String> currentFetchedRecordIds = new ArrayList<String>();

        // create the query
        CSWQuery query = factory.createQuery();
        query.setConstraint(filter);
        query.setResultType(ResultType.RESULTS);
        query.setElementSetName(elementSetName);
        query.setMaxRecords(this.recordsPerCall);
        query.setStartPosition(1);

        // do requests

        // do first request
        CSWSearchResult result = client.getRecords(query);
        numRecordsFetched += result.getNumberOfRecords();
        numRecordsTotal = result.getNumberOfRecordsTotal();
        if (log.isInfoEnabled())
            log.info(numRecordsTotal + " record(s) from filter " + filterIndex + ":");

        if (numRecordsTotal > 0) {
            if (log.isInfoEnabled()) {
                log.info("\nPARAMETERS OF FETCHING PROCESS:" + "\nrecords per chunk (request): "
                        + recordsPerCall + "\ngeneral pause between requesting next chunk (msec): "
                        + requestPause + "\nnum retries per chunk: " + cswConfig.numRetriesPerRequest
                        + "\npause between retries (msec): " + cswConfig.timeBetweenRetries
                        + "\nmax number of lost chunks: " + cswConfig.maxNumSkippedRequests);
            }

            // process
            currentFetchedRecordIds.addAll(processResult(result, doCache));

            int numSkippedRequests = 0;
            String logLostRecordChunks = "";
            int numLostRecords = 0;
            while (numRecordsFetched < numRecordsTotal) {
                if (cswConfig.maxNumSkippedRequests > -1) {
                    // fetching should end when a maximum number of failures (in a row) is reached.
                    if (numSkippedRequests > cswConfig.maxNumSkippedRequests) {
                        log.error("Problems fetching records. Total number of skipped requests reached ("
                                + cswConfig.maxNumSkippedRequests
                                + " requests without results). We end fetching process for this filter.");
                        statusProvider.addState("ERROR_FETCH",
                                "Error during fetch, since more than " + cswConfig.maxNumSkippedRequests
                                        + " records have been skipped.",
                                Classification.ERROR);
                        break;
                    }
                }

                // generic pause between requests, set via spring
                Thread.sleep(this.requestPause);

                String logCurrRecordChunk = "";
                try {
                    // prepare next request
                    // Just for safety: get number of last fetched records from last result,
                    // if we have a result and records.
                    int numLastFetch = query.getMaxRecords();
                    if (result != null && (result.getNumberOfRecords() > 0)) {
                        numLastFetch = result.getNumberOfRecords();
                    }
                    numRecordsFetched += numLastFetch;
                    statusProvider.addState("FETCH",
                            "Fetching record " + (numRecordsFetched - numLastFetch + 1) + "-"
                                    + numRecordsFetched + " / " + numRecordsTotal + " from "
                                    + client.getFactory().getServiceUrl());
                    query.setStartPosition(query.getStartPosition() + numLastFetch);

                    // for logging below
                    logCurrRecordChunk = "" + query.getStartPosition() + " - "
                            + (query.getStartPosition() + query.getMaxRecords());

                    // do next request, if problems retry with increasing pause in between
                    int numRetries = 0;
                    while (true) {
                        try {
                            result = null;
                            result = client.getRecords(query);
                            break;
                        } catch (Exception e) {
                            if (numRetries == cswConfig.numRetriesPerRequest) {
                                log.error("Retried " + numRetries + " times ! We skip records "
                                        + logCurrRecordChunk, e);
                                break;
                            }
                            numRetries++;
                            int timeBetweenRetry = numRetries * cswConfig.timeBetweenRetries;
                            log.error("Error fetching records " + logCurrRecordChunk + ". We retry "
                                    + numRetries + ". time after " + timeBetweenRetry + " msec !", e);
                            Thread.sleep(timeBetweenRetry);
                        }
                    }

                    // process
                    if (result == null || result.getNumberOfRecords() == 0) {
                        // no result from this query, we count the failures to check whether
                        // fetching process should be ended !
                        numSkippedRequests++;
                        numLostRecords += query.getMaxRecords();
                        logLostRecordChunks += logCurrRecordChunk + "\n";
                    } else {
                        currentFetchedRecordIds.addAll(processResult(result, doCache));
                    }
                } catch (Exception e) {
                    statusProvider.addState("ERROR_FETCH_PROCESS",
                            "Error during processing record: " + logCurrRecordChunk, Classification.ERROR);
                    log.error("Error processing records " + logCurrRecordChunk);
                    log.error(ExceptionUtils.getStackTrace(e));
                }
            }

            if (numLostRecords > 0) {
                statusProvider.addState("ERROR_FETCH_PROCESS",
                        "Error during fetching of record: " + logLostRecordChunks, Classification.ERROR);
                log.error("\nWe had failed GetRecords requests !!!" + "\nThe following " + numLostRecords
                        + " records were NOT fetched and are \"lost\":" + "\n" + logLostRecordChunks);
            }
        }

        // collect record ids
        fetchedRecordIds.addAll(currentFetchedRecordIds);
        // numTotal += currentFetchedRecordIds.size();
        filterIndex++;
    }
    return fetchedRecordIds;
}
From source file:com.cisco.dvbu.ps.common.util.CommonUtils.java
public static void writeOutput(String message, String prefix, String options, Log logger, boolean debug1,
        boolean debug2, boolean debug3) {
    // Determine if there is a prefix to prepend
    if (prefix == null) {
        prefix = "";
    } else {
        prefix = prefix + "::";
    }

    // Write out the log if not suppressed
    if (!options.contains("-suppress")) {
        // Write to log when -error
        if (options.contains("-error")) {
            if (logger.isErrorEnabled()) {
                logger.error(prefix + message);
            }
        }
        // Write to log when -info
        if (options.contains("-info")) {
            if (logger.isInfoEnabled()) {
                logger.info(prefix + message);
            }
        }
        // Write to log when -debug1
        if (options.contains("-debug1") && debug1) {
            // logger.isInfoEnabled() is checked on purpose. Don't change it.
            if (logger.isInfoEnabled()) {
                logger.info("DEBUG1::" + prefix + message);
            }
        }
        // Write to log when -debug2
        if (options.contains("-debug2") && debug2) {
            // logger.isInfoEnabled() is checked on purpose. Don't change it.
            if (logger.isInfoEnabled()) {
                logger.info("DEBUG2::" + prefix + message);
            }
        }
        // Write to log when -debug3
        if (options.contains("-debug3") && debug3) {
            // logger.isInfoEnabled() is checked on purpose. Don't change it.
            if (logger.isInfoEnabled()) {
                logger.info("DEBUG3::" + prefix + message);
            }
        }
    }
}
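For context, a hypothetical call based on the signature above (the message, prefix, and option string are illustrative, not taken from the original source):

// Hypothetical: logs "MyModule::Connected to server" at INFO level,
// because options contains "-info" and does not contain "-suppress".
CommonUtils.writeOutput("Connected to server", "MyModule", "-info",
        LogFactory.getLog(CommonUtils.class), false, false, false);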
From source file:edu.stanford.muse.util.Util.java
/** cleans up files in directory with the given suffix */
public static void deleteAllFilesWithSuffix(String dir, String suffix, Log log)
        throws IOException, ClassNotFoundException {
    if (dir == null)
        return;
    File cache = new File(dir);
    if (!cache.exists())
        return; // empty result

    File[] files = new File(dir).listFiles(new Util.MyFilenameFilter(null, suffix));
    if (files != null)
        for (File f : files) {
            boolean success = f.delete();
            if (log != null) {
                if (success)
                    log.info("Deleted file: " + f.getName());
                else
                    log.warn("Failed to delete file: " + f.getName());
            }
        }
}
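A hypothetical invocation (the directory and suffix are illustrative); note that the log parameter is null-safe, so passing null suppresses the per-file messages:

// Hypothetical: deletes all ".cache" files under /tmp/muse,
// logging each deletion at INFO and each failure at WARN.
Util.deleteAllFilesWithSuffix("/tmp/muse", ".cache", LogFactory.getLog(Util.class));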
From source file:com.amazon.carbonado.repo.indexed.ManagedIndex.java
/**
 * Build the entire index, repairing as it goes.
 *
 * @param repo used to enter transactions
 */
void buildIndex(double desiredSpeed) throws RepositoryException {
    final MergeSortBuffer buffer;
    final Comparator c;
    final Log log = LogFactory.getLog(IndexedStorage.class);

    final Query<S> masterQuery;
    {
        // Need to explicitly order master query by primary key in order
        // for fetchAfter to work correctly in case corrupt records are
        // encountered.
        masterQuery = mMasterStorage.query().orderBy(naturalOrdering(mMasterStorage.getStorableType()));
    }

    // Quick check to see if any records exist in master.
    {
        Transaction txn = mRepository.enterTopTransaction(IsolationLevel.READ_COMMITTED);
        try {
            if (!masterQuery.exists()) {
                if (mIndexEntryStorage.query().exists()) {
                    txn.exit();
                    mIndexEntryStorage.truncate();
                }
                return;
            }
        } finally {
            txn.exit();
        }
    }

    // Enter top transaction with isolation level of none to make sure
    // preload operation does not run in a long nested transaction.
    Transaction txn = mRepository.enterTopTransaction(IsolationLevel.NONE);
    try {
        Cursor<S> cursor = masterQuery.fetch();
        try {
            if (log.isInfoEnabled()) {
                StringBuilder b = new StringBuilder();
                b.append("Preparing index on ");
                b.append(mMasterStorage.getStorableType().getName());
                b.append(": ");
                try {
                    mIndex.appendTo(b);
                } catch (java.io.IOException e) {
                    // Not gonna happen.
                }
                log.info(b.toString());
            }

            // Preload and sort all index entries for improved performance.
            buffer = new MergeSortBuffer(mIndexEntryStorage, null, BUILD_SORT_BUFFER_SIZE);
            c = getComparator();
            buffer.prepare(c);

            long nextReportTime = System.currentTimeMillis() + BUILD_INFO_DELAY_MILLIS;

            // These variables are used when corrupt records are encountered.
            S lastUserStorable = null;
            int skippedCount = 0;

            while (cursor.hasNext()) {
                S userStorable;
                try {
                    userStorable = cursor.next();
                    skippedCount = 0;
                } catch (CorruptEncodingException e) {
                    log.warn("Omitting corrupt record from index: " + e.toString());

                    // Exception forces cursor to close. Close again to be sure.
                    cursor.close();

                    if (lastUserStorable == null) {
                        cursor = masterQuery.fetch();
                    } else {
                        cursor = masterQuery.fetchAfter(lastUserStorable);
                    }

                    cursor.skipNext(++skippedCount);
                    continue;
                }

                buffer.add(makeIndexEntry(userStorable));

                if (log.isInfoEnabled()) {
                    long now = System.currentTimeMillis();
                    if (now >= nextReportTime) {
                        log.info("Prepared " + buffer.size() + " index entries");
                        nextReportTime = now + BUILD_INFO_DELAY_MILLIS;
                    }
                }

                lastUserStorable = userStorable;
            }

            // No need to commit transaction because no changes should have been made.
        } finally {
            cursor.close();
        }
    } finally {
        txn.exit();
    }

    // This is not expected to take long, since MergeSortBuffer sorts as
    // needed. This just finishes off what was not written to a file.
    buffer.sort();

    if (isUnique()) {
        // If index is unique, scan buffer and check for duplicates
        // _before_ inserting index entries. If there are duplicates,
        // fail, since unique index cannot be built.

        if (log.isInfoEnabled()) {
            log.info("Verifying index");
        }

        Object last = null;
        for (Object obj : buffer) {
            if (last != null) {
                if (c.compare(last, obj) == 0) {
                    buffer.close();
                    throw new UniqueConstraintException("Cannot build unique index because duplicates exist: "
                            + this + ", " + last + " == " + obj);
                }
            }
            last = obj;
        }
    }

    final int bufferSize = buffer.size();

    if (log.isInfoEnabled()) {
        log.info("Begin build of " + bufferSize + " index entries");
    }

    // Need this index entry query for deleting bogus entries.
    final Query indexEntryQuery = mIndexEntryStorage.query()
            .orderBy(naturalOrdering(mIndexEntryStorage.getStorableType()));

    Throttle throttle = desiredSpeed < 1.0 ? new Throttle(BUILD_THROTTLE_WINDOW) : null;

    long totalInserted = 0;
    long totalUpdated = 0;
    long totalDeleted = 0;
    long totalProgress = 0;

    txn = enterBuildTxn();
    try {
        Cursor<? extends Storable> indexEntryCursor = indexEntryQuery.fetch();
        Storable existingIndexEntry = null;

        if (!indexEntryCursor.hasNext()) {
            indexEntryCursor.close();
            // Don't try opening again.
            indexEntryCursor = null;
        }

        boolean retry = false;
        Storable indexEntry = null;
        Storable lastIndexEntry = null;

        long nextReportTime = System.currentTimeMillis() + BUILD_INFO_DELAY_MILLIS;

        Iterator it = buffer.iterator();
        bufferIterate: while (true) {
            if (!retry) {
                Object obj;
                if (it.hasNext()) {
                    obj = it.next();
                } else if (indexEntryCursor != null && indexEntryCursor.hasNext()) {
                    obj = null;
                } else {
                    break;
                }
                indexEntry = (Storable) obj;
            }

            try {
                if (indexEntry != null) {
                    if (indexEntry.tryInsert()) {
                        totalInserted++;
                    } else {
                        // Couldn't insert because an index entry already exists.
                        Storable existing = indexEntry.copy();
                        boolean doUpdate = false;
                        if (!existing.tryLoad()) {
                            doUpdate = true;
                        } else if (!existing.equalProperties(indexEntry)) {
                            // If only the version differs, leave existing entry alone.
                            indexEntry.copyVersionProperty(existing);
                            doUpdate = !existing.equalProperties(indexEntry);
                        }
                        if (doUpdate) {
                            indexEntry.tryDelete();
                            indexEntry.tryInsert();
                            totalUpdated++;
                        }
                    }
                }

                if (indexEntryCursor != null)
                    while (true) {
                        if (existingIndexEntry == null) {
                            if (indexEntryCursor.hasNext()) {
                                existingIndexEntry = indexEntryCursor.next();
                            } else {
                                indexEntryCursor.close();
                                // Don't try opening again.
                                indexEntryCursor = null;
                                break;
                            }
                        }

                        int compare = c.compare(existingIndexEntry, indexEntry);

                        if (compare == 0) {
                            // Existing entry cursor matches so allow cursor to advance.
                            existingIndexEntry = null;
                            break;
                        } else if (compare > 0) {
                            // Existing index entry is ahead so check later.
                            break;
                        } else {
                            // Existing index entry might be bogus. Check again
                            // in case master record changed.
                            doDelete: {
                                S master = mMasterStorage.prepare();
                                copyToMasterPrimaryKey(existingIndexEntry, master);
                                if (master.tryLoad()) {
                                    Storable temp = makeIndexEntry(master);
                                    existingIndexEntry.copyVersionProperty(temp);
                                    if (existingIndexEntry.equalProperties(temp)) {
                                        break doDelete;
                                    }
                                }

                                existingIndexEntry.tryDelete();
                                totalDeleted++;

                                if (totalDeleted % BUILD_BATCH_SIZE == 0) {
                                    txn.commit();
                                    txn.exit();

                                    nextReportTime = logProgress(nextReportTime, log, totalProgress,
                                            bufferSize, totalInserted, totalUpdated, totalDeleted);

                                    txn = enterBuildTxn();

                                    indexEntryCursor.close();
                                    indexEntryCursor = indexEntryQuery.fetchAfter(existingIndexEntry);

                                    if (!indexEntryCursor.hasNext()) {
                                        indexEntryCursor.close();
                                        // Don't try opening again.
                                        indexEntryCursor = null;
                                        break;
                                    }
                                }
                            }

                            existingIndexEntry = null;

                            throttle(throttle, desiredSpeed);
                        }
                    }

                if (indexEntry != null) {
                    totalProgress++;
                }

                lastIndexEntry = indexEntry;
                retry = false;
            } catch (RepositoryException e) {
                if (e instanceof FetchTimeoutException || e instanceof PersistTimeoutException) {
                    log.warn("Lock conflict during index repair; will retry: " + indexEntry + ", " + e);
                    // This re-uses the last index entry to repair and forces
                    // the current transaction to commit.
                    retry = true;
                } else {
                    throw e;
                }
            }

            if (retry || (totalProgress % BUILD_BATCH_SIZE == 0)) {
                txn.commit();
                txn.exit();

                nextReportTime = logProgress(nextReportTime, log, totalProgress, bufferSize, totalInserted,
                        totalUpdated, totalDeleted);

                txn = enterBuildTxn();

                if (indexEntryCursor != null) {
                    indexEntryCursor.close();
                    existingIndexEntry = null;
                    if (indexEntry == null || lastIndexEntry == null) {
                        indexEntryCursor = indexEntryQuery.fetch();
                    } else if (!retry) {
                        indexEntryCursor = indexEntryQuery.fetchAfter(indexEntry);
                    } else {
                        // Re-fetch starting at the same spot.
                        indexEntryCursor = indexEntryQuery.fetchAfter(lastIndexEntry);
                    }
                }
            }

            throttle(throttle, desiredSpeed);
        }

        txn.commit();
    } finally {
        txn.exit();
        buffer.close();
    }

    if (log.isInfoEnabled()) {
        log.info("Finished building " + totalProgress + " index entries "
                + progressSubMessgage(totalInserted, totalUpdated, totalDeleted));
    }
}