Example usage for org.springframework.util StopWatch getTotalTimeSeconds

List of usage examples for org.springframework.util StopWatch getTotalTimeSeconds

Introduction

On this page you can find usage examples for org.springframework.util StopWatch getTotalTimeSeconds.

Prototype

public double getTotalTimeSeconds() 

Document

Get the total time in seconds for all tasks.
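
For orientation, here is a minimal, self-contained sketch of the typical start/stop/read pattern. The class name, task names, and sleep durations are illustrative only and are not taken from any of the projects listed under Usage:

import org.springframework.util.StopWatch;

public class StopWatchDemo {

    public static void main(String[] args) throws InterruptedException {
        StopWatch watch = new StopWatch("demo");

        watch.start("task-1");
        Thread.sleep(100); // simulated work
        watch.stop();

        watch.start("task-2");
        Thread.sleep(200); // more simulated work
        watch.stop();

        // Total time across all completed tasks, reported in seconds
        System.out.println("Total: " + watch.getTotalTimeSeconds() + "s");
    }
}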

Usage

From source file:com.persistent.cloudninja.scheduler.TenantStorageBWProcessor.java

/**
 * Calculates the storage bandwidth of a tenant.
 */
@Override
public boolean execute() {
    StopWatch watch = new StopWatch();
    boolean retVal = true;
    List<StorageBandwidthBatchEntity> batchesToProcess = null;
    try {
        LOGGER.debug("In TenantStorageBWProcessor");
        TenantStorageBWQueue queue = (TenantStorageBWQueue) getWorkQueue();
        batchesToProcess = queue.dequeue();
        if (batchesToProcess == null) {
            retVal = false;
        } else {
            watch.start();
            processBatches(batchesToProcess);
            watch.stop();
            taskCompletionDao.updateTaskCompletionDetails(watch.getTotalTimeSeconds(),
                    "GenerateStorageBandwidth", "Batch Size = " + batchesToProcess.size());
        }
    } catch (Exception e) {
        retVal = false;
        LOGGER.error(e.getMessage(), e);
    }
    return retVal;
}

From source file:org.openmrs.module.diabetesmanagement.web.controller.SimulationFormController.java

/**
 * The onSubmit method receives the form/command object that was modified by the input form and
 * saves it to the database.
 * 
 * @see org.springframework.web.servlet.mvc.SimpleFormController#onSubmit(javax.servlet.http.HttpServletRequest,
 *      javax.servlet.http.HttpServletResponse, java.lang.Object,
 *      org.springframework.validation.BindException)
 * @param request Current servlet request.
 * @param response Current servlet response.
 * @param command Form object with request parameters bound onto it.
 * @param errors Holder for binding errors.
 * @return The prepared model and view, or null.
 * @throws Exception In case of errors.
 */
protected ModelAndView onSubmit(HttpServletRequest request, HttpServletResponse response, Object command,
        BindException errors) throws Exception {
    Context.addProxyPrivilege(OpenmrsConstants.PRIV_VIEW_USERS);
    Context.addProxyPrivilege(OpenmrsConstants.PRIV_VIEW_PATIENTS);
    try {
        if (Context.isAuthenticated()) {
            StopWatch stopwatch = new StopWatch();
            ObjectOutputStream out = null;
            File f = null;
            File root = OpenmrsUtil.getDirectoryInApplicationDataDirectory("diabetesmanagement/simulation");
            String sessionId = request.getSession().getId() + "_";

            // Benchmarking the simulation model run
            Simulation sim = (Simulation) command;
            stopwatch.start();
            sim.runSimulation();
            stopwatch.stop();
            sim.setExecutionTime(stopwatch.getTotalTimeSeconds());

            // Serializing current results, if available
            if (sim.getResultsAvailableCurrent()) {
                // Current plasma glucose
                f = new File(root.getAbsolutePath(), sessionId + FILENAME_PLASMA_GLUCOSE_CURRENT);
                f.delete();
                out = new ObjectOutputStream(new FileOutputStream(f));
                out.writeObject(sim.getResultGlucoseCurrent());
                out.close();
                // Current plasma insulin
                f = new File(root.getAbsolutePath(), sessionId + FILENAME_PLASMA_INSULIN_CURRENT);
                f.delete();
                out = new ObjectOutputStream(new FileOutputStream(f));
                out.writeObject(sim.getResultInsulinCurrent());
                out.close();
                // Current meals
                if (sim.getMealsCurrent() != null) {
                    f = new File(root.getAbsolutePath(), sessionId + FILENAME_MEALS_CURRENT);
                    f.delete();
                    out = new ObjectOutputStream(new FileOutputStream(f));
                    out.writeObject(sim.getMealsCurrent());
                    out.close();
                }
                // Current insulin injections (1)
                if (sim.getInsulinInjections1() != null) {
                    f = new File(root.getAbsolutePath(), sessionId + FILENAME_INJECTIONS_1);
                    f.delete();
                    out = new ObjectOutputStream(new FileOutputStream(f));
                    out.writeObject(sim.getInsulinInjections1());
                    out.close();
                }
                // Current insulin injections (2)
                if (sim.getInsulinInjections2() != null) {
                    f = new File(root.getAbsolutePath(), sessionId + FILENAME_INJECTIONS_2);
                    f.delete();
                    out = new ObjectOutputStream(new FileOutputStream(f));
                    out.writeObject(sim.getInsulinInjections2());
                    out.close();
                }
            }

            // Serializing previous results, if available
            if (sim.getResultsAvailablePrevious()) {
                // Previous plasma glucose
                f = new File(root.getAbsolutePath(), sessionId + FILENAME_PLASMA_GLUCOSE_PREVIOUS);
                f.delete();
                out = new ObjectOutputStream(new FileOutputStream(f));
                out.writeObject(sim.getResultGlucosePrevious());
                out.close();
                // Previous plasma insulin
                f = new File(root.getAbsolutePath(), sessionId + FILENAME_PLASMA_INSULIN_PREVIOUS);
                f.delete();
                out = new ObjectOutputStream(new FileOutputStream(f));
                out.writeObject(sim.getResultInsulinPrevious());
                out.close();
                // Previous meals
                if (sim.getMealsPrevious() != null) {
                    f = new File(root.getAbsolutePath(), sessionId + FILENAME_MEALS_PREVIOUS);
                    f.delete();
                    out = new ObjectOutputStream(new FileOutputStream(f));
                    out.writeObject(sim.getMealsPrevious());
                    out.close();
                }
            }
        }
    } finally {
        Context.removeProxyPrivilege(OpenmrsConstants.PRIV_VIEW_USERS);
        Context.removeProxyPrivilege(OpenmrsConstants.PRIV_VIEW_PATIENTS);
    }

    return showForm(request, response, errors);
}

From source file:com.github.totyumengr.minicubes.core.MiniCubeTest.java

@Test
public void test_5_2_DistinctCount_20140606() throws Throwable {

    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    Map<String, List<Integer>> filter = new HashMap<String, List<Integer>>(1);
    filter.put("tradeId", Arrays.asList(
            new Integer[] { 3205, 3206, 3207, 3208, 3209, 3210, 3212, 3299, 3204, 3203, 3202, 3201, 3211 }));
    Map<Integer, RoaringBitmap> distinct = miniCube.distinct("postId", true, "tradeId", filter);
    stopWatch.stop();

    Assert.assertEquals(13, distinct.size());
    Assert.assertEquals(277, distinct.get(3209).getCardinality());
    Assert.assertEquals(186, distinct.get(3211).getCardinality());
    Assert.assertEquals(464, distinct.get(3206).getCardinality());
    LOGGER.info("{}s used for distinct result {}", stopWatch.getTotalTimeSeconds(), distinct);

}

From source file:com.persistent.cloudninja.scheduler.TenantDBSizeProcessor.java

/**
 * Calculates the size of the tenant database.
 */
@Override
public boolean execute() {

    boolean retVal = true;
    String tenantId = null;
    try {

        LOGGER.debug("In Processor");
        long dbSize = 0;
        TenantDBSizeQueue queue = (TenantDBSizeQueue) getWorkQueue();
        tenantId = queue.dequeue(SchedulerSettings.MessageVisibilityTimeout);
        if (tenantId == null) {
            retVal = false;
            LOGGER.debug("Processor : msg is null");
        } else {
            StopWatch watch = new StopWatch();
            watch.start();
            LOGGER.debug("Processor : msg is " + tenantId);
            dbSize = partitionStatsAndBWUsageDao.getDBSize(tenantId);
            MeteringEntity metering = new MeteringEntity();
            metering.setTenantId(tenantId);

            Calendar calendar = Calendar.getInstance();
            SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.S z");
            dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
            String date = dateFormat.format(calendar.getTime());
            metering.setSnapshotTime(dateFormat.parse(date));

            metering.setDatabaseSize(dbSize);
            meteringDao.add(metering);
            LOGGER.info("Processor : dbSize is " + dbSize);
            watch.stop();
            taskCompletionDao.updateTaskCompletionDetails(watch.getTotalTimeSeconds(),
                    "ProcessMeteringTenantDatabaseSize",
                    "Measured " + dbSize + " for tenant " + tenantId + " database");
        }
    } catch (StorageException e) {
        retVal = false;
        LOGGER.error(e.getMessage(), e);
    } catch (ParseException e) {
        retVal = false;
        LOGGER.error(e.getMessage(), e);
    }
    return retVal;
}

From source file:org.sventon.cache.direntrycache.DirEntryCacheUpdater.java

/**
 * Updates the cache with the given revisions.
 *
 * @param revisionUpdate The updated revisions.
 */
public void update(final RevisionUpdate revisionUpdate) {
    final RepositoryName repositoryName = revisionUpdate.getRepositoryName();

    LOGGER.info("Listener got [" + revisionUpdate.getRevisions().size()
            + "] updated revision(s) for repository: " + repositoryName);

    final StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    SVNConnection connection = null;

    try {
        final DirEntryCache entryCache = cacheManager.getCache(repositoryName);
        final RepositoryConfiguration configuration = application.getConfiguration(repositoryName);
        connection = connectionFactory.createConnection(repositoryName, configuration.getSVNURL(),
                configuration.getCacheCredentials());
        updateInternal(entryCache, connection, revisionUpdate);
    } catch (final Exception ex) {
        LOGGER.warn("Could not update cache instance [" + repositoryName + "]", ex);
    } finally {
        if (connection != null) {
            connection.closeSession();
        }
    }

    stopWatch.stop();
    LOGGER.info("Update completed in [" + stopWatch.getTotalTimeSeconds() + "] seconds");
}

From source file:com.persistent.cloudninja.scheduler.TenantBlobSizeProcessor.java

/**
 * Calculates the combined blob size of the tenant's private and public containers.
 */
@Override
public boolean execute() {
    boolean retVal = true;
    String tenantId = null;
    long blobSize = 0;
    try {
        LOGGER.debug("In Processor");
        TenantBlobSizeQueue queue = (TenantBlobSizeQueue) getWorkQueue();
        tenantId = queue.dequeue(SchedulerSettings.MessageVisibilityTimeout);
        if (tenantId == null) {
            retVal = false;
            LOGGER.debug("Processor : msg is null");
        } else {
            StopWatch watch = new StopWatch();
            watch.start();
            //get the size of blobs in private container.
            blobSize = storageUtility.getContainerSize("tntp-" + tenantId.toLowerCase());
            //get the size of blobs in public container.
            blobSize = blobSize + storageUtility.getContainerSize("tnts-" + tenantId.toLowerCase());
            LOGGER.debug("Processor : msg is " + tenantId);
            MeteringEntity metering = new MeteringEntity();
            metering.setTenantId(tenantId);

            Calendar calendar = Calendar.getInstance();
            SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.S z");
            dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
            String date = dateFormat.format(calendar.getTime());
            metering.setSnapshotTime(dateFormat.parse(date));
            //convert the combined blob size from bytes to kilobytes
            blobSize = blobSize / 1024;
            metering.setBlobStoreUsage(blobSize);
            meteringDao.add(metering);
            LOGGER.info("Processor : blobSize is " + blobSize);
            watch.stop();
            taskCompletionDao.updateTaskCompletionDetails(watch.getTotalTimeSeconds(),
                    "ProcessMeteringBlobSizes", "Measured " + blobSize + " kb for tenant " + tenantId);
        }
    } catch (Exception e) {
        retVal = false;
        LOGGER.error(e.getMessage(), e);
    }
    return retVal;
}

From source file:org.jason.mapmaker.repository.impl.HibernateGenericRepository.java

public void saveList(List<T> objectList) {

    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    // Note: an earlier revision saved each object through sessionFactory.getCurrentSession()
    // and caught DataIntegrityViolationException per object, so that one bad USGS id number
    // would not roll back the entire save; it was replaced by the batched session below.

    Session session = sessionFactory.openSession();
    Transaction tx = session.beginTransaction();
    int i = 1;
    for (T obj : objectList) {
        session.save(obj);
        i++;
        if (i % 100 == 0) {
            // flush and clear the session every 100 objects to keep memory bounded
            session.flush();
            session.clear();
        }
    }

    tx.commit();
    session.close();

    stopWatch.stop();
    log.debug("Persisted " + i + " objects in " + stopWatch.getTotalTimeSeconds() + " seconds");

}

From source file:com.persistent.cloudninja.scheduler.TenantWebBandWidthUsageGenerator.java

@Override
public boolean execute() {
    StopWatch watch = new StopWatch();
    LOGGER.info("Start of TenantWebBandWidthUsageGenerator:execute");
    // Generate Queue
    TenantWebBandWidthUsageQueue queue = (TenantWebBandWidthUsageQueue) getWorkQueue();
    boolean returnFlag = true;
    int batchSize = SchedulerSettings.WEB_BANDWIDTH_USAGE_BATCH_SIZE;
    int numBatchesPrepared = 0;
    int webLogMeteringBatchListSize = 0;

    try {
        watch.start();
        List<WebLogMeteringBatch> webLogMeteringBatchList = tenantWebBandWidthUsageGeneratorUtility
                .retrieveTomcatLogs();
        webLogMeteringBatchListSize = webLogMeteringBatchList.size();
        for (int i = 0; i < webLogMeteringBatchListSize; i = i + batchSize) {

            List<WebLogMeteringBatch> batchToProcess = new ArrayList<WebLogMeteringBatch>();
            // numBatchesPrepared * batchSize equals i at this point, so bound the batch at (i + j)
            for (int j = 0; j < batchSize && (i + j) < webLogMeteringBatchListSize; j++) {

                batchToProcess.add(webLogMeteringBatchList.get(j + i));
            }
            // Enqueue
            queue.enqueue(batchToProcess);
            numBatchesPrepared++;
        }

        LOGGER.debug("TenantWebBandWidthUsageGenerator:execute ---> Number of batched processed"
                + numBatchesPrepared);
        LOGGER.info("End of TenantWebBandWidthUsageGenerator:execute");
        watch.stop();
        taskCompletionDao.updateTaskCompletionDetails(watch.getTotalTimeSeconds(),
                "GenerateMeterWebAppBandwidthWork",
                "Measure " + webLogMeteringBatchListSize + " logs in " + numBatchesPrepared + " batches.");
    } catch (StorageException stgException) {
        returnFlag = false;
        LOGGER.error(stgException.getMessage(), stgException);
    } catch (Exception e) {
        returnFlag = false;
        LOGGER.error(e.getMessage(), e);
    }
    return returnFlag;
}

From source file:com.quartzdesk.executor.core.job.AbstractJob.java

/**
 * The method invoked by the Quartz scheduler.
 *
 * @param context a {@link JobExecutionContext} instance.
 * @throws JobExecutionException if an error occurs while executing the job.
 */
@Override
public final void execute(JobExecutionContext context) throws JobExecutionException {
    String jobFullName = context.getJobDetail().getKey().toString();

    StopWatch sw = new StopWatch();
    sw.start();

    ClassLoader origContextClassLoader = Thread.currentThread().getContextClassLoader();

    try {
        if (log.isInfoEnabled())
            log.info("Started scheduled job: {}", jobFullName);

        if (log.isDebugEnabled()) {
            StringBuilder jobDataMapDump = new StringBuilder();

            // map that contains merged job data from the job detail data map and trigger data map
            JobDataMap jobDataMap = context.getMergedJobDataMap();
            for (Iterator<String> keys = jobDataMap.keySet().iterator(); keys.hasNext();) {
                String key = keys.next();
                String value = CommonUtils.safeToString(jobDataMap.get(key));

                jobDataMapDump.append(key).append('=').append(value);

                if (keys.hasNext())
                    jobDataMapDump.append(CommonConst.NL);
            }

            log.debug("Job data map dump:{}{}", CommonConst.NL, jobDataMapDump.toString());
        }

        // Set the context class loader to be the class loader of the job class.
        // This is a workaround/fix for the WebSphere Application Server where
        // WebSphere work manager threads are typically used to execute jobs.
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());

        executeJob(context);

        sw.stop();

        if (log.isInfoEnabled())
            log.info("Finished scheduled job: {}. Time taken: {}s.", jobFullName, sw.getTotalTimeSeconds());
    } catch (JobExecutionException e) {
        if (log.isErrorEnabled())
            log.error("Error executing scheduled job: " + jobFullName, e);
        throw e;
    } finally {
        // restore the original thread context class loader
        Thread.currentThread().setContextClassLoader(origContextClassLoader);
    }
}

From source file:com.quartzdesk.test.quartz.v2.AbstractJob.java

/**
 * The method invoked by the Quartz scheduler. This method simply delegates the
 * execution to the {@link #executeJob(JobExecutionContext)} method.
 *
 * @param context a {@link JobExecutionContext} instance.
 * @throws JobExecutionException if an error occurs while executing the job.
 */
@Override
public final void execute(JobExecutionContext context) throws JobExecutionException {
    String jobFullName = context.getJobDetail().getKey().toString();
    String triggerFullName = context.getTrigger().getKey().toString();

    StopWatch sw = new StopWatch();
    sw.start();

    ClassLoader origContextClassLoader = Thread.currentThread().getContextClassLoader();

    try {
        if (log.isInfoEnabled())
            log.info("Started scheduled job: {}, fired by trigger: {}", jobFullName, triggerFullName);

        if (log.isDebugEnabled()) {
            StringBuilder jobDataMapDump = new StringBuilder();

            // map that contains merged job data from the job detail data map and trigger data map
            JobDataMap jobDataMap = context.getMergedJobDataMap();
            for (Iterator<String> keys = jobDataMap.keySet().iterator(); keys.hasNext();) {
                String key = keys.next();
                String value = CommonUtils.safeToString(jobDataMap.get(key));

                jobDataMapDump.append(key).append('=').append(value);

                if (keys.hasNext())
                    jobDataMapDump.append(CommonConst.NL);
            }

            log.debug("Job data map dump:{}{}", CommonConst.NL, jobDataMapDump.toString());
        }

        // Set the context class loader to be the class loader of the job class.
        // This is a workaround/fix for a problem with setting the thread's context
        // class loader through Quartz properties when WebSphere work manager threads
        // are used.
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());

        executeJob(context);

        sw.stop();

        if (log.isInfoEnabled())
            log.info("Finished scheduled job: {}. Time taken: {}s.", jobFullName, sw.getTotalTimeSeconds());
    } catch (JobExecutionException e) {
        if (log.isErrorEnabled())
            log.error("Error executing scheduled job: " + jobFullName, e);
        throw e;
    } finally {
        // restore the original thread context class loader
        Thread.currentThread().setContextClassLoader(origContextClassLoader);
    }
}