Example usage for com.google.common.base Stopwatch createStarted

Introduction

On this page you can find example usages for com.google.common.base Stopwatch#createStarted.

Prototype

@CheckReturnValue
public static Stopwatch createStarted() 

Document

Creates (and starts) a new stopwatch using System#nanoTime as its time source.
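
Before the real-world examples, here is a minimal, self-contained sketch of the pattern they all share (doWork is a hypothetical placeholder for the code being measured):

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class StopwatchDemo {

    public static void main(String[] args) {
        // createStarted() returns a Stopwatch that is already running.
        Stopwatch stopwatch = Stopwatch.createStarted();

        doWork();

        // elapsed(...) can be read while the stopwatch is still running;
        // toString() reports the elapsed time in a human-readable unit.
        System.out.println("doWork took " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms (" + stopwatch + ")");
    }

    // stand-in workload, only here to have something to time
    private static void doWork() {
        for (int i = 0; i < 1_000_000; i++) {
            Math.sqrt(i);
        }
    }
}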

Usage

From source file:org.apache.drill.exec.store.parquet.columnreaders.BatchReader.java

protected void readAllFixedFields(long recordsToRead) throws Exception {
    Stopwatch timer = Stopwatch.createStarted();
    if (readState.useAsyncColReader()) {
        readAllFixedFieldsParallel(recordsToRead);
    } else {
        readAllFixedFieldsSerial(recordsToRead);
    }
    readState.parquetReaderStats().timeFixedColumnRead.addAndGet(timer.elapsed(TimeUnit.NANOSECONDS));
}

From source file:com.facebook.buck.cli.DistBuildRunCommand.java

@Override
public int runWithoutHelp(CommandRunnerParams params) throws IOException, InterruptedException {
    Stopwatch stopwatch = Stopwatch.createStarted();
    Console console = params.getConsole();
    try (DistBuildService service = DistBuildFactory.newDistBuildService(params)) {
        Pair<BuildJobState, String> jobStateAndBuildName = getBuildJobStateAndBuildName(
                params.getCell().getFilesystem(), console, service);
        BuildJobState jobState = jobStateAndBuildName.getFirst();
        String buildName = jobStateAndBuildName.getSecond();

        console.getStdOut().println(
                String.format("BuildJob depends on a total of [%d] input deps.", jobState.getFileHashesSize()));
        try (CommandThreadManager pool = new CommandThreadManager(getClass().getName(),
                getConcurrencyLimit(params.getBuckConfig()))) {
            DistBuildSlaveExecutor distBuildExecutor = DistBuildFactory.createDistBuildExecutor(jobState,
                    params, pool.getExecutor(), service);
            int returnCode = distBuildExecutor.buildAndReturnExitCode();
            console.printSuccess(String.format("Successfully ran distributed build [%s] in [%d millis].",
                    buildName, stopwatch.elapsed(TimeUnit.MILLISECONDS)));
            return returnCode;
        }
    }
}

From source file:io.ecarf.core.cloud.task.processor.files.ProcessFilesTask.java

@Override
public void run() throws IOException {

    log.info("START: processing files");

    Stopwatch stopwatch = Stopwatch.createStarted();

    Set<String> filesSet = ObjectUtils.csvToSet(files);
    log.info("Processing files: " + filesSet);

    List<Callable<T>> tasks = getSubTasks(filesSet);

    int processors = Runtime.getRuntime().availableProcessors();

    try {

        // check if we only have one file to process
        if (tasks.size() == 1) {

            this.processSingleOutput(tasks.get(0).call());

        } else if (processors == 1) {
            // only one process then process synchronously
            List<T> output = new ArrayList<>();
            for (Callable<T> task : tasks) {
                output.add(task.call());
            }

            this.processMultiOutput(output);

        } else {

            // multiple cores
            ExecutorService executor = Utils.createFixedThreadPool(processors);

            try {

                List<Future<T>> results = executor.invokeAll(tasks);
                List<T> output = new ArrayList<>();

                for (Future<T> result : results) {
                    output.add(result.get());
                }

                this.processMultiOutput(output);

            } finally {
                executor.shutdown();
            }
        }

    } catch (Exception e) {
        log.error("Failed to process multiple files", e);
        throw new IOException(e);

    }

    log.info("TIMER# All files are processed successfully, elapsed time: " + stopwatch);
}

From source file:io.druid.segment.LoggingProgressIndicator.java

@Override
public void startSection(String section) {
    log.info("[%s]: Starting [%s]", progressName, section);

    Stopwatch sectionWatch = sections.get(section);
    if (sectionWatch != null) {
        throw new ISE("[%s]: Cannot start progress tracker for [%s]. It is already started.", progressName,
                section);
    }
    sectionWatch = Stopwatch.createStarted();
    sections.put(section, sectionWatch);
}

From source file:org.factcast.store.pgsql.internal.catchup.PgCatchUpPrepare.java

@SuppressWarnings("ConstantConditions")
public long prepareCatchup(AtomicLong serial) {
    PgQueryBuilder b = new PgQueryBuilder(req);
    long clientId = jdbc.queryForObject(PgConstants.NEXT_FROM_CATCHUP_SEQ, Long.class);
    String catchupSQL = b.catchupSQL(clientId);
    // noinspection ConstantConditions
    return jdbc.execute(catchupSQL, (PreparedStatementCallback<Long>) ps -> {
        log.debug("{} preparing paging for matches after {}", req, serial.get());
        try {
            Stopwatch sw = Stopwatch.createStarted();
            b.createStatementSetter(serial).setValues(ps);
            int numberOfFactsToCatchup = ps.executeUpdate();
            sw.stop();
            if (numberOfFactsToCatchup > 0) {
                log.debug("{} prepared {} facts for cid={} in {}ms", req, numberOfFactsToCatchup, clientId,
                        sw.elapsed(TimeUnit.MILLISECONDS));
                return clientId;
            } else {
                log.debug("{} nothing to catch up", req);
                return 0L;
            }
        } catch (SQLException ex) {
            log.error("While trying to prepare catchup", ex);
            throw ex;
        }
    });
}

From source file:com.tkmtwo.sarapi.ArsTemplate.java

public <T> T execute(ARServerUserCallback<T> action, String description) throws DataAccessException {

    Stopwatch sw = Stopwatch.createStarted();
    T result = execute(action);
    sw.stop();

    logger.trace("Executed callback {} in {}", description, sw.toString());
    return result;
}

From source file:co.mitro.core.servlets.GetOrganizationState.java

public static GetOrganizationStateResponse doOperation(MitroRequestContext context, int orgId)
        throws MitroServletException, SQLException {
    Stopwatch stopwatch = Stopwatch.createStarted();
    GetOrganizationStateResponse out = new GetOrganizationStateResponse();

    @SuppressWarnings("deprecation")
    AuthenticatedDB userDb = AuthenticatedDB.deprecatedNew(context.manager, context.requestor);
    DBGroup org = userDb.getOrganizationAsMember(orgId);
    assert (null != org);

    Set<String> users = Sets.newHashSet();
    Set<Integer> groupIds = ListMySecretsAndGroupKeys.getGroupsUsersAndOrgsFromRawStatement(context, null,
            org.getId(), out.groups, out.organizations, null, users);

    // prevent users who are members of org groups but not members of the org from being returned as members.
    Set<String> orgMembers = DBIdentity.getUserNamesFromIds(context.manager,
            MutateOrganization.getMemberIdsAndPrivateGroupIdsForOrg(context.manager, org).keySet());
    out.members = Lists.newArrayList(Sets.intersection(orgMembers, users));

    Set<Integer> orgAdmins = Sets.newHashSet();
    org.putDirectUsersIntoSet(orgAdmins, DBAcl.adminAccess());
    out.admins = Lists.newArrayList(DBIdentity.getUserNamesFromIds(context.manager, orgAdmins));

    // all users get a list of THEIR secrets
    if (userDb.isOrganizationAdmin(orgId)) {
        // if user is admin: get list of secrets
        // TODO: audit log is super slow; this should move to its own API call?
        final IncludeAuditLogInfo GET_AUDIT = IncludeAuditLogInfo.NO_AUDIT_LOG_INFO;
        groupIds.add(org.getId());
        ListMySecretsAndGroupKeys.getSecretInfo(context, AdminAccess.FORCE_ACCESS_VIA_TOPLEVEL_GROUPS,
                out.orgSecretsToPath, groupIds, null, GET_AUDIT);

        // any org secrets with no users, no hidden groups and only the org group are orphaned.
        out.orphanedSecretsToPath = Maps.newHashMap();
        for (Iterator<Entry<Integer, SecretToPath>> iter = out.orgSecretsToPath.entrySet().iterator(); iter
                .hasNext();) {
            Entry<Integer, SecretToPath> entry = iter.next();
            SecretToPath stp = entry.getValue();
            if (stp.users.isEmpty() && stp.hiddenGroups.isEmpty() && stp.groups.size() == 1) {
                // this is an orphaned secret
                assert (stp.groups.get(0) == org.getId());
                out.orphanedSecretsToPath.put(entry.getKey(), entry.getValue());
                // remove orphaned secrets from regular org secrets
                iter.remove();
            }
        }
    } else {
        // these variables are not filled; set to null so the caller doesn't rely on them
        out.orgSecretsToPath = null;
        out.orphanedSecretsToPath = null;
        out.organizations = null;

        // Remove private data from groups: membership; encrypted keys
        for (GroupInfo group : out.groups.values()) {
            group.users = null;
            group.encryptedPrivateKey = null;
        }
    }

    logger.info("{} elapsed: {} ms:", context.requestor, stopwatch.elapsed(TimeUnit.MILLISECONDS));
    return out;
}

From source file:qa.qcri.nadeef.core.util.CSVTools.java

/**
 * Append CSV file content into a database table.
 * @param tableName target table name.
 * @param dbConfig DB connection config.
 * @param file CSV file.
 * @return the set of tids (tuple ids) for the newly appended rows.
 */
public static HashSet<Integer> append(DBConfig dbConfig, SQLDialectBase dialectManager, String tableName,
        File file) throws Exception {
    Preconditions.checkNotNull(dbConfig);
    Preconditions.checkNotNull(dialectManager);

    Tracer tracer = Tracer.getTracer(CSVTools.class);
    Stopwatch stopwatch = Stopwatch.createStarted();
    HashSet<Integer> result = Sets.newHashSet();
    try {
        boolean hasTableExist = DBMetaDataTool.isTableExist(dbConfig, tableName);

        // the target table must already exist.
        if (!hasTableExist) {
            throw new IllegalArgumentException("Table " + tableName + " does not exist.");
        }

        // get the current max tid.
        int startTid = DBMetaDataTool.getMaxTid(dbConfig, tableName) + 1;

        // load the data
        int size = 0;

        if (dialectManager.supportBulkLoad()) {
            size = dialectManager.bulkLoad(dbConfig, tableName, file.toPath(), true);
        } else {
            size = dialectManager.fallbackLoad(dbConfig, tableName, file, true);
        }

        tracer.info("Appended " + size + " bytes in " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms.");
        stopwatch.stop();

        // build the tid set.
        int endTid = DBMetaDataTool.getMaxTid(dbConfig, tableName);
        for (int i = startTid; i <= endTid; i++) {
            result.add(i);
        }

    } catch (Exception ex) {
        tracer.err("Cannot load file " + file.getName(), ex);
    }
    return result;
}

From source file:eu.europa.ec.fisheries.uvms.rules.service.bean.TemplateEngine.java

@PostConstruct
@TransactionAttribute(TransactionAttributeType.NOT_SUPPORTED)
public void initialize() {
    log.info("[START] Initializing templates and rules...");
    try {
        Stopwatch stopwatch = Stopwatch.createStarted();
        List<TemplateRuleMapDto> templatesAndRules = rulesDb.getAllFactTemplatesAndRules();
        ruleEvaluator.initializeRules(templatesAndRules);
        rulesStatusUpdaterBean.updateRulesStatus(ruleEvaluator.getFailedRules());
        log.info("[END] It took " + stopwatch + " to initialize the rules.");
    } catch (RulesModelException e) {
        log.error(e.getMessage(), e);
    }

}

From source file:qa.qcri.nadeef.core.utils.CSVTools.java

/**
 * Append CSV file content into a database table.
 * @param tableName target table name.
 * @param dbConfig DB connection config.
 * @param file CSV file.
 * @return the set of tids (tuple ids) for the newly appended rows.
 */
public static HashSet<Integer> append(DBConfig dbConfig, SQLDialectBase dialectManager, String tableName,
        File file) throws Exception {
    Preconditions.checkNotNull(dbConfig);
    Preconditions.checkNotNull(dialectManager);

    Stopwatch stopwatch = Stopwatch.createStarted();
    HashSet<Integer> result = Sets.newHashSet();
    try {
        boolean hasTableExist = DBMetaDataTool.isTableExist(dbConfig, tableName);

        // the target table must already exist.
        if (!hasTableExist) {
            throw new IllegalArgumentException("Table " + tableName + " does not exist.");
        }

        // get the current max tid.
        int startTid = DBMetaDataTool.getMaxTid(dbConfig, tableName) + 1;

        // load the data
        int size = 0;

        if (dialectManager.supportBulkLoad()) {
            size = dialectManager.bulkLoad(dbConfig, tableName, file.toPath(), true);
        } else {
            size = dialectManager.fallbackLoad(dbConfig, tableName, file, true);
        }

        logger.info("Appended " + size + " bytes in " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms.");
        stopwatch.stop();

        // build the tid set.
        int endTid = DBMetaDataTool.getMaxTid(dbConfig, tableName);
        for (int i = startTid; i <= endTid; i++) {
            result.add(i);
        }

    } catch (Exception ex) {
        logger.error("Cannot load file " + file.getName(), ex);
    }
    return result;
}