Example usage for com.google.common.base.Stopwatch: the Stopwatch() constructor

Introduction

This page collects example usages of the com.google.common.base.Stopwatch constructor, Stopwatch(), taken from open-source projects.

Prototype

Stopwatch() 
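
Most of the examples below follow the same basic pattern: construct a Stopwatch, start it, do some work, stop it, and read the elapsed time. As a quick orientation, here is a minimal sketch of that pattern. Note that this no-argument constructor was deprecated in Guava 15.0 in favor of the static factories Stopwatch.createStarted() and Stopwatch.createUnstarted(); the sketch assumes a Guava release (roughly 14.x to 16.x) where the constructor is still public and the elapsed(TimeUnit) accessor is already available. The class name StopwatchSketch is purely illustrative.

import com.google.common.base.Stopwatch;

import java.util.concurrent.TimeUnit;

public class StopwatchSketch {

    public static void main(String[] args) throws InterruptedException {
        // Deprecated pattern used by most examples on this page:
        // construct, start, do work, stop, read the elapsed time.
        Stopwatch stopwatch = new Stopwatch().start();
        TimeUnit.MILLISECONDS.sleep(50); // stand-in for real work
        stopwatch.stop();
        System.out.println("Took " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");

        // Equivalent factory-based pattern on Guava 15.0 and later.
        Stopwatch modern = Stopwatch.createStarted();
        TimeUnit.MILLISECONDS.sleep(50);
        System.out.println("Took " + modern.stop().elapsed(TimeUnit.MILLISECONDS) + " ms");
    }
}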

Usage

From source file:uk.ac.open.kmi.iserve.discovery.disco.impl.SparqlLogicConceptMatcher.java

private Table<URI, URI, MatchResult> queryForMatchResults(String queryStr) {

    ImmutableTable.Builder<URI, URI, MatchResult> result = ImmutableTable.builder();

    // Query the engine
    Query query = QueryFactory.create(queryStr);
    QueryExecution qe = QueryExecutionFactory.sparqlService(this.sparqlEndpoint.toASCIIString(), query);
    MonitoredQueryExecution qexec = new MonitoredQueryExecution(qe);
    try {
        Stopwatch stopwatch = new Stopwatch().start();
        ResultSet qResults = qexec.execSelect();
        stopwatch.stop();
        log.debug("Time taken for querying the registry: {}", stopwatch);

        // Obtain matches if any and figure out the type
        MatchType type;
        URI origin;
        URI destination;
        while (qResults.hasNext()) {
            QuerySolution soln = qResults.nextSolution();
            // Only process if we can get complete match information
            if (soln.contains(ORIGIN_VAR) && soln.contains(DESTINATION_VAR)) {
                type = getMatchType(soln);
                origin = new URI(soln.getResource(ORIGIN_VAR).getURI());
                destination = new URI(soln.getResource(DESTINATION_VAR).getURI());
                log.debug("Concept {} was matched to {} with type {}", origin, destination, type);
                result.put(origin, destination, new AtomicMatchResult(origin, destination, type, this));
            }
        }

    } catch (URISyntaxException e) {
        log.error("Error creating URI for match results", e);
    } finally {
        qexec.close();
    }

    return result.build();
}

From source file:org.apache.twill.internal.logging.KafkaAppender.java

/**
 * Publishes buffered logs to Kafka, within the given timeout.
 *
 * @return Number of logs published.
 * @throws TimeoutException If timeout reached before publish completed.
 */
private int publishLogs(long timeout, TimeUnit timeoutUnit) throws TimeoutException {
    List<ByteBuffer> logs = Lists.newArrayListWithExpectedSize(bufferedSize.get());

    for (String json : Iterables.consumingIterable(buffer)) {
        logs.add(Charsets.UTF_8.encode(json));
    }

    long backOffTime = timeoutUnit.toNanos(timeout) / 10;
    if (backOffTime <= 0) {
        backOffTime = 1;
    }

    try {
        Stopwatch stopwatch = new Stopwatch();
        stopwatch.start();
        long publishTimeout = timeout;

        do {
            try {
                int published = doPublishLogs(logs).get(publishTimeout, timeoutUnit);
                bufferedSize.addAndGet(-published);
                return published;
            } catch (ExecutionException e) {
                addError("Failed to publish logs to Kafka.", e);
                TimeUnit.NANOSECONDS.sleep(backOffTime);
                publishTimeout -= stopwatch.elapsedTime(timeoutUnit);
                stopwatch.reset();
                stopwatch.start();
            }
        } while (publishTimeout > 0);
    } catch (InterruptedException e) {
        addWarn("Logs publish to Kafka interrupted.", e);
    }
    return 0;
}

From source file:processing.LayersCalculator.java

public static void predictSample(String filename, int trainSize, int sampleSize, int beta) {
    //filename += "_res";
    BookmarkReader reader = new BookmarkReader(trainSize, false);
    reader.readFile(filename);

    List<int[]> predictionValues = new ArrayList<int[]>();
    Stopwatch timer = new Stopwatch();
    timer.start();
    LayersCalculator calculator = new LayersCalculator(reader, trainSize, beta);
    timer.stop();
    long trainingTime = timer.elapsed(TimeUnit.MILLISECONDS);

    timer = new Stopwatch();
    timer.start();
    for (int i = trainSize; i < trainSize + sampleSize; i++) { // the test-set
        UserData data = reader.getUserLines().get(i);
        Map<Integer, Double> map = calculator.getRankedTagList(data.getUserID(), data.getWikiID(),
                data.getCategories());
        predictionValues.add(Ints.toArray(map.keySet()));
    }
    timer.stop();
    long testTime = timer.elapsed(TimeUnit.MILLISECONDS);
    timeString += ("Full training time: " + trainingTime + "\n");
    timeString += ("Full test time: " + testTime + "\n");
    timeString += ("Average test time: " + testTime / (double) sampleSize) + "\n";
    timeString += ("Total time: " + (trainingTime + testTime) + "\n");
    String outputFile = filename + "_3layers";
    Utilities.writeStringToFile("./data/metrics/" + outputFile + "_TIME.txt", timeString);

    reader.setUserLines(reader.getUserLines().subList(trainSize, reader.getUserLines().size()));
    PredictionFileWriter writer = new PredictionFileWriter(reader, predictionValues);
    writer.writeFile(outputFile);
}

From source file:org.apache.bookkeeper.replication.ReplicationWorker.java

/**
 * Replicates the under-replicated fragments from the failed bookie's ledger to the
 * targetBookie.
 */
private void rereplicate() throws InterruptedException, BKException, UnavailableException {
    long ledgerIdToReplicate = underreplicationManager.getLedgerToRereplicate();

    Stopwatch stopwatch = new Stopwatch().start();
    boolean success = false;
    try {
        success = rereplicate(ledgerIdToReplicate);
    } finally {
        long latencyMillis = stopwatch.stop().elapsedMillis();
        if (success) {
            rereplicateOpStats.registerSuccessfulEvent(latencyMillis, TimeUnit.MILLISECONDS);
        } else {
            rereplicateOpStats.registerFailedEvent(latencyMillis, TimeUnit.MILLISECONDS);
        }
    }
}

From source file:org.icgc.dcc.submission.validation.key.core.KVSubmissionProcessor.java

@SuppressWarnings("deprecation")
private static Stopwatch createStopwatch() {
    // Can't use the new API here because Hadoop doesn't know about it.
    return new Stopwatch().start();
}

From source file:co.cask.tigon.sql.flowlet.AbstractInputFlowlet.java

/**
 * This method initializes all the components required to setup the SQL Compiler environment.
 */
@Override
public final void initialize(FlowletContext ctx) throws Exception {
    super.initialize(ctx);
    portsAnnouncementList = Lists.newArrayList();
    DefaultInputFlowletConfigurer configurer = new DefaultInputFlowletConfigurer(this);
    create(configurer);
    InputFlowletSpecification spec = configurer.createInputFlowletSpec();

    dataIngestionPortsMap = Maps.newHashMap();
    int httpPort = 0;
    if (ctx.getRuntimeArguments().get(Constants.HTTP_PORT) != null) {
        httpPort = Integer.parseInt(ctx.getRuntimeArguments().get(Constants.HTTP_PORT));
    }
    dataIngestionPortsMap.put(Constants.HTTP_PORT, httpPort);
    for (String inputName : spec.getInputSchemas().keySet()) {
        int tcpPort = 0;
        if (ctx.getRuntimeArguments().get(Constants.TCP_INGESTION_PORT_PREFIX + inputName) != null) {
            tcpPort = Integer
                    .parseInt(ctx.getRuntimeArguments().get(Constants.TCP_INGESTION_PORT_PREFIX + inputName));
        }
        dataIngestionPortsMap.put(Constants.TCP_INGESTION_PORT_PREFIX + inputName, tcpPort);
    }

    // Setup temporary directory structure
    tmpFolder = Files.createTempDir();
    File baseDir = new File(tmpFolder, "baseDir");
    baseDir.mkdirs();

    InputFlowletConfiguration inputFlowletConfiguration = new LocalInputFlowletConfiguration(baseDir, spec);
    File binDir = inputFlowletConfiguration.createStreamEngineProcesses();

    healthInspector = new HealthInspector(this);
    metricsRecorder = new MetricsRecorder(metrics);

    //Initiating AbstractInputFlowlet Components
    recordQueue = new GDATRecordQueue();

    //Initiating Netty TCP I/O ports
    inputFlowletService = new InputFlowletService(binDir, spec, healthInspector, metricsRecorder, recordQueue,
            dataIngestionPortsMap, this);
    inputFlowletService.startAndWait();

    //Starting health monitor service
    healthInspector.startAndWait();

    //Initializing methodsDriver
    Map<String, StreamSchema> schemaMap = MetaInformationParser.getSchemaMap(new File(binDir.toURI()));
    methodsDriver = new MethodsDriver(this, schemaMap);

    //Initialize stopwatch and retry counter
    stopwatch = new Stopwatch();
    retryCounter = 0;
}

From source file:com.google.walkaround.wave.server.attachment.AttachmentService.java

/**
 * @param maxTimeMillis Maximum time to take; null for indefinite. If the time
 *          runs out, some data may not be returned, so the resulting map may
 *          be missing some of the input ids. Callers may retry to get the
 *          remaining data for the missing ids.
 *
 * @return a map of input id to attachment metadata for each id. invalid ids
 *         will map to Optional.absent(). Some ids may be missing due to the time limit.
 *
 *         At least one id is guaranteed to be returned.
 */
public Map<AttachmentId, Optional<AttachmentMetadata>> getMetadata(List<AttachmentId> ids,
        @Nullable Long maxTimeMillis) throws IOException {
    Stopwatch stopwatch = new Stopwatch().start();
    Map<AttachmentId, Optional<AttachmentMetadata>> result = Maps.newHashMap();
    for (AttachmentId id : ids) {
        // TODO(danilatos): To optimise, re-arrange the code so that
        //   1. Query all the ids from memcache in one go
        //   2. Those that failed, query all remaining ids from the data store in one go
        //   3. Finally, query all remaining ids from the raw service in one go (the
        //      raw service api should be changed to accept a list, and it needs to
        //      query the __BlobInfo__ entities directly.)
        Optional<AttachmentMetadata> metadata = metadataCache.get(id);
        if (metadata == null) {
            AttachmentMetadata storedMetadata = metadataDirectory.getWithoutTx(id);
            if (storedMetadata != null) {
                metadata = Optional.of(storedMetadata);
                metadataCache.put(id, metadata);
            } else {
                metadata = Optional.absent();
                metadataCache.put(id, metadata, Expiration.byDeltaSeconds(INVALID_ID_CACHE_EXPIRY_SECONDS),
                        MemcacheService.SetPolicy.ADD_ONLY_IF_NOT_PRESENT);
            }
        }
        Assert.check(metadata != null, "Null metadata");
        result.put(id, metadata);

        if (maxTimeMillis != null && stopwatch.elapsedMillis() > maxTimeMillis) {
            break;
        }
    }
    Assert.check(!result.isEmpty(), "Should return at least one id");
    return result;
}

From source file:com.twitter.hraven.rest.RestJSONResource.java

@GET
@Path("jobFlow/{cluster}/{jobId}")
@Produces(MediaType.APPLICATION_JSON)
public Flow getJobFlowById(@PathParam("cluster") String cluster, @PathParam("jobId") String jobId,
        @QueryParam("includeFlowField") List<String> includeFlowFields,
        @QueryParam("includeJobField") List<String> includeJobFields) throws IOException {
    LOG.info(String.format("Fetching Flow for cluster=%s, jobId=%s", cluster, jobId));
    Stopwatch timer = new Stopwatch().start();
    Predicate<String> jobFilter = null;
    if (includeJobFields != null && !includeJobFields.isEmpty()) {
        jobFilter = new SerializationContext.FieldNameFilter(includeJobFields);
    }

    Predicate<String> flowFilter = null;
    if (includeFlowFields != null && !includeFlowFields.isEmpty()) {
        flowFilter = new SerializationContext.FieldNameFilter(includeFlowFields);
    }

    serializationContext.set(new SerializationContext(SerializationContext.DetailLevel.EVERYTHING, null,
            flowFilter, jobFilter, null));
    Flow flow = getJobHistoryService().getFlowByJobID(cluster, jobId, false);
    timer.stop();

    if (flow != null) {
        LOG.info("For jobFlow/{cluster}/{jobId} with input query: " + "jobFlow/" + cluster + SLASH + jobId + "&"
                + StringUtil.buildParam("includeJobField", includeJobFields) + "&"
                + StringUtil.buildParam("includeFlowField", includeFlowFields) + " fetched flow "
                + flow.getFlowName() + " with # " + flow.getJobCount() + " in " + timer);
    } else {
        LOG.info("For jobFlow/{cluster}/{jobId} with input query: " + "jobFlow/" + cluster + SLASH + jobId + "&"
                + StringUtil.buildParam("includeJobField", includeJobFields) + "&"
                + StringUtil.buildParam("includeFlowField", includeFlowFields) + " No flow found, spent "
                + timer);
    }

    // export latency metrics
    HravenResponseMetrics.JOBFLOW_API_LATENCY_VALUE.set(timer.elapsed(TimeUnit.MILLISECONDS));
    return flow;
}

From source file:org.apache.drill.exec.store.schedule.BlockMapBuilder.java

/**
 * For a given FileWork, calculate how many bytes are available on each drillbit endpoint.
 *
 * @param work the FileWork to calculate endpoint bytes for
 * @throws IOException
 */
public EndpointByteMap getEndpointByteMap(FileWork work) throws IOException {
    Stopwatch watch = new Stopwatch();
    watch.start();
    Path fileName = new Path(work.getPath());

    ImmutableRangeMap<Long, BlockLocation> blockMap = getBlockMap(fileName);
    EndpointByteMapImpl endpointByteMap = new EndpointByteMapImpl();
    long start = work.getStart();
    long end = start + work.getLength();
    Range<Long> rowGroupRange = Range.closedOpen(start, end);

    // Find submap of ranges that intersect with the rowGroup
    ImmutableRangeMap<Long, BlockLocation> subRangeMap = blockMap.subRangeMap(rowGroupRange);

    // Iterate through each block in this submap and get the host for the block location
    for (Map.Entry<Range<Long>, BlockLocation> block : subRangeMap.asMapOfRanges().entrySet()) {
        String[] hosts;
        Range<Long> blockRange = block.getKey();
        try {
            hosts = block.getValue().getHosts();
        } catch (IOException ioe) {
            throw new RuntimeException("Failed to get hosts for block location", ioe);
        }
        Range<Long> intersection = rowGroupRange.intersection(blockRange);
        long bytes = intersection.upperEndpoint() - intersection.lowerEndpoint();

        // For each host in the current block location, add the intersecting bytes to the corresponding endpoint
        for (String host : hosts) {
            DrillbitEndpoint endpoint = getDrillBitEndpoint(host);
            if (endpoint != null) {
                endpointByteMap.add(endpoint, bytes);
            } else {
                logger.info("Failure finding Drillbit running on host {}.  Skipping affinity to that host.",
                        host);
            }
        }
    }

    logger.debug("FileWork group ({},{}) max bytes {}", work.getPath(), work.getStart(),
            endpointByteMap.getMaxBytes());

    logger.debug("Took {} ms to set endpoint bytes", watch.stop().elapsed(TimeUnit.MILLISECONDS));
    return endpointByteMap;
}

From source file:co.cask.cdap.data2.transaction.queue.AbstractQueueConsumer.java

private DequeueResult<byte[]> performDequeue(int maxBatchSize) throws IOException {
    Preconditions.checkArgument(maxBatchSize > 0, "Batch size must be > 0.");

    // pre-compute the "claimed" state content in case of FIFO.
    byte[] claimedStateValue = null;
    if (getConfig().getDequeueStrategy() == DequeueStrategy.FIFO && getConfig().getGroupSize() > 1) {
        claimedStateValue = encodeStateColumn(ConsumerEntryState.CLAIMED);
    }

    boolean isReachedDequeueTimeLimit = false;
    Stopwatch stopwatch = new Stopwatch();
    stopwatch.start();
    while (consumingEntries.size() < maxBatchSize && getEntries(consumingEntries, maxBatchSize, stopwatch)) {

        // ANDREAS: this while loop should stop once getEntries/populateCache reaches the end of the queue. Currently, it
        // will retry as long as it gets at least one entry in every round, even if that is an entry that must be ignored
        // because it cannot be claimed.
        // ANDREAS: It could be a problem that we always read to the end of the queue. This way one flowlet instance may
        // always consume all entries, while others are idle.

        // For FIFO, need to try claiming the entry if group size > 1
        if (getConfig().getDequeueStrategy() == DequeueStrategy.FIFO && getConfig().getGroupSize() > 1) {
            Iterator<Map.Entry<byte[], SimpleQueueEntry>> iterator = consumingEntries.entrySet().iterator();
            while (iterator.hasNext()) {
                SimpleQueueEntry entry = iterator.next().getValue();

                if (entry.getState() == null
                        || QueueEntryRow.getStateInstanceId(entry.getState()) >= getConfig().getGroupSize()) {
                    // If not able to claim it, remove it, and move to next one.
                    if (!claimEntry(entry.getRowKey(), claimedStateValue)) {
                        iterator.remove();
                    }

                    if (stopwatch.elapsedMillis() >= maxDequeueMillis) {
                        break;
                    }
                }
            }
            // Drain the iterator in case of dequeue time limit reached
            Iterators.advance(iterator, Integer.MAX_VALUE);
        }

        if (stopwatch.elapsedMillis() >= maxDequeueMillis) {
            // If time limit reached and yet we don't have enough entries as requested, treat it as dequeue time limit
            // reached. There can be some false positive (reached the end of queue, yet passed the time limit), but
            // it's ok since we use this boolean for logging only and normally it won't be the case as long as
            // dequeue is completed in relatively short time comparing to the tx timeout.
            isReachedDequeueTimeLimit = consumingEntries.size() < maxBatchSize;
            break;
        }
    }

    // If nothing gets dequeued, return the empty result.
    if (consumingEntries.isEmpty()) {
        if (isReachedDequeueTimeLimit) {
            LOG.warn("Unable to dequeue any entry after {}ms.", maxDequeueMillis);
        }
        return EMPTY_RESULT;
    }

    if (isReachedDequeueTimeLimit) {
        LOG.warn("Dequeue time limit of {}ms reached. Requested batch size {}, dequeued {}", maxDequeueMillis,
                maxBatchSize, consumingEntries.size());
    }

    return new SimpleDequeueResult(consumingEntries.values());
}