Example usage for the com.google.common.base.Stopwatch constructor, Stopwatch()

Introduction

This page collects usage examples for the no-argument constructor of com.google.common.base.Stopwatch, taken from real projects. Two API notes help when reading them: the public Stopwatch() constructor was deprecated in Guava 15 in favor of Stopwatch.createUnstarted() and Stopwatch.createStarted(), and a few examples call the older elapsedTime(TimeUnit), which later releases renamed to elapsed(TimeUnit).

Prototype

Stopwatch() 
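
A minimal sketch of the lifecycle every example below goes through: construct, start, do the timed work, stop, then read the elapsed time. The class name and the sleep are placeholders standing in for real work; as noted above, new Stopwatch() is the deprecated spelling of what is now Stopwatch.createUnstarted().

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchDemo {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch sw = new Stopwatch().start(); // start() returns the stopwatch itself
        TimeUnit.MILLISECONDS.sleep(50);        // stand-in for the timed work
        sw.stop();
        System.out.println("elapsed: " + sw.elapsed(TimeUnit.MILLISECONDS) + " ms");
        System.out.println(sw);                 // toString() renders a human-readable duration
    }
}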

Usage

From source file:cosmos.impl.CosmosImpl.java

@Override
public void addResults(Store id, Iterable<? extends Record<?>> queryResults) throws Exception {
    checkNotNull(id);
    checkNotNull(queryResults);

    Stopwatch sw = new Stopwatch().start();
    try {
        State s = PersistedStores.getState(id);

        if (!State.LOADING.equals(s)) {
            // stopwatch closed in finally
            UnexpectedStateException e = unexpectedState(id, State.LOADING, s);
            log.error(e.getMessage());
            throw e;
        }

        InterProcessMutex lock = getMutex(id);

        // TODO We don't need to lock on multiple calls to addResults; however, we need to lock over adding the
        // new records to make sure a call to index() doesn't come in while we're processing a stale set of Columns to index
        boolean locked = false;
        int count = 1;

        if (id.lockOnUpdates()) {
            while (!locked && count < 4) {
                if (locked = lock.acquire(LOCK_SECS, TimeUnit.SECONDS)) {
                    try {
                        performAdd(id, queryResults);
                    } finally {
                        // Don't hog the lock
                        lock.release();
                    }
                } else {
                    count++;
                    log.warn(
                            "addResults() on {} could not acquire lock after {} seconds. Attempting acquire #{}",
                            new Object[] { id.uuid(), LOCK_SECS, count });
                }
            }

            // Only fail once every acquire attempt has been exhausted
            if (!locked) {
                throw new IllegalStateException(
                        "Could not acquire lock during addResults() after " + count + " attempts");
            }
        } else {
            performAdd(id, queryResults);
        }
    } finally {
        sw.stop();
        id.tracer().addTiming("Cosmos:addResults", sw.elapsed(TimeUnit.MILLISECONDS));
    }
}
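
The shape worth copying from this example is the try/finally: the stopwatch starts before the guarded work and is stopped in the finally block, so the timing is recorded even when unexpectedState() throws. Distilled to a sketch, with doGuardedWork() and recordTiming() as hypothetical placeholders:

Stopwatch sw = new Stopwatch().start();
try {
    doGuardedWork();      // the operation being timed; may throw
} finally {
    sw.stop();
    // runs on both the success and the failure path
    recordTiming("Cosmos:addResults", sw.elapsed(TimeUnit.MILLISECONDS));
}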

From source file:co.cask.cdap.examples.wordcount.RetrieveCountsHandler.java

/**
 * Returns the counts for all words in the input.  The request body is expected to contain
 * a comma-separated list of words.
 *
 * <p>
 * This endpoint method differs from {@link RetrieveCountsHandler#getCounts(HttpServiceRequest,HttpServiceResponder)}
 * in using {@link KeyValueTable#readAll(byte[][])} to perform a batched read.
 * </p>
 */
@Path("multicounts")
@POST
public void getMultiCounts(HttpServiceRequest request, HttpServiceResponder responder) {
    String wordString = Charsets.UTF_8.decode(request.getContent()).toString();
    String[] words = wordString.split(",");
    byte[][] wordBytes = new byte[words.length][];
    for (int i = 0; i < words.length; i++) {
        wordBytes[i] = Bytes.toBytes(words[i]);
    }
    Stopwatch timer = new Stopwatch().start();
    Map<byte[], byte[]> results = wordCountsTable.readAll(wordBytes);
    Map<String, Long> wordCounts = Maps.newHashMap();
    for (Map.Entry<byte[], byte[]> entry : results.entrySet()) {
        byte[] val = entry.getValue();
        wordCounts.put(Bytes.toString(entry.getKey()), val != null ? Bytes.toLong(val) : 0L);
    }
    timer.stop();
    Map<String, Object> response = Maps.newHashMap();
    response.put("counts", wordCounts);
    response.put("elapsed", timer.toString());
    responder.sendJson(response);
}

From source file:co.cask.cdap.data2.util.hbase.HBaseTableUtil.java

/**
 * Creates an HBase table if it does not exist. Deals with race conditions when two clients
 * concurrently attempt to create the table.
 * @param admin the HBase admin
 * @param tableId {@link TableId} representing the table
 * @param tableDescriptor HBase table descriptor for the new table
 * @param splitKeys initial split keys for the new table, or null for no pre-splitting
 * @param timeout maximum time to wait for table creation
 * @param timeoutUnit the TimeUnit for timeout
 */
public void createTableIfNotExists(HBaseAdmin admin, TableId tableId, HTableDescriptor tableDescriptor,
        @Nullable byte[][] splitKeys, long timeout, TimeUnit timeoutUnit) throws IOException {
    if (tableExists(admin, tableId)) {
        return;
    }
    setDefaultConfiguration(tableDescriptor, admin.getConfiguration());

    try {
        LOG.info("Creating table '{}'", tableId);
        // HBaseAdmin.createTable can handle null splitKeys.
        admin.createTable(tableDescriptor, splitKeys);
        LOG.info("Table created '{}'", tableId);
        return;
    } catch (TableExistsException e) {
        // table may exist because someone else is creating it at the same
        // time. But it may not be available yet, and opening it might fail.
        LOG.info("Failed to create table '{}'. {}.", tableId, e.getMessage(), e);
    }

    // Wait for table to materialize
    try {
        Stopwatch stopwatch = new Stopwatch();
        stopwatch.start();
        long sleepTime = timeoutUnit.toNanos(timeout) / 10;
        sleepTime = sleepTime <= 0 ? 1 : sleepTime;
        do {
            if (tableExists(admin, tableId)) {
                LOG.info("Table '{}' exists now. Assuming that another process concurrently created it.",
                        tableId);
                return;
            } else {
                TimeUnit.NANOSECONDS.sleep(sleepTime);
            }
        } while (stopwatch.elapsedTime(timeoutUnit) < timeout);
    } catch (InterruptedException e) {
        LOG.warn("Sleeping thread interrupted.");
    }
    LOG.error("Table '{}' does not exist after waiting {} ms. Giving up.", tableId, MAX_CREATE_TABLE_WAIT);
}
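
The wait loop above is a general poll-until-deadline idiom: the Stopwatch bounds the total wait while the thread sleeps in small increments between checks. Below is a self-contained sketch of the same idiom under stated assumptions: Condition is a hypothetical stand-in for the check being awaited (tableExists above), and the sketch uses the current elapsed(TimeUnit) name rather than the older elapsedTime(TimeUnit) seen in the example.

import com.google.common.base.Stopwatch;
import java.io.IOException;
import java.util.concurrent.TimeUnit;

public class PollUntil {
    interface Condition {
        boolean holds() throws IOException;
    }

    /** Polls until the condition holds or the timeout elapses; returns whether it held. */
    static boolean waitFor(Condition condition, long timeout, TimeUnit unit)
            throws IOException, InterruptedException {
        Stopwatch stopwatch = new Stopwatch().start();
        // Sleep in tenths of the timeout, but never for zero time.
        long sleepNanos = Math.max(1, unit.toNanos(timeout) / 10);
        do {
            if (condition.holds()) {
                return true;
            }
            TimeUnit.NANOSECONDS.sleep(sleepNanos);
        } while (stopwatch.elapsed(unit) < timeout);
        return false;
    }
}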

From source file:com.google.gerrit.pgm.Reindex.java

private int indexAll() throws Exception {
    ReviewDb db = sysInjector.getInstance(ReviewDb.class);
    ChangeIndexer indexer = sysInjector.getInstance(ChangeIndexer.class);
    Stopwatch sw = new Stopwatch().start();
    int queueLen = 2 * threads;
    final Semaphore sem = new Semaphore(queueLen);
    final AtomicBoolean ok = new AtomicBoolean(true);
    int i = 0;
    for (final Change change : db.changes().all()) {
        sem.acquire();
        final ListenableFuture<?> future = indexer.index(change);
        future.addListener(new Runnable() {
            @Override
            public void run() {
                try {
                    future.get();
                } catch (InterruptedException e) {
                    log.error("Failed to index change " + change.getId(), e);
                    ok.set(false);
                } catch (ExecutionException e) {
                    log.error("Failed to index change " + change.getId(), e);
                    ok.set(false);
                } finally {
                    sem.release();
                }
            }
        }, MoreExecutors.sameThreadExecutor());
        i++;
    }
    sem.acquire(queueLen);
    double elapsed = sw.elapsed(TimeUnit.MILLISECONDS) / 1000d;
    System.out.format("Reindexed %d changes in %.02fs\n", i, elapsed);

    return ok.get() ? 0 : 1;
}

From source file:com.twitter.hraven.rest.RestJSONResource.java

@GET
@Path("tasks/{cluster}/{jobId}")
@Produces(MediaType.APPLICATION_JSON)
public List<TaskDetails> getJobTasksById(@PathParam("cluster") String cluster, @PathParam("jobId") String jobId,
        @QueryParam("include") List<String> includeFields) throws IOException {
    LOG.info("Fetching tasks info for jobId=" + jobId);
    Stopwatch timer = new Stopwatch().start();

    Predicate<String> includeFilter = null;
    if (includeFields != null && !includeFields.isEmpty()) {
        includeFilter = new SerializationContext.FieldNameFilter(includeFields);
    }
    serializationContext.set(new SerializationContext(SerializationContext.DetailLevel.EVERYTHING, null, null,
            null, includeFilter));

    JobDetails jobDetails = getJobHistoryService().getJobByJobID(cluster, jobId, true);
    List<TaskDetails> tasks = jobDetails.getTasks();
    timer.stop();

    if (tasks != null && !tasks.isEmpty()) {
        LOG.info("For endpoint /tasks/" + cluster + "/" + jobId + "?"
                + StringUtil.buildParam("include", includeFields) + " fetched " + tasks.size()
                + " tasks, spent time " + timer);
    } else {
        LOG.info("For endpoint /tasks/" + cluster + "/" + jobId + "?"
                + StringUtil.buildParam("include", includeFields) + ", found no tasks, spent time " + timer);
    }
    return tasks;
}

From source file:org.apache.drill.exec.physical.impl.TopN.TopNBatch.java

@Override
public IterOutcome innerNext() {
    if (state == BatchState.DONE) {
        return IterOutcome.NONE;
    }
    if (schema != null) {
        if (getSelectionVector4().next()) {
            recordCount = sv4.getCount();
            return IterOutcome.OK;
        } else {
            recordCount = 0;
            return IterOutcome.NONE;
        }
    }

    try {
        outer: while (true) {
            Stopwatch watch = new Stopwatch();
            watch.start();
            IterOutcome upstream;
            if (first) {
                upstream = IterOutcome.OK_NEW_SCHEMA;
                first = false;
            } else {
                upstream = next(incoming);
            }
            if (upstream == IterOutcome.OK && schema == null) {
                upstream = IterOutcome.OK_NEW_SCHEMA;
                container.clear();
            }
            logger.debug("Took {} us to get next", watch.elapsed(TimeUnit.MICROSECONDS));
            switch (upstream) {
            case NONE:
                break outer;
            case NOT_YET:
                throw new UnsupportedOperationException();
            case OUT_OF_MEMORY:
            case STOP:
                return upstream;
            case OK_NEW_SCHEMA:
                // only change in the case that the schema truly changes.  Artificial schema changes are ignored.
                if (!incoming.getSchema().equals(schema)) {
                    if (schema != null) {
                        throw new UnsupportedOperationException(
                                "Sort doesn't currently support sorts with changing schemas.");
                    }
                    this.schema = incoming.getSchema();
                }
                // fall through.
            case OK:
                if (incoming.getRecordCount() == 0) {
                    for (VectorWrapper w : incoming) {
                        w.clear();
                    }
                    break;
                }
                countSincePurge += incoming.getRecordCount();
                batchCount++;
                RecordBatchData batch = new RecordBatchData(incoming);
                boolean success = false;
                try {
                    batch.canonicalize();
                    if (priorityQueue == null) {
                        priorityQueue = createNewPriorityQueue(context, config.getOrderings(),
                                new ExpandableHyperContainer(batch.getContainer()), MAIN_MAPPING, LEFT_MAPPING,
                                RIGHT_MAPPING);
                    }
                    priorityQueue.add(context, batch);
                    if (countSincePurge > config.getLimit() && batchCount > batchPurgeThreshold) {
                        purge();
                        countSincePurge = 0;
                        batchCount = 0;
                    }
                    success = true;
                } finally {
                    if (!success) {
                        batch.clear();
                    }
                }
                break;
            default:
                throw new UnsupportedOperationException();
            }
        }

        if (schema == null || priorityQueue == null) {
            // priorityQueue may be null at this point if the first incoming batch is empty
            state = BatchState.DONE;
            return IterOutcome.NONE;
        }

        priorityQueue.generate();

        this.sv4 = priorityQueue.getFinalSv4();
        container.clear();
        for (VectorWrapper w : priorityQueue.getHyperBatch()) {
            container.add(w.getValueVectors());
        }
        container.buildSchema(BatchSchema.SelectionVectorMode.FOUR_BYTE);

        recordCount = sv4.getCount();
        return IterOutcome.OK_NEW_SCHEMA;

    } catch (SchemaChangeException | ClassTransformationException | IOException ex) {
        kill(false);
        logger.error("Failure during query", ex);
        context.fail(ex);
        return IterOutcome.STOP;
    }
}

From source file:cosmos.mapred.MediawikiQueries.java

public void run(int numIterations) throws Exception {
    final Random offsetR = new Random(), cardinalityR = new Random();

    int iters = 0;

    while (iters < numIterations) {
        Store id = Store.create(this.con,
                this.con.securityOperations().getUserAuthorizations(this.con.whoami()),
                IdentitySet.<Index>create());

        int offset = offsetR.nextInt(MAX_OFFSET);
        int numRecords = cardinalityR.nextInt(MAX_SIZE) + 1;

        BatchScanner bs = this.con.createBatchScanner("sortswiki", new Authorizations(), 4);

        bs.setRanges(Collections.singleton(new Range(Integer.toString(offset), Integer.toString(MAX_ROW))));

        Iterable<Entry<Key, Value>> inputIterable = Iterables.limit(bs, numRecords);

        this.sorts.register(id);

        System.out.println(Thread.currentThread().getName() + ": " + id.uuid() + " - Iteration " + iters);
        long recordsReturned = 0L;
        Function<Entry<Key, Value>, MultimapRecord> func = new Function<Entry<Key, Value>, MultimapRecord>() {
            @Override
            public MultimapRecord apply(Entry<Key, Value> input) {
                Page p;
                try {
                    p = Page.parseFrom(input.getValue().get());
                } catch (InvalidProtocolBufferException e) {
                    throw new RuntimeException(e);
                }
                return pagesToQueryResult(p);
            }
        };

        Map<Column, Long> counts = Maps.newHashMap();
        ArrayList<MultimapRecord> tformSource = Lists.newArrayListWithCapacity(20000);

        Stopwatch sw = new Stopwatch();
        Stopwatch tformSw = new Stopwatch();

        for (Entry<Key, Value> input : inputIterable) {
            tformSw.start();

            MultimapRecord r = func.apply(input);
            tformSource.add(r);

            tformSw.stop();

            loadCountsForRecord(counts, r);
            recordsReturned++;
        }

        sw.start();
        this.sorts.addResults(id, tformSource);
        sw.stop();

        long actualNumResults = tformSource.size();

        System.out.println(Thread.currentThread().getName() + ": Took " + tformSw + " transforming and " + sw
                + " to store " + recordsReturned + " records");
        logTiming(actualNumResults, tformSw.elapsed(TimeUnit.MILLISECONDS), "transformInput");
        logTiming(actualNumResults, sw.elapsed(TimeUnit.MILLISECONDS), "ingest");

        bs.close();

        Random r = new Random();
        int max = r.nextInt(10) + 1;

        // Run a bunch of queries
        for (int count = 0; count < max; count++) {
            long resultCount;
            String name;
            int i = r.nextInt(9);

            if (0 == i) {
                resultCount = docIdFetch(id, counts, actualNumResults);
                name = "docIdFetch";
            } else if (1 == i) {
                resultCount = columnFetch(id, REVISION_ID, counts, actualNumResults);
                name = "revisionIdFetch";
            } else if (2 == i) {
                resultCount = columnFetch(id, PAGE_ID, counts, actualNumResults);
                name = "pageIdFetch";
            } else if (3 == i) {
                groupBy(id, REVISION_ID, counts, actualNumResults);
                // no sense to verify here
                resultCount = recordsReturned;
                name = "groupByRevisionId";
            } else if (4 == i) {
                groupBy(id, PAGE_ID, counts, actualNumResults);
                // no sense to verify here
                resultCount = recordsReturned;
                name = "groupByRevisionId";
            } else if (5 == i) {
                resultCount = columnFetch(id, CONTRIBUTOR_USERNAME, counts, actualNumResults);
                name = "contributorUsernameFetch";
            } else if (6 == i) {
                groupBy(id, CONTRIBUTOR_USERNAME, counts, actualNumResults);
                // no sense to verify here
                resultCount = recordsReturned;
                name = "groupByContributorUsername";
            } else if (7 == i) {
                resultCount = columnFetch(id, CONTRIBUTOR_ID, counts, actualNumResults);
                name = "contributorIdFetch";
            } else { // 8 == i
                groupBy(id, CONTRIBUTOR_ID, counts, actualNumResults);
                // no sense to verify here
                resultCount = recordsReturned;
                name = "groupByContributorID";
            }
        }
        System.out.println(Thread.currentThread().getName() + ": deleting " + id);
        // Delete the results
        sw = new Stopwatch();

        sw.start();

        this.sorts.delete(id);
        sw.stop();

        System.out.println(Thread.currentThread().getName() + ": Took " + sw.toString() + " to delete results");
        logTiming(actualNumResults, sw.elapsed(TimeUnit.MILLISECONDS), "deleteResults");

        iters++;
    }

    this.sorts.close();
}
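
A detail easy to miss above: tformSw is started and stopped once per loop iteration, so it accumulates only the transform time and excludes everything else in the loop body. A Guava Stopwatch keeps accumulating across start()/stop() cycles until reset() is called, which is what makes this work. A small sketch, with Item, items, transform() and publish() as hypothetical placeholders:

Stopwatch accumulator = new Stopwatch();
for (Item item : items) {
    accumulator.start();
    transform(item);      // only this statement is timed
    accumulator.stop();
    publish(item);        // excluded from the measurement
}
System.out.println("total transform time: " + accumulator); // sum over all iterations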

From source file:co.cask.cdap.data.stream.StreamDataFileReader.java

@Override
public int read(Collection<? super PositionStreamEvent> events, int maxEvents, long timeout, TimeUnit unit,
        ReadFilter readFilter) throws IOException, InterruptedException {
    if (closed) {
        throw new IOException("Reader already closed.");
    }

    int eventCount = 0;
    long sleepNano = computeSleepNano(timeout, unit);
    try {
        Stopwatch stopwatch = new Stopwatch();
        stopwatch.start();

        // Keep reading events until max events.
        while (!eof && eventCount < maxEvents) {
            try {
                if (eventInput == null) {
                    doOpen();
                }

                PositionStreamEvent event = nextStreamEvent(readFilter);
                if (event != null) {
                    events.add(event);
                    eventCount++;
                } else if (eof) {
                    break;
                }

            } catch (IOException e) {
                if (eventInput != null) {
                    eventInput.close();
                    eventInput = null;
                }

                if (!(e instanceof EOFException || e instanceof FileNotFoundException)) {
                    throw e;
                }

                // If end of stream file or no timeout is allowed, break the loop.
                if (eof || timeout <= 0) {
                    break;
                }

                if (stopwatch.elapsedTime(unit) >= timeout) {
                    break;
                }

                TimeUnit.NANOSECONDS.sleep(sleepNano);

                if (stopwatch.elapsedTime(unit) >= timeout) {
                    break;
                }
            }
        }

        return (eventCount == 0 && eof) ? -1 : eventCount;

    } catch (IOException e) {
        close();
        throw e;
    }
}

From source file:org.caleydo.view.tourguide.impl.GSEAAlgorithm.java

private List<Map<Integer, Float>> rankedSet(List<RankedSet> sets, IProgressMonitor monitor) {
    Stopwatch w = new Stopwatch().start();

    ATableBasedDataDomain dataDomain = (ATableBasedDataDomain) perspective.getDataDomain();
    Table table = dataDomain.getTable();

    List<Integer> rows = perspective.getVirtualArray().getIDs();
    List<Integer> cols = table.getDefaultDimensionPerspective(false).getVirtualArray().getIDs();
    for (Integer col : cols) {
        for (Integer row : rows) {
            Float v = table.getRaw(col, row);
            if (v == null || v.isNaN() || v.isInfinite())
                continue;
            // boolean neg = v < 0;
            // v = (float) Math.log(abs(v));
            // if (neg)
            // v = -v;
            for (RankedSet s : sets)
                s.add(row, v);
        }
        for (RankedSet s : sets)
            s.flush(col);
        if (monitor.isCanceled())
            return null;
    }
    List<Map<Integer, Float>> rs = new ArrayList<>(sets.size());
    for (RankedSet s : sets)
        rs.add(s.toSignal2Noise(dim2primary));
    System.out.println(w);
    return rs;
}

From source file:org.apache.drill.exec.client.QuerySubmitter.java

public int submitQuery(DrillClient client, String plan, String type, String format, int width)
        throws Exception {

    PrintingResultsListener listener;

    String[] queries;
    QueryType queryType;
    type = type.toLowerCase();
    switch (type) {
    case "sql":
        queryType = QueryType.SQL;
        queries = plan.trim().split(";");
        break;
    case "logical":
        queryType = QueryType.LOGICAL;
        queries = new String[] { plan };
        break;
    case "physical":
        queryType = QueryType.PHYSICAL;
        queries = new String[] { plan };
        break;
    default:
        System.out.println("Invalid query type: " + type);
        return -1;
    }

    Format outputFormat;
    format = format.toLowerCase();
    switch (format) {
    case "csv":
        outputFormat = Format.CSV;
        break;
    case "tsv":
        outputFormat = Format.TSV;
        break;
    case "table":
        outputFormat = Format.TABLE;
        break;
    default:
        System.out.println("Invalid format type: " + format);
        return -1;
    }
    Stopwatch watch = new Stopwatch();
    for (String query : queries) {
        listener = new PrintingResultsListener(client.getConfig(), outputFormat, width);
        watch.start();
        client.runQuery(queryType, query, listener);
        int rows = listener.await();
        System.out.println(String.format("%d record%s selected (%f seconds)", rows, rows == 1 ? "" : "s",
                (float) watch.elapsed(TimeUnit.MILLISECONDS) / (float) 1000));
        if (query != queries[queries.length - 1]) {
            System.out.println();
        }
        watch.stop();
        watch.reset();
    }
    return 0;

}
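
The stop()/reset() pair at the bottom of the loop is what lets one Stopwatch instance be reused across queries: reset() zeroes the accumulated time, whereas stopping alone would make each printed figure include all previous queries. Note also that elapsed() may be read while the stopwatch is still running, which is why the print can precede the stop() here. The reuse pattern in isolation, with run() and queries as hypothetical placeholders:

Stopwatch watch = new Stopwatch();
for (String query : queries) {
    watch.start();
    run(query);
    System.out.println("took " + watch.elapsed(TimeUnit.MILLISECONDS) + " ms"); // legal while running
    watch.stop();
    watch.reset();  // zero the count before timing the next query
}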