Example usage for com.google.common.base.Stopwatch Stopwatch()

List of usage examples for the com.google.common.base.Stopwatch constructor Stopwatch()

Introduction

On this page you can find example usages of the com.google.common.base.Stopwatch constructor Stopwatch().

Prototype

Stopwatch() 
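
The no-argument constructor creates an unstarted stopwatch that measures elapsed time with System.nanoTime(). In newer Guava releases this constructor was deprecated and later removed in favor of the static factories Stopwatch.createUnstarted() and Stopwatch.createStarted(), and older releases expose elapsedMillis()/elapsedTime(TimeUnit) where newer ones use elapsed(TimeUnit). The following minimal sketch (class name and sleep duration are illustrative) shows the typical construct/start/stop/read pattern, assuming a Guava version in which the constructor is still available:

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class StopwatchExample {
    public static void main(String[] args) throws InterruptedException {
        // Construct an unstarted stopwatch, then start it explicitly.
        Stopwatch stopwatch = new Stopwatch();
        stopwatch.start();

        Thread.sleep(150); // stand-in for the work being measured

        stopwatch.stop();
        // elapsed(TimeUnit) returns the measured duration; toString() renders a
        // human-readable value such as "150.2 ms".
        System.out.println("took " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms (" + stopwatch + ")");
    }
}

The examples below follow the same pattern and differ mainly in how they read the result: elapsed(TimeUnit), the older elapsedMillis()/elapsedTime(TimeUnit), or the stopwatch's toString() appended directly to a log message.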

Usage

From source file: org.caleydo.core.util.clusterer.algorithm.affinity.AffinityClusterer.java

private PerspectiveInitializationData affinityPropagation() {
    Stopwatch w = new Stopwatch().start();

    int nrClusters = 0;

    int iNrIterations = 0, decit = convIterations;
    boolean doIterate = true;
    boolean isConverged = false;
    float[] mx1 = new float[nrSamples];
    float[] mx2 = new float[nrSamples];
    float[] srp = new float[nrSamples];
    float[] decsum = new float[nrSamples];
    int[] idx = new int[nrSamples];
    int[][] dec = new int[convIterations][nrSamples];
    float tmp = 0;
    int j = 0;
    float[] dArResposibilities = new float[nrSimilarities];
    float[] dArAvailabilities = new float[nrSimilarities];

    // add noise to similarities
    // for (int m = 0; m < s.length; m++) {
    // s[m] = (float) (s[m] + (1E-16 * s[j] + Float.MIN_VALUE * 100) *
    // ((float) Math.random() / 2f));
    // }

    int iPercentage = 1;

    rename("Affinitiy propagpation of " + getPerspectiveLabel() + " in progress");

    while (doIterate) {
        iNrIterations++;

        int tempPercentage = (int) ((float) iNrIterations / maxIterations * 100);
        if (iPercentage == tempPercentage) {
            progress(iPercentage, false);
            iPercentage++;
        }

        // Compute responsibilities
        for (j = 0; j < nrSamples; j++) {
            mx1[j] = -Float.MAX_VALUE;
            mx2[j] = -Float.MAX_VALUE;
        }
        for (j = 0; j < nrSimilarities; j++) {
            tmp = dArAvailabilities[j] + s[j];
            if (tmp > mx1[i[j]]) {
                mx2[i[j]] = mx1[i[j]];
                mx1[i[j]] = tmp;
            } else if (tmp > mx2[i[j]]) {
                mx2[i[j]] = tmp;
            }
        }
        for (j = 0; j < nrSimilarities; j++) {
            tmp = dArAvailabilities[j] + s[j];
            if (tmp == mx1[i[j]]) {
                dArResposibilities[j] = dampingFactor * dArResposibilities[j]
                        + (1 - dampingFactor) * (s[j] - mx2[i[j]]);
            } else {
                dArResposibilities[j] = dampingFactor * dArResposibilities[j]
                        + (1 - dampingFactor) * (s[j] - mx1[i[j]]);
            }
        }

        // Compute availabilities
        for (j = 0; j < nrSimilarities - nrSamples; j++)
            if (dArResposibilities[j] > 0.0) {
                srp[k[j]] = srp[k[j]] + dArResposibilities[j];
            }
        for (j = nrSimilarities - nrSamples; j < nrSimilarities; j++) {
            srp[k[j]] = srp[k[j]] + dArResposibilities[j];
        }
        for (j = 0; j < nrSimilarities - nrSamples; j++) {
            if (dArResposibilities[j] > 0.0) {
                tmp = srp[k[j]] - dArResposibilities[j];
            } else {
                tmp = srp[k[j]];
            }
            if (tmp < 0.0) {
                dArAvailabilities[j] = dampingFactor * dArAvailabilities[j] + (1 - dampingFactor) * tmp;
            } else {
                dArAvailabilities[j] = dampingFactor * dArAvailabilities[j];
            }
        }
        for (j = nrSimilarities - nrSamples; j < nrSimilarities; j++) {
            dArAvailabilities[j] = dampingFactor * dArAvailabilities[j]
                    + (1 - dampingFactor) * (srp[k[j]] - dArResposibilities[j]);
        }

        // Identify exemplars and check to see if finished
        decit++;
        if (decit >= convIterations) {
            decit = 0;
        }
        for (j = 0; j < nrSamples; j++) {
            decsum[j] = decsum[j] - dec[decit][j];
        }
        for (j = 0; j < nrSamples; j++)
            if (dArAvailabilities[nrSimilarities - nrSamples + j]
                    + dArResposibilities[nrSimilarities - nrSamples + j] > 0.0) {
                dec[decit][j] = 1;
            } else {
                dec[decit][j] = 0;
            }
        nrClusters = 0;
        for (j = 0; j < nrSamples; j++) {
            nrClusters = nrClusters + dec[decit][j];
        }
        for (j = 0; j < nrSamples; j++) {
            decsum[j] = decsum[j] + dec[decit][j];
        }
        if ((iNrIterations >= convIterations) || (iNrIterations >= maxIterations)) {
            // Check convergence
            isConverged = true;
            for (j = 0; j < nrSamples; j++)
                if ((decsum[j] != 0) && (decsum[j] != convIterations)) {
                    isConverged = false;
                }
            // Check to see if done
            if ((isConverged && (nrClusters > 0)) || (iNrIterations == maxIterations)) {
                doIterate = false;
            }
        }
        eventListeners.processEvents();
        if (isClusteringCanceled) {
            log.info("Affinity propagation clustering was canceled!");
            progress(100, true);
            return null;
        }
    }

    // Arraylist holding indices of examples (cluster centers)
    List<Integer> alExamples = new ArrayList<Integer>();

    // If clusters were identified, find the assignments
    if (nrClusters > 0) {
        for (j = 0; j < nrSimilarities; j++)
            if (dec[decit][k[j]] == 1) {
                dArAvailabilities[j] = 0.0f;
            } else {
                dArAvailabilities[j] = -Float.MAX_VALUE;
            }
        for (j = 0; j < nrSamples; j++) {
            mx1[j] = -Float.MAX_VALUE;
        }
        for (j = 0; j < nrSimilarities; j++) {
            tmp = dArAvailabilities[j] + s[j];
            if (tmp > mx1[i[j]]) {
                mx1[i[j]] = tmp;
                idx[i[j]] = k[j];
            }
        }
        for (j = 0; j < nrSamples; j++)
            if (dec[decit][j] == 1) {
                idx[j] = j;
            }
        for (j = 0; j < nrSamples; j++) {
            srp[j] = 0.0f;
        }
        for (j = 0; j < nrSimilarities; j++)
            if (idx[i[j]] == idx[k[j]]) {
                srp[k[j]] = srp[k[j]] + s[j];
            }
        for (j = 0; j < nrSamples; j++) {
            mx1[j] = -Float.MAX_VALUE;
        }
        for (j = 0; j < nrSamples; j++)
            if (srp[j] > mx1[idx[j]]) {
                mx1[idx[j]] = srp[j];
            }
        for (j = 0; j < nrSamples; j++)
            if (srp[j] == mx1[idx[j]]) {
                dec[decit][j] = 1;
            } else {
                dec[decit][j] = 0;
            }
        for (j = 0; j < nrSimilarities; j++)
            if (dec[decit][k[j]] == 1) {
                dArAvailabilities[j] = 0.0f;
            } else {
                dArAvailabilities[j] = -Float.MAX_VALUE;
            }
        for (j = 0; j < nrSamples; j++) {
            mx1[j] = -Float.MAX_VALUE;
        }
        for (j = 0; j < nrSimilarities; j++) {
            tmp = dArAvailabilities[j] + s[j];
            if (tmp > mx1[i[j]]) {
                mx1[i[j]] = tmp;
                idx[i[j]] = k[j];
            }
        }
        for (j = 0; j < nrSamples; j++)
            if (dec[decit][j] == 1) {
                idx[j] = j;
                alExamples.add(j);
            }

        StringBuilder b = new StringBuilder();
        b.append("runtime: ").append(w).append('\n');
        b.append("Cluster factor: ").append(clusterFactor).append('\n');
        b.append("Number of identified clusters: ").append(nrClusters).append('\n');
        b.append("Number of iterations: ").append(iNrIterations);
        log.debug(b.toString());
    } else {
        progress(100, true);
        log.error("Affinity clustering could not identify any clusters.");
        log.debug("affinity propagation " + w);
        return null;

    }
    if (isConverged == false) {
        progress(100, true);
        log.error("Affinity propagation did not converge!");
        log.debug("affinity propagation " + w);
        return null;
    }

    log.debug("affinity propagation " + w);
    return postProcess(idx, alExamples);
}

From source file: gov.nih.nci.firebird.selenium2.scalability.tests.TimedAction.java

public T time() {
    Stopwatch stopwatch = new Stopwatch();
    stopwatch.start();
    T result;
    try {
        result = perform();
    } catch (Exception e) {
        throw new RuntimeException("Unexpected Exception in execution of " + actionName, e);
    }
    stopwatch.stop();
    long elapsedMillis = stopwatch.elapsed(TimeUnit.MILLISECONDS);
    System.out.println(actionName + " \t" + elapsedMillis);
    String timeoutMessage = "Exeuction time of " + elapsedMillis + " milliseconds exceeded timeout of "
            + timeoutSeconds + " for " + actionName;
    assertTrue(timeoutMessage, elapsedMillis <= timeoutSeconds * DateUtils.MILLIS_PER_SECOND);
    return result;
}

From source file: org.apache.drill.exec.store.mongo.MongoRecordReader.java

@Override
public int next() {
    if (cursor == null) {
        logger.info("Filters Applied : " + filters);
        logger.info("Fields Selected :" + fields);
        cursor = collection.find(filters).projection(fields).batchSize(100).iterator();
    }

    writer.allocate();
    writer.reset();

    int docCount = 0;
    Stopwatch watch = new Stopwatch();
    watch.start();

    try {
        while (docCount < BaseValueVector.INITIAL_VALUE_ALLOCATION && cursor.hasNext()) {
            writer.setPosition(docCount);
            String doc = cursor.next().toJson();
            jsonReader.setSource(doc.getBytes(Charsets.UTF_8));
            jsonReader.write(writer);
            docCount++;
        }

        jsonReader.ensureAtLeastOneField(writer);

        writer.setValueCount(docCount);
        logger.debug("Took {} ms to get {} records", watch.elapsed(TimeUnit.MILLISECONDS), docCount);
        return docCount;
    } catch (IOException e) {
        String msg = "Failure while reading document. - Parser was at record: " + (docCount + 1);
        logger.error(msg, e);
        throw new DrillRuntimeException(msg, e);
    }
}

From source file: com.twitter.hraven.datasource.HdfsStatsService.java

/**
 * Scans the HBase table and populates the HDFS stats.
 * @param cluster
 * @param scan
 * @param maxCount
 * @return
 * @throws IOException
 */
private List<HdfsStats> createFromScanResults(String cluster, String path, Scan scan, int maxCount,
        boolean checkPath, long starttime, long endtime) throws IOException {
    Map<HdfsStatsKey, HdfsStats> hdfsStats = new HashMap<HdfsStatsKey, HdfsStats>();
    ResultScanner scanner = null;
    Stopwatch timer = new Stopwatch().start();
    int rowCount = 0;
    long colCount = 0;
    long resultSize = 0;

    try {
        scanner = hdfsUsageTable.getScanner(scan);
        for (Result result : scanner) {
            if (result != null && !result.isEmpty()) {
                colCount += result.size();
                resultSize += result.getWritableSize();
                rowCount = populateHdfsStats(result, hdfsStats, checkPath, path, starttime, endtime, rowCount);
                // return if we've already hit the limit
                if (rowCount >= maxCount) {
                    break;
                }
            }
        }
    } finally {
        timer.stop();
        LOG.info("In createFromScanResults For cluster " + cluster + " Fetched from hbase " + rowCount
                + " rows, " + colCount + " columns, " + resultSize + " bytes ( " + resultSize / (1024 * 1024)
                + ") MB, in total time of " + timer);
        if (scanner != null) {
            scanner.close();
        }
    }

    List<HdfsStats> values = new ArrayList<HdfsStats>(hdfsStats.values());
    // sort so that timestamps are arranged in descending order
    Collections.sort(values);
    return values;
}

From source file: cosmos.impl.CosmosImpl.java

@Override
public void addResult(Store id, Record<?> queryResult) throws Exception {
    checkNotNull(queryResult);

    Stopwatch sw = new Stopwatch().start();
    try {
        addResults(id, Single.<Record<?>>create(queryResult));
    } finally {
        sw.stop();
        id.tracer().addTiming("Cosmos:addResult", sw.elapsed(TimeUnit.MILLISECONDS));
    }
}

From source file: co.cask.tigon.data.util.hbase.HBaseTableUtil.java

/**
 * Creates an HBase table if it does not exist. Deals with race conditions when two clients concurrently attempt to
 * create the table.
 * @param admin the hbase admin
 * @param tableName the name of the table
 * @param tableDescriptor hbase table descriptor for the new table
 * @param timeout Maximum time to wait for table creation.
 * @param timeoutUnit The TimeUnit for timeout.
 */
public void createTableIfNotExists(HBaseAdmin admin, byte[] tableName, HTableDescriptor tableDescriptor,
        byte[][] splitKeys, long timeout, TimeUnit timeoutUnit) throws IOException {
    if (admin.tableExists(tableName)) {
        return;
    }
    setDefaultConfiguration(tableDescriptor, admin.getConfiguration());

    String tableNameString = Bytes.toString(tableName);

    try {
        LOG.info("Creating table '{}'", tableNameString);
        // HBaseAdmin.createTable can handle null splitKeys.
        admin.createTable(tableDescriptor, splitKeys);
        LOG.info("Table created '{}'", tableNameString);
        return;
    } catch (TableExistsException e) {
        // table may exist because someone else is creating it at the same
        // time. But it may not be available yet, and opening it might fail.
        LOG.info("Failed to create table '{}'. {}.", tableNameString, e.getMessage(), e);
    }

    // Wait for table to materialize
    try {
        Stopwatch stopwatch = new Stopwatch();
        stopwatch.start();
        long sleepTime = timeoutUnit.toNanos(timeout) / 10;
        sleepTime = sleepTime <= 0 ? 1 : sleepTime;
        do {
            if (admin.tableExists(tableName)) {
                LOG.info("Table '{}' exists now. Assuming that another process concurrently created it.",
                        tableNameString);
                return;
            } else {
                TimeUnit.NANOSECONDS.sleep(sleepTime);
            }
        } while (stopwatch.elapsedTime(timeoutUnit) < timeout);
    } catch (InterruptedException e) {
        LOG.warn("Sleeping thread interrupted.");
    }
    LOG.error("Table '{}' does not exist after waiting {} ms. Giving up.", tableName, MAX_CREATE_TABLE_WAIT);
}

From source file: org.apache.drill.exec.store.hbase.HBaseRecordReader.java

@Override
public int next() {
    Stopwatch watch = new Stopwatch();
    watch.start();
    if (rowKeyVector != null) {
        rowKeyVector.clear();
        rowKeyVector.allocateNew();
    }
    for (ValueVector v : familyVectorMap.values()) {
        v.clear();
        v.allocateNew();
    }

    int rowCount = 0;
    done: for (; rowCount < TARGET_RECORD_COUNT; rowCount++) {
        Result result = null;
        try {
            if (operatorContext != null) {
                operatorContext.getStats().startWait();
            }
            try {
                result = resultScanner.next();
            } finally {
                if (operatorContext != null) {
                    operatorContext.getStats().stopWait();
                }
            }
        } catch (IOException e) {
            throw new DrillRuntimeException(e);
        }
        if (result == null) {
            break done;
        }

        // parse the result and populate the value vectors
        Cell[] cells = result.rawCells();
        if (rowKeyVector != null) {
            rowKeyVector.getMutator().setSafe(rowCount, cells[0].getRowArray(), cells[0].getRowOffset(),
                    cells[0].getRowLength());
        }
        if (!rowKeyOnly) {
            for (Cell cell : cells) {
                int familyOffset = cell.getFamilyOffset();
                int familyLength = cell.getFamilyLength();
                byte[] familyArray = cell.getFamilyArray();
                MapVector mv = getOrCreateFamilyVector(new String(familyArray, familyOffset, familyLength),
                        true);

                int qualifierOffset = cell.getQualifierOffset();
                int qualifierLength = cell.getQualifierLength();
                byte[] qualifierArray = cell.getQualifierArray();
                NullableVarBinaryVector v = getOrCreateColumnVector(mv,
                        new String(qualifierArray, qualifierOffset, qualifierLength));

                int valueOffset = cell.getValueOffset();
                int valueLength = cell.getValueLength();
                byte[] valueArray = cell.getValueArray();
                v.getMutator().setSafe(rowCount, valueArray, valueOffset, valueLength);
            }
        }
    }

    setOutputRowCount(rowCount);
    logger.debug("Took {} ms to get {} records", watch.elapsed(TimeUnit.MILLISECONDS), rowCount);
    return rowCount;
}

From source file: pl.llp.aircasting.view.presenter.MeasurementPresenter.java

private CopyOnWriteArrayList<Measurement> prepareFullView() {
    if (fullView != null)
        return fullView;

    Stopwatch stopwatch = new Stopwatch().start();

    String sensorName = sensor.getSensorName();
    MeasurementStream stream = sessionManager.getMeasurementStream(sensorName);
    Iterable<Measurement> measurements;
    if (stream == null) {
        measurements = newArrayList();
    } else {
        measurements = stream.getMeasurements();
    }

    ImmutableListMultimap<Long, Measurement> forAveraging = index(measurements,
            new Function<Measurement, Long>() {
                @Override
                public Long apply(Measurement measurement) {
                    return measurement.getSecond() / settingsHelper.getAveragingTime();
                }
            });

    Logger.logGraphPerformance("prepareFullView step 1 took " + stopwatch.elapsed(TimeUnit.MILLISECONDS));

    ArrayList<Long> times = newArrayList(forAveraging.keySet());
    sort(times);

    Logger.logGraphPerformance("prepareFullView step 2 took " + stopwatch.elapsed(TimeUnit.MILLISECONDS));
    List<Measurement> timeboxedMeasurements = newLinkedList();
    for (Long time : times) {
        ImmutableList<Measurement> chunk = forAveraging.get(time);
        timeboxedMeasurements.add(average(chunk));
    }

    Logger.logGraphPerformance("prepareFullView step 3 took " + stopwatch.elapsed(TimeUnit.MILLISECONDS));
    CopyOnWriteArrayList<Measurement> result = Lists.newCopyOnWriteArrayList(timeboxedMeasurements);

    Logger.logGraphPerformance("prepareFullView step n took " + stopwatch.elapsed(TimeUnit.MILLISECONDS));
    fullView = result;
    return result;
}

From source file: com.flipkart.foxtrot.core.querystore.impl.ElasticsearchQueryStore.java

@Override
public void save(String table, List<Document> documents) throws QueryStoreException {
    table = ElasticsearchUtils.getValidTableName(table);
    try {
        if (!tableMetadataManager.exists(table)) {
            throw new QueryStoreException(QueryStoreException.ErrorCode.NO_SUCH_TABLE,
                    "No table exists with the name: " + table);
        }
        if (documents == null || documents.size() == 0) {
            throw new QueryStoreException(QueryStoreException.ErrorCode.INVALID_REQUEST,
                    "Invalid Document List");
        }
        dataStore.save(tableMetadataManager.get(table), documents);
        BulkRequestBuilder bulkRequestBuilder = connection.getClient().prepareBulk();

        DateTime dateTime = new DateTime().plusDays(1);

        for (Document document : documents) {
            long timestamp = document.getTimestamp();
            if (dateTime.minus(timestamp).getMillis() < 0) {
                continue;
            }
            final String index = ElasticsearchUtils.getCurrentIndex(table, timestamp);
            IndexRequest indexRequest = new IndexRequest().index(index).type(ElasticsearchUtils.TYPE_NAME)
                    .id(document.getId()).timestamp(Long.toString(timestamp))
                    .source(mapper.writeValueAsBytes(document.getData()));
            bulkRequestBuilder.add(indexRequest);
        }
        if (bulkRequestBuilder.numberOfActions() > 0) {
            Stopwatch stopwatch = new Stopwatch();
            stopwatch.start();
            BulkResponse responses = bulkRequestBuilder.setConsistencyLevel(WriteConsistencyLevel.QUORUM)
                    .execute().get(10, TimeUnit.SECONDS);
            logger.info(String.format("ES took : %d table : %s", stopwatch.elapsedMillis(), table));
            int failedCount = 0;
            for (int i = 0; i < responses.getItems().length; i++) {
                BulkItemResponse itemResponse = responses.getItems()[i];
                failedCount += (itemResponse.isFailed() ? 1 : 0);
                if (itemResponse.isFailed()) {
                    logger.error(String.format("Table : %s Failure Message : %s Document : %s", table,
                            itemResponse.getFailureMessage(), mapper.writeValueAsString(documents.get(i))));
                }
            }
            if (failedCount > 0) {
                logger.error(String.format("Table : %s Failed Documents : %d", table, failedCount));
            }
        }
    } catch (QueryStoreException ex) {
        throw ex;
    } catch (DataStoreException ex) {
        DataStoreException.ErrorCode code = ex.getErrorCode();
        if (code.equals(DataStoreException.ErrorCode.STORE_INVALID_REQUEST)
                || code.equals(DataStoreException.ErrorCode.STORE_INVALID_DOCUMENT)) {
            throw new QueryStoreException(QueryStoreException.ErrorCode.INVALID_REQUEST, ex.getMessage(), ex);
        } else {
            throw new QueryStoreException(QueryStoreException.ErrorCode.DOCUMENT_SAVE_ERROR, ex.getMessage(),
                    ex);
        }
    } catch (JsonProcessingException ex) {
        throw new QueryStoreException(QueryStoreException.ErrorCode.INVALID_REQUEST, ex.getMessage(), ex);
    } catch (Exception ex) {
        throw new QueryStoreException(QueryStoreException.ErrorCode.DOCUMENT_SAVE_ERROR, ex.getMessage(), ex);
    }
}

From source file: com.google.walkaround.slob.server.SlobStoreImpl.java

@Override
public HistoryResult loadHistory(ObjectId slobId, long startVersion, @Nullable Long endVersion)
        throws SlobNotFoundException, IOException, AccessDeniedException {
    accessChecker.checkCanRead(slobId);
    IdentifiableValue<Long> cachedVersion = cache.currentVersions.getIdentifiable(slobId);
    log.info(
            "loadHistory(" + slobId + ", " + startVersion + " - " + endVersion + "); cached: " + cachedVersion);
    if (cachedVersion != null && cachedVersion.getValue() != null && startVersion >= cachedVersion.getValue()
            && endVersion == null) {
        return new HistoryResult(ImmutableList.<Delta<String>>of(), false);
    }
    final int MAX_MILLIS = 3 * 1000;
    try {
        CheckedTransaction tx = datastore.beginTransaction();
        try {
            // TODO(ohler): put current version into cache
            DeltaIterator result = mutationLogFactory.create(tx, slobId).forwardHistory(startVersion,
                    endVersion);
            if (!result.hasNext()) {
                return new HistoryResult(ImmutableList.<Delta<String>>of(), false);
            }
            ImmutableList.Builder<Delta<String>> list = ImmutableList.builder();
            Stopwatch stopwatch = new Stopwatch().start();
            do {
                list.add(result.next());
            } while (result.hasNext() && stopwatch.elapsedMillis() < MAX_MILLIS);
            return new HistoryResult(list.build(), result.hasNext());
        } finally {
            tx.rollback();
        }
    } catch (PermanentFailure e) {
        throw new IOException(e);
    } catch (RetryableFailure e) {
        // TODO(danilatos): Retry?
        throw new IOException(e);
    }
}