Example usage for the com.google.common.base.Stopwatch constructor Stopwatch()

Introduction

On this page you can find example usages of the Stopwatch() constructor from com.google.common.base.Stopwatch, collected from open-source projects. The no-argument constructor creates a new, unstarted stopwatch that measures elapsed time using System.nanoTime() as its time source.

Prototype

Stopwatch() 
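
A minimal, self-contained sketch of the constructor in use (the class name and the simulated work are illustrative only). Note that Guava 15.0 deprecated this public constructor in favor of the factory methods Stopwatch.createUnstarted() and Stopwatch.createStarted(), and later releases removed it entirely; the project examples below use the older constructor-based API.

import com.google.common.base.Stopwatch;

import java.util.concurrent.TimeUnit;

public class StopwatchExample {
    public static void main(String[] args) throws InterruptedException {
        // Creates an unstarted stopwatch backed by System.nanoTime().
        Stopwatch watch = new Stopwatch();
        watch.start();
        Thread.sleep(100); // simulated work
        watch.stop();
        System.out.println("Elapsed: " + watch.elapsed(TimeUnit.MILLISECONDS) + " ms");
    }
}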

Usage

From source file:org.apache.tez.runtime.library.shuffle.common.HttpConnection.java

/**
 * HttpConnection
 *
 * @param url the URL to connect to
 * @param connParams the HTTP connection parameters to apply
 * @param logIdentifier an identifier used in log messages
 * @param jobTokenSecret the secret key for the job token
 * @throws IOException
 */
public HttpConnection(URL url, HttpConnectionParams connParams, String logIdentifier, SecretKey jobTokenSecret)
        throws IOException {
    this.logIdentifier = logIdentifier;
    this.jobTokenSecret = jobTokenSecret;
    this.httpConnParams = connParams;
    this.url = url;
    this.stopWatch = new Stopwatch();
    if (LOG.isDebugEnabled()) {
        LOG.debug("MapOutput URL :" + url.toString());
    }
}

From source file:org.apache.drill.exec.physical.impl.xsort.BatchGroup.java

public void addBatch(VectorContainer newContainer) throws IOException {
    assert fs != null;
    assert path != null;
    if (outputStream == null) {
        outputStream = fs.create(path);
    }
    int recordCount = newContainer.getRecordCount();
    WritableBatch batch = WritableBatch.getBatchNoHVWrap(recordCount, newContainer, false);
    VectorAccessibleSerializable outputBatch = new VectorAccessibleSerializable(batch, allocator);
    Stopwatch watch = new Stopwatch();
    watch.start();
    outputBatch.writeToStream(outputStream);
    newContainer.zeroVectors();
    logger.debug("Took {} us to spill {} records", watch.elapsed(TimeUnit.MICROSECONDS), recordCount);
    spilledBatches++;
}

From source file:com.couchbase.roadrunner.workloads.GetSetWorkload.java

private void getWorkloadWithMeasurement(String key) throws Exception {
    Stopwatch watch = new Stopwatch().start();
    getWorkload(key);
    watch.stop();
    addMeasure("get", watch);
}

From source file:com.facebook.presto.split.NativeSplitManager.java

@Override
public List<Partition> getPartitions(TableHandle tableHandle, Map<ColumnHandle, Object> bindings) {
    Stopwatch partitionTimer = new Stopwatch();
    partitionTimer.start();

    checkArgument(tableHandle instanceof NativeTableHandle, "Table must be a native table");

    TableMetadata tableMetadata = metadata.getTableMetadata(tableHandle);

    checkState(tableMetadata != null, "no metadata for %s found", tableHandle);

    Set<TablePartition> tablePartitions = shardManager.getPartitions(tableHandle);

    log.debug("Partition retrieval, native table %s (%d partitions): %dms", tableHandle, tablePartitions.size(),
            partitionTimer.elapsed(TimeUnit.MILLISECONDS));

    Multimap<String, ? extends PartitionKey> allPartitionKeys = shardManager.getAllPartitionKeys(tableHandle);
    Map<String, ColumnHandle> columnHandles = metadata.getColumnHandles(tableHandle);

    log.debug("Partition key retrieval, native table %s (%d keys): %dms", tableHandle, allPartitionKeys.size(),
            partitionTimer.elapsed(TimeUnit.MILLISECONDS));

    List<Partition> partitions = ImmutableList.copyOf(
            Collections2.transform(tablePartitions, new PartitionFunction(columnHandles, allPartitionKeys)));

    log.debug("Partition generation, native table %s (%d partitions): %dms", tableHandle, partitions.size(),
            partitionTimer.elapsed(TimeUnit.MILLISECONDS));

    return partitions;
}
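
Note that the stopwatch above is never reset, so each debug line reports the cumulative time since getPartitions began rather than the cost of the individual phase. If per-phase timings were wanted instead, the stopwatch could be restarted between checkpoints; a minimal sketch, with hypothetical doPhaseOne/doPhaseTwo helpers standing in for the real work:

import com.google.common.base.Stopwatch;

import java.util.concurrent.TimeUnit;

public class PhaseTimer {
    public static void main(String[] args) {
        Stopwatch timer = new Stopwatch().start();
        doPhaseOne();
        long phaseOneMs = timer.elapsed(TimeUnit.MILLISECONDS);

        timer.reset().start(); // discard the first reading and time the next phase from zero
        doPhaseTwo();
        long phaseTwoMs = timer.elapsed(TimeUnit.MILLISECONDS);

        System.out.printf("phase one: %d ms, phase two: %d ms%n", phaseOneMs, phaseTwoMs);
    }

    private static void doPhaseOne() { /* hypothetical work */ }

    private static void doPhaseTwo() { /* hypothetical work */ }
}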

From source file:org.jenkinsci.plugins.vsphere.builders.PowerOn.java

private boolean powerOn(final Run<?, ?> run, Launcher launcher, final TaskListener listener)
        throws VSphereException, IOException, InterruptedException {
    PrintStream jLogger = listener.getLogger();
    EnvVars env;
    String expandedVm = vm;

    env = run.getEnvironment(listener);

    if (run instanceof AbstractBuild) {
        env.overrideAll(((AbstractBuild) run).getBuildVariables()); // Add in matrix axes.
        expandedVm = env.expand(vm);
    }

    Stopwatch stopwatch = new Stopwatch().start();
    vsphere.startVm(expandedVm, timeoutInSeconds);
    long elapsedTime = stopwatch.elapsedTime(TimeUnit.SECONDS);

    int secondsToWaitForIp = (int) (timeoutInSeconds - elapsedTime);

    IP = vsphere.getIp(vsphere.getVmByName(expandedVm), secondsToWaitForIp);

    if (IP == null) {
        VSphereLogger.vsLogger(jLogger, "Error: Timed out after waiting " + secondsToWaitForIp
                + " seconds to get IP for \"" + expandedVm + "\" ");
        return false;
    }

    VSphereLogger.vsLogger(jLogger, "Successfully retrieved IP for \"" + expandedVm + "\" : " + IP);
    stopwatch.stop();

    // useful to tell user about the environment variable
    VSphereLogger.vsLogger(jLogger, "Exposing " + IP + " as environment variable VSPHERE_IP");

    if (run instanceof AbstractBuild) {
        VSphereEnvAction envAction = new VSphereEnvAction();
        envAction.add("VSPHERE_IP", IP);
        run.addAction(envAction);
    } else {
        env.put("VSPHERE_IP", IP);
    }

    return true;
}
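
One detail worth noting above: the time consumed by startVm is subtracted from the overall timeout, so the subsequent wait for an IP address only spends whatever is left of the budget. A reduced sketch of that remaining-budget pattern, with a sleep standing in for the first, variable-length step:

import com.google.common.base.Stopwatch;

import java.util.concurrent.TimeUnit;

public class TimeBudget {
    public static void main(String[] args) throws InterruptedException {
        long timeoutSeconds = 60;
        Stopwatch stopwatch = new Stopwatch().start();
        Thread.sleep(1500); // first step, of variable length
        // Spend only what is left of the budget on the second step.
        long remainingSeconds = timeoutSeconds - stopwatch.elapsed(TimeUnit.SECONDS);
        System.out.println("Seconds left for the next step: " + remainingSeconds);
    }
}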

From source file:mil.nga.giat.geowave.adapter.vector.delete.CQLDelete.java

@Override
public void execute(OperationParams params) throws ParseException {
    if (debug) {
        org.apache.log4j.Logger.getRootLogger().setLevel(org.apache.log4j.Level.DEBUG);
    }

    final Stopwatch stopWatch = new Stopwatch();

    // Ensure we have all the required arguments
    if (parameters.size() != 1) {
        throw new ParameterException("Requires arguments: <storename>");
    }

    String storeName = parameters.get(0);

    // Config file
    File configFile = (File) params.getContext().get(ConfigOptions.PROPERTIES_FILE_CONTEXT);

    // Attempt to load store.
    StoreLoader storeOptions = new StoreLoader(storeName);
    if (!storeOptions.loadFromConfig(configFile)) {
        throw new ParameterException("Cannot find store name: " + storeOptions.getStoreName());
    }

    DataStore dataStore;
    AdapterStore adapterStore;
    try {
        dataStore = storeOptions.createDataStore();
        adapterStore = storeOptions.createAdapterStore();

        final GeotoolsFeatureDataAdapter adapter;
        if (adapterId != null) {
            adapter = (GeotoolsFeatureDataAdapter) adapterStore.getAdapter(adapterId);
        } else {
            final CloseableIterator<DataAdapter<?>> it = adapterStore.getAdapters();
            adapter = (GeotoolsFeatureDataAdapter) it.next();
            it.close();
        }

        if (debug && (adapter != null)) {
            LOGGER.debug(adapter.toString());
        }

        stopWatch.start();
        final long results = delete(adapter, adapterId, indexId, dataStore, debug);
        stopWatch.stop();

        if (debug) {
            LOGGER.debug(results + " results remaining after delete; time = " + stopWatch.toString());
        }
    } catch (IOException e) {
        LOGGER.warn("Unable to read adapter", e);
    }
}

From source file:uk.ac.open.kmi.iserve.sal.manager.impl.CrawlCallable.java

private Model fetchModel(URI modelUri, String syntax) {
    OntModel model = ModelFactory.createOntologyModel(modelSpec);

    Stopwatch stopwatch = new Stopwatch();
    stopwatch.start();
    // Fetch the model
    model.read(modelUri.toASCIIString(), syntax);
    stopwatch.stop();

    log.info("Remote ontology fetched - {} . Time taken: {}", modelUri, stopwatch);

    return model;
}
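
Passing the stopwatch itself to the logger, as above, works because Stopwatch.toString() renders the current elapsed time in a human-readable unit. A minimal sketch, with a sleep standing in for the real work:

import com.google.common.base.Stopwatch;

public class StopwatchToString {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch stopwatch = new Stopwatch().start();
        Thread.sleep(42); // simulated work
        // stop() returns the stopwatch itself, so it can be printed inline;
        // toString() picks a readable unit, e.g. "42.37 ms".
        System.out.println("Time taken: " + stopwatch.stop());
    }
}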

From source file:com.metamx.druid.utils.DruidMasterBalancerProfiler.java

public void bigProfiler() {
    Stopwatch watch = new Stopwatch();
    int numSegments = 55000;
    int numServers = 50;
    EasyMock.expect(manager.getAllRules()).andReturn(ImmutableMap.<String, List<Rule>>of("test", rules))
            .anyTimes();
    EasyMock.expect(manager.getRules(EasyMock.<String>anyObject())).andReturn(rules).anyTimes();
    EasyMock.expect(manager.getRulesWithDefault(EasyMock.<String>anyObject())).andReturn(rules).anyTimes();
    EasyMock.replay(manager);

    master.moveSegment(EasyMock.<String>anyObject(), EasyMock.<String>anyObject(), EasyMock.<String>anyObject(),
            EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().anyTimes();
    EasyMock.replay(master);

    List<DruidServer> serverList = Lists.newArrayList();
    Map<String, LoadQueuePeon> peonMap = Maps.newHashMap();
    List<ServerHolder> serverHolderList = Lists.newArrayList();
    Map<String, DataSegment> segmentMap = Maps.newHashMap();
    for (int i = 0; i < numSegments; i++) {
        segmentMap.put("segment" + i, new DataSegment("datasource" + i,
                new Interval(new DateTime("2012-01-01"), (new DateTime("2012-01-01")).plusHours(1)),
                (new DateTime("2012-03-01")).toString(), Maps.<String, Object>newHashMap(),
                Lists.<String>newArrayList(), Lists.<String>newArrayList(), new NoneShardSpec(), 0, 4L));
    }

    for (int i = 0; i < numServers; i++) {
        DruidServer server = EasyMock.createMock(DruidServer.class);
        EasyMock.expect(server.getMetadata()).andReturn(null).anyTimes();
        EasyMock.expect(server.getCurrSize()).andReturn(30L).atLeastOnce();
        EasyMock.expect(server.getMaxSize()).andReturn(100L).atLeastOnce();
        EasyMock.expect(server.getTier()).andReturn("normal").anyTimes();
        EasyMock.expect(server.getName()).andReturn(Integer.toString(i)).atLeastOnce();
        EasyMock.expect(server.getHost()).andReturn(Integer.toString(i)).anyTimes();
        if (i == 0) {
            EasyMock.expect(server.getSegments()).andReturn(segmentMap).anyTimes();
        } else {
            EasyMock.expect(server.getSegments()).andReturn(new HashMap<String, DataSegment>()).anyTimes();
        }
        EasyMock.expect(server.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
        EasyMock.replay(server);

        LoadQueuePeon peon = new LoadQueuePeonTester();
        peonMap.put(Integer.toString(i), peon);
        serverHolderList.add(new ServerHolder(server, peon));
    }

    DruidMasterRuntimeParams params = DruidMasterRuntimeParams.newBuilder()
            .withDruidCluster(
                    new DruidCluster(ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of("normal",
                            MinMaxPriorityQueue.orderedBy(DruidMasterBalancerTester.percentUsedComparator)
                                    .create(serverHolderList))))
            .withLoadManagementPeons(peonMap).withAvailableSegments(segmentMap.values())
            .withMasterSegmentSettings(
                    new MasterSegmentSettings.Builder().withMaxSegmentsToMove(MAX_SEGMENTS_TO_MOVE).build())
            .withBalancerReferenceTimestamp(new DateTime("2013-01-01")).withEmitter(emitter)
            .withDatabaseRuleManager(manager).withReplicationManager(new ReplicationThrottler(2, 500))
            .withSegmentReplicantLookup(SegmentReplicantLookup
                    .make(new DruidCluster(ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of("normal",
                            MinMaxPriorityQueue.orderedBy(DruidMasterBalancerTester.percentUsedComparator)
                                    .create(serverHolderList)))))
            .build();

    DruidMasterBalancerTester tester = new DruidMasterBalancerTester(master);
    DruidMasterRuleRunner runner = new DruidMasterRuleRunner(master, 500, 5);
    watch.start();
    DruidMasterRuntimeParams balanceParams = tester.run(params);
    DruidMasterRuntimeParams assignParams = runner.run(params);
    System.out.println(watch.stop());
}

From source file:fr.ippon.wip.filter.PerformanceFilter.java

public void doFilter(ActionRequest request, ActionResponse response, FilterChain chain)
        throws IOException, PortletException {
    Stopwatch timeProcess = new Stopwatch().start();
    chain.doFilter(request, response);
    StringBuilder data = new StringBuilder();
    writePerformance(data.append("ACTION\t").append(request.getParameter(WIPortlet.LINK_URL_KEY) + "\t")
            .append(timeProcess.elapsedMillis() + "\t"));
}

From source file:co.cask.cdap.data.stream.MultiLiveStreamFileReader.java

@Override
public int read(Collection<? super StreamEventOffset> events, int maxEvents, long timeout, TimeUnit unit,
        ReadFilter readFilter) throws IOException, InterruptedException {
    int eventsRead = 0;

    Stopwatch stopwatch = new Stopwatch();
    stopwatch.start();

    while (eventsRead < maxEvents && !(emptySources.isEmpty() && eventSources.isEmpty())) {
        if (!emptySources.isEmpty()) {
            prepareEmptySources(readFilter);
        }
        eventsRead += read(events, readFilter);

        if (eventSources.isEmpty() && stopwatch.elapsedTime(unit) >= timeout) {
            break;
        }
    }

    return (eventsRead == 0 && emptySources.isEmpty() && eventSources.isEmpty()) ? -1 : eventsRead;
}
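
The loop above shows a common deadline pattern: keep polling for events until either enough have been read or the stopwatch reports that the timeout has elapsed. A stripped-down sketch of the same pattern, with a hypothetical fetchSomething() standing in for the per-iteration work:

import com.google.common.base.Stopwatch;

import java.util.concurrent.TimeUnit;

public class DeadlineLoop {
    public static int readWithTimeout(int maxItems, long timeout, TimeUnit unit) {
        Stopwatch stopwatch = new Stopwatch().start();
        int itemsRead = 0;
        while (itemsRead < maxItems) {
            itemsRead += fetchSomething();
            // Give up once the allotted time has been used.
            if (stopwatch.elapsed(unit) >= timeout) {
                break;
            }
        }
        return itemsRead;
    }

    // Hypothetical work unit: returns the number of items obtained this pass.
    private static int fetchSomething() {
        return 1;
    }
}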