Example usage for com.google.common.base Stopwatch start

Introduction

This page shows example usages of com.google.common.base.Stopwatch.start().

Prototype

public Stopwatch start() 

Document

Starts the stopwatch and returns this Stopwatch instance. Throws IllegalStateException if the stopwatch is already running.
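
Before the real-world examples, here is a minimal, self-contained sketch of the typical call pattern (assuming Guava 15.0 or later, where the static factories Stopwatch.createUnstarted() and Stopwatch.createStarted() replace the public constructor used by several of the older snippets below):

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchStartExample {
    public static void main(String[] args) throws InterruptedException {
        // createUnstarted() returns a stopped stopwatch; start() begins timing and
        // returns the same instance, so the two calls can be chained.
        Stopwatch stopwatch = Stopwatch.createUnstarted().start();

        Thread.sleep(100); // stand-in for the work being timed

        // elapsed(TimeUnit) reads the running stopwatch without stopping it.
        System.out.println("elapsed: " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");

        // Calling start() again while the stopwatch is running throws IllegalStateException;
        // call stop() first, or reset() to clear the accumulated time.
        stopwatch.stop();
    }
}

Note that many of the snippets below were written against older Guava releases and call new Stopwatch(), elapsedTime(TimeUnit), or elapsedMillis(); in newer Guava these correspond to createUnstarted()/createStarted() and elapsed(TimeUnit).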

Usage

From source file:com.metamx.druid.indexing.coordinator.RemoteTaskRunner.java

/**
 * Creates a ZK entry under a specific path associated with a worker. The worker is responsible for
 * removing the task ZK entry and creating a task status ZK entry.
 *
 * @param theWorker          The worker the task is assigned to
 * @param taskRunnerWorkItem The task to be assigned
 */
private void announceTask(Worker theWorker, RemoteTaskRunnerWorkItem taskRunnerWorkItem) throws Exception {
    final Task task = taskRunnerWorkItem.getTask();

    log.info("Coordinator asking Worker[%s] to add task[%s]", theWorker.getHost(), task.getId());

    byte[] rawBytes = jsonMapper.writeValueAsBytes(task);
    if (rawBytes.length > config.getMaxNumBytes()) {
        throw new ISE("Length of raw bytes for task too large[%,d > %,d]", rawBytes.length,
                config.getMaxNumBytes());
    }

    String taskPath = JOINER.join(config.getIndexerTaskPath(), theWorker.getHost(), task.getId());

    if (cf.checkExists().forPath(taskPath) == null) {
        cf.create().withMode(CreateMode.EPHEMERAL).forPath(taskPath, rawBytes);
    }

    RemoteTaskRunnerWorkItem workItem = pendingTasks.remove(task.getId());
    if (workItem == null) {
        log.makeAlert("WTF?! Got a null work item from pending tasks?! How can this be?!")
                .addData("taskId", task.getId()).emit();
        return;
    }

    RemoteTaskRunnerWorkItem newWorkItem = workItem.withWorker(theWorker);
    runningTasks.put(task.getId(), newWorkItem);
    log.info("Task %s switched from pending to running (on [%s])", task.getId(),
            newWorkItem.getWorker().getHost());

    // Syncing state with Zookeeper - don't assign new tasks until the task we just assigned is actually running
    // on a worker - this avoids overflowing a worker with tasks
    Stopwatch timeoutStopwatch = new Stopwatch();
    timeoutStopwatch.start();
    synchronized (statusLock) {
        while (!isWorkerRunningTask(theWorker, task)) {
            statusLock.wait(config.getTaskAssignmentTimeoutDuration().getMillis());
            if (timeoutStopwatch.elapsed(TimeUnit.MILLISECONDS) >= config.getTaskAssignmentTimeoutDuration()
                    .getMillis()) {
                log.error("Something went wrong! %s never ran task %s after %s!", theWorker.getHost(),
                        task.getId(), config.getTaskAssignmentTimeoutDuration());

                taskRunnerWorkItem.setResult(TaskStatus.failure(taskRunnerWorkItem.getTask().getId()));
                break;
            }
        }
    }
}

From source file:co.cask.cdap.data2.util.hbase.HBaseTableUtil.java

/**
 * Create a hbase table if it does not exist. Deals with race conditions when two clients concurrently attempt to
 * create the table.
 * @param admin the hbase admin
 * @param tableId {@link TableId} representing the table
 * @param tableDescriptor hbase table descriptor for the new table
 * @param timeout Maximum time to wait for table creation.
 * @param timeoutUnit The TimeUnit for timeout.
 */
public void createTableIfNotExists(HBaseAdmin admin, TableId tableId, HTableDescriptor tableDescriptor,
        @Nullable byte[][] splitKeys, long timeout, TimeUnit timeoutUnit) throws IOException {
    if (tableExists(admin, tableId)) {
        return;
    }
    setDefaultConfiguration(tableDescriptor, admin.getConfiguration());

    try {
        LOG.info("Creating table '{}'", tableId);
        // HBaseAdmin.createTable can handle null splitKeys.
        admin.createTable(tableDescriptor, splitKeys);
        LOG.info("Table created '{}'", tableId);
        return;
    } catch (TableExistsException e) {
        // table may exist because someone else is creating it at the same
        // time. But it may not be available yet, and opening it might fail.
        LOG.info("Failed to create table '{}'. {}.", tableId, e.getMessage(), e);
    }

    // Wait for table to materialize
    try {
        Stopwatch stopwatch = new Stopwatch();
        stopwatch.start();
        long sleepTime = timeoutUnit.toNanos(timeout) / 10;
        sleepTime = sleepTime <= 0 ? 1 : sleepTime;
        do {
            if (tableExists(admin, tableId)) {
                LOG.info("Table '{}' exists now. Assuming that another process concurrently created it.",
                        tableId);
                return;
            } else {
                TimeUnit.NANOSECONDS.sleep(sleepTime);
            }
        } while (stopwatch.elapsedTime(timeoutUnit) < timeout);
    } catch (InterruptedException e) {
        LOG.warn("Sleeping thread interrupted.");
    }
    LOG.error("Table '{}' does not exist after waiting {} ms. Giving up.", tableId, MAX_CREATE_TABLE_WAIT);
}

From source file:co.cask.cdap.data2.transaction.stream.AbstractStreamFileConsumer.java

@Override
public final DequeueResult<StreamEvent> poll(int maxEvents, long timeout, TimeUnit timeoutUnit)
        throws IOException, InterruptedException {

    // Only need the CLAIMED state for FIFO with group size > 1.
    byte[] fifoStateContent = null;
    if (consumerConfig.getDequeueStrategy() == DequeueStrategy.FIFO && consumerConfig.getGroupSize() > 1) {
        fifoStateContent = encodeStateColumn(ConsumerEntryState.CLAIMED);
    }

    // Try to read from cache if any
    if (!eventCache.isEmpty()) {
        getEvents(eventCache, polledEvents, maxEvents, fifoStateContent);
    }

    if (polledEvents.size() == maxEvents) {
        return new SimpleDequeueResult(polledEvents);
    }

    // Estimate the number of events to read by multiplying maxEvents by the group size. It doesn't have to be
    // exact, just a rough estimate for better read throughput.
    // This maxRead is also used throughout the read loop below, so some extra events might be read and cached
    // for the next poll call.
    int maxRead = maxEvents * consumerConfig.getGroupSize();

    long timeoutNano = timeoutUnit.toNanos(timeout);
    Stopwatch stopwatch = new Stopwatch();
    stopwatch.start();

    // Save the reader position.
    // It's a conservative approach to save the reader position before reading so that no
    // event will be missed upon restart.
    consumerState.setState(reader.getPosition());

    // Read from the underlying file reader
    while (polledEvents.size() < maxEvents) {
        int readCount = reader.read(eventCache, maxRead, timeoutNano, TimeUnit.NANOSECONDS, readFilter);
        long elapsedNano = stopwatch.elapsedTime(TimeUnit.NANOSECONDS);
        timeoutNano -= elapsedNano;

        if (readCount > 0) {
            int eventsClaimed = getEvents(eventCache, polledEvents, maxEvents - polledEvents.size(),
                    fifoStateContent);

            // TODO: This is a quick fix to prevent the backoff logic in the flowlet driver from kicking in too
            // early. It doesn't entirely prevent backoff; a proper fix would add a special state to the dequeue
            // result to let the flowlet driver know it shouldn't back off.

            // If able to read some events but nothing is claimed, don't check for normal timeout.
            // Only do short transaction timeout checks.
            if (eventsClaimed == 0 && polledEvents.isEmpty()) {
                if (elapsedNano < (txTimeoutNano / 2)) {
                    // If still less than half of the tx timeout, continue polling without checking the normal timeout.
                    continue;
                }
            }
        }

        if (timeoutNano <= 0) {
            break;
        }
    }

    if (polledEvents.isEmpty()) {
        return EMPTY_RESULT;
    } else {
        return new SimpleDequeueResult(polledEvents);
    }
}

From source file:yaphyre.raytracer.RayTracer.java

private long executeRenderingTasks(final Stopwatch overallTime, final int numberOfCores,
        final List<RenderCallable> slices) {

    ExecutorService renderingExecutor = Executors.newFixedThreadPool(numberOfCores);

    long cpuTime = 0L;
    try {
        LOGGER.info("Start rendering");

        overallTime.start();
        List<Future<Long>> renderResults = renderingExecutor.invokeAll(slices);
        boolean allDone;
        do {
            Thread.sleep(THREAD_POLL_TIMEOUT);
            allDone = true;
            for (Future<Long> result : renderResults) {
                allDone &= result.isDone();
            }
        } while (!allDone);
        overallTime.stop();

        for (Future<Long> result : renderResults) {
            cpuTime += result.get();
        }

        renderingExecutor.shutdown();

    } catch (Exception e) {
        LOGGER.error("Error while rendering", e);
    } finally {
        try {
            renderingExecutor.shutdownNow();
        } catch (Exception e) {
            LOGGER.error("Could not shutdown the rendering engines!", e);
        }
    }
    return cpuTime;
}

From source file:uk.ac.open.kmi.iserve.sal.manager.impl.ConcurrentSparqlGraphStoreManager.java

@Override
public Set<URI> listResourcesByQuery(String queryStr, String variableName) {

    ImmutableSet.Builder<URI> result = ImmutableSet.builder();
    // If the SPARQL endpoint does not exist return immediately.
    if (this.getSparqlQueryEndpoint() == null || queryStr == null || queryStr.isEmpty()) {
        return result.build();
    }

    // Query the engine
    log.debug("Evaluating SPARQL query in Knowledge Base: \n {}", queryStr);
    Query query = QueryFactory.create(queryStr);
    QueryExecution qe = QueryExecutionFactory.sparqlService(this.getSparqlQueryEndpoint().toASCIIString(),
            query);
    MonitoredQueryExecution qexec = new MonitoredQueryExecution(qe);

    try {
        Stopwatch stopwatch = new Stopwatch();
        stopwatch.start();

        ResultSet qResults = qexec.execSelect();

        stopwatch.stop();
        log.debug("Time taken for querying the registry: {}", stopwatch);

        Resource resource;
        URI matchUri;
        // Iterate over the results obtained
        while (qResults.hasNext()) {
            QuerySolution soln = qResults.nextSolution();

            // Get the match URL
            resource = soln.getResource(variableName);

            if (resource != null && resource.isURIResource()) {
                matchUri = new URI(resource.getURI());
                result.add(matchUri);
            } else {
                log.warn("Skipping result as the URL is null");
                break;
            }
        }
    } catch (URISyntaxException e) {
        log.error("Error obtaining match result. Expected a correct URI", e);
    } finally {
        qexec.close();
    }
    return result.build();
}

From source file:uk.ac.open.kmi.iserve.sal.manager.impl.ConcurrentSparqlGraphStoreManager.java

@Override
public Multimap<URI, URI> listResourcesMapByQuery(String queryStr, String variableNameA, String variableNameB) {
    Multimap<URI, URI> result = HashMultimap.create();
    // If the SPARQL endpoint does not exist return immediately.
    if (this.getSparqlQueryEndpoint() == null || queryStr == null || queryStr.isEmpty()) {
        return result;
    }

    // Query the engine
    log.debug("Evaluating SPARQL query in Knowledge Base: \n {}", queryStr);
    Query query = QueryFactory.create(queryStr);
    QueryExecution qe = QueryExecutionFactory.sparqlService(this.getSparqlQueryEndpoint().toASCIIString(),
            query);
    MonitoredQueryExecution qexec = new MonitoredQueryExecution(qe);

    try {
        Stopwatch stopwatch = new Stopwatch();
        stopwatch.start();

        ResultSet qResults = qexec.execSelect();

        stopwatch.stop();
        log.debug("Time taken for querying the registry: {}", stopwatch);

        Resource resourceA;
        Resource resourceB;
        // Iterate over the results obtained
        while (qResults.hasNext()) {
            QuerySolution soln = qResults.nextSolution();

            // Get the match URL
            resourceA = soln.getResource(variableNameA);
            resourceB = soln.getResource(variableNameB);

            if (resourceA != null && resourceA.isURIResource() && resourceB != null
                    && resourceB.isURIResource()) {
                result.put(new URI(resourceA.getURI()), new URI(resourceB.getURI()));
            } else {
                log.warn("Skipping result as the URL is null");
                break;
            }
        }
    } catch (URISyntaxException e) {
        log.error("Error obtaining match result. Expected a correct URI", e);
    } finally {
        qexec.close();
    }
    return result;
}

From source file:org.apache.accumulo.gc.replication.CloseWriteAheadLogReferences.java

@Override
public void run() {
    // As long as we depend on a newer Guava than Hadoop uses, we have to stay compatible with
    // the version that Hadoop bundles.
    Stopwatch sw = new Stopwatch();

    Connector conn;
    try {
        conn = context.getConnector();
    } catch (Exception e) {
        log.error("Could not create connector", e);
        throw new RuntimeException(e);
    }

    if (!ReplicationTable.isOnline(conn)) {
        log.debug("Replication table isn't online, not attempting to clean up wals");
        return;
    }

    Span findWalsSpan = Trace.start("findReferencedWals");
    HashSet<String> closed = null;
    try {
        sw.start();
        closed = getClosedLogs(conn);
    } finally {
        sw.stop();
        findWalsSpan.stop();
    }

    log.info("Found " + closed.size() + " WALs referenced in metadata in " + sw.toString());
    sw.reset();

    Span updateReplicationSpan = Trace.start("updateReplicationTable");
    long recordsClosed = 0;
    try {
        sw.start();
        recordsClosed = updateReplicationEntries(conn, closed);
    } finally {
        sw.stop();
        updateReplicationSpan.stop();
    }

    log.info(
            "Closed " + recordsClosed + " WAL replication references in replication table in " + sw.toString());
}

From source file:org.terasology.cities.debug.SwingRasterizer.java

public void rasterizeChunk(Graphics2D g, Vector2i coord) {

    int chunkSizeX = ChunkConstants.SIZE_X;
    int chunkSizeZ = ChunkConstants.SIZE_Z;

    int wx = coord.getX() * chunkSizeX;
    int wz = coord.getY() * chunkSizeZ;

    Sector sector = Sectors.getSectorForBlock(wx, wz);

    if (g.hitClip(wx, wz, chunkSizeX, chunkSizeZ)) {

        Stopwatch swBK = debugMap.getUnchecked("RASTER Background");
        Stopwatch swCt = debugMap.getUnchecked("RASTER Cities");
        Stopwatch swRd = debugMap.getUnchecked("RASTER Roads");

        BufferedImage image = new BufferedImage(chunkSizeX, chunkSizeZ, BufferedImage.TYPE_INT_RGB);
        Brush brush = new SwingBrush(wx, wz, image, colorFunc);

        HeightMap cachedHm = HeightMaps.caching(heightMap, brush.getAffectedArea(), 8);
        TerrainInfo ti = new TerrainInfo(cachedHm);

        swBK.start();
        drawBackground(image, wx, wz, ti);
        swBK.stop();

        swCt.start();
        drawCities(sector, ti, brush);
        swCt.stop();

        swRd.start();
        drawRoads(sector, ti, brush);
        swRd.stop();

        int ix = wx;
        int iy = wz;
        g.drawImage(image, ix, iy, null);

    }

}

From source file:org.apache.eagle.alert.coordinator.Coordinator.java

public synchronized ScheduleState schedule(ScheduleOption option) throws TimeoutException {
    ExclusiveExecutor executor = new ExclusiveExecutor(zkConfig);
    AtomicReference<ScheduleState> reference = new AtomicReference<>();
    try {
        executor.execute(GREEDY_SCHEDULER_ZK_PATH, () -> {
            ScheduleState state = null;
            Stopwatch watch = Stopwatch.createStarted();
            IScheduleContext context = new ScheduleContextBuilder(config, client).buildContext();
            TopologyMgmtService mgmtService = new TopologyMgmtService();
            IPolicyScheduler scheduler = PolicySchedulerFactory.createScheduler();

            scheduler.init(context, mgmtService);
            state = scheduler.schedule(option);

            long scheduleTime = watch.elapsed(TimeUnit.MILLISECONDS);
            state.setScheduleTimeMillis((int) scheduleTime);// hardcode to integer
            watch.reset();
            watch.start();

            // persist & notify
            try (ConfigBusProducer producer = new ConfigBusProducer(ZKConfigBuilder.getZKConfig(config))) {
                postSchedule(client, state, producer);
            }

            watch.stop();
            long postTime = watch.elapsed(TimeUnit.MILLISECONDS);
            LOG.info("Schedule result, schedule time {} ms, post schedule time {} ms !", scheduleTime,
                    postTime);
            reference.set(state);
            currentState = state;
        });
    } catch (TimeoutException e1) {
        LOG.error("time out when schedule", e1);
        throw e1;
    } finally {
        try {
            executor.close();
        } catch (IOException e) {
            LOG.error("Exception when close exclusive executor, log and ignore!", e);
        }
    }
    return reference.get();
}

From source file:org.apache.hadoop.hbase.ScanPerformanceEvaluation.java

public void testScanMapReduce() throws IOException, InterruptedException, ClassNotFoundException {
    Stopwatch scanOpenTimer = new Stopwatch();
    Stopwatch scanTimer = new Stopwatch();

    Scan scan = getScan();

    String jobName = "testScanMapReduce";

    Job job = new Job(conf);
    job.setJobName(jobName);

    job.setJarByClass(getClass());

    TableMapReduceUtil.initTableMapperJob(this.tablename, scan, MyMapper.class, NullWritable.class,
            NullWritable.class, job);

    job.setNumReduceTasks(0);
    job.setOutputKeyClass(NullWritable.class);
    job.setOutputValueClass(NullWritable.class);
    job.setOutputFormatClass(NullOutputFormat.class);

    scanTimer.start();
    job.waitForCompletion(true);
    scanTimer.stop();

    Counters counters = job.getCounters();
    long numRows = counters.findCounter(ScanCounter.NUM_ROWS).getValue();
    long numCells = counters.findCounter(ScanCounter.NUM_CELLS).getValue();

    long totalBytes = counters.findCounter(HBASE_COUNTER_GROUP_NAME, "BYTES_IN_RESULTS").getValue();
    double throughput = (double) totalBytes / scanTimer.elapsedTime(TimeUnit.SECONDS);
    double throughputRows = (double) numRows / scanTimer.elapsedTime(TimeUnit.SECONDS);
    double throughputCells = (double) numCells / scanTimer.elapsedTime(TimeUnit.SECONDS);

    System.out.println("HBase scan mapreduce: ");
    System.out.println("total time to open scanner: " + scanOpenTimer.elapsedMillis() + " ms");
    System.out.println("total time to scan: " + scanTimer.elapsedMillis() + " ms");

    System.out.println(
            "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")");
    System.out.println("throughput  : " + StringUtils.humanReadableInt((long) throughput) + "B/s");
    System.out.println("total rows  : " + numRows);
    System.out.println("throughput  : " + StringUtils.humanReadableInt((long) throughputRows) + " rows/s");
    System.out.println("total cells : " + numCells);
    System.out.println("throughput  : " + StringUtils.humanReadableInt((long) throughputCells) + " cells/s");
}