Example usage for com.google.common.base Stopwatch start

Introduction

On this page you can find example usage of com.google.common.base Stopwatch.start().

Prototype

public Stopwatch start() 

Document

Starts the stopwatch. Returns this Stopwatch instance for chaining; throws IllegalStateException if the stopwatch is already running.
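
A minimal sketch of the basic lifecycle, using the current Guava factory methods (the Thread.sleep call is just a stand-in for the work being timed):

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchStartExample {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch watch = Stopwatch.createUnstarted();
        watch.start();                // begin measuring elapsed time
        Thread.sleep(50);             // stand-in for the work being timed
        watch.stop();                 // stop measuring; the elapsed total is retained
        System.out.println("took " + watch.elapsed(TimeUnit.MILLISECONDS) + " ms");
        System.out.println(watch);    // toString() renders a readable duration, e.g. "51.30 ms"
    }
}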

Usage

From source file:ezbake.groups.service.CachingEzGroupsService.java

private Stopwatch getStopwatch() {
    Stopwatch watch = Stopwatch.createUnstarted();
    if (logTimer) {
        watch.start();
    }
    return watch;
}
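
Worth noting about the guard above: start() throws an IllegalStateException if the stopwatch is already running, so conditional starts are the usual idiom. An isRunning()-based variant, as a sketch rather than project code:

// Hypothetical helper: start only when the watch is not already running,
// since Stopwatch.start() throws IllegalStateException on a running watch.
static void startIfStopped(Stopwatch watch) {
    if (!watch.isRunning()) {
        watch.start();
    }
}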

From source file:ubic.gemma.persistence.service.expression.experiment.GeeqServiceImpl.java

/**
 * Does all the preparations and calls the appropriate scoring methods.
 *
 * @param eeId the id of the experiment to be scored.
 * @param mode the scoring mode. 'All' will redo all scores; batchEffect and batchConfound will only recalculate
 *             scores relevant to batch effect and batch confound, respectively.
 *             Scoring batch effect and confound is fairly fast, especially compared to the 'all' mode, which goes
 *             through almost all information associated with the experiment, and can therefore be very slow,
 *             depending on the experiment.
 */
private void doScoring(Long eeId, String mode) {
    ExpressionExperiment ee = expressionExperimentService.load(eeId);

    if (ee == null) {
        return;
    }

    this.ensureEeHasGeeq(ee);
    Geeq gq = ee.getGeeq();

    Stopwatch stopwatch = new Stopwatch();
    stopwatch.start();

    try {
        // Update score values
        switch (mode) {
        case GeeqService.OPT_MODE_ALL:
            Log.info(this.getClass(),
                    GeeqServiceImpl.LOG_PREFIX + " Starting full geeq scoring for ee id " + eeId);
            gq = this.scoreAll(ee);
            break;
        case GeeqService.OPT_MODE_BATCH:
            Log.info(this.getClass(), GeeqServiceImpl.LOG_PREFIX
                    + " Starting batch info, confound and batch effect geeq re-scoring for ee id " + eeId);
            gq = this.scoreOnlyBatchArtifacts(ee);
            break;
        case GeeqService.OPT_MODE_REPS:
            Log.info(this.getClass(),
                    GeeqServiceImpl.LOG_PREFIX + " Starting replicates geeq re-scoring for ee id " + eeId);
            gq = this.scoreOnlyReplicates(ee);
            break;
        case GeeqService.OPT_MODE_PUB:
            Log.info(this.getClass(),
                    GeeqServiceImpl.LOG_PREFIX + " Starting publication geeq re-scoring for ee id " + eeId);
            gq = this.scoreOnlyPublication(ee);
            break;
        default:
            Log.warn(this.getClass(), GeeqServiceImpl.LOG_PREFIX + " Did not recognize the given mode " + mode
                    + " for ee id " + eeId);
        }
        Log.info(this.getClass(), GeeqServiceImpl.LOG_PREFIX + " Finished geeq re-scoring for ee id " + eeId
                + ", saving results...");
    } catch (Exception e) {
        Log.info(this.getClass(), GeeqServiceImpl.LOG_PREFIX
                + " Major problem encountered, scoring did not finish for ee id " + eeId + ".");
        e.printStackTrace();
        gq.addOtherIssues(e.getMessage());
    }

    // Recalculate final scores
    gq = this.updateQualityScore(gq);
    gq = this.updateSuitabilityScore(gq);

    // Add note if experiment curation not finished
    if (ee.getCurationDetails().getNeedsAttention()) {
        gq.addOtherIssues("Experiment was not fully curated when the score was calculated.");
    }

    stopwatch.stop();
    gq.setLastRun(this.createGeeqEvent(ee, "Re-ran geeq scoring (mode: " + mode + ")", "Took "
            + stopwatch.elapsedMillis() + "ms.\nUnexpected problems encountered: \n" + gq.getOtherIssues()));

    this.update(gq);
    Log.info(this.getClass(), GeeqServiceImpl.LOG_PREFIX + " took "
            + Math.round(stopwatch.elapsedTime(TimeUnit.SECONDS) / 60.0) + " minutes to process ee id " + eeId);

}
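
This snippet is written against a pre-15.0 Guava: the public Stopwatch() constructor, elapsedMillis(), and elapsedTime(TimeUnit) were all deprecated and eventually removed in favor of the static factories and elapsed(TimeUnit). A rough modern equivalent of the timing scaffold, assuming a current Guava:

Stopwatch stopwatch = Stopwatch.createStarted();   // replaces new Stopwatch() + start()
// ... scoring work ...
stopwatch.stop();
long ms = stopwatch.elapsed(TimeUnit.MILLISECONDS);                     // replaces elapsedMillis()
long minutes = Math.round(stopwatch.elapsed(TimeUnit.SECONDS) / 60.0);  // replaces elapsedTime(TimeUnit.SECONDS)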

From source file:org.apache.hadoop.hbase.ScanPerformanceEvaluation.java

public void testSnapshotScanMapReduce() throws IOException, InterruptedException, ClassNotFoundException {
    Stopwatch scanOpenTimer = new Stopwatch();
    Stopwatch scanTimer = new Stopwatch();

    Scan scan = getScan();

    String jobName = "testSnapshotScanMapReduce";

    Job job = new Job(conf);
    job.setJobName(jobName);

    job.setJarByClass(getClass());

    TableMapReduceUtil.initTableSnapshotMapperJob(this.snapshotName, scan, MyMapper.class, NullWritable.class,
            NullWritable.class, job, true, new Path(restoreDir));

    job.setNumReduceTasks(0);
    job.setOutputKeyClass(NullWritable.class);
    job.setOutputValueClass(NullWritable.class);
    job.setOutputFormatClass(NullOutputFormat.class);

    scanTimer.start();
    job.waitForCompletion(true);
    scanTimer.stop();

    Counters counters = job.getCounters();
    long numRows = counters.findCounter(ScanCounter.NUM_ROWS).getValue();
    long numCells = counters.findCounter(ScanCounter.NUM_CELLS).getValue();

    long totalBytes = counters.findCounter(HBASE_COUNTER_GROUP_NAME, "BYTES_IN_RESULTS").getValue();
    double throughput = (double) totalBytes / scanTimer.elapsedTime(TimeUnit.SECONDS);
    double throughputRows = (double) numRows / scanTimer.elapsedTime(TimeUnit.SECONDS);
    double throughputCells = (double) numCells / scanTimer.elapsedTime(TimeUnit.SECONDS);

    System.out.println("HBase scan mapreduce: ");
    System.out.println("total time to open scanner: " + scanOpenTimer.elapsedMillis() + " ms");
    System.out.println("total time to scan: " + scanTimer.elapsedMillis() + " ms");

    System.out.println(
            "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")");
    System.out.println("throughput  : " + StringUtils.humanReadableInt((long) throughput) + "B/s");
    System.out.println("total rows  : " + numRows);
    System.out.println("throughput  : " + StringUtils.humanReadableInt((long) throughputRows) + " rows/s");
    System.out.println("total cells : " + numCells);
    System.out.println("throughput  : " + StringUtils.humanReadableInt((long) throughputCells) + " cells/s");
}
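
Two details worth noting: scanOpenTimer is never started in this method, so it always reports 0 ms, and the throughput divisions use whole elapsed seconds, which divides by zero for sub-second jobs. A safer variant, sketched with the modern elapsed(TimeUnit) (the snippet itself targets the legacy elapsedTime/elapsedMillis API):

// Compute throughput from milliseconds to avoid dividing by zero on sub-second scans.
long elapsedMs = Math.max(1, scanTimer.elapsed(TimeUnit.MILLISECONDS));
double throughputBytesPerSec = totalBytes * 1000.0 / elapsedMs;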

From source file:org.opendaylight.controller.netconf.persist.impl.ConfigPusherImpl.java

/**
 * First calls {@link #getOperationServiceWithRetries(java.util.Set, String)} in order to wait until the
 * expected capabilities are present, then tries to push the configuration. If a {@link ConflictingVersionException}
 * is caught, the whole process is retried; a new service instance needs to be obtained from the factory. Closes
 * the {@link NetconfOperationService} after each use.
 */
private synchronized EditAndCommitResponse pushConfigWithConflictingVersionRetries(
        ConfigSnapshotHolder configSnapshotHolder) throws NetconfDocumentedException {
    ConflictingVersionException lastException;
    Stopwatch stopwatch = Stopwatch.createUnstarted();
    do {
        String idForReporting = configSnapshotHolder.toString();
        SortedSet<String> expectedCapabilities = checkNotNull(configSnapshotHolder.getCapabilities(),
                "Expected capabilities must not be null - %s, check %s", idForReporting,
                configSnapshotHolder.getClass().getName());
        try (NetconfOperationService operationService = getOperationServiceWithRetries(expectedCapabilities,
                idForReporting)) {
            if (!stopwatch.isRunning()) {
                stopwatch.start();
            }
            return pushConfig(configSnapshotHolder, operationService);
        } catch (ConflictingVersionException e) {
            lastException = e;
            LOG.info("Conflicting version detected, will retry after timeout");
            sleep();
        }
    } while (stopwatch.elapsed(TimeUnit.MILLISECONDS) < conflictingVersionTimeoutMillis);
    throw new IllegalStateException("Max wait for conflicting version stabilization timeout after "
            + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms", lastException);
}
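
Here the stopwatch doubles as a timeout guard: it is started lazily, once the first service instance is actually obtained, and the loop retries until the elapsed time crosses the limit. A stripped-down sketch of the same pattern, with illustrative names rather than the source's:

Stopwatch stopwatch = Stopwatch.createUnstarted();
do {
    if (!stopwatch.isRunning()) {
        stopwatch.start();        // start counting only once the first attempt is made
    }
    if (tryPush()) {              // hypothetical operation that may hit a conflict
        return;
    }
    sleepBetweenRetries();        // hypothetical backoff
} while (stopwatch.elapsed(TimeUnit.MILLISECONDS) < timeoutMillis);
throw new IllegalStateException(
        "Gave up after " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");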

From source file:fr.ens.transcriptome.aozan.fastqscreen.FastqScreen.java

/**
 * Executes fastqscreen (single-end or paired-end mode).
 * @param fastqRead1 fastq read1 file input for the mapper
 * @param fastqRead2 fastq read2 file input for the mapper
 * @param fastqSample instance describing the fastq sample
 * @param genomes list of reference genomes used by the mapper
 * @param genomeSample reference genome corresponding to the sample
 * @param isPairedMode true if this is a paired-end run and the paired mode
 *          option is enabled, false otherwise
 * @throws AozanException if an error occurs during the map or reduce step
 */
public FastqScreenResult execute(final File fastqRead1, final File fastqRead2, final FastqSample fastqSample,
        final List<String> genomes, final String genomeSample, final boolean isPairedMode)
        throws AozanException {

    // Timer
    final Stopwatch timer = Stopwatch.createStarted();

    final FastqScreenPseudoMapReduce pmr = new FastqScreenPseudoMapReduce(this.tmpDir, isPairedMode,
            this.mapperName, this.mapperArgument);

    try {

        if (isPairedMode) {
            pmr.doMap(fastqRead1, fastqRead2, genomes, genomeSample, this.confThreads);
        } else {
            pmr.doMap(fastqRead1, genomes, genomeSample, this.confThreads);
        }

        LOGGER.fine("FASTQSCREEN : step map for " + fastqSample.getKeyFastqSample() + " in mode "
                + (isPairedMode ? "paired" : "single") + " on genome(s) " + genomes + " in "
                + toTimeHumanReadable(timer.elapsed(TimeUnit.MILLISECONDS)));

        timer.reset();
        timer.start();

        pmr.doReduce(new File(this.tmpDir + "/outputDoReduce.txt"));

        LOGGER.fine("FASTQSCREEN : step reduce for " + fastqSample.getKeyFastqSample() + " in mode "
                + (isPairedMode ? "paired" : "single") + " in "
                + toTimeHumanReadable(timer.elapsed(TimeUnit.MILLISECONDS)));

        // Remove the temporary output file used in the map-reduce step
        final File f = new File(this.tmpDir + "/outputDoReduce.txt");
        if (!f.delete()) {
            LOGGER.warning("Fastqscreen : failed to delete file " + f.getAbsolutePath());
        }

    } catch (final IOException e) {
        throw new AozanException(e);

    } finally {
        timer.stop();
    }

    return pmr.getFastqScreenResult();
}
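
The reset()-then-start() pair between the map and reduce steps restarts the count from zero, so each log line reports only its own phase. The idiom in isolation (the phase methods are hypothetical):

Stopwatch timer = Stopwatch.createStarted();
runMapPhase();                                          // hypothetical phase
long mapMs = timer.elapsed(TimeUnit.MILLISECONDS);
timer.reset();                                          // zeroes the count and stops the watch
timer.start();                                          // resume from zero for the next phase
runReducePhase();                                       // hypothetical phase
long reduceMs = timer.elapsed(TimeUnit.MILLISECONDS);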

From source file:org.apache.drill.exec.physical.impl.xsort.ExternalSortBatch.java

@Override
public IterOutcome innerNext() {
    if (schema != null) {
        if (spillCount == 0) {
            return (getSelectionVector4().next()) ? IterOutcome.OK : IterOutcome.NONE;
        } else {
            Stopwatch w = new Stopwatch();
            w.start();
            int count = copier.next(targetRecordCount);
            if (count > 0) {
                long t = w.elapsed(TimeUnit.MICROSECONDS);
                logger.debug("Took {} us to merge {} records", t, count);
                container.setRecordCount(count);
                return IterOutcome.OK;
            } else {
                logger.debug("copier returned 0 records");
                return IterOutcome.NONE;
            }
        }
    }

    int totalCount = 0;

    try {
        container.clear();
        outer: while (true) {
            Stopwatch watch = new Stopwatch();
            watch.start();
            IterOutcome upstream;
            if (first) {
                upstream = IterOutcome.OK_NEW_SCHEMA;
            } else {
                upstream = next(incoming);
            }
            if (upstream == IterOutcome.OK && sorter == null) {
                upstream = IterOutcome.OK_NEW_SCHEMA;
            }
            //        logger.debug("Took {} us to get next", watch.elapsed(TimeUnit.MICROSECONDS));
            switch (upstream) {
            case NONE:
                if (first) {
                    return upstream;
                }
                break outer;
            case NOT_YET:
                throw new UnsupportedOperationException();
            case STOP:
                return upstream;
            case OK_NEW_SCHEMA:
                // only change in the case that the schema truly changes.  Artificial schema changes are ignored.
                if (!incoming.getSchema().equals(schema)) {
                    if (schema != null) {
                        throw new SchemaChangeException();
                    }
                    this.schema = incoming.getSchema();
                    this.sorter = createNewSorter(context, incoming);
                }
                // fall through.
            case OK:
                if (first) {
                    first = false;
                }
                if (incoming.getRecordCount() == 0) {
                    for (VectorWrapper w : incoming) {
                        w.clear();
                    }
                    break;
                }
                totalSizeInMemory += getBufferSize(incoming);
                SelectionVector2 sv2;
                if (incoming.getSchema().getSelectionVectorMode() == BatchSchema.SelectionVectorMode.TWO_BYTE) {
                    sv2 = incoming.getSelectionVector2();
                    if (sv2.getBuffer(false).isRootBuffer()) {
                        oContext.getAllocator().takeOwnership(sv2.getBuffer(false));
                    }
                } else {
                    try {
                        sv2 = newSV2();
                    } catch (InterruptedException e) {
                        return IterOutcome.STOP;
                    } catch (OutOfMemoryException e) {
                        throw new OutOfMemoryRuntimeException(e);
                    }
                }
                int count = sv2.getCount();
                totalCount += count;
                sorter.setup(context, sv2, incoming);
                Stopwatch w = new Stopwatch();
                w.start();
                sorter.sort(sv2);
                //          logger.debug("Took {} us to sort {} records", w.elapsed(TimeUnit.MICROSECONDS), count);
                RecordBatchData rbd = new RecordBatchData(incoming);
                boolean success = false;
                try {
                    if (incoming.getSchema().getSelectionVectorMode() == SelectionVectorMode.NONE) {
                        rbd.setSv2(sv2);
                    }
                    batchGroups.add(new BatchGroup(rbd.getContainer(), rbd.getSv2()));
                    batchesSinceLastSpill++;
                    if (// We have spilled at least once and the current memory used is more than the 75% of peak memory used.
                    (spillCount > 0 && totalSizeInMemory > .75 * highWaterMark) ||
                    // If we haven't spilled so far, do we have enough memory for MSorter if this turns out to be the last incoming batch?
                            (spillCount == 0 && !hasMemoryForInMemorySort(totalCount)) ||
                            // current memory used is more than 95% of memory usage limit of this operator
                            (totalSizeInMemory > .95 * popConfig.getMaxAllocation()) ||
                            // current memory used is more than 95% of memory usage limit of this fragment
                            (totalSizeInMemory > .95 * oContext.getAllocator().getFragmentLimit()) ||
                            // Number of incoming batches (BatchGroups) exceed the limit and number of incoming batches accumulated
                            // since the last spill exceed the defined limit
                            (batchGroups.size() > SPILL_THRESHOLD
                                    && batchesSinceLastSpill >= SPILL_BATCH_GROUP_SIZE)) {

                        if (firstSpillBatchCount == 0) {
                            firstSpillBatchCount = batchGroups.size();
                        }

                        if (spilledBatchGroups.size() > firstSpillBatchCount / 2) {
                            logger.info("Merging spills");
                            spilledBatchGroups.addFirst(mergeAndSpill(spilledBatchGroups));
                        }
                        spilledBatchGroups.add(mergeAndSpill(batchGroups));
                        batchesSinceLastSpill = 0;
                    }
                    long t = w.elapsed(TimeUnit.MICROSECONDS);
                    //          logger.debug("Took {} us to sort {} records", t, count);
                    success = true;
                } finally {
                    if (!success) {
                        rbd.clear();
                    }
                }
                break;
            case OUT_OF_MEMORY:
                logger.debug("received OUT_OF_MEMORY, trying to spill");
                highWaterMark = totalSizeInMemory;
                if (batchesSinceLastSpill > 2) {
                    spilledBatchGroups.add(mergeAndSpill(batchGroups));
                    batchesSinceLastSpill = 0;
                } else {
                    logger.debug("not enough batches to spill, sending OUT_OF_MEMORY downstream");
                    return IterOutcome.OUT_OF_MEMORY;
                }
                break;
            default:
                throw new UnsupportedOperationException();
            }
        }

        if (totalCount == 0) {
            return IterOutcome.NONE;
        }
        if (spillCount == 0) {
            Stopwatch watch = new Stopwatch();
            watch.start();

            if (builder != null) {
                builder.clear();
                builder.close();
            }
            builder = new SortRecordBatchBuilder(oContext.getAllocator(), MAX_SORT_BYTES);

            for (BatchGroup group : batchGroups) {
                RecordBatchData rbd = new RecordBatchData(group.getContainer());
                rbd.setSv2(group.getSv2());
                builder.add(rbd);
            }

            builder.build(context, container);
            sv4 = builder.getSv4();
            mSorter = createNewMSorter();
            mSorter.setup(context, oContext.getAllocator(), getSelectionVector4(), this.container);

            // For testing memory-leak purpose, inject exception after mSorter finishes setup
            injector.injectUnchecked(context.getExecutionControls(), INTERRUPTION_AFTER_SETUP);
            mSorter.sort(this.container);

            // sort may have exited prematurely because shouldContinue() returned false.
            if (!context.shouldContinue()) {
                return IterOutcome.STOP;
            }

            // For testing memory-leak purpose, inject exception after mSorter finishes sorting
            injector.injectUnchecked(context.getExecutionControls(), INTERRUPTION_AFTER_SORT);
            sv4 = mSorter.getSV4();

            long t = watch.elapsed(TimeUnit.MICROSECONDS);
            //        logger.debug("Took {} us to sort {} records", t, sv4.getTotalCount());
            container.buildSchema(SelectionVectorMode.FOUR_BYTE);
        } else {
            spilledBatchGroups.add(mergeAndSpill(batchGroups));
            batchGroups.addAll(spilledBatchGroups);
            logger.warn("Starting to merge. {} batch groups. Current allocated memory: {}", batchGroups.size(),
                    oContext.getAllocator().getAllocatedMemory());
            VectorContainer hyperBatch = constructHyperBatch(batchGroups);
            createCopier(hyperBatch, batchGroups, container, false);

            int estimatedRecordSize = 0;
            for (VectorWrapper w : batchGroups.get(0)) {
                try {
                    estimatedRecordSize += TypeHelper.getSize(w.getField().getType());
                } catch (UnsupportedOperationException e) {
                    estimatedRecordSize += 50;
                }
            }
            targetRecordCount = Math.min(MAX_BATCH_SIZE, Math.max(1, 250 * 1000 / estimatedRecordSize));
            int count = copier.next(targetRecordCount);
            container.buildSchema(SelectionVectorMode.NONE);
            container.setRecordCount(count);
        }

        return IterOutcome.OK_NEW_SCHEMA;

    } catch (SchemaChangeException ex) {
        kill(false);
        context.fail(UserException.unsupportedError(ex)
                .message("Sort doesn't currently support sorts with changing schemas").build(logger));
        return IterOutcome.STOP;
    } catch (ClassTransformationException | IOException ex) {
        kill(false);
        context.fail(ex);
        return IterOutcome.STOP;
    } catch (UnsupportedOperationException e) {
        throw new RuntimeException(e);
    }
}
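
The sort path times each incoming batch in microseconds; as in the earlier examples, new Stopwatch() is the legacy pre-15.0 constructor, replaced on current Guava by a static factory. The per-batch timing in modern form, as a sketch mirroring the quoted code:

Stopwatch w = Stopwatch.createStarted();   // modern replacement for new Stopwatch(); w.start();
sorter.sort(sv2);
logger.debug("Took {} us to sort {} records", w.elapsed(TimeUnit.MICROSECONDS), count);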

From source file:org.caleydo.core.util.clusterer.algorithm.tree.TreeClusterer.java

@Override
protected PerspectiveInitializationData cluster() {
    int r = 0;

    Stopwatch w = new Stopwatch().start();
    r = determineSimilarities();
    System.out.println("determine similarties: " + w);
    w.stop().reset();
    if (r < 0) {
        progress(100);
        return null;
    }

    TreeClusterConfiguration tConfig = (TreeClusterConfiguration) config.getClusterAlgorithmConfiguration();

    Node[] result;

    w.start();
    switch (tConfig.getTreeClustererAlgo()) {
    case COMPLETE_LINKAGE:
        result = pmlcluster();
        System.out.println("pmlcluster: " + w);
        break;
    case AVERAGE_LINKAGE:

        result = palcluster();
        System.out.println("palcluster: " + w);
        break;
    case SINGLE_LINKAGE:
        result = pslcluster();
        System.out.println("pslcluster: " + w);
        break;
    default:
        throw new IllegalStateException("Unkonwn cluster type: " + tConfig.getTreeClustererAlgo());
    }
    if (result == null)
        return null;
    w.reset().start();
    PerspectiveInitializationData p = convert(result);
    System.out.println("convert: " + w);
    return p;
}
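
Note how the running Stopwatch is concatenated straight into the output strings: Stopwatch.toString() renders the current elapsed time in a human-readable unit, so no explicit elapsed() call is needed. For example:

Stopwatch w = Stopwatch.createStarted();
doClusterStep();                             // hypothetical workload
System.out.println("step took: " + w);       // e.g. "step took: 38.91 ms" via toString()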

From source file:io.cloudex.framework.components.Processor.java

@Override
public void run() throws IOException {

    String status = null;
    Stopwatch stopwatch = Stopwatch.createUnstarted();

    VmMetaData metaData = this.getMetaData();
    CloudService cloudService = this.getCloudService();

    while (true) {
        try {
            // only process tasks if the status is empty
            if (StringUtils.isBlank(status)) {

                // set status to BUSY
                metaData.setProcessorStatus(ProcessorStatus.BUSY);
                cloudService.updateMetadata(metaData);

                // run the task
                Task task = taskFactory.getTask(metaData, cloudService);
                if (task != null) {
                    stopwatch.start();
                    log.info("Starting processor task: " + task);

                    task.run();

                    log.info("TIMER# Task " + task + " completed in: " + stopwatch);
                    stopwatch.reset();

                } else {
                    //no task is set, just set status to ready and wait for tasks
                    log.info("No task is set!");
                }

                // finished processing
                // blank the task type and set the status to READY
                metaData.clearValues();
                metaData.setProcessorStatus(ProcessorStatus.READY);

                cloudService.updateMetadata(metaData);

            } else {
                log.info("will continue waiting for instructions as status is currently: " + status);
            }

            // now wait for any change in the metadata
            log.info("Waiting for new instructions from the Coordinator");

            // FIXME better solution for race condition
            // avoid race condition
            ApiUtils.block(2);
            metaData = cloudService.getMetaData(false);
            // if we still have a status then wait, otherwise proceed
            if (StringUtils.isNotBlank(metaData.getStatus())) {
                metaData = cloudService.getMetaData(true);
            }

            // check the status in the metadata
            status = metaData.getStatus();

        } catch (Exception e) {

            log.error("An error has occurred whilst running/waiting for tasks, setting status to ERROR", e);
            // the stopwatch wasn't stopped when an exception was thrown
            stopwatch.reset();
            // try to update the Metadata to a fail status
            try {

                metaData = cloudService.getMetaData(false);
                // blank the task type and set the status to ERROR
                metaData.clearValues();
                metaData.exceptionToCloudExError(e);
                cloudService.updateMetadata(metaData);

                // wait until we get further instructions
                // now wait for any change in the metadata
                log.info("Waiting for new instructions from the Coordinator");
                metaData = cloudService.getMetaData(true);
                status = metaData.getStatus();

            } catch (Exception e1) {
                // all has failed with no hope of recovery, retry a few times then terminate
                log.fatal("An error has occurred whilst trying to recover", e1);
                // self terminate :-(
                // FIXME uncomment once testing is thoroughly done
                //this.service.shutdownInstance();
            }
        }

        if (this.stop) {
            break;
        }
    }

}
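
A single stopwatch instance is reused across loop iterations: start() before each task and reset() afterwards (and again in the error path), so the next start() does not trip the already-running IllegalStateException. The reuse pattern condensed, with illustrative loop and task methods:

Stopwatch stopwatch = Stopwatch.createUnstarted();
while (hasWork()) {                 // hypothetical loop condition
    stopwatch.start();
    try {
        runTask();                  // hypothetical task
        log.info("TIMER# task completed in: " + stopwatch);
    } finally {
        stopwatch.reset();          // stops and zeroes, ready for the next start()
    }
}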

From source file:org.jenkinsci.plugins.relution_publisher.builder.ArtifactFileUploader.java

private JsonObject uploadAsset(final File directory, final String fileName)
        throws URISyntaxException, InterruptedException {

    try {
        final Stopwatch sw = new Stopwatch();
        final File file = new File(directory, fileName);
        final ApiRequest request = RequestFactory.createUploadRequest(this.store, file);

        this.log.write(this, "Uploading \"%s\" (%,d Byte)", fileName, file.length());

        sw.start();
        final ApiResponse response = this.requestManager.execute(request, this.log);
        sw.stop();

        final String speed = this.getUploadSpeed(sw, file);
        this.log.write(this, "Upload of file completed (%s, %s).", sw, speed);

        return this.extractAsset(response);

    } catch (final IOException e) {
        this.log.write(this, "Upload of file failed, error during execution:\n\n%s\n", e);
        Builds.setResult(this, Result.UNSTABLE, this.log);

    } catch (final ExecutionException e) {
        this.log.write(this, "Upload of file failed, error during execution:\n\n%s\n", e);
        Builds.setResult(this, Result.UNSTABLE, this.log);

    }
    return null;
}
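
getUploadSpeed(sw, file) is project code, but the underlying computation is just file size over elapsed time. A hedged sketch of such a helper, written against the modern elapsed(TimeUnit) (the snippet itself uses the legacy new Stopwatch() API) and guarded against a zero-length measurement:

// Illustrative helper, not the plugin's actual implementation.
private static String uploadSpeed(Stopwatch sw, File file) {
    long ms = Math.max(1, sw.elapsed(TimeUnit.MILLISECONDS));
    double bytesPerSecond = file.length() * 1000.0 / ms;
    return String.format("%,.0f B/s", bytesPerSecond);
}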

From source file:org.geogig.osm.cli.commands.OSMHistoryImport.java

/**
 * @param context the geogig command context used to resolve node locations
 * @param primitive the OSM primitive to extract a geometry from
 * @param thisChangePointCache cache of node coordinates already resolved for this change
 * @return the parsed geometry, or {@code null} for relations and for ways with fewer than two
 *         resolvable coordinates
 */
private Geometry parseGeometry(Context context, Primitive primitive,
        Map<Long, Coordinate> thisChangePointCache) {

    if (primitive instanceof Relation) {
        return null;
    }

    if (primitive instanceof Node) {
        Optional<Point> location = ((Node) primitive).getLocation();
        return location.orNull();
    }

    final Way way = (Way) primitive;
    final ImmutableList<Long> nodes = way.getNodes();

    List<Coordinate> coordinates = Lists.newArrayList(nodes.size());
    FindTreeChild findTreeChild = context.command(FindTreeChild.class);
    Optional<ObjectId> nodesTreeId = context.command(ResolveTreeish.class)
            .setTreeish(Ref.STAGE_HEAD + ":" + NODE_TYPE_NAME).call();
    if (nodesTreeId.isPresent()) {
        RevTree headTree = context.objectDatabase().getTree(nodesTreeId.get());
        findTreeChild.setParent(headTree);
    }
    int findTreeChildCalls = 0;
    Stopwatch findTreeChildSW = Stopwatch.createUnstarted();
    ObjectStore objectDatabase = context.objectDatabase();
    for (Long nodeId : nodes) {
        Coordinate coord = thisChangePointCache.get(nodeId);
        if (coord == null) {
            findTreeChildCalls++;
            String fid = String.valueOf(nodeId);
            findTreeChildSW.start();
            Optional<NodeRef> nodeRef = findTreeChild.setChildPath(fid).call();
            findTreeChildSW.stop();
            Optional<org.locationtech.geogig.model.Node> ref = Optional.absent();
            if (nodeRef.isPresent()) {
                ref = Optional.of(nodeRef.get().getNode());
            }

            if (ref.isPresent()) {
                final int locationAttIndex = 6;
                ObjectId objectId = ref.get().getObjectId();
                RevFeature revFeature = objectDatabase.getFeature(objectId);
                Point p = (Point) revFeature.get(locationAttIndex, GEOMF).orNull();
                if (p != null) {
                    coord = p.getCoordinate();
                    thisChangePointCache.put(Long.valueOf(nodeId), coord);
                }
            }
        }
        if (coord != null) {
            coordinates.add(coord);
        }
    }
    if (findTreeChildCalls > 0) {
        //            System.err.printf("%,d findTreeChild calls (%s)\n", findTreeChildCalls,
        //                    findTreeChildSW);
    }
    if (coordinates.size() < 2) {
        return null;
    }
    return GEOMF.createLineString(coordinates.toArray(new Coordinate[coordinates.size()]));
}
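
Because stop() preserves the accumulated total and start() resumes counting, the repeated findTreeChildSW.start()/stop() pairs inside the loop add up to the total time spent resolving nodes across all calls. The accumulation behavior in isolation:

Stopwatch sw = Stopwatch.createUnstarted();
for (int i = 0; i < 3; i++) {
    sw.start();
    doLookup();    // hypothetical timed call
    sw.stop();     // elapsed time accumulates across start/stop cycles
}
System.out.println("total lookup time: " + sw);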