Example usage for org.apache.commons.lang.time StopWatch reset

List of usage examples for org.apache.commons.lang.time StopWatch reset

Introduction

This page lists example usages of org.apache.commons.lang.time StopWatch reset.

Prototype

public void reset() 

Document

Resets the stopwatch.
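
reset() clears any recorded time and returns the watch to its unstarted state. In Commons Lang, a stopped StopWatch cannot be restarted with start() until reset() has been called, which is why the examples below reset the watch between timing rounds. Here is a minimal, self-contained sketch (the class name and sleep durations are illustrative only):

import org.apache.commons.lang.time.StopWatch;

public class StopWatchResetExample {
    public static void main(String[] args) throws InterruptedException {
        StopWatch stopWatch = new StopWatch();

        stopWatch.start();
        Thread.sleep(50); // first timed section
        stopWatch.stop();
        System.out.println("First section: " + stopWatch.getTime() + " ms");

        // Without this reset(), the start() below would throw
        // IllegalStateException ("Stopwatch must be reset before being restarted").
        stopWatch.reset();

        stopWatch.start();
        Thread.sleep(50); // second timed section
        stopWatch.stop();
        System.out.println("Second section: " + stopWatch.getTime() + " ms");
    }
}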

Usage

From source file:org.apache.eagle.alert.engine.serialization.PartitionedEventSerializerTest.java

@SuppressWarnings("deprecation")
@Test
public void testPartitionEventSerializationEfficiency() throws IOException {
    PartitionedEvent partitionedEvent = MockSampleMetadataFactory
            .createPartitionedEventGroupedByName("sampleStream", System.currentTimeMillis());
    PartitionedEventSerializerImpl serializer = new PartitionedEventSerializerImpl(
            MockSampleMetadataFactory::createSampleStreamDefinition);

    int count = 100000;
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    int i = 0;
    while (i < count) {
        ByteArrayDataOutput dataOutput1 = ByteStreams.newDataOutput();
        serializer.serialize(partitionedEvent, dataOutput1);
        byte[] serializedBytes = dataOutput1.toByteArray();
        PartitionedEvent deserializedEvent = serializer.deserialize(ByteStreams.newDataInput(serializedBytes));
        Assert.assertEquals(partitionedEvent, deserializedEvent);
        i++;
    }
    stopWatch.stop();
    LOG.info("Cached Stream: {} ms", stopWatch.getTime());
    stopWatch.reset();
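    // Next round: the same round-trip with compression enabled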
    PartitionedEventSerializerImpl compressSerializer = new PartitionedEventSerializerImpl(
            MockSampleMetadataFactory::createSampleStreamDefinition, true);
    i = 0;
    stopWatch.start();
    while (i < count) {
        byte[] serializedBytesCompressed = compressSerializer.serialize(partitionedEvent);
        PartitionedEvent deserializedEventCompressed = compressSerializer
                .deserialize(serializedBytesCompressed);
        Assert.assertEquals(partitionedEvent, deserializedEventCompressed);
        i++;
    }
    stopWatch.stop();
    LOG.info("Compressed Cached Stream: {} ms", stopWatch.getTime());
    stopWatch.reset();
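    // Next round: digest serializer covering both stream and partition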

    i = 0;
    stopWatch.start();
    while (i < count) {
        PartitionedEventDigestSerializer serializer2 = new PartitionedEventDigestSerializer(
                MockSampleMetadataFactory::createSampleStreamDefinition);
        ByteArrayDataOutput dataOutput2 = ByteStreams.newDataOutput();
        serializer2.serialize(partitionedEvent, dataOutput2);
        byte[] serializedBytes2 = dataOutput2.toByteArray();
        ByteArrayDataInput dataInput2 = ByteStreams.newDataInput(serializedBytes2);
        PartitionedEvent deserializedEvent2 = serializer2.deserialize(dataInput2);
        Assert.assertEquals(partitionedEvent, deserializedEvent2);
        i++;
    }
    stopWatch.stop();
    LOG.info("Cached Stream&Partition: {} ms", stopWatch.getTime());
    stopWatch.reset();
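    // Next round: standard Java serialization via DefaultSerializationDelegate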
    i = 0;
    stopWatch.start();
    while (i < count) {
        byte[] javaSerialization = new DefaultSerializationDelegate().serialize(partitionedEvent);
        PartitionedEvent javaSerializedEvent = (PartitionedEvent) new DefaultSerializationDelegate()
                .deserialize(javaSerialization);
        Assert.assertEquals(partitionedEvent, javaSerializedEvent);
        i++;
    }
    stopWatch.stop();
    LOG.info("Java Native: {} ms", stopWatch.getTime());
    stopWatch.reset();
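    // Final round: Kryo serialization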
    i = 0;
    stopWatch.start();
    Kryo kryo = new DefaultKryoFactory.KryoSerializableDefault();
    while (i < count) {
        Output output = new Output(10000);
        kryo.writeClassAndObject(output, partitionedEvent);
        byte[] kryoBytes = output.toBytes();
        Input input = new Input(kryoBytes);
        PartitionedEvent kryoDeserializedEvent = (PartitionedEvent) kryo.readClassAndObject(input);
        Assert.assertEquals(partitionedEvent, kryoDeserializedEvent);
        i++;
    }
    stopWatch.stop();
    LOG.info("Kryo: {} ms", stopWatch.getTime());
}

From source file:org.apache.eagle.alert.engine.sorter.StreamWindowBenchmarkTest.java

public void sendDESCOrderedEventsToWindow(StreamWindow window, StreamWindowRepository.StorageType storageType,
        int num) {
    LOGGER.info("Sending {} events to {} ({})", num, window.getClass().getSimpleName(), storageType);
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    int i = 0;
    while (i < num) {
        PartitionedEvent event = MockSampleMetadataFactory.createPartitionedEventGroupedByName("sampleStream_1",
                (window.startTime() + i));
        window.add(event);
        i++;
    }
    stopWatch.stop();
    performanceReport.put(num + "\tInsertTime\t" + storageType, stopWatch.getTime());
    LOGGER.info("Inserted {} events in {} ms", num, stopWatch.getTime());
    stopWatch.reset();
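    // Reset so the flush (read) phase is timed separately from the inserts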
    stopWatch.start();
    window.flush();
    stopWatch.stop();
    performanceReport.put(num + "\tReadTime\t" + storageType, stopWatch.getTime());
}

From source file:org.apache.hadoop.hbase.ipc.TestCellBlockBuilder.java

private static void timerTests(final CellBlockBuilder builder, final int count, final int size,
        final Codec codec, final CompressionCodec compressor) throws IOException {
    final int cycles = 1000;
    StopWatch timer = new StopWatch();
    timer.start();
    for (int i = 0; i < cycles; i++) {
        timerTest(builder, timer, count, size, codec, compressor, false);
    }
    timer.stop();
    LOG.info("Codec=" + codec + ", compression=" + compressor + ", sized=" + false + ", count=" + count
            + ", size=" + size + ", + took=" + timer.getTime() + "ms");
    timer.reset();
    timer.start();
    for (int i = 0; i < cycles; i++) {
        timerTest(builder, timer, count, size, codec, compressor, true);
    }
    timer.stop();
    LOG.info("Codec=" + codec + ", compression=" + compressor + ", sized=" + true + ", count=" + count
            + ", size=" + size + ", + took=" + timer.getTime() + "ms");
}

From source file:org.apache.hadoop.hbase.ipc.TestIPCUtil.java

private static void timerTests(final IPCUtil util, final int count, final int size, final Codec codec,
        final CompressionCodec compressor) throws IOException {
    final int cycles = 1000;
    StopWatch timer = new StopWatch();
    timer.start();
    for (int i = 0; i < cycles; i++) {
        timerTest(util, timer, count, size, codec, compressor, false);
    }
    timer.stop();
    LOG.info("Codec=" + codec + ", compression=" + compressor + ", sized=" + false + ", count=" + count
            + ", size=" + size + ", + took=" + timer.getTime() + "ms");
    timer.reset();
    timer.start();
    for (int i = 0; i < cycles; i++) {
        timerTest(util, timer, count, size, codec, compressor, true);
    }
    timer.stop();
    LOG.info("Codec=" + codec + ", compression=" + compressor + ", sized=" + true + ", count=" + count
            + ", size=" + size + ", + took=" + timer.getTime() + "ms");
}

From source file:org.apache.hadoop.hbase.tool.Canary.java

private static void sniffRegion(final HBaseAdmin admin, final Sink sink, HRegionInfo region, HTable table)
        throws Exception {
    HTableDescriptor tableDesc = table.getTableDescriptor();
    byte[] startKey = null;
    Get get = null;
    Scan scan = null;
    ResultScanner rs = null;
    StopWatch stopWatch = new StopWatch();
    for (HColumnDescriptor column : tableDesc.getColumnFamilies()) {
        stopWatch.reset();
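        // Start each column family with a clean timer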
        startKey = region.getStartKey();
        // Can't do a get on empty start row so do a Scan of first element if any instead.
        if (startKey.length > 0) {
            get = new Get(startKey);
            get.addFamily(column.getName());
        } else {
            scan = new Scan();
            scan.setCaching(1);
            scan.addFamily(column.getName());
            scan.setMaxResultSize(1L);
        }

        try {
            if (startKey.length > 0) {
                stopWatch.start();
                table.get(get);
                stopWatch.stop();
                sink.publishReadTiming(region, column, stopWatch.getTime());
            } else {
                stopWatch.start();
                rs = table.getScanner(scan);
                stopWatch.stop();
                sink.publishReadTiming(region, column, stopWatch.getTime());
            }
        } catch (Exception e) {
            sink.publishReadFailure(region, column, e);
        } finally {
            if (rs != null) {
                rs.close();
            }
            scan = null;
            get = null;
            startKey = null;
        }
    }
}

From source file:org.apache.ojb.broker.QueryTest.java

/**
 * Run a query range test.
 */
public void testQueryRangeMassTest() {
    String name = "testQueryRangeMassTest_" + System.currentTimeMillis();
    int objCount = 2000;

    broker.beginTransaction();
    for (int i = 0; i < objCount; i++) {
        Gourmet a = new Gourmet();
        a.setName(name);
        broker.store(a);
    }
    broker.commitTransaction();

    Criteria crit = new Criteria();
    crit.addEqualTo("name", name);
    QueryByCriteria q = QueryFactory.newQuery(Gourmet.class, crit);
    q.setStartAtIndex(100);
    q.setEndAtIndex(109);

    StopWatch watch = new StopWatch();
    watch.start();
    Collection c = broker.getCollectionByQuery(q);
    watch.stop();
    System.out.println("# Query 10 of " + objCount + " objects take " + watch.getTime() + " ms");
    assertNotNull(c);
    List result = new ArrayList(c);
    assertEquals(10, result.size());

    crit = new Criteria();
    crit.addEqualTo("name", name);
    q = QueryFactory.newQuery(Gourmet.class, crit);
    watch.reset();
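    // Reuse the same watch to time the full, unbounded query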
    watch.start();
    c = broker.getCollectionByQuery(q);
    watch.stop();
    System.out.println("# Query all " + objCount + " objects take " + watch.getTime() + " ms");
    assertNotNull(c);
    result = new ArrayList(c);
    assertEquals(objCount, result.size());

    broker.beginTransaction();
    for (int i = 0; i < result.size(); i++) {
        broker.delete(result.get(i));
    }
    broker.commitTransaction();

    c = broker.getCollectionByQuery(q);
    assertNotNull(c);
    result = new ArrayList(c);
    assertEquals(0, result.size());
}

From source file:org.apache.solr.handler.component.RatiosComponent.java

@Override
public void process(ResponseBuilder rb) throws IOException {
    try {
        HashMap<String, Long> timers = new HashMap<String, Long>();

        if (rb.doRatios) {
            SolrParams params = rb.req.getParams();

            // in ratios the facet field is always the dimension field
            String dimension = params.get(RatiosParams.RATIOS_DIMENSION);
            String measure = params.get(RatiosParams.RATIOS_MEASURE);
            Double min = params.getDouble(RatiosParams.RATIOS_MIN, 0);
            Double max = params.getDouble(RatiosParams.RATIOS_MAX, 1);
            boolean debug = params.getBool(RatiosParams.RATIOS_DEBUG, false);
            boolean rows = params.getBool(RatiosParams.RATIOS_ROWS, false);

            HashMap<String, String[]> fieldFacets = new HashMap<String, String[]>();
            fieldFacets.put(measure, new String[] { dimension });

            SolrIndexSearcher searcher = rb.req.getSearcher();

            String defType = params.get(QueryParsing.DEFTYPE, QParserPlugin.DEFAULT_QTYPE);
            QParser q1 = QParser.getParser(
                    params.get("q") + " AND (" + params.get(RatiosParams.RATIOS_Q1) + ")", defType, rb.req);
            QParser q2 = QParser.getParser(
                    params.get("q") + " AND (" + params.get(RatiosParams.RATIOS_Q2) + ")", defType, rb.req);

            StopWatch stopwatch = new StopWatch();
            stopwatch.start();

            DocSet set1 = searcher.getDocSet(q1.getQuery());
            stopwatch.stop();
            timers.put("q1.ms", stopwatch.getTime());
            stopwatch.reset();

            stopwatch.start();
            DocSet set2 = searcher.getDocSet(q2.getQuery());
            stopwatch.stop();
            timers.put("q2.ms", stopwatch.getTime());
            stopwatch.reset();

            // ====== stats for 1st
            stopwatch.start();
            ModifiableSolrParams xp = new ModifiableSolrParams();
            xp.add(StatsParams.STATS_FIELD, measure);
            xp.add(StatsParams.STATS_FACET, dimension);
            xp.add(ShardParams.IS_SHARD, String.valueOf(params.getBool(ShardParams.IS_SHARD, false)));
            SimpleStats stats1 = new SimpleStats(rb.req, set1, xp);

            // TODO implement according to SOLR standard
            NamedList<?> map1 = stats1.getFieldCacheStats(measure, new String[] { dimension });
            if (map1 == null || map1.size() <= 0) {
                // empty do nothing
                return;
            }
            Map<String, Double> matrix1 = new HashMap<String, Double>(); // TODO map1.get(dimension);
            stopwatch.stop();
            timers.put("q1.stats.ms", stopwatch.getTime());
            stopwatch.reset();

            // ====== stats for 2nd
            stopwatch.start();
            SimpleStats stats2 = new SimpleStats(rb.req, set2, xp);
            NamedList<?> map2 = stats2.getFieldCacheStats(measure, new String[] { dimension });
            if (map2 == null || map2.size() <= 0) {
                // empty do nothing
                return;
            }
            Map<String, Double> matrix2 = new HashMap<String, Double>(); // TODO map2.get(dimension);
            stopwatch.stop();
            timers.put("q2.stats.ms", stopwatch.getTime());
            stopwatch.reset();

            // ====== ratios
            stopwatch.start();
            OpenBitSet ratios = new OpenBitSet();// TODO filter(matrix1, matrix2, min, max);
            stopwatch.stop();
            timers.put("ratio.ms", stopwatch.getTime());
            stopwatch.reset();

            // ====== done do payload extraction
            NamedList<Object> payload = new NamedList<Object>();
            if (debug) {
                // timer information
                NamedList<Object> performance = new NamedList<Object>();
                for (String key : timers.keySet()) {
                    performance.add(key, timers.get(key));
                }
                payload.add("debug", performance);
            }

            payload.add("count", ratios.cardinality());
            payload.add("union", set1.unionSize(set2));
            payload.add("intersection", set1.intersectionSize(set2));

            NamedList<Object> query1 = new NamedList<Object>();
            query1.add("rows", set1.size());
            query1.add("dimensions", matrix1.size());
            if (rows) {
                query1.add("results", toNamedList(matrix1));
            }

            NamedList<Object> query2 = new NamedList<Object>();
            query2.add("rows", set2.size());
            query2.add("dimensions", matrix2.size());
            if (rows) {
                query2.add("results", toNamedList(matrix2));
            }

            NamedList<Object> breakdown = new NamedList<Object>();
            breakdown.add("query1", query1);
            breakdown.add("query2", query2);

            payload.add("breakdown", breakdown);

            // TODO - output ratio bitset to hex for UX to do client side join
            // byte[] bytes = HexUtil.convertToGzipCompressedByte(ratios.getBits());
            // String x = javax.xml.bind.DatatypeConverter.printBase64Binary(bytes);
            // payload.add("base64", x);

            rb.rsp.add(RatiosParams.RATIOS, payload);
        }
    } catch (ParseException e) {
        throw new RuntimeException(e);
    }
}

From source file:org.apache.wiki.render.RenderingManagerTest.java

/**
 * Tests the relative speed of the DOM cache with respect to
 * the page being parsed every single time.
 * @throws Exception
 */
public void testCache() throws Exception {
    m_engine.saveText("TestPage", TEST_TEXT);

    StopWatch sw = new StopWatch();

    System.out.println("DOM cache speed test:");
    sw.start();

    for (int i = 0; i < 100; i++) {
        WikiPage page = m_engine.getPage("TestPage");
        String pagedata = m_engine.getPureText(page);

        WikiContext context = new WikiContext(m_engine, page);

        MarkupParser p = m_manager.getParser(context, pagedata);

        WikiDocument d = p.parse();

        String html = m_manager.getHTML(context, d);
        assertNotNull("noncached got null response", html);
    }

    sw.stop();
    System.out.println("  Nocache took " + sw);

    long nocachetime = sw.getTime();

    sw.reset();
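    // Second pass: rendering served from the DOM cache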
    sw.start();

    for (int i = 0; i < 100; i++) {
        WikiPage page = m_engine.getPage("TestPage");
        String pagedata = m_engine.getPureText(page);

        WikiContext context = new WikiContext(m_engine, page);

        String html = m_manager.getHTML(context, pagedata);

        assertNotNull("cached got null response", html);
    }

    sw.stop();
    System.out.println("  Cache took " + sw);

    long speedup = nocachetime / sw.getTime();
    System.out.println("  Approx speedup: " + speedup + "x");
}

From source file:org.bml.util.time.StopWatchPool.java

@Override
public void destroyObject(StopWatch obj) throws Exception {
    obj.reset();
}

From source file:org.bml.util.time.StopWatchPool.java

@Override
public void passivateObject(StopWatch obj) throws Exception {
    obj.reset();
}