Example usage for java.time Duration ofMillis

Introduction

This page lists usage examples for java.time Duration.ofMillis.

Prototype

public static Duration ofMillis(long millis) 

Document

Obtains a Duration representing a number of milliseconds.
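
A minimal, self-contained sketch of this behavior (the class name OfMillisDemo is ours, not taken from the projects below): the millisecond count is split into whole seconds plus a nanosecond adjustment, and negative inputs are legal, producing negative durations.

import java.time.Duration;

public class OfMillisDemo {
    public static void main(String[] args) {
        // 1500 ms is stored as 1 second plus 500,000,000 nanoseconds
        Duration d = Duration.ofMillis(1500);
        System.out.println(d);              // PT1.5S
        System.out.println(d.getSeconds()); // 1
        System.out.println(d.getNano());    // 500000000

        // negative inputs are allowed; the Manta benchmark below uses
        // Duration.ofMillis(-1L) as a "server time unavailable" sentinel
        Duration sentinel = Duration.ofMillis(-1L);
        System.out.println(sentinel.isNegative()); // true
    }
}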

Usage

From source file:ai.grakn.engine.controller.TasksControllerTest.java

@Test
public void whenGettingTaskByIdRecurring_TaskIsReturned() {
    Duration duration = Duration.ofMillis(100);
    TaskState task = createTask(ShortExecutionMockTask.class, TaskSchedule.recurring(duration));

    when(manager.storage().getState(task.getId())).thenReturn(task);

    Response response = get(task.getId());
    Json json = response.as(Json.class, jsonMapper);

    assertThat(json.at("id").asString(), equalTo(task.getId().getValue()));
    assertThat(json.at(TASK_RUN_INTERVAL_PARAMETER).asLong(),
            equalTo(task.schedule().interval().get().toMillis()));
}

From source file:com.arpnetworking.metrics.impl.ApacheHttpSinkTest.java

@Test
public void testBatchesRequestsRespectsMax() throws InterruptedException {
    _wireMockRule.stubFor(WireMock.requestMatching(new RequestValueMatcher(r -> {
        // Annotations
        Assert.assertEquals(0, r.getAnnotationsCount());

        // Dimensions
        Assert.assertEquals(0, r.getDimensionsCount());

        // Samples
        assertSample(r.getTimersList(), "timer", 7d);
        assertSample(r.getCountersList(), "counter", 8d);
        assertSample(r.getGaugesList(), "gauge", 9d);
    })).willReturn(WireMock.aResponse().withStatus(200)));

    final Semaphore semaphore = new Semaphore(-2);
    final Sink sink = new ApacheHttpSink.Builder()
            .setUri(URI.create("http://localhost:" + _wireMockRule.port() + PATH)).setMaxBatchSize(2)
            .setParallelism(1).setEmptyQueueInterval(Duration.ofMillis(1000))
            .setEventHandler(new CompletionHandler(semaphore)).build();

    final TsdEvent event = new TsdEvent(Collections.emptyMap(),
            createQuantityMap("timer", TsdQuantity.newInstance(7d, null)),
            createQuantityMap("counter", TsdQuantity.newInstance(8d, null)),
            createQuantityMap("gauge", TsdQuantity.newInstance(9d, null)));

    for (int x = 0; x < 5; x++) {
        sink.record(event);
    }
    semaphore.acquire();

    // Request matcher
    final RequestPatternBuilder requestPattern = WireMock.postRequestedFor(WireMock.urlEqualTo(PATH))
            .withHeader("Content-Type", WireMock.equalTo("application/octet-stream"));

    // Assert that data was sent
    _wireMockRule.verify(3, requestPattern);
    Assert.assertTrue(_wireMockRule.findUnmatchedRequests().getRequests().isEmpty());
}

From source file:com.joyent.manta.benchmark.Benchmark.java

/**
 * Measures the total time to put multiple directories to Manta.
 *
 * @param directoryCount number of directories to create
 * @return two durations: the full wall-clock time in the JVM, and -1 because server time is unavailable
 * @throws IOException thrown when we can't access Manta over the network
 */
private static Duration[] measurePutDir(final int directoryCount) throws IOException {
    final StringBuilder path = new StringBuilder().append(testDirectory);

    for (int i = 0; i < directoryCount; i++) {
        path.append(MantaClient.SEPARATOR).append(STRING_GENERATOR.generate(2));
    }

    final long start = System.nanoTime();

    client.putDirectory(path.toString(), true);

    final long stop = System.nanoTime();

    Duration serverLatency = Duration.ofMillis(-1L);
    Duration fullLatency = Duration.ofNanos(stop - start);
    return new Duration[] { fullLatency, serverLatency };
}

From source file:com.arpnetworking.metrics.impl.ApacheHttpSinkTest.java

@Test
public void testRespectsBufferMax() throws InterruptedException {
    final AtomicInteger droppedEvents = new AtomicInteger(0);
    final Semaphore semaphoreA = new Semaphore(0);
    final Semaphore semaphoreB = new Semaphore(0);
    final Semaphore semaphoreC = new Semaphore(-2);
    final AtomicInteger recordsReceived = new AtomicInteger(0);

    _wireMockRule.stubFor(WireMock.requestMatching(new RequestValueMatcher(r -> {
        recordsReceived.incrementAndGet();

        // Annotations
        Assert.assertEquals(0, r.getAnnotationsCount());

        // Dimensions
        Assert.assertEquals(0, r.getDimensionsCount());

        // Samples
        assertSample(r.getTimersList(), "timer", 7d);
        assertSample(r.getCountersList(), "counter", 8d);
        assertSample(r.getGaugesList(), "gauge", 9d);
    })).willReturn(WireMock.aResponse().withStatus(200)));

    final Sink sink = new ApacheHttpSink.Builder()
            .setUri(URI.create("http://localhost:" + _wireMockRule.port() + PATH)).setMaxBatchSize(2)
            .setParallelism(1).setBufferSize(5).setEmptyQueueInterval(Duration.ofMillis(1000))
            .setEventHandler(
                    new RespectsMaxBufferEventHandler(semaphoreA, semaphoreB, semaphoreC, droppedEvents))
            .build();

    final TsdEvent event = new TsdEvent(Collections.emptyMap(),
            createQuantityMap("timer", TsdQuantity.newInstance(7d, null)),
            createQuantityMap("counter", TsdQuantity.newInstance(8d, null)),
            createQuantityMap("gauge", TsdQuantity.newInstance(9d, null)));

    // Add one event to be used as a synchronization point
    sink.record(event);
    semaphoreA.acquire();

    // Add the actual events to analyze
    for (int x = 0; x < 10; x++) {
        sink.record(event);
    }
    semaphoreB.release();
    semaphoreC.acquire();

    // Ensure expected handler was invoked
    Assert.assertEquals(5, droppedEvents.get());

    // Assert number of records received
    Assert.assertEquals(6, recordsReceived.get());

    // Request matcher
    final RequestPatternBuilder requestPattern = WireMock.postRequestedFor(WireMock.urlEqualTo(PATH))
            .withHeader("Content-Type", WireMock.equalTo("application/octet-stream"));

    // Assert that data was sent
    _wireMockRule.verify(4, requestPattern);
    Assert.assertTrue(_wireMockRule.findUnmatchedRequests().getRequests().isEmpty());
}

From source file:com.amazonaws.services.kinesis.aggregators.StreamAggregator.java

/**
 * {@inheritDoc}
 */
public void aggregateEvents(List<InputEvent> events) throws Exception {
    start = System.currentTimeMillis();
    int aggregatedEventCount = 0;
    int aggregatedElementCount = 0;

    if (!online) {
        throw new Exception("Aggregator Not Initialised");
    }

    BigInteger thisSequence;
    List<AggregateData> extractedItems = null;
    OffsetDateTime eventDate = null;

    try {
        for (InputEvent event : events) {
            // reset extracted items
            extractedItems = null;

            if (event.getSequenceNumber() != null) {
                thisSequence = new BigInteger(event.getSequenceNumber());
                // ignore any records going backwards relative to the current HWM
                if (highSeq != null && highSeq.compareTo(thisSequence) != -1) {
                    ignoredRecordsBelowHWM++;
                    continue;
                }
            }

            // set the low sequence if this is the first record received
            // after a flush
            if (lowSeq == null)
                lowSeq = event.getSequenceNumber();

            // high sequence is always the latest value
            highSeq = new BigInteger(event.getSequenceNumber());

            // extract the data from the input event
            try {
                extractedItems = dataExtractor.getData(event);
            } catch (SerializationException se) {
                // the customer may have elected to suppress serialisation
                // errors if the stream is expected to have heterogeneous
                // data on it
                if (this.raiseExceptionOnDataExtractionErrors) {
                    throw se;
                } else {
                    logWarn(String.format("Serialisation Exception Sequence %s Partition Key %s",
                            event.getSequenceNumber(), event.getPartitionKey()), se);
                }
            }

            // data extractor may have returned multiple data elements, or
            // be empty if there were serialisation problems which are
            // suppressed
            if (extractedItems != null) {
                aggregatedEventCount++;

                for (AggregateData data : extractedItems) {
                    // run the idempotency check
                    if (!this.idempotencyCheck.doProcess(event.getPartitionKey(), event.getSequenceNumber(),
                            data, event.getData())) {
                        logInfo(String.format("Ignoring Event %s as it failed Idempotency Check",
                                event.getPartitionKey()));
                        continue;
                    }

                    aggregatedElementCount++;

                    // if the data extractor didn't have a date value to
                    // extract, then use the current time
                    eventDate = data.getDate();
                    if (eventDate == null) {
                        eventDate = OffsetDateTime.now(ZoneId.of("UTC"));
                    }

                    // generate the local updates, one per time horizon that
                    // is requested
                    for (TimeHorizon h : timeHorizons) {
                        OffsetDateTime localEventDate = eventDate;
                        if (!h.isUTC())
                            localEventDate = eventDate.minus(Duration.ofMillis(data.getLocalOffset()));

                        // atomically update the aggregate table with event
                        // count or count + summaries
                        cache.update(aggregatorType, data.getLabels(),
                                (timeHorizons.size() > 1 ? h.getItemWithMultiValueFormat(localEventDate)
                                        : h.getValue(localEventDate)),
                                h, event.getSequenceNumber(), 1, data.getSummaries(),
                                dataExtractor.getSummaryConfig());
                    }
                }
            }
        }

        logInfo(String.format("Aggregation Complete - %s Records and %s Elements in %s ms",
                aggregatedEventCount, aggregatedElementCount, (System.currentTimeMillis() - start)));
    } catch (SerializationException se) {
        shutdown(true, InventoryModel.STATE.SERIALISATION_ERROR);
        LOG.error(se);
        throw se;
    } catch (Exception e) {
        shutdown(true, InventoryModel.STATE.UNKNOWN_ERROR);
        LOG.error(e);
        throw e;
    }
}

From source file:com.redskyit.scriptDriver.RunTests.java

private void sleep(long ms) {
    try {
        Sleeper.SYSTEM_SLEEPER.sleep(Duration.ofMillis(ms));
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
}

From source file:org.apache.bookkeeper.bookie.LedgerStorageCheckpointTest.java

public void testPeriodicCheckpointForLedgerStorage(String ledgerStorageClassName) throws Exception {
    File tmpDir = createTempDir("DiskCheck", "test");

    final ServerConfiguration conf = TestBKConfiguration.newServerConfiguration()
            .setMetadataServiceUri(zkUtil.getMetadataServiceUri()).setZkTimeout(5000)
            .setJournalDirName(tmpDir.getPath()).setLedgerDirNames(new String[] { tmpDir.getPath() })
            .setAutoRecoveryDaemonEnabled(false).setFlushInterval(2000)
            .setBookiePort(PortManager.nextFreePort())
            // entrylog per ledger is enabled
            .setEntryLogPerLedgerEnabled(true).setLedgerStorageClass(ledgerStorageClassName);
    Assert.assertEquals("Number of JournalDirs", 1, conf.getJournalDirs().length);
    // we know there is only one ledgerDir
    File ledgerDir = Bookie.getCurrentDirectories(conf.getLedgerDirs())[0];
    BookieServer server = new BookieServer(conf);
    server.start();
    ClientConfiguration clientConf = new ClientConfiguration();
    clientConf.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
    BookKeeper bkClient = new BookKeeper(clientConf);

    int numOfLedgers = 2;
    int numOfEntries = 5;
    byte[] dataBytes = "data".getBytes();

    for (int i = 0; i < numOfLedgers; i++) {
        LedgerHandle handle = bkClient.createLedgerAdv((long) i, 1, 1, 1, DigestType.CRC32, "passwd".getBytes(),
                null);
        for (int j = 0; j < numOfEntries; j++) {
            handle.addEntry(j, dataBytes);
        }
        handle.close();
    }

    LastLogMark lastLogMarkAfterFirstSetOfAdds = server.getBookie().journals.get(0).getLastLogMark();
    LogMark curMarkAfterFirstSetOfAdds = lastLogMarkAfterFirstSetOfAdds.getCurMark();

    File lastMarkFile = new File(ledgerDir, "lastMark");
    // lastMark file should be zero, because a checkpoint hasn't happened yet
    LogMark logMarkFileBeforeCheckpoint = readLastMarkFile(lastMarkFile);
    Assert.assertEquals("lastMarkFile before checkpoint should be zero", 0,
            logMarkFileBeforeCheckpoint.compare(new LogMark()));

    // wait for flushInterval so that the SyncThread does its next checkpoint iteration
    executorController.advance(Duration.ofMillis(conf.getFlushInterval()));
    /*
     * Since we have waited for more than flushInterval, the SyncThread
     * should have checkpointed. If entryLogPerLedger is not enabled, we
     * checkpoint only when the currentLog in EntryLogger is rotated; but
     * if it is enabled, we checkpoint every flushInterval period.
     */
    Assert.assertTrue("lastMark file must be existing, because checkpoint should have happened",
            lastMarkFile.exists());

    LastLogMark lastLogMarkAfterCheckpoint = server.getBookie().journals.get(0).getLastLogMark();
    LogMark curMarkAfterCheckpoint = lastLogMarkAfterCheckpoint.getCurMark();

    LogMark rolledLogMark = readLastMarkFile(lastMarkFile);
    Assert.assertNotEquals("rolledLogMark should not be zero, since checkpoint has happened", 0,
            rolledLogMark.compare(new LogMark()));
    /*
     * Curmark should be equal before and after the checkpoint, because we
     * didn't add new entries during this period
     */
    Assert.assertTrue("Curmark should be equal before and after checkpoint",
            curMarkAfterCheckpoint.compare(curMarkAfterFirstSetOfAdds) == 0);
    /*
     * Curmark after checkpoint should be equal to rolled logmark, because
     * we checkpointed
     */
    Assert.assertTrue("Curmark after first set of adds should be equal to rolled logmark",
            curMarkAfterCheckpoint.compare(rolledLogMark) == 0);

    // add more ledger/entries
    for (int i = numOfLedgers; i < 2 * numOfLedgers; i++) {
        LedgerHandle handle = bkClient.createLedgerAdv((long) i, 1, 1, 1, DigestType.CRC32, "passwd".getBytes(),
                null);
        for (int j = 0; j < numOfEntries; j++) {
            handle.addEntry(j, dataBytes);
        }
        handle.close();
    }

    // wait for flushInterval so that the SyncThread does its next checkpoint iteration
    executorController.advance(Duration.ofMillis(conf.getFlushInterval()));

    LastLogMark lastLogMarkAfterSecondSetOfAdds = server.getBookie().journals.get(0).getLastLogMark();
    LogMark curMarkAfterSecondSetOfAdds = lastLogMarkAfterSecondSetOfAdds.getCurMark();

    rolledLogMark = readLastMarkFile(lastMarkFile);
    /*
     * Curmark after checkpoint should be equal to rolled logmark, because
     * we checkpointed
     */
    Assert.assertTrue("Curmark after second set of adds should be equal to rolled logmark",
            curMarkAfterSecondSetOfAdds.compare(rolledLogMark) == 0);

    server.shutdown();
    bkClient.close();
}

From source file:org.apache.bookkeeper.bookie.LedgerStorageCheckpointTest.java

public void testCheckpointofILSWhenEntryLogIsRotated(boolean entryLogPerLedgerEnabled) throws Exception {
    File tmpDir = createTempDir("DiskCheck", "test");

    final ServerConfiguration conf = TestBKConfiguration.newServerConfiguration()
            .setMetadataServiceUri(zkUtil.getMetadataServiceUri()).setZkTimeout(5000)
            .setJournalDirName(tmpDir.getPath()).setLedgerDirNames(new String[] { tmpDir.getPath() })
            .setAutoRecoveryDaemonEnabled(false)
            // set a very high flushInterval period
            .setFlushInterval(30000).setBookiePort(PortManager.nextFreePort())
            // entrylog per ledger is enabled or disabled as per the test parameter
            .setEntryLogPerLedgerEnabled(entryLogPerLedgerEnabled)
            .setLedgerStorageClass(InterleavedLedgerStorage.class.getName());

    Assert.assertEquals("Number of JournalDirs", 1, conf.getJournalDirs().length);
    // we know there is only one ledgerDir
    File ledgerDir = Bookie.getCurrentDirectories(conf.getLedgerDirs())[0];
    BookieServer server = new BookieServer(conf);
    server.start();
    ClientConfiguration clientConf = new ClientConfiguration();
    clientConf.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
    BookKeeper bkClient = new BookKeeper(clientConf);
    InterleavedLedgerStorage ledgerStorage = (InterleavedLedgerStorage) server.getBookie().ledgerStorage;

    int numOfEntries = 5;
    byte[] dataBytes = "data".getBytes();

    long ledgerId = 10;
    LedgerHandle handle = bkClient.createLedgerAdv(ledgerId, 1, 1, 1, DigestType.CRC32, "passwd".getBytes(),
            null);
    for (int j = 0; j < numOfEntries; j++) {
        handle.addEntry(j, dataBytes);
    }
    handle.close();
    // simulate rolling entrylog
    ((EntryLogManagerBase) ledgerStorage.getEntryLogger().getEntryLogManager()).createNewLog(ledgerId);
    // sleep for a bit for checkpoint to do its task
    executorController.advance(Duration.ofMillis(500));

    File lastMarkFile = new File(ledgerDir, "lastMark");
    LogMark rolledLogMark = readLastMarkFile(lastMarkFile);
    if (entryLogPerLedgerEnabled) {
        Assert.assertEquals(
                "rolledLogMark should be zero, since checkpoint "
                        + "shouldn't have happened when entryLog is rotated",
                0, rolledLogMark.compare(new LogMark()));
    } else {
        Assert.assertNotEquals(
                "rolledLogMark shouldn't be zero, since checkpoint "
                        + "should have happened when entryLog is rotated",
                0, rolledLogMark.compare(new LogMark()));
    }
    bkClient.close();
    server.shutdown();
}

From source file:org.apache.bookkeeper.bookie.LedgerStorageCheckpointTest.java

public void testCheckpointOfSLSWhenEntryLogIsRotated(boolean entryLogPerLedgerEnabled) throws Exception {
    File tmpDir = createTempDir("DiskCheck", "test");

    final ServerConfiguration conf = TestBKConfiguration.newServerConfiguration()
            .setMetadataServiceUri(zkUtil.getMetadataServiceUri()).setZkTimeout(5000)
            .setJournalDirName(tmpDir.getPath()).setLedgerDirNames(new String[] { tmpDir.getPath() })
            .setAutoRecoveryDaemonEnabled(false)
            // set a very high flushInterval period
            .setFlushInterval(30000).setBookiePort(PortManager.nextFreePort())
            // entrylog per ledger is enabled or disabled as per the test parameter
            .setEntryLogPerLedgerEnabled(entryLogPerLedgerEnabled)
            .setLedgerStorageClass(SortedLedgerStorage.class.getName())
            // set very low skipListSizeLimit and entryLogSizeLimit to simulate log file rotation
            .setSkipListSizeLimit(1 * 1000 * 1000).setEntryLogSizeLimit(2 * 1000 * 1000);

    Assert.assertEquals("Number of JournalDirs", 1, conf.getJournalDirs().length);
    // we know there is only one ledgerDir
    File ledgerDir = Bookie.getCurrentDirectories(conf.getLedgerDirs())[0];
    BookieServer server = new BookieServer(conf);
    server.start();
    ClientConfiguration clientConf = new ClientConfiguration();
    clientConf.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
    BookKeeper bkClient = new BookKeeper(clientConf);

    Random rand = new Random();
    byte[] dataBytes = new byte[10 * 1000];
    rand.nextBytes(dataBytes);
    int numOfEntries = ((int) conf.getEntryLogSizeLimit() + (100 * 1000)) / dataBytes.length;

    LedgerHandle handle = bkClient.createLedgerAdv(10, 1, 1, 1, DigestType.CRC32, "passwd".getBytes(), null);
    for (int j = 0; j < numOfEntries; j++) {
        handle.addEntry(j, dataBytes);
    }
    handle.close();

    // sleep for a bit for checkpoint to do its task
    executorController.advance(Duration.ofMillis(500));

    File lastMarkFile = new File(ledgerDir, "lastMark");
    LogMark rolledLogMark = readLastMarkFile(lastMarkFile);
    if (entryLogPerLedgerEnabled) {
        Assert.assertEquals(
                "rolledLogMark should be zero, since checkpoint "
                        + "shouldn't have happened when entryLog is rotated",
                0, rolledLogMark.compare(new LogMark()));
    } else {
        Assert.assertNotEquals(
                "rolledLogMark shouldn't be zero, since checkpoint "
                        + "should have happened when entryLog is rotated",
                0, rolledLogMark.compare(new LogMark()));
    }
    bkClient.close();
    server.shutdown();
}

From source file:org.apache.bookkeeper.bookie.LedgerStorageCheckpointTest.java

@Test
public void testIfEntryLogPerLedgerEnabledCheckpointFlushesAllLogs() throws Exception {
    File tmpDir = createTempDir("DiskCheck", "test");

    final ServerConfiguration conf = TestBKConfiguration.newServerConfiguration()
            .setMetadataServiceUri(zkUtil.getMetadataServiceUri()).setZkTimeout(5000)
            .setJournalDirName(tmpDir.getPath()).setLedgerDirNames(new String[] { tmpDir.getPath() })
            .setAutoRecoveryDaemonEnabled(false)
            // set flushInterval
            .setFlushInterval(3000).setBookiePort(PortManager.nextFreePort())
            // entrylog per ledger is enabled
            .setEntryLogPerLedgerEnabled(true).setLedgerStorageClass(InterleavedLedgerStorage.class.getName())
            // set flushIntervalInBytes to a very high number
            .setFlushIntervalInBytes(10000000);

    Assert.assertEquals("Number of JournalDirs", 1, conf.getJournalDirs().length);
    // we know there is only one ledgerDir
    File ledgerDir = Bookie.getCurrentDirectories(conf.getLedgerDirs())[0];
    BookieServer server = new BookieServer(conf);
    server.start();
    ClientConfiguration clientConf = new ClientConfiguration();
    clientConf.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
    BookKeeper bkClient = new BookKeeper(clientConf);
    InterleavedLedgerStorage ledgerStorage = (InterleavedLedgerStorage) server.getBookie().ledgerStorage;
    EntryLogger entryLogger = ledgerStorage.entryLogger;
    EntryLogManagerForEntryLogPerLedger entryLogManager = (EntryLogManagerForEntryLogPerLedger) entryLogger
            .getEntryLogManager();

    Random rand = new Random();
    int numOfEntries = 5;
    byte[] dataBytes = "data".getBytes();

    int numOfLedgers = 3;
    long[] ledgerIds = new long[numOfLedgers];
    LedgerHandle handle;
    for (int i = 0; i < numOfLedgers; i++) {
        ledgerIds[i] = rand.nextInt(100000) + 1;
        handle = bkClient.createLedgerAdv(ledgerIds[i], 1, 1, 1, DigestType.CRC32, "passwd".getBytes(), null);
        for (int j = 0; j < numOfEntries; j++) {
            handle.addEntry(j, dataBytes);
        }
        // simulate rolling entrylog
        entryLogManager.createNewLog(ledgerIds[i]);
    }

    Set<BufferedLogChannelWithDirInfo> copyOfCurrentLogsWithDirInfo = entryLogManager.getCopyOfCurrentLogs();
    for (BufferedLogChannelWithDirInfo currentLogWithDirInfo : copyOfCurrentLogsWithDirInfo) {
        Assert.assertNotEquals("bytesWrittenSinceLastFlush shouldn't be zero", 0,
                currentLogWithDirInfo.getLogChannel().getUnpersistedBytes());
    }
    Assert.assertNotEquals("There should be logChannelsToFlush", 0,
            entryLogManager.getRotatedLogChannels().size());

    /*
     * wait for at least the flushInterval period, so that the checkpoint can happen.
     */
    executorController.advance(Duration.ofMillis(conf.getFlushInterval()));

    /*
     * since the checkpoint happened, there shouldn't be any logChannelsToFlush
     * and bytesWrittenSinceLastFlush should be zero.
     */
    List<BufferedLogChannel> copyOfRotatedLogChannels = entryLogManager.getRotatedLogChannels();
    Assert.assertTrue("There shouldn't be logChannelsToFlush",
            ((copyOfRotatedLogChannels == null) || (copyOfRotatedLogChannels.size() == 0)));

    copyOfCurrentLogsWithDirInfo = entryLogManager.getCopyOfCurrentLogs();
    for (BufferedLogChannelWithDirInfo currentLogWithDirInfo : copyOfCurrentLogsWithDirInfo) {
        Assert.assertEquals("bytesWrittenSinceLastFlush should be zero", 0,
                currentLogWithDirInfo.getLogChannel().getUnpersistedBytes());
    }
}