Example usage for java.util.concurrent.atomic AtomicBoolean compareAndSet

Introduction

On this page you can find example usage for java.util.concurrent.atomic AtomicBoolean compareAndSet.

Prototype

public final boolean compareAndSet(boolean expectedValue, boolean newValue) 

Document

Atomically sets the value to newValue if the current value == expectedValue, with memory effects as specified by VarHandle#compareAndSet.
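
Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the projects listed) illustrating the return-value contract: compareAndSet writes newValue and returns true only when the current value equals expectedValue; otherwise it leaves the value unchanged and returns false.

import java.util.concurrent.atomic.AtomicBoolean;

public class CompareAndSetDemo {
    public static void main(String[] args) {
        AtomicBoolean flag = new AtomicBoolean(false);

        // Succeeds: the current value is false, so it is atomically set to true.
        System.out.println(flag.compareAndSet(false, true)); // prints true

        // Fails: the current value is now true, not the expected false; nothing changes.
        System.out.println(flag.compareAndSet(false, true)); // prints false

        System.out.println(flag.get()); // prints true
    }
}

This contract is what makes the "run-once" pattern in several of the examples below work: the first caller of compareAndSet(false, true) gets true back and performs an action exactly once, no matter how many threads race.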

Usage

From source file:org.springframework.integration.file.FileWritingMessageHandlerTests.java

@Test
public void lockForFlush() throws Exception {
    File tempFolder = this.temp.newFolder();
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    final BufferedOutputStream out = spy(new BufferedOutputStream(baos));
    FileWritingMessageHandler handler = new FileWritingMessageHandler(tempFolder) {

        @Override
        protected BufferedOutputStream createOutputStream(File fileToWriteTo, boolean append) {
            return out;
        }

    };
    handler.setFileExistsMode(FileExistsMode.APPEND_NO_FLUSH);
    handler.setFileNameGenerator(message -> "foo.txt");
    ThreadPoolTaskScheduler taskScheduler = new ThreadPoolTaskScheduler();
    taskScheduler.afterPropertiesSet();
    handler.setTaskScheduler(taskScheduler);
    handler.setOutputChannel(new NullChannel());
    handler.setBeanFactory(mock(BeanFactory.class));
    handler.setFlushInterval(10);
    handler.setFlushWhenIdle(false);
    handler.afterPropertiesSet();
    handler.start();

    final AtomicBoolean writing = new AtomicBoolean();
    final AtomicBoolean closeWhileWriting = new AtomicBoolean();
    willAnswer(i -> {
        writing.set(true);
        Thread.sleep(500);
        writing.set(false);
        return null;
    }).given(out).write(any(byte[].class), anyInt(), anyInt());
    willAnswer(i -> {
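        // latches to true (and stays true) if close() is called while a write is in progress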
        closeWhileWriting.compareAndSet(false, writing.get());
        return null;
    }).given(out).close();
    handler.handleMessage(new GenericMessage<>("foo".getBytes()));
    verify(out).write(any(byte[].class), anyInt(), anyInt());
    assertFalse(closeWhileWriting.get());
    handler.stop();
}

From source file:org.springframework.kafka.listener.TransactionalContainerTests.java

@SuppressWarnings("unchecked")
@Test
public void testRollbackRecord() throws Exception {
    logger.info("Start testRollbackRecord");
    Map<String, Object> props = KafkaTestUtils.consumerProps("txTest1", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "group");
    props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic1, topic2);
    containerProps.setGroupId("group");
    containerProps.setPollTimeout(10_000);

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    senderProps.put(ProducerConfig.RETRIES_CONFIG, 1);
    DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    pf.setTransactionIdPrefix("rr.");

    final KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    final AtomicBoolean failed = new AtomicBoolean();
    final CountDownLatch latch = new CountDownLatch(3);
    final AtomicReference<String> transactionalId = new AtomicReference<>();
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        latch.countDown();
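        // fail only the first delivery: compareAndSet returns true just once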
        if (failed.compareAndSet(false, true)) {
            throw new RuntimeException("fail");
        }
        /*
         * Send a message to topic2 and wait for it so we don't stop the container too soon.
         */
        if (message.topic().equals(topic1)) {
            template.send(topic2, "bar");
            template.flush();
            transactionalId.set(KafkaTestUtils.getPropertyValue(
                    ProducerFactoryUtils.getTransactionalResourceHolder(pf).getProducer(),
                    "delegate.transactionManager.transactionalId", String.class));
        }
    });

    @SuppressWarnings({ "rawtypes" })
    KafkaTransactionManager tm = new KafkaTransactionManager(pf);
    containerProps.setTransactionManager(tm);
    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testRollbackRecord");
    container.start();

    template.setDefaultTopic(topic1);
    template.executeInTransaction(t -> {
        template.sendDefault(0, 0, "foo");
        return null;
    });
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    container.stop();
    Consumer<Integer, String> consumer = cf.createConsumer();
    final CountDownLatch subsLatch = new CountDownLatch(1);
    consumer.subscribe(Arrays.asList(topic1), new ConsumerRebalanceListener() {

        @Override
        public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            // empty
        }

        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            subsLatch.countDown();
        }

    });
    ConsumerRecords<Integer, String> records = null;
    int n = 0;
    while (subsLatch.getCount() > 0 && n++ < 600) {
        records = consumer.poll(Duration.ofMillis(100));
    }
    assertThat(subsLatch.await(1, TimeUnit.MILLISECONDS)).isTrue();
    assertThat(records.count()).isEqualTo(0);
    // depending on timing, the position might include the offset representing the commit in the log
    assertThat(consumer.position(new TopicPartition(topic1, 0))).isGreaterThanOrEqualTo(1L);
    assertThat(transactionalId.get()).startsWith("rr.group.txTopic");
    assertThat(KafkaTestUtils.getPropertyValue(pf, "consumerProducers", Map.class)).isEmpty();
    logger.info("Stop testRollbackRecord");
    pf.destroy();
    consumer.close();
}

From source file:org.pentaho.osgi.platform.plugin.deployer.impl.PluginZipFileProcessor.java

private void processEntry(ZipOutputStream zipOutputStream, PluginMetadata pluginMetadata, boolean isDirectory,
        String name, byte[] bytes) throws IOException {
    logger.trace("Processing zip entry: {} ", name);
    AtomicBoolean output = new AtomicBoolean(false);
    boolean wasHandled = false;
    for (PluginFileHandler pluginFileHandler : pluginFileHandlers) {

        if (pluginFileHandler.handles(name)) {
            wasHandled = true;
            logger.trace("Plugin file handler {} will handle {}", pluginFileHandler.toString(), name);
            try {
                // There is no short-circuit. Multiple handlers can do work on any given resource
                boolean handlerSaysOutput = pluginFileHandler.handle(name, bytes, pluginMetadata);
                logger.trace("Plugin file handler {} handled {}", pluginFileHandler.toString(), name);
                output.compareAndSet(false, handlerSaysOutput);
            } catch (PluginHandlingException e) {
                logger.error("Plugin file handler " + pluginFileHandler.toString()
                        + " threw exception when handling " + name, e);
                throw new IOException(e);
            }
        }
    }
    if (!wasHandled || output.get()) {
        zipOutputStream.putNextEntry(new ZipEntry(name));
        if (!isDirectory) {
            IOUtils.write(bytes, zipOutputStream);
        }
        zipOutputStream.closeEntry();
    }
}

From source file:org.apache.hadoop.hdfs.client.impl.TestBlockReaderFactory.java

/**
 * Test the case where we have multiple threads waiting on the
 * ShortCircuitCache delivering a certain ShortCircuitReplica.
 *
 * In this case, there should only be one call to
 * createShortCircuitReplicaInfo.  This one replica should be shared
 * by all threads.
 */
@Test(timeout = 60000)
public void testMultipleWaitersOnShortCircuitCache() throws Exception {
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicBoolean creationIsBlocked = new AtomicBoolean(true);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    BlockReaderFactory.createShortCircuitReplicaInfoCallback = new ShortCircuitCache.ShortCircuitReplicaCreator() {
        @Override
        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
            Uninterruptibles.awaitUninterruptibly(latch);
            if (!creationIsBlocked.compareAndSet(true, false)) {
                Assert.fail("there were multiple calls to "
                        + "createShortCircuitReplicaInfo.  Only one was expected.");
            }
            return null;
        }
    };
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testMultipleWaitersOnShortCircuitCache", sockDir);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4000;
    final int SEED = 0xFADED;
    final int NUM_THREADS = 10;
    DFSTestUtil.createFile(dfs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    Runnable readerRunnable = new Runnable() {
        @Override
        public void run() {
            try {
                byte contents[] = DFSTestUtil.readFileBuffer(dfs, new Path(TEST_FILE));
                Assert.assertFalse(creationIsBlocked.get());
                byte expected[] = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
                Assert.assertTrue(Arrays.equals(contents, expected));
            } catch (Throwable e) {
                LOG.error("readerRunnable error", e);
                testFailed.set(true);
            }
        }
    };
    Thread threads[] = new Thread[NUM_THREADS];
    for (int i = 0; i < NUM_THREADS; i++) {
        threads[i] = new Thread(readerRunnable);
        threads[i].start();
    }
    Thread.sleep(500);
    latch.countDown();
    for (int i = 0; i < NUM_THREADS; i++) {
        Uninterruptibles.joinUninterruptibly(threads[i]);
    }
    cluster.shutdown();
    sockDir.close();
    Assert.assertFalse(testFailed.get());
}

From source file:org.pentaho.di.trans.dataservice.DataServiceExecutor.java

public DataServiceExecutor executeQuery(final DataOutputStream dos) throws IOException {

    writeMetadata(dos, getServiceName(), calculateTransname(getSql(), true),
            getServiceTrans().getContainerObjectId(), calculateTransname(getSql(), false),
            getGenTrans().getContainerObjectId());

    final AtomicBoolean rowMetaWritten = new AtomicBoolean(false);

    // When done, check if no row metadata was written.  The client is still going to expect it...
    // Since we know it, we'll pass it.
    //
    getGenTrans().addTransListener(new TransAdapter() {
        @Override
        public void transFinished(Trans trans) throws KettleException {
            if (rowMetaWritten.compareAndSet(false, true)) {
                RowMetaInterface stepFields = trans.getTransMeta().getStepFields(getResultStepName());
                stepFields.writeMeta(dos);
            }
        }
    });

    // Now execute the query transformation(s) and pass the data to the output stream...
    return executeQuery(new RowAdapter() {
        @Override
        public void rowWrittenEvent(RowMetaInterface rowMeta, Object[] row) throws KettleStepException {

            // On the first row, write the metadata...
            //
            try {
                if (rowMetaWritten.compareAndSet(false, true)) {
                    rowMeta.writeMeta(dos);
                }
                rowMeta.writeData(dos, row);
            } catch (Exception e) {
                if (!getServiceTrans().isStopped()) {
                    throw new KettleStepException(e);
                }
            }
        }
    });
}

From source file:io.openvidu.server.recording.service.RecordingManager.java

private void checkRecordingPaths(String openviduRecordingPath, String openviduRecordingCustomLayout)
        throws OpenViduException {
    log.info("Initializing recording paths");

    Path recordingPath = null;
    try {
        recordingPath = Files.createDirectories(Paths.get(openviduRecordingPath));
    } catch (IOException e) {
        String errorMessage = "The recording path \"" + openviduRecordingPath
                + "\" is not valid. Reason: OpenVidu Server cannot find path \"" + openviduRecordingPath
                + "\" and doesn't have permissions to create it";
        log.error(errorMessage);
        throw new OpenViduException(Code.RECORDING_PATH_NOT_VALID, errorMessage);
    }

    // Check OpenVidu Server write permissions in recording path
    if (!Files.isWritable(recordingPath)) {
        String errorMessage = "The recording path \"" + openviduRecordingPath
                + "\" is not valid. Reason: OpenVidu Server needs write permissions. Try running command \"sudo chmod 777 "
                + openviduRecordingPath + "\"";
        log.error(errorMessage);
        throw new OpenViduException(Code.RECORDING_PATH_NOT_VALID, errorMessage);
    } else {
        log.info("OpenVidu Server has write permissions on recording path: {}", openviduRecordingPath);
    }

    final String testFolderPath = openviduRecordingPath + "/TEST_RECORDING_PATH_" + System.currentTimeMillis();
    final String testFilePath = testFolderPath + "/TEST_RECORDING_PATH.webm";

    // Check Kurento Media Server write permissions in recording path
    KurentoClientSessionInfo kcSessionInfo = new OpenViduKurentoClientSessionInfo("TEST_RECORDING_PATH",
            "TEST_RECORDING_PATH");
    MediaPipeline pipeline = this.kcProvider.getKurentoClient(kcSessionInfo).createMediaPipeline();
    RecorderEndpoint recorder = new RecorderEndpoint.Builder(pipeline, "file://" + testFilePath).build();

    final AtomicBoolean kurentoRecorderError = new AtomicBoolean(false);

    recorder.addErrorListener(new EventListener<ErrorEvent>() {
        @Override
        public void onEvent(ErrorEvent event) {
            if (event.getErrorCode() == 6) {
                // KMS write permissions error
                kurentoRecorderError.compareAndSet(false, true);
            }
        }
    });

    recorder.record();

    try {
        // Give the error event some time to trigger if necessary
        Thread.sleep(500);
    } catch (InterruptedException e1) {
        e1.printStackTrace();
    }

    if (kurentoRecorderError.get()) {
        String errorMessage = "The recording path \"" + openviduRecordingPath
                + "\" is not valid. Reason: Kurento Media Server needs write permissions. Try running command \"sudo chmod 777 "
                + openviduRecordingPath + "\"";
        log.error(errorMessage);
        throw new OpenViduException(Code.RECORDING_PATH_NOT_VALID, errorMessage);
    }

    recorder.stop();
    recorder.release();
    pipeline.release();

    log.info("Kurento Media Server has write permissions on recording path: {}", openviduRecordingPath);

    try {
        new CustomFileManager().deleteFolder(testFolderPath);
        log.info("OpenVidu Server has write permissions over files created by Kurento Media Server");
    } catch (IOException e) {
        String errorMessage = "The recording path \"" + openviduRecordingPath
                + "\" is not valid. Reason: OpenVidu Server does not have write permissions over files created by Kurento Media Server. "
                + "Try running Kurento Media Server as user \"" + System.getProperty("user.name")
                + "\" or run OpenVidu Server as superuser";
        log.error(errorMessage);
        log.error(
                "Be aware that a folder \"{}\" was created and should be manually deleted (\"sudo rm -rf {}\")",
                testFolderPath, testFolderPath);
        throw new OpenViduException(Code.RECORDING_PATH_NOT_VALID, errorMessage);
    }

    if (openviduConfig.openviduRecordingCustomLayoutChanged(openviduRecordingCustomLayout)) {
        // Property openvidu.recording.custom-layout changed
        File dir = new File(openviduRecordingCustomLayout);
        if (dir.exists()) {
            if (!dir.isDirectory()) {
                String errorMessage = "The custom layouts path \"" + openviduRecordingCustomLayout
                        + "\" is not valid. Reason: path already exists but it is not a directory";
                log.error(errorMessage);
                throw new OpenViduException(Code.RECORDING_FILE_EMPTY_ERROR, errorMessage);
            } else {
                if (dir.listFiles() == null) {
                    String errorMessage = "The custom layouts path \"" + openviduRecordingCustomLayout
                            + "\" is not valid. Reason: OpenVidu Server needs read permissions. Try running command \"sudo chmod 755 "
                            + openviduRecordingCustomLayout + "\"";
                    log.error(errorMessage);
                    throw new OpenViduException(Code.RECORDING_FILE_EMPTY_ERROR, errorMessage);
                } else {
                    log.info("OpenVidu Server has read permissions on custom layout path: {}",
                            openviduRecordingCustomLayout);
                    log.info("Custom layouts path successfully initialized at {}",
                            openviduRecordingCustomLayout);
                }
            }
        } else {
            try {
                Files.createDirectories(dir.toPath());
                log.warn(
                        "OpenVidu custom layouts path (system property 'openvidu.recording.custom-layout') has been created, being folder {}. "
                                + "It is an empty folder, so no custom layout is currently present",
                        dir.getAbsolutePath());
            } catch (IOException e) {
                String errorMessage = "The custom layouts path \"" + openviduRecordingCustomLayout
                        + "\" is not valid. Reason: OpenVidu Server cannot find path \""
                        + openviduRecordingCustomLayout + "\" and doesn't have permissions to create it";
                log.error(errorMessage);
                throw new OpenViduException(Code.RECORDING_FILE_EMPTY_ERROR, errorMessage);
            }
        }
    }

    log.info("Recording path successfully initialized at {}", openviduRecordingPath);
}

From source file:com.netflix.curator.framework.recipes.locks.TestInterProcessMutexBase.java

@Test
public void testReentrantSingleLock() throws Exception {
    final int THREAD_QTY = 10;

    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1));
    client.start();
    try {
        final AtomicBoolean hasLock = new AtomicBoolean(false);
        final AtomicBoolean isFirst = new AtomicBoolean(true);
        final Semaphore semaphore = new Semaphore(1);
        final InterProcessLock mutex = makeLock(client);

        List<Future<Object>> threads = Lists.newArrayList();
        ExecutorService service = Executors.newCachedThreadPool();
        for (int i = 0; i < THREAD_QTY; ++i) {
            Future<Object> t = service.submit(new Callable<Object>() {
                @Override
                public Object call() throws Exception {
                    semaphore.acquire();
                    mutex.acquire();
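                    // mutual exclusion check: no other thread may hold the lock at this point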
                    Assert.assertTrue(hasLock.compareAndSet(false, true));
                    try {
                        if (isFirst.compareAndSet(true, false)) {
                            semaphore.release(THREAD_QTY - 1);
                            while (semaphore.availablePermits() > 0) {
                                Thread.sleep(100);
                            }
                        } else {
                            Thread.sleep(100);
                        }
                    } finally {
                        mutex.release();
                        hasLock.set(false);
                    }
                    return null;
                }
            });
            threads.add(t);
        }

        for (Future<Object> t : threads) {
            t.get();
        }
    } finally {
        client.close();
    }
}

From source file:org.mule.expression.DefaultExpressionManager.java

public void validateExpression(String expression) throws InvalidExpressionException {
    if (!muleContext.getConfiguration().isValidateExpressions()) {
        if (logger.isDebugEnabled()) {
            logger.debug("Validate expressions is turned off, no checking done for: " + expression);
        }
        return;
    }
    try {
        parser.validate(expression);
    } catch (IllegalArgumentException e) {
        throw new InvalidExpressionException(expression, e.getMessage());
    }

    final AtomicBoolean valid = new AtomicBoolean(true);
    final AtomicBoolean match = new AtomicBoolean(false);
    final StringBuffer message = new StringBuffer();
    parser.parse(new TemplateParser.TemplateCallback() {
        public Object match(String token) {
            match.set(true);
            if (!isEvaluatorExpression(token)) {
                if (valid.get()) {
                    try {
                        expressionLanguage.validate(token);
                    } catch (InvalidExpressionException e) {
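                        // one-way flip: once any token fails validation, valid stays false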
                        valid.compareAndSet(true, false);
                        message.append(token).append(" is invalid\n");
                        message.append(e.getMessage());
                    }
                }
            }
            return null;
        }
    }, expression);

    if (message.length() > 0) {
        throw new InvalidExpressionException(expression, message.toString());
    } else if (!match.get()) {
        throw new InvalidExpressionException(expression,
                "Expression string is not an expression.  Use isExpression(String) to validate first");
    }
}

From source file:com.netflix.curator.framework.recipes.queue.TestDistributedQueue.java

@Test
public void testFlush() throws Exception {
    final Timing timing = new Timing();
    final CountDownLatch latch = new CountDownLatch(1);
    DistributedQueue<TestQueueItem> queue = null;
    final CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
            timing.session(), timing.connection(), new RetryOneTime(1));
    client.start();
    try {
        final AtomicBoolean firstTime = new AtomicBoolean(true);
        queue = new DistributedQueue<TestQueueItem>(client, null, serializer, "/test",
                new ThreadFactoryBuilder().build(), MoreExecutors.sameThreadExecutor(), 10, true, null,
                QueueBuilder.NOT_SET, true, 0) {
            @Override
            void internalCreateNode(final String path, final byte[] bytes, final BackgroundCallback callback)
                    throws Exception {
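                // defer only the first node creation until the latch is released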
                if (firstTime.compareAndSet(true, false)) {
                    Executors.newSingleThreadExecutor().submit(new Callable<Object>() {
                        @Override
                        public Object call() throws Exception {
                            latch.await();
                            timing.sleepABit();
                            client.create().withMode(CreateMode.PERSISTENT_SEQUENTIAL).inBackground(callback)
                                    .forPath(path, bytes);
                            return null;
                        }
                    });
                } else {
                    super.internalCreateNode(path, bytes, callback);
                }
            }
        };
        queue.start();

        queue.put(new TestQueueItem("1"));
        Assert.assertFalse(queue.flushPuts(timing.forWaiting().seconds(), TimeUnit.SECONDS));
        latch.countDown();

        Assert.assertTrue(queue.flushPuts(timing.forWaiting().seconds(), TimeUnit.SECONDS));
    } finally {
        if (latch.getCount() > 0) {
            latch.countDown();
        }

        IOUtils.closeQuietly(queue);
        IOUtils.closeQuietly(client);
    }
}

From source file:org.apache.bookkeeper.bookie.LedgerStorageCheckpointTest.java

@Test
public void testCheckPointForEntryLoggerWithMultipleActiveEntryLogs() throws Exception {
    File tmpDir = createTempDir("DiskCheck", "test");

    final ServerConfiguration conf = TestBKConfiguration.newServerConfiguration()
            .setMetadataServiceUri(zkUtil.getMetadataServiceUri()).setZkTimeout(5000)
            .setJournalDirName(tmpDir.getPath()).setLedgerDirNames(new String[] { tmpDir.getPath() })
            .setAutoRecoveryDaemonEnabled(false).setFlushInterval(3000)
            .setBookiePort(PortManager.nextFreePort())
            // entrylog per ledger is enabled
            .setEntryLogPerLedgerEnabled(true)
            .setLedgerStorageClass(MockInterleavedLedgerStorage.class.getName());

    Assert.assertEquals("Number of JournalDirs", 1, conf.getJournalDirs().length);
    // we know there is only one ledgerDir
    File ledgerDir = Bookie.getCurrentDirectories(conf.getLedgerDirs())[0];
    BookieServer server = new BookieServer(conf);
    server.start();
    ClientConfiguration clientConf = new ClientConfiguration();
    clientConf.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
    final BookKeeper bkClient = new BookKeeper(clientConf);

    int numOfLedgers = 12;
    int numOfEntries = 100;
    byte[] dataBytes = "data".getBytes();
    AtomicBoolean receivedExceptionForAdd = new AtomicBoolean(false);
    LongStream.range(0, numOfLedgers).parallel().mapToObj((ledgerId) -> {
        LedgerHandle handle = null;
        try {
            handle = bkClient.createLedgerAdv(ledgerId, 1, 1, 1, DigestType.CRC32, "passwd".getBytes(), null);
        } catch (BKException | InterruptedException exc) {
            receivedExceptionForAdd.compareAndSet(false, true);
            LOG.error("Got Exception while trying to create LedgerHandle for ledgerId: " + ledgerId, exc);
        }
        return handle;
    }).forEach((writeHandle) -> {
        IntStream.range(0, numOfEntries).forEach((entryId) -> {
            try {
                writeHandle.addEntry(entryId, dataBytes);
            } catch (BKException | InterruptedException exc) {
                receivedExceptionForAdd.compareAndSet(false, true);
                LOG.error("Got Exception while trying to AddEntry of ledgerId: " + writeHandle.getId()
                        + " entryId: " + entryId, exc);
            }
        });
        try {
            writeHandle.close();
        } catch (BKException | InterruptedException e) {
            receivedExceptionForAdd.compareAndSet(false, true);
            LOG.error("Got Exception while trying to close writeHandle of ledgerId: " + writeHandle.getId(), e);
        }
    });

    Assert.assertFalse(
            "There shouldn't be any exceptions while creating writeHandle and adding entries to writeHandle",
            receivedExceptionForAdd.get());

    executorController.advance(Duration.ofMillis(conf.getFlushInterval()));
    // since we have waited for more than flushInterval, SyncThread should have checkpointed.
    // if entrylogperledger is not enabled, then we checkpoint only when currentLog in EntryLogger
    // is rotated. but if entrylogperledger is enabled, then we checkpoint for every flushInterval period
    File lastMarkFile = new File(ledgerDir, "lastMark");
    Assert.assertTrue("lastMark file must be existing, because checkpoint should have happened",
            lastMarkFile.exists());
    LogMark rolledLogMark = readLastMarkFile(lastMarkFile);
    Assert.assertNotEquals("rolledLogMark should not be zero, since checkpoint has happened", 0,
            rolledLogMark.compare(new LogMark()));

    bkClient.close();
    // here we are calling shutdown, but MockInterleavedLedgerStorage shutdown/flush
    // methods are noop, so the entrylogger is not flushed as part of this shutdown.
    // here we are trying to simulate a Bookie crash, but there is no way to
    // simulate an abrupt bookie crash
    server.shutdown();

    // delete journal files and lastMark, to make sure that we are not reading from
    // Journal file
    File[] journalDirs = conf.getJournalDirs();
    for (File journalDir : journalDirs) {
        File journalDirectory = Bookie.getCurrentDirectory(journalDir);
        List<Long> journalLogsId = Journal.listJournalIds(journalDirectory, null);
        for (long journalId : journalLogsId) {
            File journalFile = new File(journalDirectory, Long.toHexString(journalId) + ".txn");
            journalFile.delete();
        }
    }

    // we know there is only one ledgerDir
    lastMarkFile = new File(ledgerDir, "lastMark");
    lastMarkFile.delete();

    // now we are restarting BookieServer
    conf.setLedgerStorageClass(InterleavedLedgerStorage.class.getName());
    server = new BookieServer(conf);
    server.start();
    BookKeeper newBKClient = new BookKeeper(clientConf);
    // since Bookie checkpointed successfully before shutdown/crash,
    // we should be able to read from entryLogs though journal is deleted

    AtomicBoolean receivedExceptionForRead = new AtomicBoolean(false);

    LongStream.range(0, numOfLedgers).parallel().forEach((ledgerId) -> {
        try {
            LedgerHandle lh = newBKClient.openLedger(ledgerId, DigestType.CRC32, "passwd".getBytes());
            Enumeration<LedgerEntry> entries = lh.readEntries(0, numOfEntries - 1);
            while (entries.hasMoreElements()) {
                LedgerEntry entry = entries.nextElement();
                byte[] readData = entry.getEntry();
                Assert.assertEquals("Ledger Entry Data should match", new String("data".getBytes()),
                        new String(readData));
            }
            lh.close();
        } catch (BKException | InterruptedException e) {
            receivedExceptionForRead.compareAndSet(false, true);
            LOG.error("Got Exception while trying to read entries of ledger, ledgerId: " + ledgerId, e);
        }
    });
    Assert.assertFalse("There shouldn't be any exceptions while creating readHandle and while reading"
            + "entries using readHandle", receivedExceptionForRead.get());

    newBKClient.close();
    server.shutdown();
}