List of usage examples for java.util.concurrent.atomic.AtomicBoolean.getAndSet
public final boolean getAndSet(boolean newValue)
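getAndSet atomically replaces the stored value with newValue and returns the value that was held immediately before the update. The examples below use it in two ways: as a plain thread-safe flag set from asynchronous callbacks (the returned previous value is ignored), and as a one-shot guard where only the caller that observes the old value gets to act. A minimal, self-contained sketch of both uses follows; the class and variable names are illustrative only and do not come from the projects listed below.

import java.util.concurrent.atomic.AtomicBoolean;

public class GetAndSetDemo {
    public static void main(String[] args) throws InterruptedException {
        // Use 1: record from another thread that "something happened"; the previous value is ignored.
        AtomicBoolean error = new AtomicBoolean(false);
        Thread worker = new Thread(() -> error.getAndSet(true));
        worker.start();
        worker.join();
        System.out.println("error seen: " + error.get()); // true

        // Use 2: one-shot guard -- only the first caller that flips the flag runs the action.
        AtomicBoolean retry = new AtomicBoolean(true);
        if (retry.getAndSet(false)) {
            System.out.println("first caller: performing the action exactly once");
        }
        if (retry.getAndSet(false)) {
            System.out.println("never reached: the flag was already consumed");
        }
    }
}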
From source file:info.archinnov.achilles.it.TestDSLSimpleEntity.java
@Test
public void should_dsl_delete_if_exists() throws Exception {
    //Given
    final long id = RandomUtils.nextLong(0L, Long.MAX_VALUE);
    final Date date = buildDateKey();
    final AtomicBoolean error = new AtomicBoolean(false);
    final LWTResultListener lwtResultListener = new LWTResultListener() {
        @Override
        public void onSuccess() {
        }

        @Override
        public void onError(LWTResult lwtResult) {
            error.getAndSet(true);
        }
    };

    //When
    manager.dsl().delete().value().fromBaseTable()
            .where().id_Eq(id).date_Eq(date)
            .ifExists()
            .withResultSetAsyncListener(rs -> {
                assertThat(rs.wasApplied()).isFalse();
                return rs;
            })
            .withLwtResultListener(lwtResultListener)
            .execute();

    //Then
    assertThat(error.get()).isTrue();
}
From source file:info.archinnov.achilles.it.TestDSLSimpleEntity.java
@Test
public void should_dsl_update_value_if_exists() throws Exception {
    //Given
    final long id = RandomUtils.nextLong(0L, Long.MAX_VALUE);
    final Date date = buildDateKey();
    final AtomicBoolean error = new AtomicBoolean(false);

    //When
    manager.dsl().update().fromBaseTable().value_Set("new value")
            .where().id_Eq(id).date_Eq(date)
            .ifExists()
            .withLwtResultListener(new LWTResultListener() {
                @Override
                public void onSuccess() {
                }

                @Override
                public void onError(LWTResult lwtResult) {
                    error.getAndSet(true);
                }
            })
            .withResultSetAsyncListener(rs -> {
                assertThat(rs.wasApplied()).isFalse();
                return rs;
            })
            .execute();

    //Then
    final Row row = session.execute("SELECT simplemap FROM simple WHERE id = " + id).one();
    assertThat(row).isNull();
    assertThat(error.get()).isTrue();
}
From source file:info.archinnov.achilles.it.TestDSLSimpleEntity.java
@Test
public void should_dsl_update_value_if_equal() throws Exception {
    //Given
    final long id = RandomUtils.nextLong(0L, Long.MAX_VALUE);
    final Date date = buildDateKey();
    final AtomicBoolean success = new AtomicBoolean(false);
    scriptExecutor.executeScriptTemplate("SimpleEntity/insert_single_row.cql",
            ImmutableMap.of("id", id, "table", "simple"));
    final CassandraLogAsserter logAsserter = new CassandraLogAsserter();
    logAsserter.prepareLogLevelForDriverConnection();

    //When
    manager.dsl().update().fromBaseTable().value_Set("new value")
            .where().id_Eq(id).date_Eq(date)
            .ifValue_Eq("0 AM")
            .withLwtResultListener(new LWTResultListener() {
                @Override
                public void onSuccess() {
                    success.getAndSet(true);
                }

                @Override
                public void onError(LWTResult lwtResult) {
                }
            })
            .withResultSetAsyncListener(rs -> {
                assertThat(rs.wasApplied()).isTrue();
                return rs;
            })
            .withSerialConsistencyLevel(SERIAL)
            .execute();

    //Then
    final Row row = session.execute("SELECT value FROM simple WHERE id = " + id).one();
    assertThat(row).isNotNull();
    assertThat(row.getString("value")).isEqualTo("new value");
    assertThat(success.get()).isTrue();
    logAsserter.assertSerialConsistencyLevels(SERIAL);
}
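Note that in the three listener callbacks above the boolean returned by getAndSet is discarded, so the call is used purely as an atomic set; AtomicBoolean.set(true) would behave identically there. The examples that follow rely on the returned previous value to make a decision.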
From source file:io.pravega.segmentstore.server.writer.SegmentAggregatorTests.java
/**
 * Tests the flush() method with Append and StreamSegmentSealOperations when there are Storage errors.
 */
@Test
public void testSealWithStorageErrors() throws Exception {
    // Add some appends and seal, and then flush together. Verify that everything got flushed in one go.
    final int appendCount = 1000;
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, appendCount * 50) // Extra high length threshold.
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L)
            .with(WriterConfig.MAX_FLUSH_SIZE_BYTES, 10000)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L)
            .build();

    @Cleanup
    TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT, executorService()).join();

    @Cleanup
    ByteArrayOutputStream writtenData = new ByteArrayOutputStream();

    // Part 1: flush triggered by accumulated size.
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length (not bothering with flushing here; testFlushSeal() covers that).
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
    }

    // Generate and add a Seal Operation.
    StorageOperation sealOp = generateSealAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(sealOp);

    // Have the writes fail every few attempts with a well known exception.
    AtomicBoolean generateSyncException = new AtomicBoolean(true);
    AtomicBoolean generateAsyncException = new AtomicBoolean(true);
    AtomicReference<IntentionalException> setException = new AtomicReference<>();
    Supplier<Exception> exceptionSupplier = () -> {
        IntentionalException ex = new IntentionalException(Long.toString(context.timer.getElapsedMillis()));
        setException.set(ex);
        return ex;
    };
    context.storage.setSealSyncErrorInjector(
            new ErrorInjector<>(count -> generateSyncException.getAndSet(false), exceptionSupplier));
    context.storage.setSealAsyncErrorInjector(
            new ErrorInjector<>(count -> generateAsyncException.getAndSet(false), exceptionSupplier));

    // Call flush and verify that the entire Aggregator got flushed and the Seal got persisted to Storage.
    int attemptCount = 4;
    for (int i = 0; i < attemptCount; i++) {
        // Repeat a number of times, at least once should work.
        setException.set(null);
        try {
            FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT, executorService()).join();
            Assert.assertNull("An exception was expected, but none was thrown.", setException.get());
            Assert.assertNotNull("No FlushResult provided.", flushResult);
        } catch (Exception ex) {
            if (setException.get() != null) {
                Assert.assertEquals("Unexpected exception thrown.", setException.get(),
                        ExceptionHelpers.getRealException(ex));
            } else {
                // Not expecting any exception this time.
                throw ex;
            }
        }

        if (!generateAsyncException.get() && !generateSyncException.get() && setException.get() == null) {
            // We are done. We got at least one through.
            break;
        }
    }

    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    SegmentProperties storageInfo = context.storage
            .getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length,
            storageInfo.getLength());
    Assert.assertTrue("Segment is not sealed in storage post flush.", storageInfo.isSealed());
    Assert.assertTrue("Segment is not marked in metadata as sealed in storage post flush.",
            context.segmentAggregator.getMetadata().isSealedInStorage());
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0,
            actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
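Here the predicate count -> generateSyncException.getAndSet(false) passed to the ErrorInjector returns true only on its first evaluation, so, assuming the injector throws whenever its predicate returns true, the seal fails once and then succeeds on a later attempt. A minimal sketch of that "trigger exactly once" predicate on its own, with illustrative names and none of the Pravega test helpers:

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Predicate;

public class FailOnceDemo {
    public static void main(String[] args) {
        AtomicBoolean shouldFail = new AtomicBoolean(true);
        // Returns true exactly once, no matter how many times (or from how many threads) it is evaluated.
        Predicate<Integer> failOnce = attempt -> shouldFail.getAndSet(false);

        for (int attempt = 1; attempt <= 3; attempt++) {
            if (failOnce.test(attempt)) {
                System.out.println("attempt " + attempt + ": injecting failure");
            } else {
                System.out.println("attempt " + attempt + ": succeeding");
            }
        }
    }
}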
From source file:com.amazonaws.services.kinesis.clientlibrary.lib.worker.WorkerTest.java
/**
 * This test is testing the {@link Worker}'s shutdown behavior and by extension the behavior of
 * {@link ThreadPoolExecutor#shutdownNow()}. It depends on the thread pool sending an interrupt to the pool threads.
 * This behavior makes the test a bit racy, since we need to ensure a specific order of events.
 *
 * @throws Exception
 */
@Test
public final void testWorkerForcefulShutdown() throws Exception {
    final List<Shard> shardList = createShardListWithOneShard();
    final boolean callProcessRecordsForEmptyRecordList = true;
    final long failoverTimeMillis = 50L;
    final int numberOfRecordsPerShard = 10;

    final List<KinesisClientLease> initialLeases = new ArrayList<KinesisClientLease>();
    for (Shard shard : shardList) {
        KinesisClientLease lease = ShardSyncer.newKCLLease(shard);
        lease.setCheckpoint(ExtendedSequenceNumber.TRIM_HORIZON);
        initialLeases.add(lease);
    }

    final File file = KinesisLocalFileDataCreator.generateTempDataFile(shardList, numberOfRecordsPerShard,
            "normalShutdownUnitTest");
    final IKinesisProxy fileBasedProxy = new KinesisLocalFileProxy(file.getAbsolutePath());

    // Get executor service that will be owned by the worker, so we can get interrupts.
    ExecutorService executorService = getWorkerThreadPoolExecutor();

    // Make test case as efficient as possible.
    final CountDownLatch processRecordsLatch = new CountDownLatch(1);
    final AtomicBoolean recordProcessorInterrupted = new AtomicBoolean(false);
    when(v2RecordProcessorFactory.createProcessor()).thenReturn(v2RecordProcessor);
    final Semaphore actionBlocker = new Semaphore(1);
    final Semaphore shutdownBlocker = new Semaphore(1);

    actionBlocker.acquire();
    doAnswer(new Answer<Object>() {
        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            // Signal that record processor has started processing records.
            processRecordsLatch.countDown();

            // Block for some time now to test forceful shutdown. Also, check if record processor
            // was interrupted or not.
            final long startTimeMillis = System.currentTimeMillis();
            long elapsedTimeMillis = 0;
            LOG.info("Entering sleep @ " + startTimeMillis + " with elapsedMills: " + elapsedTimeMillis);
            shutdownBlocker.acquire();
            try {
                actionBlocker.acquire();
            } catch (InterruptedException e) {
                LOG.info("Sleep interrupted @ " + System.currentTimeMillis() + " elapsedMillis: "
                        + (System.currentTimeMillis() - startTimeMillis));
                recordProcessorInterrupted.getAndSet(true);
            }
            shutdownBlocker.release();
            elapsedTimeMillis = System.currentTimeMillis() - startTimeMillis;
            LOG.info("Sleep completed @ " + System.currentTimeMillis() + " elapsedMillis: " + elapsedTimeMillis);
            return null;
        }
    }).when(v2RecordProcessor).processRecords(any(ProcessRecordsInput.class));

    WorkerThread workerThread = runWorker(shardList, initialLeases, callProcessRecordsForEmptyRecordList,
            failoverTimeMillis, numberOfRecordsPerShard, fileBasedProxy, v2RecordProcessorFactory,
            executorService, nullMetricsFactory);

    // Only sleep for time that is required.
    processRecordsLatch.await();

    // Make sure record processor is initialized and processing records.
    verify(v2RecordProcessorFactory, times(1)).createProcessor();
    verify(v2RecordProcessor, times(1)).initialize(any(InitializationInput.class));
    verify(v2RecordProcessor, atLeast(1)).processRecords(any(ProcessRecordsInput.class));
    verify(v2RecordProcessor, times(0)).shutdown(any(ShutdownInput.class));

    workerThread.getWorker().shutdown();
    workerThread.join();

    Assert.assertTrue(workerThread.getState() == State.TERMINATED);
    // Shutdown should not be called in this case because record processor is blocked.
    verify(v2RecordProcessor, times(0)).shutdown(any(ShutdownInput.class));

    //
    // Release the worker thread
    //
    actionBlocker.release();
    //
    // Give the worker thread time to execute it's interrupted handler.
    //
    shutdownBlocker.tryAcquire(100, TimeUnit.MILLISECONDS);
    //
    // Now we can see if it was actually interrupted. It's possible it wasn't and this will fail.
    //
    assertThat(recordProcessorInterrupted.get(), equalTo(true));
}
From source file:org.hyperledger.fabric.sdk.PeerEventServiceClient.java
/**
 * Get the last block received by this peer.
 *
 * @return The last block received by this peer. May return null if no block has been received since first reactivated.
 */
void connectEnvelope(Envelope envelope) throws TransactionException {
    if (shutdown) {
        logger.warn(format("%s not connecting is shutdown.", toString()));
        return;
    }

    final AtomicBoolean retry = new AtomicBoolean(true); // make sure we only retry connection once for each connection attempt.

    ManagedChannel lmanagedChannel = managedChannel;
    if (lmanagedChannel == null || lmanagedChannel.isTerminated() || lmanagedChannel.isShutdown()) {
        lmanagedChannel = channelBuilder.build();
        managedChannel = lmanagedChannel;
    }

    try {
        DeliverGrpc.DeliverStub broadcast = DeliverGrpc.newStub(lmanagedChannel);

        // final DeliverResponse[] ret = new DeliverResponse[1];
        // final List<DeliverResponse> retList = new ArrayList<>();
        final List<Throwable> throwableList = new ArrayList<>();
        final CountDownLatch finishLatch = new CountDownLatch(1);

        so = new StreamObserver<DeliverResponse>() {

            @Override
            public void onNext(DeliverResponse resp) {
                // logger.info("Got Broadcast response: " + resp);
                logger.trace(format("DeliverResponse %s resp status value:%d status %s, typecase %s ",
                        PeerEventServiceClient.this.toString(), resp.getStatusValue(), resp.getStatus(),
                        resp.getTypeCase()));

                final DeliverResponse.TypeCase typeCase = resp.getTypeCase();

                if (typeCase == STATUS) {
                    logger.debug(format("DeliverResponse %s setting done.",
                            PeerEventServiceClient.this.toString()));

                    if (resp.getStatus() == Common.Status.SUCCESS) { // unlike you may think this only happens when all blocks are fetched.
                        peer.setLastConnectTime(System.currentTimeMillis());
                        peer.resetReconnectCount();
                    } else {
                        final long rec = peer.getReconnectCount();
                        PeerEventingServiceException peerEventingServiceException = new PeerEventingServiceException(
                                format("%s attempts %s Status returned failure code %d (%s) during peer service event registration",
                                        PeerEventServiceClient.this.toString(), rec, resp.getStatusValue(),
                                        resp.getStatus().name()));
                        peerEventingServiceException.setResponse(resp);

                        if (rec % 10 == 0) {
                            logger.warn(PeerEventServiceClient.this.toString() + " "
                                    + peerEventingServiceException.getMessage());
                        }

                        throwableList.add(peerEventingServiceException);
                    }

                } else if (typeCase == FILTERED_BLOCK || typeCase == BLOCK) {
                    if (typeCase == BLOCK) {
                        logger.trace(format("%s got event block hex hashcode: %016x, block number: %d",
                                PeerEventServiceClient.this.toString(), resp.getBlock().hashCode(),
                                resp.getBlock().getHeader().getNumber()));
                    } else {
                        logger.trace(format("%s got event block hex hashcode: %016x, block number: %d",
                                PeerEventServiceClient.this.toString(), resp.getFilteredBlock().hashCode(),
                                resp.getFilteredBlock().getNumber()));
                    }

                    peer.setLastConnectTime(System.currentTimeMillis());
                    long reconnectCount = peer.getReconnectCount();
                    if (reconnectCount > 1) {
                        logger.info(format("%s reconnected after %d attempts on channel %s, peer %s, url %s",
                                PeerEventServiceClient.this.toString(), reconnectCount, channelName, name, url));
                    }
                    peer.resetReconnectCount();

                    BlockEvent blockEvent = new BlockEvent(peer, resp);
                    peer.setLastBlockSeen(blockEvent);
                    channelEventQue.addBEvent(blockEvent);
                } else {
                    logger.error(format("%s got event block with unknown type: %s, %d",
                            PeerEventServiceClient.this.toString(), typeCase.name(), typeCase.getNumber()));

                    PeerEventingServiceException peerEventingServiceException = new PeerEventingServiceException(
                            format("%s got event block with unknown type: %s, %d",
                                    PeerEventServiceClient.this.toString(), typeCase.name(),
                                    typeCase.getNumber()));
                    peerEventingServiceException.setResponse(resp);
                    throwableList.add(peerEventingServiceException);
                }
                finishLatch.countDown();
            }

            @Override
            public void onError(Throwable t) {
                ManagedChannel llmanagedChannel = managedChannel;
                if (llmanagedChannel != null) {
                    try {
                        llmanagedChannel.shutdownNow();
                    } catch (Exception e) {
                        logger.warn(format("Received error on %s, attempts %d. %s shut down of grpc channel.",
                                PeerEventServiceClient.this.toString(),
                                peer == null ? -1 : peer.getReconnectCount(), e.getMessage()), e);
                    }
                    managedChannel = null;
                }
                if (!shutdown) {
                    final long reconnectCount = peer.getReconnectCount();
                    if (PEER_EVENT_RECONNECTION_WARNING_RATE > 1
                            && reconnectCount % PEER_EVENT_RECONNECTION_WARNING_RATE == 1) {
                        logger.warn(format("Received error on %s, attempts %d. %s",
                                PeerEventServiceClient.this.toString(), reconnectCount, t.getMessage()));
                    } else {
                        logger.trace(format("Received error on %s, attempts %d. %s",
                                PeerEventServiceClient.this.toString(), reconnectCount, t.getMessage()));
                    }
                    if (retry.getAndSet(false)) {
                        peer.reconnectPeerEventServiceClient(PeerEventServiceClient.this, t);
                    }
                }
                finishLatch.countDown();
            }

            @Override
            public void onCompleted() {
                logger.debug(format("DeliverResponse onCompleted %s setting done.",
                        PeerEventServiceClient.this.toString()));
                // done = true;
                //There should have been a done before this...
                finishLatch.countDown();
            }
        };

        nso = filterBlock ? broadcast.deliverFiltered(so) : broadcast.deliver(so);
        nso.onNext(envelope);

        // try {
        if (!finishLatch.await(peerEventRegistrationWaitTimeMilliSecs, TimeUnit.MILLISECONDS)) {
            PeerEventingServiceException ex = new PeerEventingServiceException(
                    format("Channel %s connect time exceeded for peer eventing service %s, timed out at %d ms.",
                            channelName, name, peerEventRegistrationWaitTimeMilliSecs));
            ex.setTimedOut(peerEventRegistrationWaitTimeMilliSecs);
            logger.warn(toString() + " " + ex.getMessage());
            throwableList.add(0, ex);
        }
        logger.trace(toString() + " done waiting for reply!");

        if (!throwableList.isEmpty()) {
            ManagedChannel llmanagedChannel = managedChannel;
            if (llmanagedChannel != null) {
                llmanagedChannel.shutdownNow();
                managedChannel = null;
            }
            Throwable throwable = throwableList.get(0);
            if (retry.getAndSet(false)) {
                peer.reconnectPeerEventServiceClient(this, throwable);
            }
        }

    } catch (InterruptedException e) {
        ManagedChannel llmanagedChannel = managedChannel;
        if (llmanagedChannel != null) {
            llmanagedChannel.shutdownNow();
            managedChannel = null;
        }
        logger.error(toString() + " error message: " + e.getMessage(), e); // not likely

        if (retry.getAndSet(false)) {
            peer.reconnectPeerEventServiceClient(this, e);
        }
    } finally {
        if (null != nso) {
            try {
                nso.onCompleted();
            } catch (Exception e) { //Best effort only report on debug
                logger.debug(format("Exception completing connect with %s %s", toString(), e.getMessage()), e);
            }
        }
    }
}