Example usage for java.util.concurrent.atomic AtomicBoolean get

Introduction

On this page you can find example usages of java.util.concurrent.atomic.AtomicBoolean.get() drawn from open-source projects.

Prototype

public final boolean get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
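
Before the project examples, here is a minimal, self-contained sketch of the most common use of get(): one thread polls an AtomicBoolean as a stop flag while another thread flips it. Because get() performs a volatile read, the writer's update is guaranteed to become visible to the polling thread. The class name and sleep duration below are illustrative only.

import java.util.concurrent.atomic.AtomicBoolean;

public class StopFlagExample {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean running = new AtomicBoolean(true);

        Thread worker = new Thread(() -> {
            // get() is a volatile read, so the main thread's set(false)
            // is guaranteed to become visible here.
            while (running.get()) {
                Thread.onSpinWait(); // stand-in for a unit of real work
            }
            System.out.println("worker stopped");
        });

        worker.start();
        Thread.sleep(100);   // let the worker spin briefly
        running.set(false);  // volatile write, paired with the read above
        worker.join();
    }
}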

Usage

From source file:info.archinnov.achilles.it.TestCRUDSimpleEntity.java

@Test
public void should_delete_with_inequal_condition() throws Exception {
    //Given
    final long id = RandomUtils.nextLong(0L, Long.MAX_VALUE);
    final Date date = buildDateKey();
    scriptExecutor.executeScriptTemplate("SimpleEntity/insert_single_row.cql",
            ImmutableMap.of("id", id, "table", "simple"));

    final AtomicBoolean success = new AtomicBoolean(false);
    final LWTResultListener lwtResultListener = new LWTResultListener() {

        @Override
        public void onSuccess() {
            success.set(true); // plain set() suffices; the previous value returned by getAndSet() was unused
        }

        @Override
        public void onError(LWTResult lwtResult) {

        }
    };
    //When
    manager.dsl().delete().allColumns_FromBaseTable().where().id_Eq(id).date_Eq(date).ifValue_Lt("_")
            .withLwtResultListener(lwtResultListener).execute();

    //Then
    final Row row = session.execute("SELECT * FROM simple WHERE id = " + id).one();
    assertThat(row).isNull();
    assertThat(success.get()).isTrue();
}

From source file:com.sixt.service.framework.kafka.messaging.KafkaIntegrationTest.java

@Ignore("long running test")
@Test
public void partitionAssignmentChange() throws InterruptedException {
    ServiceProperties serviceProperties = new ServiceProperties();
    serviceProperties.initialize(new String[] {}); // Reads environment variables set by DockerComposeHelper

    // Topics are created with 3 partitions - see docker-compose-integrationtest.yml
    Topic ping = new Topic("ping");
    Topic pong = new Topic("pong");

    Producer producer = new ProducerFactory(serviceProperties).createProducer();

    final AtomicBoolean produceMessages = new AtomicBoolean(true);
    final AtomicInteger sentMessages = new AtomicInteger(0);

    final AtomicInteger receivedMessagesConsumer1 = new AtomicInteger(0);
    final CountDownLatch firstMessageProcessedConsumer1 = new CountDownLatch(1);

    final AtomicInteger receivedMessagesConsumer2 = new AtomicInteger(0);
    final CountDownLatch firstMessageProcessedConsumer2 = new CountDownLatch(1);

    final AtomicInteger receivedMessagesConsumer3 = new AtomicInteger(0);
    final CountDownLatch firstMessageProcessedConsumer3 = new CountDownLatch(1);

    // Produce messages until test tells producer to stop.
    ExecutorService producerExecutor = Executors.newSingleThreadExecutor();
    producerExecutor.submit(new Runnable() {
        @Override
        public void run() {
            OrangeContext context = new OrangeContext();
            Sleeper sleeper = new Sleeper();

            try {
                while (produceMessages.get()) {
                    String key = RandomStringUtils.randomAscii(5);
                    SayHelloToCmd payload = SayHelloToCmd.newBuilder().setName(key).build();

                    Message request = Messages.requestFor(ping, pong, key, payload, context);

                    producer.send(request);
                    sentMessages.incrementAndGet();

                    sleeper.sleepNoException(250);
                }
            } catch (Throwable t) {
                logger.error("Exception in producer loop", t);
            }
        }
    });

    // Start the first consumer. It should get all 3 partitions assigned.
    Consumer consumer1 = consumerFactoryWithHandler(serviceProperties, SayHelloToCmd.class,
            new MessageHandler<SayHelloToCmd>() {
                @Override
                public void onMessage(Message<SayHelloToCmd> message, OrangeContext context) {
                    receivedMessagesConsumer1.incrementAndGet();
                    firstMessageProcessedConsumer1.countDown();
                }
            }).consumerForTopic(ping, new DiscardFailedMessages());

    // wait until consumer 1 is up.
    firstMessageProcessedConsumer1.await();
    Thread.sleep(5000); // consume some messages

    // Now, start the second consumer. It should get at least one partition assigned.
    Consumer consumer2 = consumerFactoryWithHandler(serviceProperties, SayHelloToCmd.class,
            new MessageHandler<SayHelloToCmd>() {
                @Override
                public void onMessage(Message<SayHelloToCmd> message, OrangeContext context) {
                    receivedMessagesConsumer2.incrementAndGet();
                    firstMessageProcessedConsumer2.countDown();
                }
            }).consumerForTopic(ping, new DiscardFailedMessages());

    // wait until the second consumer is up.
    firstMessageProcessedConsumer2.await();
    Thread.sleep(5000); // let both consumers run a bit

    brutallyKillConsumer("pool-14-thread-1"); // consumer2 thread, HACKY: if this is too brittle, change the test to shutdown()

    // Need to wait a bit longer while Kafka "restabilizes the group" after consumer 2 was killed.
    // -> Consumer 1 should now get all three partitions back again.
    Thread.sleep(30000); // must be > than max.poll.interval.ms

    // Now, start the third consumer. It should get at least one partition assigned.
    Consumer consumer3 = consumerFactoryWithHandler(serviceProperties, SayHelloToCmd.class,
            new MessageHandler<SayHelloToCmd>() {
                @Override
                public void onMessage(Message<SayHelloToCmd> message, OrangeContext context) {
                    receivedMessagesConsumer3.incrementAndGet();
                    firstMessageProcessedConsumer3.countDown();
                }
            }).consumerForTopic(ping, new DiscardFailedMessages());
    firstMessageProcessedConsumer3.await();
    Thread.sleep(5000);

    // Now shut down the first consumer.
    consumer1.shutdown();
    Thread.sleep(10000);

    // Stop the producer.
    produceMessages.set(false);
    producer.shutdown();
    producerExecutor.shutdown();

    Thread.sleep(3000); // give the remaining consumer the chance to consume all messages
    consumer3.shutdown(); // no assignment any longer

    // Finally, the assertions:
    int receivedMessagesTotal = receivedMessagesConsumer1.get() + receivedMessagesConsumer2.get()
            + receivedMessagesConsumer3.get();
    assertEquals(sentMessages.get(), receivedMessagesTotal);

    assertTrue(receivedMessagesConsumer1.get() > 0);
    assertTrue(receivedMessagesConsumer2.get() > 0);
    assertTrue(receivedMessagesConsumer3.get() > 0);
}

From source file:com.alibaba.wasp.fserver.handler.OpenEntityGroupHandler.java

/**
 * Update ZK, ROOT or META. This can take a while if, for example, .META. is
 * not available -- say the server hosting .META. crashed and we are waiting
 * for it to come back -- so run in a thread and keep updating the znode
 * state meantime so the master doesn't time out our entityGroup-in-transition.
 * Caller must clean up the entityGroup if this fails.
 */
boolean updateMeta(final EntityGroup entityGroup) {
    if (this.server.isStopped() || this.fsServices.isStopping()) {
        return false;
    }
    // Object we do wait/notify on. Make it boolean. If set, we're done.
    // Else, wait.
    final AtomicBoolean signaller = new AtomicBoolean(false);
    PostOpenDeployTasksThread t = new PostOpenDeployTasksThread(entityGroup, this.server, this.fsServices,
            signaller);
    t.start();
    int assignmentTimeout = this.server.getConfiguration()
            .getInt("wasp.master.assignment.timeoutmonitor.period", 10000);
    // Total timeout for meta edit. If we fail adding the edit then close out
    // the entityGroup and let it be assigned elsewhere.
    long timeout = assignmentTimeout * 10;
    long now = System.currentTimeMillis();
    long endTime = now + timeout;
    // Let the period at which we update the OPENING state be 1/3rd of the
    // entityGroups-in-transition timeout period.
    long period = Math.max(1, assignmentTimeout / 3);
    long lastUpdate = now;
    boolean tickleOpening = true;
    while (!signaller.get() && t.isAlive() && !this.server.isStopped() && !this.fsServices.isStopping()
            && (endTime > now)) {
        long elapsed = now - lastUpdate;
        if (elapsed > period) {
            // Only tickle OPENING if postOpenDeployTasks is taking some time.
            lastUpdate = now;
            tickleOpening = tickleOpening("post_open_deploy");
        }
        synchronized (signaller) {
            try {
                signaller.wait(period);
            } catch (InterruptedException e) {
                // Go to the loop check.
            }
        }
        now = System.currentTimeMillis();
    }
    // Is the thread still alive? We may have left the above loop because the
    // server is stopping or we timed out the edit. If so, interrupt it.
    if (t.isAlive()) {
        if (!signaller.get()) {
            // Thread still running; interrupt
            LOG.debug("Interrupting thread " + t);
            t.interrupt();
        }
        try {
            t.join();
        } catch (InterruptedException ie) {
            LOG.warn("Interrupted joining " + entityGroup.getEntityGroupInfo().getEntityGroupNameAsString(),
                    ie);
            Thread.currentThread().interrupt();
        }
    }

    // Was there an exception opening the entityGroup? This should trigger on
    // InterruptedException too. If so, we failed. Even if only the tickle
    // opening fails, it counts as a failure.
    return ((!Thread.interrupted() && t.getException() == null) && tickleOpening);
}
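
The wait/notify-on-an-AtomicBoolean idiom in updateMeta() is worth isolating: the flag doubles as the completion signal and as the monitor the waiter sleeps on, and the timed wait() covers the window between the get() check and entering the synchronized block. A stripped-down, hypothetical sketch of the same pattern (the names are not from the Wasp code):

import java.util.concurrent.atomic.AtomicBoolean;

public class SignallerExample {
    public static void main(String[] args) throws InterruptedException {
        // Doubles as the "done" flag and as the monitor we wait/notify on.
        final AtomicBoolean signaller = new AtomicBoolean(false);

        Thread worker = new Thread(() -> {
            // ... slow task runs here ...
            signaller.set(true);          // publish completion
            synchronized (signaller) {
                signaller.notifyAll();    // wake the waiter early
            }
        });
        worker.start();

        while (!signaller.get() && worker.isAlive()) {
            synchronized (signaller) {
                // Timed wait: even if the notify slips in between the get()
                // check above and this wait(), we re-check within a second.
                signaller.wait(1000);
            }
        }
        System.out.println("done = " + signaller.get());
    }
}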

From source file:org.apache.nifi.controller.scheduling.TestStandardProcessScheduler.java

/**
 * Validates the atomic nature of the ControllerServiceNode.enable() method,
 * which must trigger @OnEnabled only once, regardless of how many threads
 * may have a reference to the underlying ProcessScheduler and
 * ControllerServiceNode.
 */
@Test
public void validateServiceEnablementLogicHappensOnlyOnce() throws Exception {
    final ProcessScheduler scheduler = createScheduler();
    final StandardControllerServiceProvider provider = new StandardControllerServiceProvider(controller,
            scheduler, null, stateMgrProvider, variableRegistry, nifiProperties);
    final ControllerServiceNode serviceNode = provider.createControllerService(
            SimpleTestService.class.getName(), "1", systemBundle.getBundleDetails().getCoordinate(), null,
            false);
    assertFalse(serviceNode.isActive());
    final SimpleTestService ts = (SimpleTestService) serviceNode.getControllerServiceImplementation();
    final ExecutorService executor = Executors.newCachedThreadPool();

    final AtomicBoolean asyncFailed = new AtomicBoolean();
    for (int i = 0; i < 1000; i++) {
        executor.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    scheduler.enableControllerService(serviceNode);
                    assertTrue(serviceNode.isActive());
                } catch (final Exception e) {
                    e.printStackTrace();
                    asyncFailed.set(true);
                }
            }
        });
    }
    // need to sleep a while since we are emulating async invocations on a
    // method that is also internally async
    Thread.sleep(500);
    executor.shutdown();
    assertFalse(asyncFailed.get());
    assertEquals(1, ts.enableInvocationCount());
}

From source file:org.apache.nifi.controller.scheduling.TestStandardProcessScheduler.java

/**
 * Validates the atomic nature of the ControllerServiceNode.disable(..) method,
 * which must never trigger @OnDisabled, regardless of how many threads may
 * have a reference to the underlying ProcessScheduler and
 * ControllerServiceNode.
 */
@Test
public void validateDisabledServiceCantBeDisabled() throws Exception {
    final ProcessScheduler scheduler = createScheduler();
    final StandardControllerServiceProvider provider = new StandardControllerServiceProvider(controller,
            scheduler, null, stateMgrProvider, variableRegistry, nifiProperties);
    final ControllerServiceNode serviceNode = provider.createControllerService(
            SimpleTestService.class.getName(), "1", systemBundle.getBundleDetails().getCoordinate(), null,
            false);
    final SimpleTestService ts = (SimpleTestService) serviceNode.getControllerServiceImplementation();
    final ExecutorService executor = Executors.newCachedThreadPool();

    final AtomicBoolean asyncFailed = new AtomicBoolean();
    for (int i = 0; i < 1000; i++) {
        executor.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    scheduler.disableControllerService(serviceNode);
                    assertFalse(serviceNode.isActive());
                } catch (final Exception e) {
                    e.printStackTrace();
                    asyncFailed.set(true);
                }
            }
        });
    }
    // need to sleep a while since we are emulating async invocations on a
    // method that is also internally async
    Thread.sleep(500);
    executor.shutdown();
    assertFalse(asyncFailed.get());
    assertEquals(0, ts.disableInvocationCount());
}

From source file:org.apache.nifi.controller.scheduling.TestStandardProcessScheduler.java

/**
 * Validates the atomic nature of the ControllerServiceNode.disable() method,
 * which must trigger @OnDisabled only once, regardless of how many threads
 * may have a reference to the underlying ProcessScheduler and
 * ControllerServiceNode.
 */
@Test
public void validateEnabledServiceCanOnlyBeDisabledOnce() throws Exception {
    final ProcessScheduler scheduler = createScheduler();
    final StandardControllerServiceProvider provider = new StandardControllerServiceProvider(controller,
            scheduler, null, stateMgrProvider, variableRegistry, nifiProperties);
    final ControllerServiceNode serviceNode = provider.createControllerService(
            SimpleTestService.class.getName(), "1", systemBundle.getBundleDetails().getCoordinate(), null,
            false);
    final SimpleTestService ts = (SimpleTestService) serviceNode.getControllerServiceImplementation();
    scheduler.enableControllerService(serviceNode);
    assertTrue(serviceNode.isActive());
    final ExecutorService executor = Executors.newCachedThreadPool();

    final AtomicBoolean asyncFailed = new AtomicBoolean();
    for (int i = 0; i < 1000; i++) {
        executor.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    scheduler.disableControllerService(serviceNode);
                    assertFalse(serviceNode.isActive());
                } catch (final Exception e) {
                    e.printStackTrace();
                    asyncFailed.set(true);
                }
            }
        });
    }
    // need to sleep a while since we are emulating async invocations on a
    // method that is also internally async
    Thread.sleep(500);
    executor.shutdown();
    assertFalse(asyncFailed.get());
    assertEquals(1, ts.disableInvocationCount());
}
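
All three scheduler tests above use the AtomicBoolean only as a cross-thread failure flag that is read once with get() after the workers finish. The once-only behaviour they validate is typically built on compareAndSet(), which flips false to true for exactly one caller. A hypothetical sketch of that guard (this is not the NiFi implementation):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

public class EnableOnceExample {
    private final AtomicBoolean enabled = new AtomicBoolean(false);
    private final AtomicInteger onEnabledCalls = new AtomicInteger();

    public void enable() {
        // compareAndSet wins for exactly one thread, so the
        // callback below runs at most once.
        if (enabled.compareAndSet(false, true)) {
            onEnabledCalls.incrementAndGet(); // stands in for @OnEnabled
        }
    }

    public static void main(String[] args) throws InterruptedException {
        EnableOnceExample service = new EnableOnceExample();
        ExecutorService executor = Executors.newCachedThreadPool();
        for (int i = 0; i < 1000; i++) {
            executor.execute(service::enable);
        }
        executor.shutdown();
        executor.awaitTermination(10, TimeUnit.SECONDS);
        // get() reads the final state; the callback ran exactly once.
        System.out.println(service.enabled.get() + ", calls = " + service.onEnabledCalls.get());
    }
}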

From source file:org.apache.bookkeeper.bookie.CreateNewLogTest.java

public void testConcurrentCreateNewLog(boolean entryLogFilePreAllocationEnabled) throws Exception {
    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();

    // Creating a new configuration with a number of
    // ledger directories.
    conf.setLedgerDirNames(ledgerDirs);
    conf.setEntryLogFilePreAllocationEnabled(entryLogFilePreAllocationEnabled);
    LedgerDirsManager ledgerDirsManager = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));

    EntryLogger el = new EntryLogger(conf, ledgerDirsManager);
    EntryLogManagerBase entryLogManager = (EntryLogManagerBase) el.getEntryLogManager();
    // set same thread executor for entryLoggerAllocator's allocatorExecutor
    setSameThreadExecutorForEntryLoggerAllocator(el.getEntryLoggerAllocator());

    Assert.assertEquals("previousAllocatedEntryLogId after initialization", -1,
            el.getPreviousAllocatedEntryLogId());
    Assert.assertEquals("leastUnflushedLogId after initialization", 0, el.getLeastUnflushedLogId());
    int createNewLogNumOfTimes = 10;
    AtomicBoolean receivedException = new AtomicBoolean(false);

    IntStream.range(0, createNewLogNumOfTimes).parallel().forEach((i) -> {
        try {
            entryLogManager.createNewLog((long) i);
        } catch (IOException e) {
            LOG.error("Received exception while creating newLog", e);
            receivedException.set(true);
        }
    });

    Assert.assertFalse("There shouldn't be any exceptions while creating a new log", receivedException.get());
    int expectedPreviousAllocatedEntryLogId = createNewLogNumOfTimes - 1;
    if (entryLogFilePreAllocationEnabled) {
        expectedPreviousAllocatedEntryLogId = createNewLogNumOfTimes;
    }

    Assert.assertEquals(
            "previousAllocatedEntryLogId after " + createNewLogNumOfTimes
                    + " number of times createNewLog is called",
            expectedPreviousAllocatedEntryLogId, el.getPreviousAllocatedEntryLogId());
    Assert.assertEquals("Number of RotatedLogChannels", createNewLogNumOfTimes - 1,
            entryLogManager.getRotatedLogChannels().size());
}
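
The test above shows another recurring shape: tasks running in a parallel stream cannot rethrow checked exceptions, so a failure is latched into an AtomicBoolean and read out once with get() after the parallel region completes. A minimal hypothetical sketch:

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.IntStream;

public class ParallelFailureFlagExample {
    public static void main(String[] args) {
        AtomicBoolean failed = new AtomicBoolean(false);

        IntStream.range(0, 10).parallel().forEach(i -> {
            try {
                if (i == 7) {
                    throw new IllegalStateException("task " + i); // simulated failure
                }
            } catch (Exception e) {
                failed.set(true); // latch the failure instead of rethrowing
            }
        });

        // forEach has returned, so every task's write is visible here.
        System.out.println("any task failed: " + failed.get());
    }
}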

From source file:com.spotify.helios.testing.TemporaryJob.java

private void awaitUp(final String host) throws TimeoutException {
    final TemporaryJobReports.Step startContainer = reportWriter.step("start container")
            .tag("jobId", job.getId()).tag("host", host).tag("image", job.getImage());
    try {
        final AtomicBoolean messagePrinted = new AtomicBoolean(false);
        final TaskStatus status = Polling.awaitUnchecked(deployTimeoutMillis, MILLISECONDS,
                job.getId() + " was not up within %d %s", new Callable<TaskStatus>() {
                    @Override
                    public TaskStatus call() throws Exception {
                        final JobStatus status = Futures.getUnchecked(client.jobStatus(job.getId()));
                        if (status == null) {
                            log.debug("Job status not available");
                            return null;
                        }
                        final TaskStatus taskStatus = status.getTaskStatuses().get(host);
                        if (taskStatus == null) {
                            log.debug("Task status not available on {}", host);
                            return null;
                        }

                        if (!messagePrinted.get() && !isNullOrEmpty(jobDeployedMessageFormat)
                                && !isNullOrEmpty(taskStatus.getContainerId())) {
                            outputDeployedMessage(host, taskStatus.getContainerId());
                            messagePrinted.set(true);
                        }

                        verifyHealthy(host, taskStatus);

                        final TaskStatus.State state = taskStatus.getState();
                        log.info("Job state of {}: {}", job.getImage(), state);

                        if (state == TaskStatus.State.RUNNING) {
                            return taskStatus;
                        }

                        return null;
                    }
                });

        statuses.put(host, status);

        startContainer.markSuccess();
    } finally {
        startContainer.finish();
    }

    final TemporaryJobReports.Step probe = reportWriter.step("probe").tag("jobId", job.getId()).tag("host",
            host);
    try {
        for (final String port : waitPorts) {
            awaitPort(port, host);
        }

        probe.markSuccess();
    } finally {
        probe.finish();
    }
}

From source file:info.archinnov.achilles.it.TestCRUDSimpleEntity.java

@Test
public void should_delete_with_equal_condition() throws Exception {
    //Given
    final long id = RandomUtils.nextLong(0L, Long.MAX_VALUE);
    final Date date = buildDateKey();
    scriptExecutor.executeScriptTemplate("SimpleEntity/insert_single_row.cql",
            ImmutableMap.of("id", id, "table", "simple"));

    final AtomicBoolean success = new AtomicBoolean(false);
    final LWTResultListener lwtResultListener = new LWTResultListener() {

        @Override
        public void onSuccess() {
            success.set(true); // plain set() suffices; the previous value returned by getAndSet() was unused
        }

        @Override
        public void onError(LWTResult lwtResult) {

        }
    };
    //When
    manager.dsl().delete().allColumns_FromBaseTable().where().id_Eq(id).date_Eq(date)
            .ifSimpleSet_Eq(Sets.newHashSet(1.0, 2.0)).withLwtResultListener(lwtResultListener).execute();

    //Then
    final Row row = session.execute("SELECT * FROM simple WHERE id = " + id).one();
    assertThat(row).isNull();
    assertThat(success.get()).isTrue();
}

From source file:com.vmware.admiral.request.ContainerControlLoopServiceTest.java

@SuppressWarnings("unchecked")
@Test
public void testRedeploymentOfAContainerInCluster() throws Throwable {

    containerDescription1 = createContainerDescription(false);
    containerDescription1._cluster = 2;
    doPut(containerDescription1);

    // provision 2 containers in cluster
    ContainerState state = provisionContainer(containerDescription1.documentSelfLink);
    // change the power state of one of them
    state.powerState = PowerState.ERROR;
    doPut(state);

    Map<String, List<String>> containersPerContextId = new HashMap<>();

    retrieveContainerStates(containerDescription1.documentSelfLink).thenAccept(containerStates -> {
        List<String> containersFromDesc1 = containerStates.stream().map(cs -> cs.documentSelfLink)
                .collect(Collectors.toList());
        assertEquals(2, containersFromDesc1.size());

        // clustered containers have same context_id
        containersPerContextId.put(
                containerStates.get(0).customProperties.get(RequestUtils.FIELD_NAME_CONTEXT_ID_KEY),
                containersFromDesc1);
    });

    doOperation(new ContainerControlLoopState(),
            UriUtils.buildUri(host, ContainerControlLoopService.CONTROL_LOOP_INFO_LINK), false,
            Service.Action.PATCH);

    Map<String, List<String>> redeployedContainersPerContextId = new HashMap<>();

    AtomicBoolean containerFromDesc1Redeployed = new AtomicBoolean(false);

    waitFor(() -> {
        // get all containers from containerDescription1
        retrieveContainerStates(containerDescription1.documentSelfLink).thenAccept(containerStates -> {
            long healthyContainers = containerStates.stream()
                    .filter(cs -> PowerState.RUNNING.equals(cs.powerState)).count();
            host.log("Healthy containers from %s : %d", containerDescription1.documentSelfLink,
                    healthyContainers);
            containerFromDesc1Redeployed.set(containerDescription1._cluster == healthyContainers
                    && containerDescription1._cluster == containerStates.size());

            List<String> containersFromDesc1 = containerStates.stream().map(cs -> cs.documentSelfLink)
                    .collect(Collectors.toList());
            redeployedContainersPerContextId.put(
                    containerStates.get(0).customProperties.get(RequestUtils.FIELD_NAME_CONTEXT_ID_KEY),
                    containersFromDesc1);
        });

        if (containerFromDesc1Redeployed.get()) {
            containersPerContextId.entrySet().stream().forEach(m -> {
                String contextId = m.getKey();
                List<String> redeployedContainers = redeployedContainersPerContextId.get(contextId);
                host.log("Redeployed container: %s -> %s", StringUtils.join(m.getValue()),
                        StringUtils.join(redeployedContainers));
            });
        }

        return containerFromDesc1Redeployed.get();
    });
}