Example usage for the java.util.concurrent.atomic.AtomicBoolean constructor AtomicBoolean(boolean)

Introduction

This page collects usage examples for the java.util.concurrent.atomic.AtomicBoolean constructor AtomicBoolean(boolean initialValue).

Prototype

public AtomicBoolean(boolean initialValue) 

Document

Creates a new AtomicBoolean with the given initial value.
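
Before the project examples below, here is a minimal, self-contained sketch of the constructor and the "run at most once" pattern that most of those examples rely on. The class name InitOnceExample and its methods are illustrative only and do not come from any of the projects listed.

import java.util.concurrent.atomic.AtomicBoolean;

public class InitOnceExample {

    // false means the one-time work has not run yet
    private final AtomicBoolean initialized = new AtomicBoolean(false);

    public void ensureInitialized() {
        // compareAndSet flips false -> true exactly once, even under contention,
        // so only the first caller performs the one-time work
        if (initialized.compareAndSet(false, true)) {
            System.out.println("running one-time initialization");
        }
    }

    public static void main(String[] args) throws InterruptedException {
        InitOnceExample example = new InitOnceExample();
        Thread t1 = new Thread(example::ensureInitialized);
        Thread t2 = new Thread(example::ensureInitialized);
        t1.start();
        t2.start();
        t1.join();
        t2.join();
        // the initialization message is printed exactly once
    }
}

The AtomicBoolean-plus-CountDownLatch combination in the first example below applies the same idea to suppress duplicate disconnect callbacks.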

Usage

From source file: io.nats.client.ITClusterTest.java

@Test
public void testBasicClusterReconnect() throws Exception {
    try (NatsServer s1 = runServerOnPort(1222)) {
        try (NatsServer s2 = runServerOnPort(1224)) {

            Options opts = new Options.Builder(Nats.defaultOptions()).dontRandomize().build();

            final AtomicBoolean dcbCalled = new AtomicBoolean(false);
            final CountDownLatch dcLatch = new CountDownLatch(1);
            opts.disconnectedCb = new DisconnectedCallback() {
                public void onDisconnect(ConnectionEvent event) {
                    // Suppress any additional calls
                    if (dcbCalled.get()) {
                        return;
                    }
                    dcbCalled.set(true);
                    dcLatch.countDown();
                }
            };

            final CountDownLatch rcLatch = new CountDownLatch(1);
            opts.reconnectedCb = new ReconnectedCallback() {
                public void onReconnect(ConnectionEvent event) {
                    logger.info("rcb called");
                    rcLatch.countDown();
                }
            };

            try (Connection c = Nats.connect(servers, opts)) {
                assertNotNull(c.getConnectedUrl());

                s1.shutdown();

                // wait for disconnect
                assertTrue("Did not receive a disconnect callback message",
                        await(dcLatch, 2, TimeUnit.SECONDS));

                long reconnectTimeStart = System.nanoTime();

                assertTrue("Did not receive a reconnect callback message: ",
                        await(rcLatch, 2, TimeUnit.SECONDS));

                assertTrue(c.getConnectedUrl().equals(testServers[2]));

                // Make sure we did not wait on reconnect for default time.
                // Reconnect should be fast since it will be a switch to the
                // second server and not be dependent on server restart time.
                // assertTrue(reconElapsed.get() <= cf.getReconnectWait());

                long maxDuration = 100;
                long reconnectTime = System.nanoTime() - reconnectTimeStart;
                assertFalse(
                        String.format("Took longer than expected to reconnect: %dms\n",
                                TimeUnit.NANOSECONDS.toMillis(reconnectTime)),
                        TimeUnit.NANOSECONDS.toMillis(reconnectTime) > maxDuration);
            }
        }
    }
}

From source file: com.netflix.curator.framework.imps.TestFrameworkEdges.java

@Test
public void testSessionKilled() throws Exception {
    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1));
    client.start();
    try {
        client.create().forPath("/sessionTest");

        final AtomicBoolean sessionDied = new AtomicBoolean(false);
        Watcher watcher = new Watcher() {
            @Override
            public void process(WatchedEvent event) {
                if (event.getState() == Event.KeeperState.Expired) {
                    sessionDied.set(true);
                }
            }
        };
        client.checkExists().usingWatcher(watcher).forPath("/sessionTest");
        KillSession.kill(client.getZookeeperClient().getZooKeeper(), server.getConnectString());
        Assert.assertNotNull(client.checkExists().forPath("/sessionTest"));
        Assert.assertTrue(sessionDied.get());
    } finally {
        IOUtils.closeQuietly(client);
    }
}

From source file: org.piraso.server.service.ResponseLoggerServiceImplTest.java

@Test
public void testLogging() throws IOException, TransformerConfigurationException, ParserConfigurationException,
        ExecutionException, InterruptedException, SAXException {
    final AtomicBoolean fail = new AtomicBoolean(false);
    ExecutorService executor = Executors.newFixedThreadPool(2);

    final List<MessageEntry> expectedEntries = new ArrayList<MessageEntry>() {
        {
            for (int i = 0; i < 1000; i++) {
                add(new MessageEntry(1l, "test_" + (i + 1)));
            }
        }
    };

    // stop the service when number of entries is reached.
    stopOnWriteTimes(expectedEntries.size());

    Runnable startServiceRunnable = new Runnable() {
        public void run() {
            try {
                service.start();
            } catch (Exception e) {
                fail.set(true);
                e.printStackTrace();
            }
        }
    };

    Runnable logMessagesRunnable = new Runnable() {
        public void run() {
            try {
                // this entry should be ignored since this will throw an exception
                service.log(new ExceptionThrowEntry(1l));

                // these entries should succeed
                for (MessageEntry entry : expectedEntries) {
                    service.log(entry);
                }
            } catch (IOException e) {
                fail.set(true);
                e.printStackTrace();
            }
        }
    };

    Future future = executor.submit(startServiceRunnable);
    executor.submit(logMessagesRunnable);

    future.get();
    executor.shutdown();

    if (fail.get()) {
        fail("failure see exception trace.");
    }

    final List<Entry> entriesRead = new ArrayList<Entry>();
    PirasoEntryReader reader = new PirasoEntryReader(
            new ByteArrayInputStream(response.getContentAsByteArray()));
    reader.addListener(new EntryReadAdapter() {
        @Override
        public void readEntry(EntryReadEvent evt) {
            entriesRead.add(evt.getEntry());
        }
    });

    // start reading
    reader.start();

    assertEquals(service.getId(), reader.getId());
    assertEquals(expectedEntries.size(), entriesRead.size());
}

From source file: org.venice.piazza.servicecontroller.messaging.ServiceMessageThreadManagerTest.java

@Test
/**
 * Test Polling
 */
public void testPollingClosedConnection() {

    final ServiceMessageThreadManager smtmMock = Mockito.spy(smtManager);
    try {
        Mockito.doReturn(new AtomicBoolean(true)).when(smtmMock).makeAtomicBoolean();
        smtmMock.pollServiceJobs();
    } catch (Exception ex) {
        ex.printStackTrace();
    }

}

From source file: info.archinnov.achilles.it.TestCRUDSimpleEntity.java

@Test
public void should_insert_if_not_exists() throws Exception {
    //Given
    final long id = 100L;
    final Date date = buildDateKey();
    scriptExecutor.executeScriptTemplate("SimpleEntity/insert_single_row.cql",
            ImmutableMap.of("id", id, "table", "simple"));

    final SimpleEntity entity = new SimpleEntity(id, date, "value");
    final AtomicBoolean error = new AtomicBoolean(false);
    final AtomicLong currentId = new AtomicLong(0L);

    final LWTResultListener lwtListener = new LWTResultListener() {

        @Override
        public void onSuccess() {

        }

        @Override
        public void onError(LWTResult lwtResult) {
            error.getAndSet(true);
            currentId.getAndSet(lwtResult.currentValues().getTyped("id"));
        }
    };

    //When
    manager.crud().insert(entity).ifNotExists().withLwtResultListener(lwtListener).execute();

    //Then
    assertThat(error.get()).isTrue();
    assertThat(currentId.get()).isEqualTo(id);
}

From source file: com.alibaba.jstorm.daemon.worker.WorkerData.java

@SuppressWarnings({ "rawtypes", "unchecked" })
public WorkerData(Map conf, IContext context, String topology_id, String supervisor_id, int port,
        String worker_id, String jar_path) throws Exception {

    this.conf = conf;
    this.context = context;
    this.topologyId = topology_id;
    this.supervisorId = supervisor_id;
    this.port = port;
    this.workerId = worker_id;

    this.shutdown = new AtomicBoolean(false);

    this.monitorEnable = new AtomicBoolean(true);
    this.topologyStatus = StatusType.active;

    if (StormConfig.cluster_mode(conf).equals("distributed")) {
        String pidDir = StormConfig.worker_pids_root(conf, worker_id);
        JStormServerUtils.createPid(pidDir);
    }

    // create zk interface
    this.zkClusterstate = ZkTool.mk_distributed_cluster_state(conf);
    this.zkCluster = Cluster.mk_storm_cluster_state(zkClusterstate);

    Map rawConf = StormConfig.read_supervisor_topology_conf(conf, topology_id);
    this.stormConf = new HashMap<Object, Object>();
    this.stormConf.putAll(conf);
    this.stormConf.putAll(rawConf);

    JStormMetrics.setTopologyId(topology_id);
    JStormMetrics.setPort(port);
    JStormMetrics.setDebug(ConfigExtension.isEnableMetricDebug(stormConf));
    JStormMetrics.setEnabled(ConfigExtension.isEnableMetrics(stormConf));
    JStormMetrics.addDebugMetrics(ConfigExtension.getDebugMetricNames(stormConf));
    AsmMetric.setSampleRate(ConfigExtension.getMetricSampleRate(stormConf));

    ConfigExtension.setLocalSupervisorId(stormConf, supervisorId);
    ConfigExtension.setLocalWorkerId(stormConf, workerId);
    ConfigExtension.setLocalWorkerPort(stormConf, port);
    ControlMessage.setPort(port);

    JStormMetrics.registerWorkerTopologyMetric(
            JStormMetrics.workerMetricName(MetricDef.CPU_USED_RATIO, MetricType.GAUGE),
            new AsmGauge(new Gauge<Double>() {
                @Override
                public Double getValue() {
                    return JStormUtils.getCpuUsage();
                }
            }));

    JStormMetrics.registerWorkerTopologyMetric(
            JStormMetrics.workerMetricName(MetricDef.MEMORY_USED, MetricType.GAUGE),
            new AsmGauge(new Gauge<Double>() {
                @Override
                public Double getValue() {
                    return JStormUtils.getMemUsage();
                }
            }));

    JStormMetrics.registerWorkerMetric(JStormMetrics.workerMetricName(MetricDef.DISK_USAGE, MetricType.GAUGE),
            new AsmGauge(new Gauge<Double>() {
                @Override
                public Double getValue() {
                    return JStormUtils.getDiskUsage();
                }
            }));

    LOG.info("Worker Configuration " + stormConf);

    try {
        boolean enableClassloader = ConfigExtension.isEnableTopologyClassLoader(stormConf);
        boolean enableDebugClassloader = ConfigExtension.isEnableClassloaderDebug(stormConf);

        if (jar_path == null && enableClassloader == true
                && !conf.get(Config.STORM_CLUSTER_MODE).equals("local")) {
            LOG.error("enable classloader, but not app jar");
            throw new InvalidParameterException();
        }

        URL[] urlArray = new URL[0];
        if (jar_path != null) {
            String[] paths = jar_path.split(":");
            Set<URL> urls = new HashSet<URL>();
            for (String path : paths) {
                if (StringUtils.isBlank(path))
                    continue;
                URL url = new URL("File:" + path);
                urls.add(url);
            }
            urlArray = urls.toArray(new URL[0]);
        }

        WorkerClassLoader.mkInstance(urlArray, ClassLoader.getSystemClassLoader(),
                ClassLoader.getSystemClassLoader().getParent(), enableClassloader, enableDebugClassloader);
    } catch (Exception e) {
        LOG.error("init jarClassLoader error!", e);
        throw new InvalidParameterException();
    }

    if (this.context == null) {
        this.context = TransportFactory.makeContext(stormConf);
    }

    boolean disruptorUseSleep = ConfigExtension.isDisruptorUseSleep(stormConf);
    DisruptorQueue.setUseSleep(disruptorUseSleep);
    boolean isLimited = ConfigExtension.getTopologyBufferSizeLimited(stormConf);
    DisruptorQueue.setLimited(isLimited);
    LOG.info("Disruptor use sleep:" + disruptorUseSleep + ", limited size:" + isLimited);

    // this.transferQueue = new LinkedBlockingQueue<TransferData>();
    int buffer_size = Utils.getInt(stormConf.get(Config.TOPOLOGY_TRANSFER_BUFFER_SIZE));
    WaitStrategy waitStrategy = (WaitStrategy) JStormUtils.createDisruptorWaitStrategy(stormConf);
    this.transferQueue = DisruptorQueue.mkInstance("TotalTransfer", ProducerType.MULTI, buffer_size,
            waitStrategy);
    this.transferQueue.consumerStarted();
    this.sendingQueue = DisruptorQueue.mkInstance("TotalSending", ProducerType.MULTI, buffer_size,
            waitStrategy);
    this.sendingQueue.consumerStarted();

    this.nodeportSocket = new ConcurrentHashMap<WorkerSlot, IConnection>();
    this.taskNodeport = new ConcurrentHashMap<Integer, WorkerSlot>();
    this.workerToResource = new ConcurrentSkipListSet<ResourceWorkerSlot>();
    this.innerTaskTransfer = new ConcurrentHashMap<Integer, DisruptorQueue>();
    this.deserializeQueues = new ConcurrentHashMap<Integer, DisruptorQueue>();
    this.tasksToComponent = new ConcurrentHashMap<Integer, String>();
    this.componentToSortedTasks = new ConcurrentHashMap<String, List<Integer>>();

    Assignment assignment = zkCluster.assignment_info(topologyId, null);
    if (assignment == null) {
        String errMsg = "Failed to get Assignment of " + topologyId;
        LOG.error(errMsg);
        throw new RuntimeException(errMsg);
    }
    workerToResource.addAll(assignment.getWorkers());

    // get current worker's task list

    this.taskids = assignment.getCurrentWorkerTasks(supervisorId, port);
    if (taskids.size() == 0) {
        throw new RuntimeException("No tasks running current workers");
    }
    LOG.info("Current worker taskList:" + taskids);

    // deserialize topology code from local dir
    rawTopology = StormConfig.read_supervisor_topology_code(conf, topology_id);
    sysTopology = Common.system_topology(stormConf, rawTopology);

    generateMaps();

    contextMaker = new ContextMaker(this);

    outTaskStatus = new ConcurrentHashMap<Integer, Boolean>();

    threadPool = Executors.newScheduledThreadPool(THREAD_POOL_NUM);
    TimerTrigger.setScheduledExecutorService(threadPool);

    if (!StormConfig.local_mode(stormConf)) {
        healthReporterThread = new AsyncLoopThread(new JStormHealthReporter(this));
    }

    try {
        Long tmp = StormConfig.read_supervisor_topology_timestamp(conf, topology_id);
        assignmentTS = (tmp == null ? System.currentTimeMillis() : tmp);
    } catch (FileNotFoundException e) {
        assignmentTS = System.currentTimeMillis();
    }

    outboundTasks = new HashSet<Integer>();

    LOG.info("Successfully create WorkerData");

}

From source file: io.cloudslang.worker.management.services.OutboundBufferTest.java

@Test
public void longevityTest() throws InterruptedException {
    int THREADS_NUM = 5;
    long CHECK_DURATION = 5 * 1000L;
    long INFO_FREQUENCY = 2 * 1000L;

    final AtomicBoolean run = new AtomicBoolean(true);
    final CountDownLatch latch = new CountDownLatch(THREADS_NUM + 1);

    for (int i = 1; i <= THREADS_NUM; i++) {
        final int index = i;
        new Thread(new Runnable() {
            private final Class<? extends Message> messageClass = (index % 2) != 0 ? DummyMsg1.class
                    : DummyMsg2.class;

            @Override
            public void run() {
                int counter = 0;
                try {
                    logger.debug("started, will generate messages of " + messageClass.getSimpleName());

                    while (run.get()) {
                        buffer.put(messageClass.newInstance());
                        counter++;
                        Thread.sleep(5L);
                    }
                    logger.debug("thread finished. processed " + counter + " messages");
                } catch (Exception ex) {
                    logger.error("thread finished", ex);
                } finally {
                    latch.countDown();
                }
            }
        }, "T-" + i).start();
    }

    final DrainStatistics statistics = new DrainStatistics();
    //noinspection unchecked
    doAnswer(new Answer<Object>() {
        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            @SuppressWarnings("unchecked")
            List<Message> messages = (List<Message>) invocation.getArguments()[0];
            int weight = 0;
            for (Message message : messages)
                weight += message.getWeight();
            statistics.add(messages.size(), weight);
            return null;
        }
    }).when(dispatcherService).dispatch(anyList(), anyString(), anyString(), anyString());

    new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                logger.debug("started");

                while (run.get()) {
                    buffer.drain();
                    Thread.sleep(30L);
                }

                while (buffer.getSize() > 0)
                    buffer.drain();
            } catch (Exception ex) {
                logger.error("thread finished", ex);
            } finally {
                latch.countDown();
            }
        }
    }, "T-D").start();

    long t = System.currentTimeMillis();
    while (System.currentTimeMillis() - t < CHECK_DURATION) {
        Thread.sleep(INFO_FREQUENCY);
        logger.debug(buffer.getStatus());
    }
    run.set(false);
    latch.await();

    System.out.println("Drain statistics: " + statistics.report());
}

From source file: com.fluxtion.learning.fx.utils.PriceOrderGenerator.java

public void init() {
    run = new AtomicBoolean(false);
    configArray = new CcyConfig[0];
    _random = new Random(2000);
    orderHelper = new PriceOrderHelper(biasCheck);

    addCcy(EURUSD);
    addCcy(GBPUSD);
    addCcy(EURCHF);
    addCcy(USDCHF);
    addCcy(EURJPY);
    addCcy(USDJPY);
    addCcy(EURHUF);
    addCcy(EURDKK);
    addCcy(EURNOK);
    addCcy(AUDUSD);

}

From source file: com.gochinatv.datasync.util.Shell.java

/**
 * Run a command
 */
private void runCommand() throws IOException {
    ProcessBuilder builder = new ProcessBuilder(getExecString());
    Timer timeOutTimer = null;
    ShellTimeoutTimerTask timeoutTimerTask;
    timedOut = new AtomicBoolean(false);
    completed = new AtomicBoolean(false);

    if (environment != null) {
        builder.environment().putAll(this.environment);
    }
    if (dir != null) {
        builder.directory(this.dir);
    }

    process = builder.start();
    if (timeOutInterval > 0) {
        timeOutTimer = new Timer();
        timeoutTimerTask = new ShellTimeoutTimerTask(this);
        //One time scheduling.
        timeOutTimer.schedule(timeoutTimerTask, timeOutInterval);
    }
    final BufferedReader errReader = new BufferedReader(new InputStreamReader(process.getErrorStream()));
    BufferedReader inReader = new BufferedReader(new InputStreamReader(process.getInputStream()));
    final StringBuffer errMsg = new StringBuffer();

    // read error and input streams as this would free up the buffers
    // free the error stream buffer
    Thread errThread = new Thread() {
        @Override
        public void run() {
            try {
                String line = errReader.readLine();
                while ((line != null) && !isInterrupted()) {
                    errMsg.append(line);
                    errMsg.append(System.getProperty("line.separator"));
                    line = errReader.readLine();
                }
            } catch (IOException ioe) {
                LOG.warn("Error reading the error stream", ioe);
            }
        }
    };
    try {
        errThread.start();
    } catch (IllegalStateException ignored) {
    }
    try {
        parseExecResult(inReader); // parse the output
        // clear the input stream buffer
        String line = inReader.readLine();
        while (line != null) {
            line = inReader.readLine();
        }
        // wait for the process to finish and check the exit code
        exitCode = process.waitFor();
        try {
            // make sure that the error thread exits
            errThread.join();
        } catch (InterruptedException ie) {
            LOG.warn("Interrupted while reading the error stream", ie);
        }
        completed.set(true);
        //the timeout thread handling
        //taken care in finally block
        if (exitCode != 0) {
            throw new ExitCodeException(exitCode, errMsg.toString());
        }
    } catch (InterruptedException ie) {
        throw new IOException(ie.toString());
    } finally {
        if ((timeOutTimer != null) && !timedOut.get()) {
            timeOutTimer.cancel();
        }
        // close the input stream
        try {
            inReader.close();
        } catch (IOException ioe) {
            LOG.warn("Error while closing the input stream", ioe);
        }
        if (!completed.get()) {
            errThread.interrupt();
        }
        try {
            errReader.close();
        } catch (IOException ioe) {
            LOG.warn("Error while closing the error stream", ioe);
        }
        process.destroy();
        lastTime = System.currentTimeMillis();
    }
}

From source file: fr.wseduc.webutils.email.SendInBlueSender.java

@Override
protected void sendEmail(JsonObject json, final Handler<Message<JsonObject>> handler) {
    if (json == null || json.getArray("to") == null || json.getString("from") == null
            || json.getString("subject") == null || json.getString("body") == null) {
        handler.handle(new ResultMessage().error("invalid.parameters"));
        return;
    }
    if (splitRecipients && json.getArray("to").size() > 1) {
        final AtomicInteger count = new AtomicInteger(json.getArray("to").size());
        final AtomicBoolean success = new AtomicBoolean(true);
        final JsonArray errors = new JsonArray();
        final Handler<Message<JsonObject>> h = new Handler<Message<JsonObject>>() {
            @Override
            public void handle(Message<JsonObject> message) {
                if (!"ok".equals(message.body().getString("status"))) {
                    success.set(false);
                    errors.addString(message.body().getString("message"));
                }
                if (count.decrementAndGet() == 0) {
                    if (success.get()) {
                        handler.handle(new ResultMessage());
                    } else {
                        handler.handle(new ResultMessage().error(errors.encode()));
                    }
                }
            }
        };
        for (Object to : json.getArray("to")) {
            send(json.copy().putArray("to", new JsonArray().addString(to.toString())), h);
        }
    } else {
        send(json, handler);
    }
}