Example usage for java.util.concurrent.atomic AtomicBoolean get

List of usage examples for java.util.concurrent.atomic AtomicBoolean get

Introduction

On this page you can find example usages of the java.util.concurrent.atomic.AtomicBoolean.get() method.

Prototype

public final boolean get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
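
Before looking at real-world sources, here is a minimal, self-contained sketch of the call (the class name AtomicBooleanGetDemo is illustrative only):

import java.util.concurrent.atomic.AtomicBoolean;

public class AtomicBooleanGetDemo {
    public static void main(String[] args) {
        AtomicBoolean flag = new AtomicBoolean(false);
        System.out.println(flag.get()); // false: the initial value
        flag.set(true);                 // volatile write, visible to other threads
        System.out.println(flag.get()); // true: the volatile read sees the update
    }
}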

Usage

From source file:net.sourceforge.ganttproject.io.CsvImportTest.java

public void testSkipLinesWithEmptyMandatoryFields() throws IOException {
    String header = "A, B, C";
    String data1 = "a1,,c1";
    String data2 = "a2,b2,c2";
    String data3 = ",b3,c3";
    final AtomicBoolean wasCalled = new AtomicBoolean(false);
    GanttCSVOpen.RecordGroup recordGroup = new GanttCSVOpen.RecordGroup("ABC",
            ImmutableSet.<String>of("A", "B", "C"), ImmutableSet.<String>of("A", "B")) {
        @Override
        protected boolean doProcess(CSVRecord record) {
            if (!hasMandatoryFields(record)) {
                return false;
            }
            wasCalled.set(true);
            assertEquals("a2", record.get("A"));
            assertEquals("b2", record.get("B"));
            return true;
        }
    };
    GanttCSVOpen importer = new GanttCSVOpen(createSupplier(Joiner.on('\n').join(header, data1, data2, data3)),
            recordGroup);
    importer.load();
    assertTrue(wasCalled.get());
    assertEquals(2, importer.getSkippedLineCount());
}
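
The test above uses get() to verify from the test thread that a callback actually ran. A minimal sketch of that flag-and-assert idiom, with a hypothetical Runnable standing in for the record-group callback:

import java.util.concurrent.atomic.AtomicBoolean;

public class CallbackFlagSketch {
    public static void main(String[] args) {
        final AtomicBoolean wasCalled = new AtomicBoolean(false);
        // Stand-in for the doProcess(...) callback in the test above.
        Runnable callback = () -> wasCalled.set(true);
        callback.run();
        // get() reads the flag back; in a JUnit test this would be assertTrue(wasCalled.get()).
        if (!wasCalled.get()) {
            throw new AssertionError("callback was never invoked");
        }
    }
}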

From source file:org.apache.hadoop.hdfs.TestBlockReaderFactory.java

/**
 * Test the case where we have a failure to complete a short circuit read
 * that occurs, and then later on, we have a success.
 * Any thread waiting on a cache load should receive the failure (if it
 * occurs);  however, the failure result should not be cached.  We want 
 * to be able to retry later and succeed.
 */
@Test(timeout = 60000)
public void testShortCircuitCacheTemporaryFailure() throws Exception {
    BlockReaderTestUtil.enableBlockReaderFactoryTracing();
    final AtomicBoolean replicaCreationShouldFail = new AtomicBoolean(true);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    BlockReaderFactory.createShortCircuitReplicaInfoCallback = new ShortCircuitCache.ShortCircuitReplicaCreator() {
        @Override
        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
            if (replicaCreationShouldFail.get()) {
                // Insert a short delay to increase the chance that one client
                // thread waits for the other client thread's failure via
                // a condition variable.
                Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS);
                return new ShortCircuitReplicaInfo();
            }
            return null;
        }
    };
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testShortCircuitCacheTemporaryFailure", sockDir);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4000;
    final int NUM_THREADS = 2;
    final int SEED = 0xFADED;
    final CountDownLatch gotFailureLatch = new CountDownLatch(NUM_THREADS);
    final CountDownLatch shouldRetryLatch = new CountDownLatch(1);
    DFSTestUtil.createFile(dfs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    Runnable readerRunnable = new Runnable() {
        @Override
        public void run() {
            try {
                // First time should fail.
                List<LocatedBlock> locatedBlocks = cluster.getNameNode().getRpcServer()
                        .getBlockLocations(TEST_FILE, 0, TEST_FILE_LEN).getLocatedBlocks();
                LocatedBlock lblock = locatedBlocks.get(0); // first block
                BlockReader blockReader = null;
                try {
                    blockReader = BlockReaderTestUtil.getBlockReader(cluster, lblock, 0, TEST_FILE_LEN);
                    Assert.fail("expected getBlockReader to fail the first time.");
                } catch (Throwable t) {
                    Assert.assertTrue(
                            "expected to see 'TCP reads were disabled " + "for testing' in exception " + t,
                            t.getMessage().contains("TCP reads were disabled for testing"));
                } finally {
                    if (blockReader != null)
                        blockReader.close(); // keep findbugs happy
                }
                gotFailureLatch.countDown();
                shouldRetryLatch.await();

                // Second time should succeed.
                try {
                    blockReader = BlockReaderTestUtil.getBlockReader(cluster, lblock, 0, TEST_FILE_LEN);
                } catch (Throwable t) {
                    LOG.error("error trying to retrieve a block reader " + "the second time.", t);
                    throw t;
                } finally {
                    if (blockReader != null)
                        blockReader.close();
                }
            } catch (Throwable t) {
                LOG.error("getBlockReader failure", t);
                testFailed.set(true);
            }
        }
    };
    Thread threads[] = new Thread[NUM_THREADS];
    for (int i = 0; i < NUM_THREADS; i++) {
        threads[i] = new Thread(readerRunnable);
        threads[i].start();
    }
    gotFailureLatch.await();
    replicaCreationShouldFail.set(false);
    shouldRetryLatch.countDown();
    for (int i = 0; i < NUM_THREADS; i++) {
        Uninterruptibles.joinUninterruptibly(threads[i]);
    }
    cluster.shutdown();
    sockDir.close();
    Assert.assertFalse(testFailed.get());
}
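
The test above uses one AtomicBoolean as a failure-injection switch (replicaCreationShouldFail) and another (testFailed) that worker threads set and the main thread reads with get() after joining. A distilled, HDFS-free sketch of that two-flag coordination (all names here are illustrative):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;

public class FailureInjectionSketch {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean shouldFail = new AtomicBoolean(true);
        final AtomicBoolean testFailed = new AtomicBoolean(false);
        final CountDownLatch failedOnce = new CountDownLatch(1);
        final CountDownLatch shouldRetry = new CountDownLatch(1);

        Thread worker = new Thread(() -> {
            try {
                if (shouldFail.get()) {          // first attempt: injected failure
                    failedOnce.countDown();
                    shouldRetry.await();         // wait until the switch is flipped
                }
                if (shouldFail.get()) {          // second attempt is expected to succeed
                    throw new IllegalStateException("still failing");
                }
            } catch (Throwable t) {
                testFailed.set(true);            // record the failure for the main thread
            }
        });
        worker.start();

        failedOnce.await();
        shouldFail.set(false);                   // flip the switch
        shouldRetry.countDown();
        worker.join();
        System.out.println("testFailed = " + testFailed.get()); // expected: false
    }
}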

From source file:biz.ganttproject.impex.csv.CsvImportTest.java

public void testIncompleteHeader() throws IOException {
    String header = "A, B";
    String data = "a1, b1";
    final AtomicBoolean wasCalled = new AtomicBoolean(false);
    RecordGroup recordGroup = new RecordGroup("ABC", ImmutableSet.<String>of("A", "B", "C"), // all fields
            ImmutableSet.<String>of("A", "B")) { // mandatory fields
        @Override
        protected boolean doProcess(CSVRecord record) {
            if (!super.doProcess(record)) {
                return false;
            }
            wasCalled.set(true);
            assertEquals("a1", record.get("A"));
            assertEquals("b1", record.get("B"));
            return true;
        }
    };
    GanttCSVOpen importer = new GanttCSVOpen(createSupplier(Joiner.on('\n').join(header, data)), recordGroup);
    importer.load();
    assertTrue(wasCalled.get());
}

From source file:com.netflix.curator.framework.imps.TestFrameworkEdges.java

@Test
public void testSessionKilled() throws Exception {
    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1));
    client.start();
    try {
        client.create().forPath("/sessionTest");

        final AtomicBoolean sessionDied = new AtomicBoolean(false);
        Watcher watcher = new Watcher() {
            @Override
            public void process(WatchedEvent event) {
                if (event.getState() == Event.KeeperState.Expired) {
                    sessionDied.set(true);
                }
            }
        };
        client.checkExists().usingWatcher(watcher).forPath("/sessionTest");
        KillSession.kill(client.getZookeeperClient().getZooKeeper(), server.getConnectString());
        Assert.assertNotNull(client.checkExists().forPath("/sessionTest"));
        Assert.assertTrue(sessionDied.get());
    } finally {
        IOUtils.closeQuietly(client);
    }
}

From source file:de.jackwhite20.japs.client.cache.impl.PubSubCacheImpl.java

@Override
public Future<Boolean> has(String key) {

    if (key == null || key.isEmpty()) {
        throw new IllegalArgumentException("key cannot be null or empty");
    }

    return executorService.submit(() -> {

        int id = CALLBACK_COUNTER.getAndIncrement();

        AtomicBoolean has = new AtomicBoolean(false);

        CountDownLatch countDownLatch = new CountDownLatch(1);

        callbacks.put(id, new Consumer<JSONObject>() {

            @Override
            public void accept(JSONObject jsonObject) {

                has.set(jsonObject.getBoolean("has"));

                countDownLatch.countDown();
            }
        });

        JSONObject jsonObject = new JSONObject().put("op", OpCode.OP_CACHE_HAS.getCode()).put("key", key)
                .put("id", id);

        write(jsonObject);

        countDownLatch.await();

        return has.get();
    });
}
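
The has() implementation above pairs an AtomicBoolean with a CountDownLatch so a blocking call can return a value produced by an asynchronous callback. A distilled sketch of that pattern (the background thread simulates the server response and is illustrative only):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;

public class AsyncResultSketch {
    public static void main(String[] args) throws InterruptedException {
        AtomicBoolean result = new AtomicBoolean(false);
        CountDownLatch latch = new CountDownLatch(1);
        // Simulated asynchronous response, e.g. a network callback.
        new Thread(() -> {
            result.set(true);   // store the answer
            latch.countDown();  // release the waiting caller
        }).start();
        latch.await();                    // block until the callback has fired
        System.out.println(result.get()); // read the answer: true
    }
}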

From source file:com.rapleaf.hank.ZkTestCase.java

@Override
protected void setUp() throws Exception {
    super.setUp();
    Logger.getLogger("org.apache.zookeeper").setLevel(Level.WARN);

    setupZkServer();

    final Object lock = new Object();
    final AtomicBoolean connected = new AtomicBoolean(false);

    zk = new ZooKeeperPlus("127.0.0.1:" + zkClientPort, 1000000, new Watcher() {
        @Override
        public void process(WatchedEvent event) {
            switch (event.getType()) {
            case None:
                if (event.getState() == KeeperState.SyncConnected) {
                    connected.set(true);
                    synchronized (lock) {
                        lock.notifyAll();
                    }
                }
            }
            LOG.debug(event.toString());
        }
    });

    synchronized (lock) {
        lock.wait(2000);
    }
    if (!connected.get()) {
        fail("timed out waiting for the zk client connection to come online!");
    }
    LOG.debug("session timeout: " + zk.getSessionTimeout());

    zk.deleteNodeRecursively(zkRoot);
    createNodeRecursively(zkRoot);
}
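
Because lock.wait(2000) returns the same way on a timeout as on a notification, the setUp() above needs the connected flag to tell the two apart: get() is checked after the wait to decide whether to fail. A minimal sketch of that wait-with-flag idiom (the background thread simulates the connection callback):

import java.util.concurrent.atomic.AtomicBoolean;

public class WaitWithFlagSketch {
    public static void main(String[] args) throws InterruptedException {
        final Object lock = new Object();
        final AtomicBoolean connected = new AtomicBoolean(false);

        // Simulated connection event arriving on another thread.
        new Thread(() -> {
            synchronized (lock) {
                connected.set(true);  // record the event ...
                lock.notifyAll();     // ... and wake the waiter while holding the lock
            }
        }).start();

        synchronized (lock) {
            if (!connected.get()) {   // re-check under the lock to avoid a lost wakeup
                lock.wait(2000);
            }
        }
        if (!connected.get()) {
            throw new IllegalStateException("timed out waiting for the connection");
        }
        System.out.println("connected");
    }
}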

From source file:com.google.gdt.eclipse.designer.util.Utils.java

/**
 * @return <code>true</code> if given {@link IPackageFragment} is "source" package of some GWT
 *         module.
 */
public static boolean isModuleSourcePackage(IPackageFragment packageFragment) throws Exception {
    final String packageName = packageFragment.getElementName();
    // check enclosing module
    ModuleDescription module = getSingleModule(packageFragment);
    if (module != null) {
        final AtomicBoolean result = new AtomicBoolean();
        ModuleVisitor.accept(module, new ModuleVisitor() {
            @Override
            public boolean visitModule(ModuleElement moduleElement) {
                String modulePackage = CodeUtils.getPackage(moduleElement.getId()) + ".";
                if (packageName.startsWith(modulePackage)) {
                    String folderInModule = packageName.substring(modulePackage.length()).replace('.', '/');
                    if (moduleElement.isInSourceFolder(folderInModule)) {
                        result.set(true);
                        return false;
                    }
                }
                return true;
            }
        });
        return result.get();
    }
    // no enclosing module
    return false;
}
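
Local variables captured by an anonymous class must be effectively final, so the method above uses a final AtomicBoolean as a mutable result holder inside the visitor and reads it back with get(). A minimal sketch of that holder idiom, with a hypothetical Visitor interface standing in for ModuleVisitor:

import java.util.concurrent.atomic.AtomicBoolean;

public class ResultHolderSketch {
    // Hypothetical callback interface standing in for ModuleVisitor.
    interface Visitor {
        void visit(String element);
    }

    static void accept(Visitor visitor) {
        visitor.visit("match");
    }

    public static void main(String[] args) {
        final AtomicBoolean result = new AtomicBoolean();
        accept(new Visitor() {
            @Override
            public void visit(String element) {
                if ("match".equals(element)) {
                    result.set(true); // write from inside the anonymous class
                }
            }
        });
        System.out.println(result.get()); // read the result after the traversal
    }
}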

From source file:org.apache.hadoop.hdfs.client.impl.TestBlockReaderFactory.java

/**
 * Test the case where we have a failure to complete a short circuit read
 * that occurs, and then later on, we have a success.
 * Any thread waiting on a cache load should receive the failure (if it
 * occurs);  however, the failure result should not be cached.  We want
 * to be able to retry later and succeed.
 */
@Test(timeout = 60000)
public void testShortCircuitCacheTemporaryFailure() throws Exception {
    BlockReaderTestUtil.enableBlockReaderFactoryTracing();
    final AtomicBoolean replicaCreationShouldFail = new AtomicBoolean(true);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    BlockReaderFactory.createShortCircuitReplicaInfoCallback = new ShortCircuitCache.ShortCircuitReplicaCreator() {
        @Override
        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
            if (replicaCreationShouldFail.get()) {
                // Insert a short delay to increase the chance that one client
                // thread waits for the other client thread's failure via
                // a condition variable.
                Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS);
                return new ShortCircuitReplicaInfo();
            }
            return null;
        }
    };
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testShortCircuitCacheTemporaryFailure", sockDir);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4000;
    final int NUM_THREADS = 2;
    final int SEED = 0xFADED;
    final CountDownLatch gotFailureLatch = new CountDownLatch(NUM_THREADS);
    final CountDownLatch shouldRetryLatch = new CountDownLatch(1);
    DFSTestUtil.createFile(dfs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    Runnable readerRunnable = new Runnable() {
        @Override
        public void run() {
            try {
                // First time should fail.
                List<LocatedBlock> locatedBlocks = cluster.getNameNode().getRpcServer()
                        .getBlockLocations(TEST_FILE, 0, TEST_FILE_LEN).getLocatedBlocks();
                LocatedBlock lblock = locatedBlocks.get(0); // first block
                BlockReader blockReader = null;
                try {
                    blockReader = BlockReaderTestUtil.getBlockReader(cluster.getFileSystem(), lblock, 0,
                            TEST_FILE_LEN);
                    Assert.fail("expected getBlockReader to fail the first time.");
                } catch (Throwable t) {
                    Assert.assertTrue(
                            "expected to see 'TCP reads were disabled " + "for testing' in exception " + t,
                            t.getMessage().contains("TCP reads were disabled for testing"));
                } finally {
                    if (blockReader != null)
                        blockReader.close(); // keep findbugs happy
                }
                gotFailureLatch.countDown();
                shouldRetryLatch.await();

                // Second time should succeed.
                try {
                    blockReader = BlockReaderTestUtil.getBlockReader(cluster.getFileSystem(), lblock, 0,
                            TEST_FILE_LEN);
                } catch (Throwable t) {
                    LOG.error("error trying to retrieve a block reader " + "the second time.", t);
                    throw t;
                } finally {
                    if (blockReader != null)
                        blockReader.close();
                }
            } catch (Throwable t) {
                LOG.error("getBlockReader failure", t);
                testFailed.set(true);
            }
        }
    };
    Thread threads[] = new Thread[NUM_THREADS];
    for (int i = 0; i < NUM_THREADS; i++) {
        threads[i] = new Thread(readerRunnable);
        threads[i].start();
    }
    gotFailureLatch.await();
    replicaCreationShouldFail.set(false);
    shouldRetryLatch.countDown();
    for (int i = 0; i < NUM_THREADS; i++) {
        Uninterruptibles.joinUninterruptibly(threads[i]);
    }
    cluster.shutdown();
    sockDir.close();
    Assert.assertFalse(testFailed.get());
}

From source file:io.nats.client.ITClusterTest.java

@Test
public void testBasicClusterReconnect() throws Exception {
    try (NatsServer s1 = runServerOnPort(1222)) {
        try (NatsServer s2 = runServerOnPort(1224)) {

            Options opts = new Options.Builder(Nats.defaultOptions()).dontRandomize().build();

            final AtomicBoolean dcbCalled = new AtomicBoolean(false);
            final CountDownLatch dcLatch = new CountDownLatch(1);
            opts.disconnectedCb = new DisconnectedCallback() {
                public void onDisconnect(ConnectionEvent event) {
                    // Suppress any additional calls
                    if (dcbCalled.get()) {
                        return;
                    }
                    dcbCalled.set(true);
                    dcLatch.countDown();
                }
            };

            final CountDownLatch rcLatch = new CountDownLatch(1);
            opts.reconnectedCb = new ReconnectedCallback() {
                public void onReconnect(ConnectionEvent event) {
                    logger.info("rcb called");
                    rcLatch.countDown();
                }
            };

            try (Connection c = Nats.connect(servers, opts)) {
                assertNotNull(c.getConnectedUrl());

                s1.shutdown();

                // wait for disconnect
                assertTrue("Did not receive a disconnect callback message",
                        await(dcLatch, 2, TimeUnit.SECONDS));

                long reconnectTimeStart = System.nanoTime();

                assertTrue("Did not receive a reconnect callback message: ",
                        await(rcLatch, 2, TimeUnit.SECONDS));

                assertTrue(c.getConnectedUrl().equals(testServers[2]));

                // Make sure we did not wait on reconnect for default time.
                // Reconnect should be fast since it will be a switch to the
                // second server and not be dependent on server restart time.
                // assertTrue(reconElapsed.get() <= cf.getReconnectWait());

                long maxDuration = 100;
                long reconnectTime = System.nanoTime() - reconnectTimeStart;
                assertFalse(
                        String.format("Took longer than expected to reconnect: %dms\n",
                                TimeUnit.NANOSECONDS.toMillis(reconnectTime)),
                        TimeUnit.NANOSECONDS.toMillis(reconnectTime) > maxDuration);
            }
        }
    }
}
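
In the test above, get() acts as an "already seen one?" guard so that only the first disconnect callback counts. A minimal sketch of that guard; note that a guard shared by truly concurrent callers would normally use compareAndSet, since the separate get()/set() pair here is not atomic:

import java.util.concurrent.atomic.AtomicBoolean;

public class FirstCallGuardSketch {
    private static final AtomicBoolean called = new AtomicBoolean(false);

    static void onDisconnect(String event) {
        if (called.get()) {
            return;               // suppress any additional calls
        }
        called.set(true);
        System.out.println("handling first disconnect: " + event);
    }

    public static void main(String[] args) {
        onDisconnect("server 1 down");
        onDisconnect("server 1 down again"); // ignored by the guard
    }
}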

From source file:org.apache.bookkeeper.bookie.Bookie.java

public static void checkDirectoryStructure(File dir) throws IOException {
    if (!dir.exists()) {
        File parent = dir.getParentFile();
        File preV3versionFile = new File(dir.getParent(), BookKeeperConstants.VERSION_FILENAME);

        final AtomicBoolean oldDataExists = new AtomicBoolean(false);
        parent.list(new FilenameFilter() {
            @Override
            public boolean accept(File dir, String name) {
                if (name.endsWith(".txn") || name.endsWith(".idx") || name.endsWith(".log")) {
                    oldDataExists.set(true);
                }
                return true;
            }
        });
        if (preV3versionFile.exists() || oldDataExists.get()) {
            String err = "Directory layout version is less than 3, upgrade needed";
            LOG.error(err);
            throw new IOException(err);
        }
        if (!dir.mkdirs()) {
            String err = "Unable to create directory " + dir;
            LOG.error(err);
            throw new IOException(err);
        }
    }
}