Example usage for java.util.concurrent.atomic.AtomicBoolean AtomicBoolean(boolean)

List of usage examples for the java.util.concurrent.atomic.AtomicBoolean constructor AtomicBoolean(boolean initialValue)

Introduction

On this page you can find example usages of the java.util.concurrent.atomic.AtomicBoolean constructor AtomicBoolean(boolean initialValue).

Prototype

public AtomicBoolean(boolean initialValue) 

Document

Creates a new AtomicBoolean with the given initial value.
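
For quick reference, here is a minimal, self-contained sketch of the constructor in use; the class name AtomicBooleanExample is illustrative and not taken from any of the projects below.

import java.util.concurrent.atomic.AtomicBoolean;

public class AtomicBooleanExample {
    public static void main(String[] args) {
        // Create the flag with an explicit initial value of false.
        final AtomicBoolean running = new AtomicBoolean(false);

        // Atomically flip it from false to true; only the first caller succeeds.
        if (running.compareAndSet(false, true)) {
            System.out.println("flag acquired, value = " + running.get());
        }

        // Plain write; later get() calls observe the new value.
        running.set(false);
        System.out.println("after set(false): " + running.get());
    }
}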

Usage

From source file:com.netflix.dyno.connectionpool.impl.lb.CircularListTest.java

@Test
public void testSingleThreadWithElementAdd() throws Exception {

    final AtomicBoolean stop = new AtomicBoolean(false);

    Future<Map<Integer, Integer>> future = threadPool.submit(new Callable<Map<Integer, Integer>>() {

        @Override
        public Map<Integer, Integer> call() throws Exception {

            TestWorker worker = new TestWorker();

            while (!stop.get()) {
                worker.process();
            }

            return worker.map;
        }
    });

    Thread.sleep(500);

    List<Integer> newList = new ArrayList<Integer>();
    newList.addAll(iList);
    for (int i = 10; i < 15; i++) {
        newList.add(i);
    }

    cList.swapWithList(newList);

    Thread.sleep(100);

    stop.set(true);

    Map<Integer, Integer> result = future.get();

    Map<Integer, Integer> subMap = CollectionUtils.filterKeys(result, new Predicate<Integer>() {
        @Override
        public boolean apply(Integer input) {
            return input != null && input < 10;
        }
    });

    List<Integer> list = new ArrayList<Integer>(subMap.values());
    checkValues(list);

    subMap = CollectionUtils.difference(result, subMap).entriesOnlyOnLeft();
    list = new ArrayList<Integer>(subMap.values());
    checkValues(list);
}

From source file:com.nridge.connector.common.con_com.crawl.CrawlQueue.java

/**
 * Constructor accepts an application manager parameter and initializes
 * the object accordingly.
 *
 * @param anAppMgr Application manager.
 */
public CrawlQueue(final AppMgr anAppMgr) {
    mAppMgr = anAppMgr;
    mCrawlType = StringUtils.EMPTY;
    mPhaseComplete = new HashMap<String, AtomicBoolean>();
    mPhaseComplete.put(Connector.PHASE_EXTRACT, new AtomicBoolean(false));
    mPhaseComplete.put(Connector.PHASE_TRANSFORM, new AtomicBoolean(false));
    mPhaseComplete.put(Connector.PHASE_PUBLISH, new AtomicBoolean(false));
    mCrawlLastModified = DateUtils.addYears(new Date(), CRAWL_BEGINNING_OF_TIME);
}

From source file:com.nesscomputing.jackson.datatype.TestCustomUuidModule.java

@Test
public void testCustomUUIDSerialization() throws Exception {
    final AtomicBoolean called = new AtomicBoolean(false);
    ObjectMapper mapper = getObjectMapper(new AbstractModule() {
        @Override
        protected void configure() {
            bind(new TypeLiteral<JsonSerializer<UUID>>() {
            }).toInstance(new CustomUuidSerializer() {
                @Override
                public void serialize(UUID value, JsonGenerator jgen, SerializerProvider provider)
                        throws IOException, JsonGenerationException {
                    called.set(true);
                    super.serialize(value, jgen, provider);
                }
            });
        }
    });
    final UUID id = new UUID(9, 9);
    Assert.assertEquals('"' + id.toString() + '"', mapper.writeValueAsString(id));
    Assert.assertTrue(called.get());
}

From source file:com.greplin.zookeeper.RobustZooKeeper.java

public RobustZooKeeper(String ensembleAddresses) throws IOException {
    this.reconnectCount = new AtomicInteger(-1); // start at -1 so that the initial connection doesn't count
    this.shutdown = new AtomicBoolean(false);
    this.reconnectLock = new ReentrantLock();
    this.ensembleAddress = ensembleAddresses;
    this.client = null;
    clientNumber = INSTANCE_COUNTER.incrementAndGet();
}

From source file:com.splout.db.integration.TestMultiThreadedFailover.java

@Test
public void test() throws Throwable {
    FileUtils.deleteDirectory(new File(TMP_FOLDER));
    new File(TMP_FOLDER).mkdirs();

    createSploutEnsemble(N_QNODES, N_DNODES);
    String[] qNodeAddresses = new String[N_QNODES];
    for (int i = 0; i < N_QNODES; i++) {
        qNodeAddresses[i] = getqNodes().get(i).getAddress();
    }

    final SploutClient client = new SploutClient(qNodeAddresses);
    final Tablespace testTablespace = createTestTablespace(N_DNODES);
    final Random random = new Random(SEED);
    final AtomicBoolean failed = new AtomicBoolean(false);

    deployIteration(0, random, client, testTablespace);

    for (QNode qnode : getqNodes()) {
        // Make sure all QNodes are aware of the first deploy.
        // There might be some delay as they have to receive notifications via
        // Hazelcast, etc.
        long waitedSoFar = 0;
        QueryStatus status = null;
        SploutClient perQNodeClient = new SploutClient(qnode.getAddress());
        do {
            status = perQNodeClient.query(TABLESPACE, "0", "SELECT * FROM " + TABLE + ";", null);
            Thread.sleep(100);
            waitedSoFar += 100;
            if (waitedSoFar > 5000) {
                throw new AssertionError("Waiting too much on a test condition");
            }
        } while (status == null || status.getError() != null);
        log.info("QNode [" + qnode.getAddress() + "] is ready to serve deploy 0.");
    }

    try {
        // Business logic here
        ExecutorService service = Executors.newFixedThreadPool(N_THREADS);

        // This is the "mother-fucker" thread.
        // It will bring DNodes down on purpose.
        // And then bring them up again.
        service.submit(new Runnable() {

            @Override
            public void run() {

                while (true) {
                    try {
                        Thread.sleep(1000);
                        log.info("Time to kill some DNode...");
                        int whichOne = (int) (Math.random() * getdNodes().size());
                        getdNodes().get(whichOne).testCommand(TestCommands.SHUTDOWN.toString());
                        Thread.sleep(1000);
                        log.info("Time to bring the DNode back to life...");
                        getdNodes().get(whichOne).testCommand(TestCommands.RESTART.toString());
                    } catch (InterruptedException e) {
                        log.info("MFT - Bye bye!");
                    } catch (DNodeException e) {
                        failed.set(true);
                        e.printStackTrace();
                        throw new RuntimeException(e);
                    } catch (TException e) {
                        failed.set(true);
                        e.printStackTrace();
                        throw new RuntimeException(e);
                    }
                }
            }

        });

        // These threads will continuously perform queries and check that the
        // results are consistent.
        for (int i = 0; i < N_THREADS; i++) {
            service.submit(new Runnable() {
                @SuppressWarnings("unchecked")
                @Override
                public void run() {
                    try {
                        while (true) {
                            int randomDNode = Math.abs(random.nextInt()) % N_DNODES;
                            QueryStatus status = client.query(TABLESPACE, ((randomDNode * 10) - 1) + "",
                                    "SELECT * FROM " + TABLE + ";", null);
                            log.info("Query status -> " + status);
                            assertEquals(1, status.getResult().size());
                            Map<String, Object> jsonResult = (Map<String, Object>) status.getResult().get(0);
                            assertEquals(randomDNode, jsonResult.get("dnode"));
                            Thread.sleep(100);
                        }
                    } catch (InterruptedException ie) {
                        // Bye bye
                        log.info("Bye bye!");
                    } catch (Throwable e) {
                        e.printStackTrace();
                        failed.set(true);
                    }
                }
            });
        }

        Thread.sleep(15000);

        assertEquals(false, failed.get());

    } finally {
        closeSploutEnsemble();
        FileUtils.deleteDirectory(new File(TMP_FOLDER));
    }
}

From source file:de.undercouch.gradle.tasks.download.InterceptorTest.java

/**
 * Tests if an interceptor can be used to manipulate a request before
 * it is sent
 * @throws Exception if anything goes wrong
 */
@Test
public void interceptRequest() throws Exception {
    final AtomicBoolean interceptorCalled = new AtomicBoolean(false);

    Download t = makeProjectAndTask();
    t.src(makeSrc(INTERCEPTOR));
    File dst = folder.newFile();
    t.dest(dst);
    t.requestInterceptor(new HttpRequestInterceptor() {
        @Override
        public void process(HttpRequest request, HttpContext context) throws HttpException, IOException {
            assertFalse(interceptorCalled.get());
            interceptorCalled.set(true);
            request.addHeader(ADDITIONAL_REQUEST_HEADER_KEY, ADDITIONAL_REQUEST_HEADER_VALUE);
        }
    });
    t.execute();

    assertTrue(interceptorCalled.get());

    String dstContents = FileUtils.readFileToString(dst);
    assertEquals(UNINTERCEPTED + ":" + ADDITIONAL_REQUEST_HEADER_VALUE, dstContents);
}

From source file:com.adaptris.core.lms.StreamWrapperCase.java

@Test
public void testInputStream_Read() throws Exception {
    StreamWrapper wrapper = createWrapper(false);
    AtomicBoolean callback = new AtomicBoolean(false);
    File file = writeFile(TempFileUtils.createTrackedFile(wrapper));
    InputStream in = wrapper.openInputStream(file, () -> {
        callback.set(true);
    });
    try (InputStream closeable = in) {
        int read = 0;
        while (read != -1) {
            read = closeable.read();
        }
    }
    tryQuietly(() -> {
        in.close();
    });
    assertTrue(callback.get());
}

From source file:com.openteach.diamond.network.waverider.network.DefaultNetWorkServer.java

public DefaultNetWorkServer(String hostName, int port) {
    this.hostName = hostName;
    this.port = port;
    isWeakuped = new AtomicBoolean(false);
}

From source file:com.ebay.cloud.cms.entmgr.entity.impl.EntityFieldTargetMerger.java

/**
 * Update the field operation entity based on the found entity. This step is
 * to make sure
 * <ul>
 * <li>1. the array length would be updated correctly</li>
 * <li>2. support reference update based on reference ID matching</li>
 * </ul>
 */
public boolean mergeEntityOnField(IEntity givenEntity, String fieldName, IEntity foundEntity) {
    MetaClass metaClass = givenEntity.getMetaClass();
    MetaField field = metaClass.getFieldByName(fieldName);
    boolean isRelation = DataTypeEnum.RELATIONSHIP.equals(field.getDataType());
    boolean array = CardinalityEnum.Many.equals(field.getCardinality());
    boolean hasFoundField = foundEntity.hasField(fieldName);
    List<?> givenValues = givenEntity.getFieldValues(fieldName);
    List<?> foundValues = foundEntity.getFieldValues(fieldName);

    AtomicBoolean hasChange = new AtomicBoolean(false);
    if (array) {
        boolean hasGivenField = givenEntity.hasField(fieldName);
        if (!hasGivenField || givenValues.isEmpty()) {
            return false;
        }

        // do merge only when we found both for array
        if (hasFoundField) {
            List<?> mergeValues = null;
            if (isRelation) {
                // relation will merge based on OID
                mergeValues = mergeTargetReference(givenValues, foundValues, hasChange);
            } else {
                // normal entity will merge by content
                mergeValues = mergeTargetContent(givenValues, foundValues, hasChange);
            }
            if (!hasChange.get()) {
                return false;
            }
            givenEntity.setFieldValues(fieldName, mergeValues);
        }
        return true;
    } else {
        // cardinality=ONE
        return mergeTargetSingle(givenEntity, fieldName, isRelation, hasFoundField, givenValues, foundValues);
    }
}