Example usage for java.util.concurrent.atomic AtomicBoolean get

Introduction

On this page you can find example usage for java.util.concurrent.atomic AtomicBoolean get.

Prototype

public final boolean get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
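
Before the real-world examples, here is a minimal, self-contained sketch of the most common get() idiom: polling an AtomicBoolean as a stop flag shared between threads. This is an illustrative example written for this page, not taken from any of the projects below.

import java.util.concurrent.atomic.AtomicBoolean;

public class StopFlagExample {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean stop = new AtomicBoolean(false);

        Thread worker = new Thread(() -> {
            // get() reads with volatile semantics, so the set(true)
            // performed by the main thread below is guaranteed visible here.
            while (!stop.get()) {
                // ... do one unit of work ...
            }
        });

        worker.start();
        Thread.sleep(100); // let the worker spin briefly
        stop.set(true);    // the worker's next get() returns true and the loop exits
        worker.join();
    }
}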

Usage

From source file:org.apache.hadoop.hive.ql.txn.compactor.TestCleaner.java

@Test
public void partitionNotBlockedBySubsequentLock() throws Exception {
    Table t = newTable("default", "bblt", true);
    Partition p = newPartition(t, "today");

    // Set the run frequency low on this test so it doesn't take long
    conf.setTimeVar(HiveConf.ConfVars.HIVE_COMPACTOR_CLEANER_RUN_INTERVAL, 100, TimeUnit.MILLISECONDS);

    addBaseFile(t, p, 20L, 20);
    addDeltaFile(t, p, 21L, 22L, 2);
    addDeltaFile(t, p, 23L, 24L, 2);
    addDeltaFile(t, p, 21L, 24L, 4);

    burnThroughTransactions(25);

    CompactionRequest rqst = new CompactionRequest("default", "bblt", CompactionType.MINOR);
    rqst.setPartitionname("ds=today");
    txnHandler.compact(rqst);
    CompactionInfo ci = txnHandler.findNextToCompact("fred");
    txnHandler.markCompacted(ci);
    txnHandler.setRunAs(ci.id, System.getProperty("user.name"));

    LockComponent comp = new LockComponent(LockType.SHARED_READ, LockLevel.PARTITION, "default");
    comp.setTablename("bblt");
    comp.setPartitionname("ds=today");
    List<LockComponent> components = new ArrayList<LockComponent>(1);
    components.add(comp);
    LockRequest req = new LockRequest(components, "me", "localhost");
    LockResponse res = txnHandler.lock(req);

    AtomicBoolean looped = new AtomicBoolean();
    looped.set(false);
    startCleaner(looped);

    // Make sure the cleaner has a chance to run once
    while (!looped.get()) {
        Thread.sleep(100);
    }

    // There should still be one request, as the lock is still held.
    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
    Assert.assertEquals(1, compacts.size());

    // Obtain a second lock. This shouldn't block the cleaner, as it was acquired after the
    // initial clean request.
    LockComponent comp2 = new LockComponent(LockType.SHARED_READ, LockLevel.PARTITION, "default");
    comp2.setTablename("bblt");
    comp2.setPartitionname("ds=today");
    List<LockComponent> components2 = new ArrayList<LockComponent>(1);
    components2.add(comp2);
    LockRequest req2 = new LockRequest(components2, "me", "localhost");
    LockResponse res2 = txnHandler.lock(req2);

    // Unlock the previous lock
    txnHandler.unlock(new UnlockRequest(res.getLockid()));
    looped.set(false);

    while (!looped.get()) {
        Thread.sleep(100);
    }
    stopThread();
    Thread.sleep(200);

    // Check there are no compaction requests left.
    rsp = txnHandler.showCompact(new ShowCompactRequest());
    compacts = rsp.getCompacts();
    Assert.assertEquals(0, compacts.size());
}

From source file:com.netflix.dyno.connectionpool.impl.lb.CircularListTest.java

@Test
public void testSingleThreadWithElementRemove() throws Exception {

    final AtomicBoolean stop = new AtomicBoolean(false);

    Future<Map<Integer, Integer>> future = threadPool.submit(new Callable<Map<Integer, Integer>>() {

        @Override
        public Map<Integer, Integer> call() throws Exception {

            TestWorker worker = new TestWorker();

            while (!stop.get()) {
                worker.process();
            }

            return worker.map;
        }
    });

    Thread.sleep(200);

    List<Integer> newList = new ArrayList<Integer>();
    newList.addAll(iList);

    final List<Integer> removedElements = new ArrayList<Integer>();
    removedElements.add(newList.remove(2));
    removedElements.add(newList.remove(5));
    removedElements.add(newList.remove(6));

    cList.swapWithList(newList);

    Thread.sleep(200);
    stop.set(true);

    Map<Integer, Integer> result = future.get();

    Map<Integer, Integer> subMap = CollectionUtils.filterKeys(result, new Predicate<Integer>() {
        @Override
        public boolean apply(Integer input) {
            return !removedElements.contains(input);
        }
    });

    checkValues(new ArrayList<Integer>(subMap.values()));
}

From source file:org.apache.hadoop.hbase.master.procedure.TestMasterProcedureSchedulerConcurrency.java

@Test(timeout = 60000)
public void testConcurrentCreateDelete() throws Exception {
    final MasterProcedureScheduler procQueue = queue;
    final TableName table = TableName.valueOf("testtb");
    final AtomicBoolean running = new AtomicBoolean(true);
    final AtomicBoolean failure = new AtomicBoolean(false);
    Thread createThread = new Thread() {
        @Override
        public void run() {
            try {
                TestTableProcedure proc = new TestTableProcedure(1, table,
                        TableProcedureInterface.TableOperationType.CREATE);
                while (running.get() && !failure.get()) {
                    if (procQueue.tryAcquireTableExclusiveLock(proc, table)) {
                        procQueue.releaseTableExclusiveLock(proc, table);
                    }
                }
            } catch (Throwable e) {
                LOG.error("create failed", e);
                failure.set(true);
            }
        }
    };

    Thread deleteThread = new Thread() {
        @Override
        public void run() {
            try {
                TestTableProcedure proc = new TestTableProcedure(2, table,
                        TableProcedureInterface.TableOperationType.DELETE);
                while (running.get() && !failure.get()) {
                    if (procQueue.tryAcquireTableExclusiveLock(proc, table)) {
                        procQueue.releaseTableExclusiveLock(proc, table);
                    }
                    procQueue.markTableAsDeleted(table, proc);
                }
            } catch (Throwable e) {
                LOG.error("delete failed", e);
                failure.set(true);
            }
        }
    };

    createThread.start();
    deleteThread.start();
    for (int i = 0; i < 100 && running.get() && !failure.get(); ++i) {
        Thread.sleep(100);
    }
    running.set(false);
    createThread.join();
    deleteThread.join();
    assertEquals(false, failure.get());
}

From source file:org.lol.reddit.reddit.api.RedditAPIIndividualSubredditDataRequester.java

public void performRequest(final Collection<String> subredditCanonicalIds, final TimestampBound timestampBound,
        final RequestResponseHandler<HashMap<String, RedditSubreddit>, SubredditRequestFailure> handler) {

    // TODO if there's a bulk API to do this, that would be good... :)

    final HashMap<String, RedditSubreddit> result = new HashMap<String, RedditSubreddit>();
    final AtomicBoolean stillOkay = new AtomicBoolean(true);
    final AtomicInteger requestsToGo = new AtomicInteger(subredditCanonicalIds.size());
    final AtomicLong oldestResult = new AtomicLong(Long.MAX_VALUE);

    final RequestResponseHandler<RedditSubreddit, SubredditRequestFailure> innerHandler = new RequestResponseHandler<RedditSubreddit, SubredditRequestFailure>() {
        @Override
        public void onRequestFailed(SubredditRequestFailure failureReason) {
            synchronized (result) {
                if (stillOkay.get()) {
                    stillOkay.set(false);
                    handler.onRequestFailed(failureReason);
                }
            }
        }

        @Override
        public void onRequestSuccess(RedditSubreddit innerResult, long timeCached) {
            synchronized (result) {
                if (stillOkay.get()) {

                    result.put(innerResult.getKey(), innerResult);
                    oldestResult.set(Math.min(oldestResult.get(), timeCached));

                    if (requestsToGo.decrementAndGet() == 0) {
                        handler.onRequestSuccess(result, oldestResult.get());
                    }
                }
            }
        }
    };

    for (String subredditCanonicalId : subredditCanonicalIds) {
        performRequest(subredditCanonicalId, timestampBound, innerHandler);
    }
}
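
A note on the pattern above: stillOkay is only read and written inside a synchronized block, so the plain get()/set() pair is race-free. In lock-free code, the same "report only the first failure" logic is usually expressed with compareAndSet. A minimal illustrative sketch (the class and method names here are invented for this page):

import java.util.concurrent.atomic.AtomicBoolean;

public class FirstFailureExample {
    private final AtomicBoolean failed = new AtomicBoolean(false);

    // compareAndSet flips false -> true atomically, so only the first
    // caller wins; later callers see the flag already set and skip reporting.
    public void onFailure(String reason) {
        if (failed.compareAndSet(false, true)) {
            System.out.println("reporting first failure: " + reason);
        }
    }

    public boolean hasFailed() {
        return failed.get();
    }

    public static void main(String[] args) {
        FirstFailureExample ex = new FirstFailureExample();
        ex.onFailure("a");                    // reported
        ex.onFailure("b");                    // ignored
        System.out.println(ex.hasFailed());   // true
    }
}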

From source file:com.sixt.service.framework.kafka.messaging.KafkaFailoverIntegrationTest.java

@Test
public void manualKafkaTest() throws InterruptedException {

    ServiceProperties serviceProperties = fillServiceProperties();

    // Topics are created with 3 partitions - see docker-compose-kafkafailover-integrationtest.yml
    Topic ping = new Topic("ping");
    Topic pong = new Topic("pong");

    AtomicInteger sentMessages = new AtomicInteger(0);
    AtomicInteger sendFailures = new AtomicInteger(0);
    AtomicInteger receivedMessages = new AtomicInteger(0);

    Producer producer = new ProducerFactory(serviceProperties).createProducer();

    final AtomicBoolean produceMessages = new AtomicBoolean(true);

    // Produce messages until test tells producer to stop.
    ExecutorService producerExecutor = Executors.newSingleThreadExecutor();
    producerExecutor.submit(new Runnable() {
        @Override
        public void run() {
            OrangeContext context = new OrangeContext();
            Sleeper sleeper = new Sleeper();

            while (produceMessages.get()) {
                try {

                    String key = RandomStringUtils.randomAscii(5);
                    SayHelloToCmd payload = SayHelloToCmd.newBuilder().setName(key).build();

                    Message request = Messages.requestFor(ping, pong, key, payload, context);
                    producer.send(request);
                    sentMessages.incrementAndGet();

                    sleeper.sleepNoException(1000);
                } catch (Throwable t) {
                    sendFailures.incrementAndGet();
                    logger.error("Caught exception in producer loop", t);
                }
            }
        }
    });

    Consumer consumer = consumerFactoryWithHandler(serviceProperties, SayHelloToCmd.class,
            new MessageHandler<SayHelloToCmd>() {
                @Override
                public void onMessage(Message<SayHelloToCmd> message, OrangeContext context) {
                    receivedMessages.incrementAndGet();
                }
            }).consumerForTopic(ping, new DiscardFailedMessages());

    // Wait to allow manual fiddling with Kafka. Sync with global test timeout above.
    Thread.sleep(2 * 60 * 1000);

    produceMessages.set(false);
    producer.shutdown();

    Thread.sleep(10_000);

    consumer.shutdown();

    logger.info("sentMessages: " + sentMessages.get());
    logger.info("sendFailures: " + sendFailures.get());
    logger.info("recievedMessages: " + recievedMessages.get());
}

From source file:org.apache.hadoop.yarn.server.resourcemanager.recovery.TestZKRMStateStoreZKClientConnections.java

@Test(timeout = 20000)
public void testZKClientRetry() throws Exception {
    TestZKClient zkClientTester = new TestZKClient();
    final String path = "/test";
    YarnConfiguration conf = new YarnConfiguration();
    conf.setInt(YarnConfiguration.RM_ZK_TIMEOUT_MS, ZK_TIMEOUT_MS);
    conf.setLong(YarnConfiguration.RM_ZK_RETRY_INTERVAL_MS, 100);
    final ZKRMStateStore store = (ZKRMStateStore) zkClientTester.getRMStateStore(conf);
    TestDispatcher dispatcher = new TestDispatcher();
    store.setRMDispatcher(dispatcher);
    final AtomicBoolean assertionFailedInThread = new AtomicBoolean(false);

    testingServer.stop();
    Thread clientThread = new Thread() {
        @Override
        public void run() {
            try {
                store.getData(path);
            } catch (Exception e) {
                e.printStackTrace();
                assertionFailedInThread.set(true);
            }
        }
    };
    clientThread.start(); // the thread must be started, or join() below returns immediately
    Thread.sleep(2000);
    testingServer.start();
    clientThread.join();
    Assert.assertFalse(assertionFailedInThread.get());
}

From source file:com.netflix.dyno.connectionpool.impl.lb.CircularListTest.java

@Test
public void testSingleThreadWithElementAdd() throws Exception {

    final AtomicBoolean stop = new AtomicBoolean(false);

    Future<Map<Integer, Integer>> future = threadPool.submit(new Callable<Map<Integer, Integer>>() {

        @Override
        public Map<Integer, Integer> call() throws Exception {

            TestWorker worker = new TestWorker();

            while (!stop.get()) {
                worker.process();
            }

            return worker.map;
        }
    });

    Thread.sleep(500);

    List<Integer> newList = new ArrayList<Integer>();
    newList.addAll(iList);
    for (int i = 10; i < 15; i++) {
        newList.add(i);
    }

    cList.swapWithList(newList);

    Thread.sleep(100);

    stop.set(true);

    Map<Integer, Integer> result = future.get();

    Map<Integer, Integer> subMap = CollectionUtils.filterKeys(result, new Predicate<Integer>() {
        @Override
        public boolean apply(Integer input) {
            return input != null && input < 10;
        }
    });

    List<Integer> list = new ArrayList<Integer>(subMap.values());
    checkValues(list);

    subMap = CollectionUtils.difference(result, subMap).entriesOnlyOnLeft();
    list = new ArrayList<Integer>(subMap.values());
    checkValues(list);
}

From source file:io.pravega.client.stream.impl.ReaderGroupStateManager.java

/**
 * Handles a segment being completed by calling the controller to gather all successors to the completed segment.
 */
void handleEndOfSegment(Segment segmentCompleted) throws ReinitializationRequiredException {
    val successors = getAndHandleExceptions(controller.getSuccessors(segmentCompleted), RuntimeException::new);
    AtomicBoolean reinitRequired = new AtomicBoolean(false);
    sync.updateState(state -> {
        if (!state.isReaderOnline(readerId)) {
            reinitRequired.set(true);
            return null;
        }
        return Collections.singletonList(
                new SegmentCompleted(readerId, segmentCompleted, successors.getSegmentToPredecessor()));
    });
    if (reinitRequired.get()) {
        throw new ReinitializationRequiredException();
    }
    acquireTimer.zero();
}

From source file:io.pravega.client.stream.impl.ReaderGroupStateManager.java

void checkpoint(String checkpointName, PositionInternal lastPosition) throws ReinitializationRequiredException {
    AtomicBoolean reinitRequired = new AtomicBoolean(false);
    sync.updateState(state -> {
        if (!state.isReaderOnline(readerId)) {
            reinitRequired.set(true);
            return null;
        }
        return Collections.singletonList(
                new CheckpointReader(checkpointName, readerId, lastPosition.getOwnedSegmentsWithOffsets()));
    });
    if (reinitRequired.get()) {
        throw new ReinitializationRequiredException();
    }
}
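
Both ReaderGroupStateManager methods above use the same trick: a local AtomicBoolean serves as an effectively final, mutable result holder that a lambda can write and the enclosing method can read back with get() afterwards. A minimal illustrative sketch of that idiom (not Pravega API; all names here are invented):

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.IntConsumer;

public class LambdaFlagExample {
    public static void main(String[] args) {
        // A plain boolean local cannot be assigned inside a lambda
        // (captured locals must be effectively final); the AtomicBoolean
        // reference stays final while its value changes.
        AtomicBoolean sawNegative = new AtomicBoolean(false);

        IntConsumer validator = value -> {
            if (value < 0) {
                sawNegative.set(true);
            }
        };

        validator.accept(-1);

        // As in the methods above, the flag is checked after the lambda has run.
        if (sawNegative.get()) {
            System.out.println("negative value encountered");
        }
    }
}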

From source file:org.apache.hadoop.hbase.master.procedure.TestMasterProcedureQueue.java

@Test
public void testConcurrentCreateDelete() throws Exception {
    final MasterProcedureQueue procQueue = queue;
    final TableName table = TableName.valueOf("testtb");
    final AtomicBoolean running = new AtomicBoolean(true);
    final AtomicBoolean failure = new AtomicBoolean(false);
    Thread createThread = new Thread() {
        @Override
        public void run() {
            try {
                while (running.get() && !failure.get()) {
                    if (procQueue.tryAcquireTableExclusiveLock(table, "create")) {
                        procQueue.releaseTableExclusiveLock(table);
                    }
                }
            } catch (Throwable e) {
                LOG.error("create failed", e);
                failure.set(true);
            }
        }
    };

    Thread deleteThread = new Thread() {
        @Override
        public void run() {
            try {
                while (running.get() && !failure.get()) {
                    if (procQueue.tryAcquireTableExclusiveLock(table, "delete")) {
                        procQueue.releaseTableExclusiveLock(table);
                    }
                    procQueue.markTableAsDeleted(table);
                }
            } catch (Throwable e) {
                LOG.error("delete failed", e);
                failure.set(true);
            }
        }
    };

    createThread.start();
    deleteThread.start();
    for (int i = 0; i < 100 && running.get() && !failure.get(); ++i) {
        Thread.sleep(100);
    }
    running.set(false);
    createThread.join();
    deleteThread.join();
    assertEquals(false, failure.get());
}