Example usage for java.util.concurrent.atomic AtomicBoolean set

Introduction

On this page you can find example usage of java.util.concurrent.atomic.AtomicBoolean.set.

Prototype

public final void set(boolean newValue) 

Document

Sets the value to newValue, with memory effects as specified by VarHandle#setVolatile.
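
As a quick, self-contained illustration of these memory effects (a minimal sketch, not taken from the usage examples below): a value written with set(...) is published with volatile semantics, so another thread polling get() is guaranteed to observe the change.

import java.util.concurrent.atomic.AtomicBoolean;

public class AtomicBooleanSetDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean running = new AtomicBoolean(true);

        Thread worker = new Thread(() -> {
            long iterations = 0;
            // get() has volatile-read semantics, so this loop is guaranteed
            // to see the set(false) performed by the main thread below.
            while (running.get()) {
                iterations++;
            }
            System.out.println("Worker stopped after " + iterations + " iterations");
        });
        worker.start();

        Thread.sleep(100);
        running.set(false); // volatile-write: the worker observes the new value
        worker.join();
    }
}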

Usage

From source file:com.sixt.service.framework.kafka.messaging.KafkaIntegrationTest.java

@Ignore("long running test")
@Test
public void partitionAssignmentChange() throws InterruptedException {
    ServiceProperties serviceProperties = new ServiceProperties();
    serviceProperties.initialize(new String[] {}); // Reads environment variables set by DockerComposeHelper

    // Topics are created with 3 partitions - see docker-compose-integrationtest.yml
    Topic ping = new Topic("ping");
    Topic pong = new Topic("pong");

    Producer producer = new ProducerFactory(serviceProperties).createProducer();

    final AtomicBoolean produceMessages = new AtomicBoolean(true);
    final AtomicInteger sentMessages = new AtomicInteger(0);

    final AtomicInteger receivedMessagesConsumer1 = new AtomicInteger(0);
    final CountDownLatch firstMessageProcessedConsumer1 = new CountDownLatch(1);

    final AtomicInteger receivedMessagesConsumer2 = new AtomicInteger(0);
    final CountDownLatch firstMessageProcessedConsumer2 = new CountDownLatch(1);

    final AtomicInteger receivedMessagesConsumer3 = new AtomicInteger(0);
    final CountDownLatch firstMessageProcessedConsumer3 = new CountDownLatch(1);

    // Produce messages until test tells producer to stop.
    ExecutorService producerExecutor = Executors.newSingleThreadExecutor();
    producerExecutor.submit(new Runnable() {
        @Override
        public void run() {
            OrangeContext context = new OrangeContext();
            Sleeper sleeper = new Sleeper();

            try {
                while (produceMessages.get()) {
                    String key = RandomStringUtils.randomAscii(5);
                    SayHelloToCmd payload = SayHelloToCmd.newBuilder().setName(key).build();

                    Message request = Messages.requestFor(ping, pong, key, payload, context);

                    producer.send(request);
                    sentMessages.incrementAndGet();

                    sleeper.sleepNoException(250);
                }
            } catch (Throwable t) {
                logger.error("Exception in producer loop", t);
            }
        }
    });

    // Start the first consumer. It should get all 3 partitions assigned.
    Consumer consumer1 = consumerFactoryWithHandler(serviceProperties, SayHelloToCmd.class,
            new MessageHandler<SayHelloToCmd>() {
                @Override
                public void onMessage(Message<SayHelloToCmd> message, OrangeContext context) {
                    receivedMessagesConsumer1.incrementAndGet();
                    firstMessageProcessedConsumer1.countDown();
                }
            }).consumerForTopic(ping, new DiscardFailedMessages());

    // wait until consumer 1 is up.
    firstMessageProcessedConsumer1.await();
    Thread.sleep(5000); // consume some messages

    // Now, start the second consumer. It should get at least one partition assigned.
    Consumer consumer2 = consumerFactoryWithHandler(serviceProperties, SayHelloToCmd.class,
            new MessageHandler<SayHelloToCmd>() {
                @Override
                public void onMessage(Message<SayHelloToCmd> message, OrangeContext context) {
                    receivedMessagesConsumer2.incrementAndGet();
                    firstMessageProcessedConsumer2.countDown();
                }
            }).consumerForTopic(ping, new DiscardFailedMessages());

    // wait until the second consumer is up.
    firstMessageProcessedConsumer2.await();
    Thread.sleep(5000); // let both consumers run a bit

    brutallyKillConsumer("pool-14-thread-1"); // consumer2 thread, HACKY: if this is too brittle, change the test to shutdown()

    // Need to wait a bit longer while Kafka "restabilizes the group" after consumer 2 was killed.
    // -> Consumer 1 should now get all three partitions back again.
    Thread.sleep(30000); // must be > than max.poll.interval.ms

    // Now, start the third consumer. It should get at least one partition assigned.
    Consumer consumer3 = consumerFactoryWithHandler(serviceProperties, SayHelloToCmd.class,
            new MessageHandler<SayHelloToCmd>() {
                @Override
                public void onMessage(Message<SayHelloToCmd> message, OrangeContext context) {
                    receivedMessagesConsumer3.incrementAndGet();
                    firstMessageProcessedConsumer3.countDown();
                }
            }).consumerForTopic(ping, new DiscardFailedMessages());
    firstMessageProcessedConsumer3.await();
    Thread.sleep(5000);

    // Now shut down the first consumer.
    consumer1.shutdown();
    Thread.sleep(10000);

    // Stop the producer.
    produceMessages.set(false);
    producer.shutdown();
    producerExecutor.shutdown();

    Thread.sleep(3000); // give the remaining consumer the chance to consume all messages
    consumer3.shutdown(); // no assignment any longer

    // Finally, the assertions:
    int receivedMessagesTotal = receivedMessagesConsumer1.get() + receivedMessagesConsumer2.get()
            + receivedMessagesConsumer3.get();
    assertEquals(sentMessages.get(), receivedMessagesTotal);

    assertTrue(receivedMessagesConsumer1.get() > 0);
    assertTrue(receivedMessagesConsumer2.get() > 0);
    assertTrue(receivedMessagesConsumer3.get() > 0);
}
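
The AtomicBoolean in this test is a plain stop flag: the producer loop keeps running while produceMessages.get() is true, and the test ends the loop with produceMessages.set(false) before shutting everything down. Stripped of the Kafka machinery, the pattern reduces to the following self-contained sketch (the sleep stands in for producer.send(...) and the pacing; none of the framework types are used).

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

public class StopFlagProducer {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean produceMessages = new AtomicBoolean(true);
        final AtomicInteger sentMessages = new AtomicInteger(0);

        ExecutorService producerExecutor = Executors.newSingleThreadExecutor();
        producerExecutor.submit(() -> {
            // Keep "sending" until the flag is flipped by the main thread.
            while (produceMessages.get()) {
                sentMessages.incrementAndGet();
                try {
                    Thread.sleep(10); // stand-in for producer.send(...) plus pacing
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    return;
                }
            }
        });

        Thread.sleep(200);            // let the producer loop run for a while
        produceMessages.set(false);   // tell the loop to stop
        producerExecutor.shutdown();
        producerExecutor.awaitTermination(1, TimeUnit.SECONDS);
        System.out.println("Sent " + sentMessages.get() + " messages");
    }
}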

From source file:org.elasticsearch.client.sniff.SnifferTests.java

/**
 * Test behaviour when a bunch of onFailure sniffing rounds are triggered in parallel. Each run will always
 * schedule a subsequent afterFailure round. Also, for each onFailure round that starts, the next scheduled round
 * (either afterFailure or ordinary) gets cancelled.
 */
public void testSniffOnFailure() throws Exception {
    RestClient restClient = mock(RestClient.class);
    CountingHostsSniffer hostsSniffer = new CountingHostsSniffer();
    final AtomicBoolean initializing = new AtomicBoolean(true);
    final long sniffInterval = randomLongBetween(1, Long.MAX_VALUE);
    final long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE);
    int minNumOnFailureRounds = randomIntBetween(5, 10);
    final CountDownLatch initializingLatch = new CountDownLatch(1);
    final Set<Sniffer.ScheduledTask> ordinaryRoundsTasks = new CopyOnWriteArraySet<>();
    final AtomicReference<Future<?>> initializingFuture = new AtomicReference<>();
    final Set<Sniffer.ScheduledTask> onFailureTasks = new CopyOnWriteArraySet<>();
    final Set<Sniffer.ScheduledTask> afterFailureTasks = new CopyOnWriteArraySet<>();
    final AtomicBoolean onFailureCompleted = new AtomicBoolean(false);
    final CountDownLatch completionLatch = new CountDownLatch(1);
    final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    try {
        Scheduler scheduler = new Scheduler() {
            @Override
            public Future<?> schedule(final Sniffer.Task task, long delayMillis) {
                if (initializing.compareAndSet(true, false)) {
                    assertEquals(0L, delayMillis);
                    Future<?> future = executor.submit(new Runnable() {
                        @Override
                        public void run() {
                            try {
                                task.run();
                            } finally {
                                //we need to make sure that the sniffer is initialized, so the sniffOnFailure
                                //call does what it needs to do. Otherwise nothing happens until initialized.
                                initializingLatch.countDown();
                            }
                        }
                    });
                    assertTrue(initializingFuture.compareAndSet(null, future));
                    return future;
                }
                if (delayMillis == 0L) {
                    Future<?> future = executor.submit(task);
                    onFailureTasks.add(new Sniffer.ScheduledTask(task, future));
                    return future;
                }
                if (delayMillis == sniffAfterFailureDelay) {
                    Future<?> future = scheduleOrSubmit(task);
                    afterFailureTasks.add(new Sniffer.ScheduledTask(task, future));
                    return future;
                }

                assertEquals(sniffInterval, delayMillis);
                assertEquals(sniffInterval, task.nextTaskDelay);

                if (onFailureCompleted.get() && onFailureTasks.size() == afterFailureTasks.size()) {
                    completionLatch.countDown();
                    return mock(Future.class);
                }

                Future<?> future = scheduleOrSubmit(task);
                ordinaryRoundsTasks.add(new Sniffer.ScheduledTask(task, future));
                return future;
            }

            private Future<?> scheduleOrSubmit(Sniffer.Task task) {
                if (randomBoolean()) {
                    return executor.schedule(task, randomLongBetween(0L, 200L), TimeUnit.MILLISECONDS);
                } else {
                    return executor.submit(task);
                }
            }

            @Override
            public void shutdown() {
            }
        };
        final Sniffer sniffer = new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval,
                sniffAfterFailureDelay);
        assertTrue("timeout waiting for sniffer to get initialized",
                initializingLatch.await(1000, TimeUnit.MILLISECONDS));

        ExecutorService onFailureExecutor = Executors.newFixedThreadPool(randomIntBetween(5, 20));
        Set<Future<?>> onFailureFutures = new CopyOnWriteArraySet<>();
        try {
            //with tasks executing quickly one after another, it is very likely that the onFailure round gets skipped
            //as another round is already running. We retry until enough runs get through, as that's what we want to test.
            while (onFailureTasks.size() < minNumOnFailureRounds) {
                onFailureFutures.add(onFailureExecutor.submit(new Runnable() {
                    @Override
                    public void run() {
                        sniffer.sniffOnFailure();
                    }
                }));
            }
            assertThat(onFailureFutures.size(), greaterThanOrEqualTo(minNumOnFailureRounds));
            for (Future<?> onFailureFuture : onFailureFutures) {
                assertNull(onFailureFuture.get());
            }
            onFailureCompleted.set(true);
        } finally {
            onFailureExecutor.shutdown();
            onFailureExecutor.awaitTermination(1000, TimeUnit.MILLISECONDS);
        }

        assertFalse(initializingFuture.get().isCancelled());
        assertTrue(initializingFuture.get().isDone());
        assertNull(initializingFuture.get().get());

        assertTrue("timeout waiting for sniffing rounds to be completed",
                completionLatch.await(1000, TimeUnit.MILLISECONDS));
        assertThat(onFailureTasks.size(), greaterThanOrEqualTo(minNumOnFailureRounds));
        assertEquals(onFailureTasks.size(), afterFailureTasks.size());

        for (Sniffer.ScheduledTask onFailureTask : onFailureTasks) {
            assertFalse(onFailureTask.future.isCancelled());
            assertTrue(onFailureTask.future.isDone());
            assertNull(onFailureTask.future.get());
            assertTrue(onFailureTask.task.hasStarted());
            assertFalse(onFailureTask.task.isSkipped());
        }

        int cancelledTasks = 0;
        int completedTasks = onFailureTasks.size() + 1;
        for (Sniffer.ScheduledTask afterFailureTask : afterFailureTasks) {
            if (assertTaskCancelledOrCompleted(afterFailureTask)) {
                completedTasks++;
            } else {
                cancelledTasks++;
            }
        }

        assertThat(ordinaryRoundsTasks.size(), greaterThan(0));
        for (Sniffer.ScheduledTask task : ordinaryRoundsTasks) {
            if (assertTaskCancelledOrCompleted(task)) {
                completedTasks++;
            } else {
                cancelledTasks++;
            }
        }
        assertEquals(onFailureTasks.size(), cancelledTasks);

        assertEquals(completedTasks, hostsSniffer.runs.get());
        int setHostsRuns = hostsSniffer.runs.get() - hostsSniffer.failures.get() - hostsSniffer.emptyList.get();
        verify(restClient, times(setHostsRuns)).setHosts(Matchers.<HttpHost>anyVararg());
        verifyNoMoreInteractions(restClient);
    } finally {
        executor.shutdown();
        executor.awaitTermination(1000L, TimeUnit.MILLISECONDS);
    }
}
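
Two idioms carry this test: initializing.compareAndSet(true, false) ensures only the very first schedule() call takes the initialization path, while a plain onFailureCompleted.set(true) marks a phase transition that other code only reads. A minimal standalone sketch of the two idioms (the class and method names below are illustrative, not part of the Elasticsearch client):

import java.util.concurrent.atomic.AtomicBoolean;

public class OneShotPhaseFlags {
    private final AtomicBoolean initializing = new AtomicBoolean(true);
    private final AtomicBoolean completed = new AtomicBoolean(false);

    void onEvent() {
        // compareAndSet makes the initialization branch run exactly once,
        // no matter how many threads call onEvent() concurrently.
        if (initializing.compareAndSet(true, false)) {
            System.out.println("first event: initializing");
            return;
        }
        if (completed.get()) {
            System.out.println("already completed, ignoring event");
        }
    }

    void finish() {
        // A plain set(true) is enough when only one thread flips the flag
        // and everyone else merely reads it.
        completed.set(true);
    }

    public static void main(String[] args) {
        OneShotPhaseFlags flags = new OneShotPhaseFlags();
        flags.onEvent();  // takes the initialization branch
        flags.onEvent();  // ordinary event, completed is still false
        flags.finish();
        flags.onEvent();  // now observes completed == true
    }
}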

From source file:com.machinepublishers.jbrowserdriver.JBrowserDriver.java

private String launchProcess(final Settings settings, final PortGroup portGroup) {
    final AtomicBoolean ready = new AtomicBoolean();
    final AtomicReference<String> logPrefix = new AtomicReference<String>("");
    new Thread(new Runnable() {
        @Override
        public void run() {
            List<String> myArgs = new ArrayList<String>();
            myArgs.add(settings.javaBinary() == null ? JAVA_BIN : settings.javaBinary());
            myArgs.addAll(inheritedArgs);
            if (!settings.customClasspath()) {
                myArgs.addAll(classpathArgs.get());
            }
            if (settings.javaExportModules()) {
                myArgs.add("-XaddExports:javafx.web/com.sun.webkit.network=ALL-UNNAMED");
                myArgs.add("-XaddExports:javafx.web/com.sun.webkit.network.about=ALL-UNNAMED");
                myArgs.add("-XaddExports:javafx.web/com.sun.webkit.network.data=ALL-UNNAMED");
                myArgs.add("-XaddExports:java.base/sun.net.www.protocol.http=ALL-UNNAMED");
                myArgs.add("-XaddExports:java.base/sun.net.www.protocol.https=ALL-UNNAMED");
                myArgs.add("-XaddExports:java.base/sun.net.www.protocol.file=ALL-UNNAMED");
                myArgs.add("-XaddExports:java.base/sun.net.www.protocol.ftp=ALL-UNNAMED");
                myArgs.add("-XaddExports:java.base/sun.net.www.protocol.jar=ALL-UNNAMED");
                myArgs.add("-XaddExports:java.base/sun.net.www.protocol.mailto=ALL-UNNAMED");
                myArgs.add("-XaddExports:javafx.graphics/com.sun.glass.ui=ALL-UNNAMED");
                myArgs.add("-XaddExports:javafx.web/com.sun.javafx.webkit=ALL-UNNAMED");
                myArgs.add("-XaddExports:javafx.web/com.sun.webkit=ALL-UNNAMED");
            }
            myArgs.add("-Djava.io.tmpdir=" + tmpDir.getAbsolutePath());
            myArgs.add("-Djava.rmi.server.hostname=" + settings.host());
            myArgs.addAll(settings.javaOptions());
            myArgs.add(JBrowserDriverServer.class.getName());
            myArgs.add(Long.toString(portGroup.child));
            myArgs.add(Long.toString(portGroup.parent));
            myArgs.add(Long.toString(portGroup.parentAlt));
            try {
                new ProcessExecutor().addListener(new ProcessListener() {
                    @Override
                    public void afterStart(Process proc, ProcessExecutor executor) {
                        process.set(proc);
                    }
                }).redirectOutput(new LogOutputStream() {
                    boolean done = false;

                    @Override
                    protected void processLine(String line) {
                        if (line != null && !line.isEmpty()) {
                            if (!done) {
                                synchronized (ready) {
                                    if (line.startsWith("ready on ports ")) {
                                        String[] parts = line.substring("ready on ports ".length()).split("/");
                                        actualPortGroup.set(new PortGroup(Integer.parseInt(parts[0]),
                                                Integer.parseInt(parts[1]), Integer.parseInt(parts[2])));
                                        logPrefix.set(new StringBuilder().append("[Instance ")
                                                .append(sessionIdCounter.incrementAndGet()).append("][Port ")
                                                .append(actualPortGroup.get().child).append("]").toString());
                                        ready.set(true);
                                        ready.notifyAll();
                                        done = true;
                                    } else {
                                        log(settings.logger(), logPrefix.get(), line);
                                    }
                                }
                            } else {
                                log(settings.logger(), logPrefix.get(), line);
                            }
                        }
                    }
                }).redirectError(new LogOutputStream() {
                    @Override
                    protected void processLine(String line) {
                        log(settings.logger(), logPrefix.get(), line);
                    }
                }).destroyOnExit().command(myArgs).execute();
            } catch (Throwable t) {
                Util.handleException(t);
            }
            synchronized (ready) {
                ready.set(true);
                ready.notifyAll();
            }
        }
    }).start();
    synchronized (ready) {
        while (!ready.get()) {
            try {
                ready.wait();
                break;
            } catch (InterruptedException e) {
            }
        }
    }
    return logPrefix.get();
}
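
launchProcess combines set(true) with intrinsic locking on the AtomicBoolean itself, so the calling thread can block in wait() until the child process reports readiness (or the launcher thread exits). A reduced, self-contained sketch of that readiness handshake, with a background thread standing in for the external process:

import java.util.concurrent.atomic.AtomicBoolean;

public class ReadyFlagDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean ready = new AtomicBoolean();

        new Thread(() -> {
            try {
                Thread.sleep(200); // stand-in for starting an external process
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            synchronized (ready) {
                ready.set(true);   // publish readiness...
                ready.notifyAll(); // ...and wake any thread blocked in wait()
            }
        }).start();

        synchronized (ready) {
            while (!ready.get()) { // guard against spurious wakeups
                ready.wait();
            }
        }
        System.out.println("child reported ready");
    }
}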

From source file:com.cloudera.whirr.cm.server.impl.CmServerImpl.java

@Override
@CmServerCommandMethod(name = "client")
public boolean getServiceConfigs(final CmServerCluster cluster, final File directory) throws CmServerException {

    final AtomicBoolean executed = new AtomicBoolean(false);
    try {

        if (isProvisioned(cluster)) {
            logger.logOperation("GetConfig", new CmServerLogSyncCommand() {
                @Override
                public void execute() throws IOException {
                    for (ApiService apiService : apiResourceRootV3.getClustersResource()
                            .getServicesResource(getName(cluster)).readServices(DataView.SUMMARY)) {
                        CmServerServiceType type = CmServerServiceType.valueOfId(apiService.getType());
                        if (type.equals(CmServerServiceType.HDFS) || type.equals(CmServerServiceType.MAPREDUCE)
                                || type.equals(CmServerServiceType.YARN)
                                || type.equals(CmServerServiceType.HBASE)
                                || versionApi >= 4 && type.equals(CmServerServiceType.HIVE)
                                || versionApi >= 5 && type.equals(CmServerServiceType.SOLR)) {
                            ZipInputStream configInputZip = null;
                            try {
                                InputStreamDataSource configInput = apiResourceRootV3.getClustersResource()
                                        .getServicesResource(getName(cluster))
                                        .getClientConfig(apiService.getName());
                                if (configInput != null) {
                                    configInputZip = new ZipInputStream(configInput.getInputStream());
                                    ZipEntry configInputZipEntry = null;
                                    while ((configInputZipEntry = configInputZip.getNextEntry()) != null) {
                                        String configFile = configInputZipEntry.getName();
                                        if (configFile.contains(File.separator)) {
                                            configFile = configFile.substring(
                                                    configFile.lastIndexOf(File.separator),
                                                    configFile.length());
                                        }
                                        directory.mkdirs();
                                        BufferedWriter configOutput = null;
                                        try {
                                            int read;
                                            configOutput = new BufferedWriter(
                                                    new FileWriter(new File(directory, configFile)));
                                            while (configInputZip.available() > 0) {
                                                if ((read = configInputZip.read()) != -1) {
                                                    configOutput.write(read);
                                                }
                                            }
                                        } finally {
                                            configOutput.close();
                                        }
                                    }
                                }
                            } finally {
                                if (configInputZip != null) {
                                    configInputZip.close();
                                }
                            }
                            executed.set(true);
                        }
                    }
                }
            });
        }

    } catch (Exception e) {
        throw new CmServerException("Failed to get cluster config", e);
    }

    return executed.get();

}
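
Here the AtomicBoolean mostly works around the rule that locals captured by an anonymous inner class must be (effectively) final: the callback calls executed.set(true) and the enclosing method returns executed.get(). A minimal sketch of that result-reporting pattern (the Command interface and runLogged helper are illustrative stand-ins, not the CmServerLogSyncCommand API):

import java.util.concurrent.atomic.AtomicBoolean;

public class CallbackResultFlag {

    interface Command {
        void execute() throws Exception;
    }

    static void runLogged(String name, Command command) throws Exception {
        System.out.println("running " + name);
        command.execute();
    }

    static boolean doWork() throws Exception {
        // Locals captured by an anonymous class must be (effectively) final,
        // so a mutable AtomicBoolean carries a result out of the callback.
        final AtomicBoolean executed = new AtomicBoolean(false);
        runLogged("GetConfig", new Command() {
            @Override
            public void execute() {
                // ... do the actual work here ...
                executed.set(true);
            }
        });
        return executed.get();
    }

    public static void main(String[] args) throws Exception {
        System.out.println("executed = " + doWork());
    }
}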

From source file:org.apache.hadoop.hbase.regionserver.TestHRegion.java

/**
 * Test case to check increment function with memstore flushing
 * @throws Exception
 */
@Test
public void testParallelIncrementWithMemStoreFlush() throws Exception {
    byte[] family = Incrementer.family;
    this.region = initHRegion(tableName, method, CONF, family);
    final HRegion region = this.region;
    final AtomicBoolean incrementDone = new AtomicBoolean(false);
    Runnable flusher = new Runnable() {
        @Override
        public void run() {
            while (!incrementDone.get()) {
                try {
                    region.flushcache();
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        }
    };

    // after all increments have finished, the row will have been incremented to 20 * 100 = 2000
    int threadNum = 20;
    int incCounter = 100;
    long expected = threadNum * incCounter;
    Thread[] incrementers = new Thread[threadNum];
    Thread flushThread = new Thread(flusher);
    for (int i = 0; i < threadNum; i++) {
        incrementers[i] = new Thread(new Incrementer(this.region, incCounter));
        incrementers[i].start();
    }
    flushThread.start();
    for (int i = 0; i < threadNum; i++) {
        incrementers[i].join();
    }

    incrementDone.set(true);
    flushThread.join();

    Get get = new Get(Incrementer.incRow);
    get.addColumn(Incrementer.family, Incrementer.qualifier);
    get.setMaxVersions(1);
    Result res = this.region.get(get);
    List<Cell> kvs = res.getColumnCells(Incrementer.family, Incrementer.qualifier);

    // we just got the latest version
    assertEquals(kvs.size(), 1);
    Cell kv = kvs.get(0);
    assertEquals(expected, Bytes.toLong(kv.getValueArray(), kv.getValueOffset()));
    this.region = null;
}

From source file:com.microsoft.tfs.core.clients.versioncontrol.VersionControlClient.java

/**
 * Gets {@link ServerSettings} from the server if available. If not, it will
 * return a settings object with appropriate defaults.
 *
 * @param fallbackUsed
 *        Returns true if the default was used due to the server not
 *        supporting this feature
 */
public ServerSettings getServerSettingsWithFallback(final AtomicBoolean fallbackUsed) {
    if (serverSettings == null) {
        synchronized (serverSettingsLock) {
            if (serverSettings == null) {
                fallbackUsed.set(false);

                serverSettings = getWebServiceLayer().getServerSettings();
                if (serverSettings == null) {
                    // If settings are null, that indicates that we are
                    // talking to a server that doesn't support this web
                    // method.
                    fallbackUsed.set(true);
                    serverSettings = new ServerSettings(WorkspaceLocation.SERVER);
                }
            }
        }
    }

    return serverSettings;
}
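
In this method the AtomicBoolean acts as an out parameter: the caller gets the settings as the return value and learns via fallbackUsed whether a default had to be substituted. A small self-contained sketch of the same idiom (lookupSetting and its key are made up for illustration):

import java.util.concurrent.atomic.AtomicBoolean;

public class OutParameterDemo {

    // Returns a value and reports, via the AtomicBoolean, whether a fallback was used.
    static String lookupSetting(String key, AtomicBoolean fallbackUsed) {
        String value = System.getProperty(key);
        if (value == null) {
            fallbackUsed.set(true);   // caller can tell the default was applied
            return "default";
        }
        fallbackUsed.set(false);
        return value;
    }

    public static void main(String[] args) {
        AtomicBoolean fallbackUsed = new AtomicBoolean();
        String value = lookupSetting("some.missing.key", fallbackUsed);
        System.out.println(value + " (fallback used: " + fallbackUsed.get() + ")");
    }
}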

From source file:org.apache.hadoop.hbase.regionserver.TestHRegion.java

/**
 * Test case to check append function with memstore flushing
 * @throws Exception
 */
@Test
public void testParallelAppendWithMemStoreFlush() throws Exception {
    byte[] family = Appender.family;
    this.region = initHRegion(tableName, method, CONF, family);
    final HRegion region = this.region;
    final AtomicBoolean appendDone = new AtomicBoolean(false);
    Runnable flusher = new Runnable() {
        @Override
        public void run() {
            while (!appendDone.get()) {
                try {
                    region.flushcache();
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        }
    };

    // after all appends have finished, the value will be threadNum * appendCounter
    // copies of Appender.CHAR
    int threadNum = 20;
    int appendCounter = 100;
    byte[] expected = new byte[threadNum * appendCounter];
    for (int i = 0; i < threadNum * appendCounter; i++) {
        System.arraycopy(Appender.CHAR, 0, expected, i, 1);
    }
    Thread[] appenders = new Thread[threadNum];
    Thread flushThread = new Thread(flusher);
    for (int i = 0; i < threadNum; i++) {
        appenders[i] = new Thread(new Appender(this.region, appendCounter));
        appenders[i].start();
    }
    flushThread.start();
    for (int i = 0; i < threadNum; i++) {
        appenders[i].join();
    }

    appendDone.set(true);
    flushThread.join();

    Get get = new Get(Appender.appendRow);
    get.addColumn(Appender.family, Appender.qualifier);
    get.setMaxVersions(1);
    Result res = this.region.get(get);
    List<Cell> kvs = res.getColumnCells(Appender.family, Appender.qualifier);

    // we just got the latest version
    assertEquals(kvs.size(), 1);
    Cell kv = kvs.get(0);
    byte[] appendResult = new byte[kv.getValueLength()];
    System.arraycopy(kv.getValueArray(), kv.getValueOffset(), appendResult, 0, kv.getValueLength());
    assertArrayEquals(expected, appendResult);
    this.region = null;
}

From source file:org.apache.hadoop.hbase.regionserver.TestHRegion.java

@Test
public void testGetWhileRegionClose() throws IOException {
    TableName tableName = TableName.valueOf(name.getMethodName());
    Configuration hc = initSplit();
    int numRows = 100;
    byte[][] families = { fam1, fam2, fam3 };

    // Setting up region
    String method = name.getMethodName();
    this.region = initHRegion(tableName, method, hc, families);
    try {
        // Put data in region
        final int startRow = 100;
        putData(startRow, numRows, qual1, families);
        putData(startRow, numRows, qual2, families);
        putData(startRow, numRows, qual3, families);
        final AtomicBoolean done = new AtomicBoolean(false);
        final AtomicInteger gets = new AtomicInteger(0);
        GetTillDoneOrException[] threads = new GetTillDoneOrException[10];
        try {
            // Set ten threads running concurrently getting from the region.
            for (int i = 0; i < threads.length / 2; i++) {
                threads[i] = new GetTillDoneOrException(i, Bytes.toBytes("" + startRow), done, gets);
                threads[i].setDaemon(true);
                threads[i].start();
            }
            // Artificially make the condition by setting closing flag explicitly.
            // I can't make the issue happen with a call to region.close().
            this.region.closing.set(true);
            for (int i = threads.length / 2; i < threads.length; i++) {
                threads[i] = new GetTillDoneOrException(i, Bytes.toBytes("" + startRow), done, gets);
                threads[i].setDaemon(true);
                threads[i].start();
            }
        } finally {
            if (this.region != null) {
                HRegion.closeHRegion(this.region);
            }
        }
        done.set(true);
        for (GetTillDoneOrException t : threads) {
            try {
                t.join();
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
            if (t.e != null) {
                LOG.info("Exception=" + t.e);
                assertFalse("Found a NPE in " + t.getName(), t.e instanceof NullPointerException);
            }
        }
    } finally {
        HRegion.closeHRegion(this.region);
        this.region = null;
    }
}

From source file:org.apache.hadoop.hive.metastore.HiveMetaStore.java

private static void signalOtherThreadsToStart(final TServer server, final Lock startLock,
        final Condition startCondition, final AtomicBoolean startedServing) {
    // A simple thread to wait until the server has started and then signal the other threads to
    // begin
    Thread t = new Thread() {
        @Override
        public void run() {
            do {
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    LOG.warn("Signalling thread was interuppted: " + e.getMessage());
                }
            } while (!server.isServing());
            startLock.lock();
            try {
                startedServing.set(true);
                startCondition.signalAll();
            } finally {
                startLock.unlock();
            }
        }
    };
    t.start();
}
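
startedServing.set(true) happens while the lock is held, immediately before startCondition.signalAll(), so a waiter that re-checks the flag inside its await loop cannot miss the transition. A compact sketch showing both sides of that handshake (the sleep stands in for waiting until the server is actually serving):

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class StartSignalDemo {
    public static void main(String[] args) throws InterruptedException {
        final Lock startLock = new ReentrantLock();
        final Condition startCondition = startLock.newCondition();
        final AtomicBoolean startedServing = new AtomicBoolean(false);

        // Waiter: blocks until the flag is set and the condition is signalled.
        Thread waiter = new Thread(() -> {
            startLock.lock();
            try {
                while (!startedServing.get()) {
                    startCondition.await();
                }
                System.out.println("server is serving, proceeding");
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            } finally {
                startLock.unlock();
            }
        });
        waiter.start();

        Thread.sleep(200); // stand-in for waiting until the server is actually up
        startLock.lock();
        try {
            startedServing.set(true);   // flip the flag while holding the lock...
            startCondition.signalAll(); // ...then wake all waiters
        } finally {
            startLock.unlock();
        }
        waiter.join();
    }
}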

From source file:org.apache.tez.dag.app.rm.TestTaskScheduler.java

@SuppressWarnings({ "unchecked" })
@Test(timeout = 10000)
public void testTaskSchedulerWithReuse() throws Exception {
    RackResolver.init(new YarnConfiguration());

    TezAMRMClientAsync<CookieContainerRequest> mockRMClient = mock(TezAMRMClientAsync.class);

    String appHost = "host";
    int appPort = 0;
    String appUrl = "url";

    Configuration conf = new Configuration();
    // to match all in the same pass
    conf.setLong(TezConfiguration.TEZ_AM_CONTAINER_REUSE_LOCALITY_DELAY_ALLOCATION_MILLIS, 0);
    // to release immediately after deallocate
    conf.setLong(TezConfiguration.TEZ_AM_CONTAINER_IDLE_RELEASE_TIMEOUT_MIN_MILLIS, 0);
    conf.setLong(TezConfiguration.TEZ_AM_CONTAINER_IDLE_RELEASE_TIMEOUT_MAX_MILLIS, 0);

    TaskSchedulerContext mockApp = setupMockTaskSchedulerContext(appHost, appPort, appUrl, conf);
    final TaskSchedulerContextDrainable drainableAppCallback = createDrainableContext(mockApp);

    TaskSchedulerWithDrainableContext scheduler = new TaskSchedulerWithDrainableContext(drainableAppCallback,
            mockRMClient);

    scheduler.initialize();
    drainableAppCallback.drain();

    RegisterApplicationMasterResponse mockRegResponse = mock(RegisterApplicationMasterResponse.class);
    Resource mockMaxResource = mock(Resource.class);
    Map<ApplicationAccessType, String> mockAcls = mock(Map.class);
    when(mockRegResponse.getMaximumResourceCapability()).thenReturn(mockMaxResource);
    when(mockRegResponse.getApplicationACLs()).thenReturn(mockAcls);
    when(mockRMClient.registerApplicationMaster(anyString(), anyInt(), anyString()))
            .thenReturn(mockRegResponse);
    Resource mockClusterResource = mock(Resource.class);
    when(mockRMClient.getAvailableResources()).thenReturn(mockClusterResource);

    scheduler.start();
    drainableAppCallback.drain();

    Object mockTask1 = mock(Object.class);
    when(mockTask1.toString()).thenReturn("task1");
    Object mockCookie1 = mock(Object.class);
    Resource mockCapability = mock(Resource.class);
    String[] hosts = { "host1", "host5" };
    String[] racks = { "/default-rack", "/default-rack" };
    final Priority mockPriority1 = Priority.newInstance(1);
    final Priority mockPriority2 = Priority.newInstance(2);
    final Priority mockPriority3 = Priority.newInstance(3);
    final Priority mockPriority4 = Priority.newInstance(4);
    final Priority mockPriority5 = Priority.newInstance(5);
    Object mockTask2 = mock(Object.class);
    when(mockTask2.toString()).thenReturn("task2");
    Object mockCookie2 = mock(Object.class);
    Object mockTask3 = mock(Object.class);
    when(mockTask3.toString()).thenReturn("task3");
    Object mockCookie3 = mock(Object.class);
    ArgumentCaptor<CookieContainerRequest> requestCaptor = ArgumentCaptor
            .forClass(CookieContainerRequest.class);

    scheduler.allocateTask(mockTask1, mockCapability, hosts, racks, mockPriority1, null, mockCookie1);
    drainableAppCallback.drain();
    verify(mockRMClient, times(1)).addContainerRequest(requestCaptor.capture());
    CookieContainerRequest request1 = requestCaptor.getValue();
    scheduler.allocateTask(mockTask2, mockCapability, hosts, racks, mockPriority2, null, mockCookie2);
    drainableAppCallback.drain();
    verify(mockRMClient, times(2)).addContainerRequest(requestCaptor.capture());
    CookieContainerRequest request2 = requestCaptor.getValue();
    scheduler.allocateTask(mockTask3, mockCapability, hosts, racks, mockPriority3, null, mockCookie3);
    drainableAppCallback.drain();
    verify(mockRMClient, times(3)).addContainerRequest(requestCaptor.capture());
    CookieContainerRequest request3 = requestCaptor.getValue();

    List<Container> containers = new ArrayList<Container>();
    // sending lower priority container first to make sure it's not matched
    Container mockContainer4 = mock(Container.class, RETURNS_DEEP_STUBS);
    when(mockContainer4.getNodeId().getHost()).thenReturn("host4");
    when(mockContainer4.toString()).thenReturn("container4");
    when(mockContainer4.getPriority()).thenReturn(mockPriority4);
    ContainerId mockCId4 = mock(ContainerId.class);
    when(mockContainer4.getId()).thenReturn(mockCId4);
    when(mockCId4.toString()).thenReturn("container4");
    containers.add(mockContainer4);
    Container mockContainer1 = mock(Container.class, RETURNS_DEEP_STUBS);
    when(mockContainer1.getNodeId().getHost()).thenReturn("host1");
    when(mockContainer1.getPriority()).thenReturn(mockPriority1);
    when(mockContainer1.toString()).thenReturn("container1");
    ContainerId mockCId1 = mock(ContainerId.class);
    when(mockContainer1.getId()).thenReturn(mockCId1);
    when(mockCId1.toString()).thenReturn("container1");
    containers.add(mockContainer1);
    Container mockContainer2 = mock(Container.class, RETURNS_DEEP_STUBS);
    when(mockContainer2.getNodeId().getHost()).thenReturn("host2");
    when(mockContainer2.getPriority()).thenReturn(mockPriority2);
    when(mockContainer2.toString()).thenReturn("container2");
    ContainerId mockCId2 = mock(ContainerId.class);
    when(mockContainer2.getId()).thenReturn(mockCId2);
    when(mockCId2.toString()).thenReturn("container2");
    containers.add(mockContainer2);
    Container mockContainer3 = mock(Container.class, RETURNS_DEEP_STUBS);
    when(mockContainer3.getNodeId().getHost()).thenReturn("host3");
    when(mockContainer3.getPriority()).thenReturn(mockPriority3);
    when(mockContainer3.toString()).thenReturn("container3");
    ContainerId mockCId3 = mock(ContainerId.class);
    when(mockContainer3.getId()).thenReturn(mockCId3);
    when(mockCId3.toString()).thenReturn("container3");
    containers.add(mockContainer3);

    ArrayList<CookieContainerRequest> hostContainers = new ArrayList<CookieContainerRequest>();
    hostContainers.add(request1);
    ArrayList<CookieContainerRequest> rackContainers = new ArrayList<CookieContainerRequest>();
    rackContainers.add(request2);
    ArrayList<CookieContainerRequest> anyContainers = new ArrayList<CookieContainerRequest>();
    anyContainers.add(request3);

    final List<ArrayList<CookieContainerRequest>> hostList = new LinkedList<ArrayList<CookieContainerRequest>>();
    hostList.add(hostContainers);
    final List<ArrayList<CookieContainerRequest>> rackList = new LinkedList<ArrayList<CookieContainerRequest>>();
    rackList.add(rackContainers);
    final List<ArrayList<CookieContainerRequest>> anyList = new LinkedList<ArrayList<CookieContainerRequest>>();
    anyList.add(anyContainers);
    final List<ArrayList<CookieContainerRequest>> emptyList = new LinkedList<ArrayList<CookieContainerRequest>>();
    // return pri1 requests for host1
    when(mockRMClient.getMatchingRequestsForTopPriority(eq("host1"), (Resource) any()))
            .thenAnswer(new Answer<List<? extends Collection<CookieContainerRequest>>>() {
                @Override
                public List<? extends Collection<CookieContainerRequest>> answer(InvocationOnMock invocation)
                        throws Throwable {
                    return hostList;
                }

            });
    // second request matched to rack. RackResolver by default puts hosts in
    // /default-rack. We need to work around this by returning rack matches only once
    when(mockRMClient.getMatchingRequestsForTopPriority(eq("/default-rack"), (Resource) any()))
            .thenAnswer(new Answer<List<? extends Collection<CookieContainerRequest>>>() {
                @Override
                public List<? extends Collection<CookieContainerRequest>> answer(InvocationOnMock invocation)
                        throws Throwable {
                    return rackList;
                }

            }).thenAnswer(new Answer<List<? extends Collection<CookieContainerRequest>>>() {
                @Override
                public List<? extends Collection<CookieContainerRequest>> answer(InvocationOnMock invocation)
                        throws Throwable {
                    return emptyList;
                }

            });
    // third request matched to ANY
    when(mockRMClient.getMatchingRequestsForTopPriority(eq(ResourceRequest.ANY), (Resource) any()))
            .thenAnswer(new Answer<List<? extends Collection<CookieContainerRequest>>>() {
                @Override
                public List<? extends Collection<CookieContainerRequest>> answer(InvocationOnMock invocation)
                        throws Throwable {
                    return anyList;
                }

            }).thenAnswer(new Answer<List<? extends Collection<CookieContainerRequest>>>() {
                @Override
                public List<? extends Collection<CookieContainerRequest>> answer(InvocationOnMock invocation)
                        throws Throwable {
                    return emptyList;
                }

            });

    when(mockRMClient.getTopPriority()).then(new Answer<Priority>() {
        @Override
        public Priority answer(InvocationOnMock invocation) throws Throwable {
            int allocations = drainableAppCallback.count.get();
            if (allocations == 0) {
                return mockPriority1;
            }
            if (allocations == 1) {
                return mockPriority2;
            }
            if (allocations == 2) {
                return mockPriority3;
            }
            if (allocations == 3) {
                return mockPriority4;
            }
            return null;
        }
    });

    AtomicBoolean drainNotifier = new AtomicBoolean(false);
    scheduler.delayedContainerManager.drainedDelayedContainersForTest = drainNotifier;

    scheduler.onContainersAllocated(containers);
    TestTaskSchedulerHelpers.waitForDelayedDrainNotify(drainNotifier);
    drainableAppCallback.drain();
    // exact number of allocations returned
    verify(mockApp, times(3)).taskAllocated(any(), any(), (Container) any());
    // first container allocated
    verify(mockApp).taskAllocated(mockTask1, mockCookie1, mockContainer1);
    verify(mockApp).taskAllocated(mockTask2, mockCookie2, mockContainer2);
    verify(mockApp).taskAllocated(mockTask3, mockCookie3, mockContainer3);
    verify(mockRMClient).removeContainerRequest(request1);
    verify(mockRMClient).removeContainerRequest(request2);
    verify(mockRMClient).removeContainerRequest(request3);
    // verify unwanted container released
    verify(mockRMClient).releaseAssignedContainer(mockCId4);

    // deallocate allocated task
    assertTrue(scheduler.deallocateTask(mockTask1, true, null, null));
    drainableAppCallback.drain();
    verify(mockApp).containerBeingReleased(mockCId1);
    verify(mockRMClient).releaseAssignedContainer(mockCId1);
    // deallocate allocated container
    Assert.assertEquals(mockTask2, scheduler.deallocateContainer(mockCId2));
    drainableAppCallback.drain();
    verify(mockRMClient).releaseAssignedContainer(mockCId2);
    verify(mockRMClient, times(3)).releaseAssignedContainer((ContainerId) any());

    List<ContainerStatus> statuses = new ArrayList<ContainerStatus>();
    ContainerStatus mockStatus1 = mock(ContainerStatus.class);
    when(mockStatus1.getContainerId()).thenReturn(mockCId1);
    statuses.add(mockStatus1);
    ContainerStatus mockStatus2 = mock(ContainerStatus.class);
    when(mockStatus2.getContainerId()).thenReturn(mockCId2);
    statuses.add(mockStatus2);
    ContainerStatus mockStatus3 = mock(ContainerStatus.class);
    when(mockStatus3.getContainerId()).thenReturn(mockCId3);
    statuses.add(mockStatus3);
    ContainerStatus mockStatus4 = mock(ContainerStatus.class);
    when(mockStatus4.getContainerId()).thenReturn(mockCId4);
    statuses.add(mockStatus4);

    scheduler.onContainersCompleted(statuses);
    drainableAppCallback.drain();
    // released container status returned
    verify(mockApp).containerCompleted(mockTask1, mockStatus1);
    verify(mockApp).containerCompleted(mockTask2, mockStatus2);
    // currently allocated container status returned and not released
    verify(mockApp).containerCompleted(mockTask3, mockStatus3);
    // no other statuses returned
    verify(mockApp, times(3)).containerCompleted(any(), (ContainerStatus) any());
    verify(mockRMClient, times(3)).releaseAssignedContainer((ContainerId) any());

    // verify blacklisting
    verify(mockRMClient, times(0)).addNodeToBlacklist((NodeId) any());
    String badHost = "host6";
    NodeId badNodeId = mock(NodeId.class);
    when(badNodeId.getHost()).thenReturn(badHost);
    scheduler.blacklistNode(badNodeId);
    verify(mockRMClient, times(1)).addNodeToBlacklist(badNodeId);
    Object mockTask4 = mock(Object.class);
    when(mockTask4.toString()).thenReturn("task4");
    Object mockCookie4 = mock(Object.class);
    scheduler.allocateTask(mockTask4, mockCapability, null, null, mockPriority4, null, mockCookie4);
    drainableAppCallback.drain();
    verify(mockRMClient, times(4)).addContainerRequest(requestCaptor.capture());
    CookieContainerRequest request4 = requestCaptor.getValue();
    anyContainers.clear();
    anyContainers.add(request4);
    Container mockContainer5 = mock(Container.class, RETURNS_DEEP_STUBS);
    when(mockContainer5.getNodeId().getHost()).thenReturn(badHost);
    when(mockContainer5.getNodeId()).thenReturn(badNodeId);
    ContainerId mockCId5 = mock(ContainerId.class);
    when(mockContainer5.toString()).thenReturn("container5");
    when(mockCId5.toString()).thenReturn("container5");
    when(mockContainer5.getId()).thenReturn(mockCId5);
    when(mockContainer5.getPriority()).thenReturn(mockPriority4);
    containers.clear();
    containers.add(mockContainer5);
    when(mockRMClient.getMatchingRequestsForTopPriority(eq(ResourceRequest.ANY), (Resource) any()))
            .thenAnswer(new Answer<List<? extends Collection<CookieContainerRequest>>>() {
                @Override
                public List<? extends Collection<CookieContainerRequest>> answer(InvocationOnMock invocation)
                        throws Throwable {
                    return anyList;
                }

            }).thenAnswer(new Answer<List<? extends Collection<CookieContainerRequest>>>() {
                @Override
                public List<? extends Collection<CookieContainerRequest>> answer(InvocationOnMock invocation)
                        throws Throwable {
                    return emptyList;
                }

            });
    drainNotifier.set(false);
    scheduler.onContainersAllocated(containers);
    TestTaskSchedulerHelpers.waitForDelayedDrainNotify(drainNotifier);
    drainableAppCallback.drain();
    // no new allocation
    verify(mockApp, times(3)).taskAllocated(any(), any(), (Container) any());
    // verify blacklisted container released
    verify(mockRMClient).releaseAssignedContainer(mockCId5);
    verify(mockRMClient, times(4)).releaseAssignedContainer((ContainerId) any());
    // verify request added back
    verify(mockRMClient, times(5)).addContainerRequest(requestCaptor.capture());
    CookieContainerRequest request5 = requestCaptor.getValue();
    anyContainers.clear();
    anyContainers.add(request5);
    Container mockContainer6 = mock(Container.class, RETURNS_DEEP_STUBS);
    when(mockContainer6.getNodeId().getHost()).thenReturn("host7");
    ContainerId mockCId6 = mock(ContainerId.class);
    when(mockContainer6.getId()).thenReturn(mockCId6);
    when(mockContainer6.toString()).thenReturn("container6");
    when(mockCId6.toString()).thenReturn("container6");
    containers.clear();
    containers.add(mockContainer6);
    when(mockRMClient.getMatchingRequestsForTopPriority(eq(ResourceRequest.ANY), (Resource) any()))
            .thenAnswer(new Answer<List<? extends Collection<CookieContainerRequest>>>() {
                @Override
                public List<? extends Collection<CookieContainerRequest>> answer(InvocationOnMock invocation)
                        throws Throwable {
                    return anyList;
                }

            }).thenAnswer(new Answer<List<? extends Collection<CookieContainerRequest>>>() {
                @Override
                public List<? extends Collection<CookieContainerRequest>> answer(InvocationOnMock invocation)
                        throws Throwable {
                    return emptyList;
                }

            });
    drainNotifier.set(false);
    scheduler.onContainersAllocated(containers);
    TestTaskSchedulerHelpers.waitForDelayedDrainNotify(drainNotifier);
    drainableAppCallback.drain();
    // new allocation
    verify(mockApp, times(4)).taskAllocated(any(), any(), (Container) any());
    verify(mockApp).taskAllocated(mockTask4, mockCookie4, mockContainer6);
    // deallocate allocated task
    assertTrue(scheduler.deallocateTask(mockTask4, true, null, null));
    drainableAppCallback.drain();
    verify(mockApp).containerBeingReleased(mockCId6);
    verify(mockRMClient).releaseAssignedContainer(mockCId6);
    verify(mockRMClient, times(5)).releaseAssignedContainer((ContainerId) any());
    // test unblacklist
    scheduler.unblacklistNode(badNodeId);
    verify(mockRMClient, times(1)).removeNodeFromBlacklist(badNodeId);
    assertEquals(0, scheduler.blacklistedNodes.size());

    // verify container level matching
    // fudge the top level priority to prevent containers from being released:
    // if the top level priority is higher than that of the newly allocated containers,
    // then they will not be released
    final AtomicBoolean fudgePriority = new AtomicBoolean(true);
    when(mockRMClient.getTopPriority()).then(new Answer<Priority>() {
        @Override
        public Priority answer(InvocationOnMock invocation) throws Throwable {
            if (fudgePriority.get()) {
                return mockPriority4;
            }
            return mockPriority5;
        }
    });
    // add a dummy task to prevent release of allocated containers
    Object mockTask5 = mock(Object.class);
    when(mockTask5.toString()).thenReturn("task5");
    Object mockCookie5 = mock(Object.class);
    scheduler.allocateTask(mockTask5, mockCapability, hosts, racks, mockPriority5, null, mockCookie5);
    verify(mockRMClient, times(6)).addContainerRequest(requestCaptor.capture());
    CookieContainerRequest request6 = requestCaptor.getValue();
    drainableAppCallback.drain();
    // add containers so that we can reference one of them for container specific
    // allocation
    containers.clear();
    Container mockContainer7 = mock(Container.class, RETURNS_DEEP_STUBS);
    when(mockContainer7.getNodeId().getHost()).thenReturn("host5");
    ContainerId mockCId7 = mock(ContainerId.class);
    when(mockContainer7.toString()).thenReturn("container7");
    when(mockCId7.toString()).thenReturn("container7");
    when(mockContainer7.getId()).thenReturn(mockCId7);
    when(mockContainer7.getPriority()).thenReturn(mockPriority5);
    containers.add(mockContainer7);
    Container mockContainer8 = mock(Container.class, RETURNS_DEEP_STUBS);
    when(mockContainer8.getNodeId().getHost()).thenReturn("host5");
    ContainerId mockCId8 = mock(ContainerId.class);
    when(mockContainer8.toString()).thenReturn("container8");
    when(mockCId8.toString()).thenReturn("container8");
    when(mockContainer8.getId()).thenReturn(mockCId8);
    when(mockContainer8.getPriority()).thenReturn(mockPriority5);
    containers.add(mockContainer8);
    drainNotifier.set(false);
    scheduler.onContainersAllocated(containers);
    drainableAppCallback.drain();
    verify(mockRMClient, times(5)).releaseAssignedContainer((ContainerId) any());
    Object mockTask6 = mock(Object.class);
    when(mockTask6.toString()).thenReturn("task6");
    Object mockCookie6 = mock(Object.class);
    // allocate request with container affinity
    scheduler.allocateTask(mockTask6, mockCapability, mockCId7, mockPriority5, null, mockCookie6);
    drainableAppCallback.drain();
    verify(mockRMClient, times(7)).addContainerRequest(requestCaptor.capture());
    CookieContainerRequest request7 = requestCaptor.getValue();
    hostContainers.clear();
    hostContainers.add(request6);
    hostContainers.add(request7);

    when(mockRMClient.getMatchingRequestsForTopPriority(eq("host5"), (Resource) any()))
            .thenAnswer(new Answer<List<? extends Collection<CookieContainerRequest>>>() {
                @Override
                public List<? extends Collection<CookieContainerRequest>> answer(InvocationOnMock invocation)
                        throws Throwable {
                    return hostList;
                }

            });
    // stop fudging top priority
    fudgePriority.set(false);
    TestTaskSchedulerHelpers.waitForDelayedDrainNotify(drainNotifier);
    drainableAppCallback.drain();
    verify(mockApp, times(6)).taskAllocated(any(), any(), (Container) any());
    // container7 allocated to the task with affinity for it
    verify(mockApp).taskAllocated(mockTask6, mockCookie6, mockContainer7);
    // deallocate allocated task
    assertTrue(scheduler.deallocateTask(mockTask5, true, null, null));
    assertTrue(scheduler.deallocateTask(mockTask6, true, null, null));
    drainableAppCallback.drain();
    verify(mockApp).containerBeingReleased(mockCId7);
    verify(mockApp).containerBeingReleased(mockCId8);
    verify(mockRMClient).releaseAssignedContainer(mockCId7);
    verify(mockRMClient).releaseAssignedContainer(mockCId8);
    verify(mockRMClient, times(7)).releaseAssignedContainer((ContainerId) any());

    float progress = 0.5f;
    when(mockApp.getProgress()).thenReturn(progress);
    Assert.assertEquals(progress, scheduler.getProgress(), 0);

    List<NodeReport> mockUpdatedNodes = mock(List.class);
    scheduler.onNodesUpdated(mockUpdatedNodes);
    drainableAppCallback.drain();
    verify(mockApp).nodesUpdated(mockUpdatedNodes);

    ArgumentCaptor<String> argumentCaptor = ArgumentCaptor.forClass(String.class);
    Exception mockException = new IOException("mockexception");
    scheduler.onError(mockException);
    drainableAppCallback.drain();
    verify(mockApp).reportError(eq(YarnTaskSchedulerServiceError.RESOURCEMANAGER_ERROR),
            argumentCaptor.capture(), any(DagInfo.class));
    assertTrue(argumentCaptor.getValue().contains("mockexception"));

    scheduler.onShutdownRequest();
    drainableAppCallback.drain();
    verify(mockApp).appShutdownRequested();

    String appMsg = "success";
    AppFinalStatus finalStatus = new AppFinalStatus(FinalApplicationStatus.SUCCEEDED, appMsg, appUrl);
    when(mockApp.getFinalAppStatus()).thenReturn(finalStatus);
    scheduler.shutdown();
    drainableAppCallback.drain();
    verify(mockRMClient).unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, appMsg, appUrl);
    verify(mockRMClient).stop();
}
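
drainNotifier is a flag the scheduler sets once its delayed containers have drained; the test resets it with drainNotifier.set(false) before each allocation round and then blocks in TestTaskSchedulerHelpers.waitForDelayedDrainNotify. That helper is not shown here; one plausible way to wait on such a flag is a bounded poll, as in this sketch (waitForFlag is a hypothetical stand-in, not the actual Tez helper):

import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;

public class DrainNotifierDemo {

    // One plausible way to wait for a flag like drainNotifier: poll with a timeout.
    static void waitForFlag(AtomicBoolean flag, long timeoutMillis)
            throws InterruptedException, TimeoutException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (!flag.get()) {
            if (System.currentTimeMillis() > deadline) {
                throw new TimeoutException("flag was not set within " + timeoutMillis + " ms");
            }
            Thread.sleep(10);
        }
    }

    public static void main(String[] args) throws Exception {
        final AtomicBoolean drainNotifier = new AtomicBoolean(false);

        new Thread(() -> {
            try {
                Thread.sleep(100); // stand-in for the scheduler draining its delayed containers
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            drainNotifier.set(true); // signal the waiting thread that draining happened
        }).start();

        waitForFlag(drainNotifier, 1000);
        System.out.println("drain observed");
    }
}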