Example usage for java.util.concurrent.atomic AtomicBoolean get

List of usage examples for java.util.concurrent.atomic AtomicBoolean get

Introduction

On this page you can find example usage of java.util.concurrent.atomic AtomicBoolean get, drawn from the open-source projects listed under Usage.

Prototype

public final boolean get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
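
Before the project samples, here is a minimal, self-contained sketch of what that volatile-read guarantee buys you (class and variable names are illustrative, not from any of the projects below):

import java.util.concurrent.atomic.AtomicBoolean;

public class GetVisibilityDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean done = new AtomicBoolean(false);
        final int[] payload = new int[1];

        Thread worker = new Thread(() -> {
            payload[0] = 42;   // ordinary write...
            done.set(true);    // ...published by this volatile write
        });
        worker.start();

        // get() is a volatile read: once it observes true, every write the
        // worker made before set(true) is guaranteed visible on this thread.
        while (!done.get()) {
            Thread.onSpinWait(); // Java 9+; an empty loop body also works
        }
        System.out.println(payload[0]); // prints 42
        worker.join();
    }
}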

Usage

From source file:com.photon.maven.plugins.android.AbstractAndroidMojo.java

/**
 * Undeploys an apk, specified by package name, from a connected emulator or
 * usb device. Also deletes the application's data and cache directories on
 * the device.
 * 
 * @param packageName
 *            the package name to undeploy.
 * @return <code>true</code> if successfully undeployed, <code>false</code>
 *         otherwise.
 */
protected boolean undeployApk(final String packageName) throws MojoExecutionException, MojoFailureException {

    final AtomicBoolean result = new AtomicBoolean(true); // if no devices are present, it counts as successful

    doWithDevices(new DeviceCallback() {
        @Override
        public void doWithDevice(final IDevice device) throws MojoExecutionException {
            try {
                device.uninstallPackage(packageName);
                getLog().info("Successfully uninstalled " + packageName + " from "
                        + DeviceHelper.getDescriptiveName(device));
                result.set(true);
            } catch (InstallException e) {
                result.set(false);
                throw new MojoExecutionException("Uninstall of " + packageName + " failed.", e);
            }
        }
    });

    return result.get();
}
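
Stripped of the Android specifics, the example above uses an AtomicBoolean to carry a success flag out of an anonymous callback (a captured local must be effectively final, so a plain boolean would not compile). A minimal sketch of the same pattern, with runAll and its Runnable tasks as hypothetical stand-ins:

import java.util.concurrent.atomic.AtomicBoolean;

class CallbackResultSketch {
    static boolean runAll(Iterable<Runnable> tasks) {
        final AtomicBoolean ok = new AtomicBoolean(true); // vacuously true if there are no tasks
        for (Runnable task : tasks) {
            try {
                task.run();
            } catch (RuntimeException e) {
                ok.set(false); // record the failure; keep processing the rest
            }
        }
        return ok.get();
    }
}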

From source file:io.atomix.protocols.gossip.map.AntiEntropyMapDelegate.java

/**
 * Requests all updates from each peer in the provided list of peers.
 * <p>
 * The returned future will be completed once at least one peer bootstraps this map or bootstrap requests to all peers
 * fail.
 *
 * @param peers the list of peers from which to request updates
 * @return a future to be completed once updates have been received from at least one peer
 */
private CompletableFuture<Void> requestBootstrapFromPeers(List<MemberId> peers) {
    if (peers.isEmpty()) {
        return CompletableFuture.completedFuture(null);
    }
    CompletableFuture<Void> future = new CompletableFuture<>();
    final int totalPeers = peers.size();
    AtomicBoolean successful = new AtomicBoolean();
    AtomicInteger totalCount = new AtomicInteger();
    AtomicReference<Throwable> lastError = new AtomicReference<>();

    // Iterate through all of the peers and send a bootstrap request. On the first peer that returns
    // a successful bootstrap response, complete the future. Otherwise, if no peers respond with any
    // successful bootstrap response, the future will be completed with the last exception.
    for (MemberId peer : peers) {
        requestBootstrapFromPeer(peer).whenComplete((result, error) -> {
            if (error == null) {
                if (successful.compareAndSet(false, true)) {
                    future.complete(null);
                } else if (totalCount.incrementAndGet() == totalPeers) {
                    Throwable e = lastError.get();
                    if (e != null) {
                        future.completeExceptionally(e);
                    }
                }
            } else {
                if (!successful.get() && totalCount.incrementAndGet() == totalPeers) {
                    future.completeExceptionally(error);
                } else {
                    lastError.set(error);
                }
            }
        });
    }
    return future;
}
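
The compareAndSet(false, true) / get() pair above implements a first-responder-wins guard: exactly one successful peer completes the future, and failures only complete it exceptionally once every peer has failed. The same idea in isolation, with the probe futures standing in for requestBootstrapFromPeer:

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

class FirstSuccessSketch {
    static CompletableFuture<Void> firstSuccess(List<CompletableFuture<Void>> probes) {
        CompletableFuture<Void> result = new CompletableFuture<>();
        AtomicBoolean won = new AtomicBoolean();
        AtomicInteger failures = new AtomicInteger();
        for (CompletableFuture<Void> probe : probes) {
            probe.whenComplete((v, error) -> {
                if (error == null) {
                    if (won.compareAndSet(false, true)) {
                        result.complete(null); // only the first winner completes
                    }
                } else if (!won.get() && failures.incrementAndGet() == probes.size()) {
                    result.completeExceptionally(error); // every probe failed
                }
            });
        }
        return result;
    }
}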

From source file:org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.java

static void trySaslNegotiate(Configuration conf, Channel channel, DatanodeInfo dnInfo, int timeoutMs,
        DFSClient client, Token<BlockTokenIdentifier> accessToken, Promise<Void> saslPromise) {
    SaslPropertiesResolver saslPropsResolver = SASL_ADAPTOR.getSaslPropsResolver(client);
    TrustedChannelResolver trustedChannelResolver = SASL_ADAPTOR.getTrustedChannelResolver(client);
    AtomicBoolean fallbackToSimpleAuth = SASL_ADAPTOR.getFallbackToSimpleAuth(client);
    InetAddress addr = ((InetSocketAddress) channel.remoteAddress()).getAddress();
    if (trustedChannelResolver.isTrusted() || trustedChannelResolver.isTrusted(addr)) {
        saslPromise.trySuccess(null);
        return;
    }
    DataEncryptionKey encryptionKey;
    try {
        encryptionKey = SASL_ADAPTOR.createDataEncryptionKey(client);
    } catch (Exception e) {
        saslPromise.tryFailure(e);
        return;
    }
    if (encryptionKey != null) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("SASL client doing encrypted handshake for addr = " + addr + ", datanodeId = " + dnInfo);
        }
        doSaslNegotiation(conf, channel, timeoutMs, getUserNameFromEncryptionKey(encryptionKey),
                encryptionKeyToPassword(encryptionKey.encryptionKey),
                createSaslPropertiesForEncryption(encryptionKey.encryptionAlgorithm), saslPromise);
    } else if (!UserGroupInformation.isSecurityEnabled()) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("SASL client skipping handshake in unsecured configuration for addr = " + addr
                    + ", datanodeId = " + dnInfo);
        }
        saslPromise.trySuccess(null);
    } else if (dnInfo.getXferPort() < 1024) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("SASL client skipping handshake in secured configuration with "
                    + "privileged port for addr = " + addr + ", datanodeId = " + dnInfo);
        }
        saslPromise.trySuccess(null);
    } else if (fallbackToSimpleAuth != null && fallbackToSimpleAuth.get()) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("SASL client skipping handshake in secured configuration with "
                    + "unsecured cluster for addr = " + addr + ", datanodeId = " + dnInfo);
        }
        saslPromise.trySuccess(null);
    } else if (saslPropsResolver != null) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("SASL client doing general handshake for addr = " + addr + ", datanodeId = " + dnInfo);
        }
        doSaslNegotiation(conf, channel, timeoutMs, buildUsername(accessToken),
                buildClientPassword(accessToken), saslPropsResolver.getClientProperties(addr), saslPromise);
    } else {
        // It's a secured cluster using non-privileged ports, but no SASL. The only way this can
        // happen is if the DataNode has ignore.secure.ports.for.testing configured, so this is a rare
        // edge case.
        if (LOG.isDebugEnabled()) {
            LOG.debug("SASL client skipping handshake in secured configuration with no SASL "
                    + "protection configured for addr = " + addr + ", datanodeId = " + dnInfo);
        }
        saslPromise.trySuccess(null);
    }
}

From source file:org.apache.hadoop.hbase.client.SpeculativeRequester.java

public ResultWrapper<T> request(final HBaseTableFunction<T> function, final HTableInterface primaryTable,
        final Collection<HTableInterface> failoverTables) {

    ExecutorCompletionService<ResultWrapper<T>> exeS = new ExecutorCompletionService<ResultWrapper<T>>(exe);

    final AtomicBoolean isPrimarySuccess = new AtomicBoolean(false);
    final long startTime = System.currentTimeMillis();

    ArrayList<Callable<ResultWrapper<T>>> callables = new ArrayList<Callable<ResultWrapper<T>>>();

    if (System.currentTimeMillis() - lastPrimaryFail.get() > waitTimeFromLastPrimaryFail) {
        callables.add(new Callable<ResultWrapper<T>>() {
            public ResultWrapper<T> call() throws Exception {
                try {
                    T t = function.call(primaryTable);
                    isPrimarySuccess.set(true);
                    return new ResultWrapper<>(true, t);
                } catch (java.io.InterruptedIOException e) {
                    Thread.currentThread().interrupt();
                } catch (Exception e) {
                    lastPrimaryFail.set(System.currentTimeMillis());
                    Thread.currentThread().interrupt();
                }
                return null;
            }
        });
    }

    for (final HTableInterface failoverTable : failoverTables) {
        callables.add(new Callable<ResultWrapper<T>>() {

            public ResultWrapper<T> call() throws Exception {

                long waitToRequest = (System.currentTimeMillis()
                        - lastPrimaryFail.get() > waitTimeFromLastPrimaryFail)
                                ? waitTimeBeforeRequestingFailover - (System.currentTimeMillis() - startTime)
                                : 0;

                if (waitToRequest > 0) {
                    Thread.sleep(waitToRequest);
                }
                if (!isPrimarySuccess.get()) {
                    T t = function.call(failoverTable);

                    long waitToAccept = (System.currentTimeMillis()
                            - lastPrimaryFail.get() > waitTimeFromLastPrimaryFail)
                                    ? waitTimeBeforeAcceptingResults - (System.currentTimeMillis() - startTime)
                                    : 0;
                    if (!isPrimarySuccess.get()) {
                        if (waitToAccept > 0) {
                            Thread.sleep(waitToAccept);
                        }
                    }

                    return new ResultWrapper<>(false, t);
                } else {
                    throw new RuntimeException("Not needed");
                }

            }
        });
    }
    try {

        //ResultWrapper<T> t = exe.invokeAny(callables);
        for (Callable<ResultWrapper<T>> call : callables) {
            exeS.submit(call);
        }

        ResultWrapper<T> result = exeS.take().get();
        //exe.shutdownNow();

        return result;
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore the interrupt status instead of swallowing it
        LOG.error(e);
    } catch (ExecutionException e) {
        LOG.error(e);
    }
    return null;

}
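
The load-bearing piece above is a shared AtomicBoolean that lets the delayed failover tasks check whether the primary already answered. A compact sketch of that hedged-request shape (hedged, pool, and the delay parameter are hypothetical; the real class also tracks the time of the last primary failure):

import java.util.concurrent.Callable;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicBoolean;

class HedgedRequestSketch {
    static <T> T hedged(ExecutorService pool, Callable<T> primary, Callable<T> backup,
            long backupDelayMs) throws InterruptedException {
        ExecutorCompletionService<T> ecs = new ExecutorCompletionService<>(pool);
        final AtomicBoolean primaryWon = new AtomicBoolean(false);
        ecs.submit(() -> {
            T t = primary.call();
            primaryWon.set(true); // tell the backup it can stand down
            return t;
        });
        ecs.submit(() -> {
            Thread.sleep(backupDelayMs); // give the primary a head start
            if (primaryWon.get()) {
                throw new CancellationException("primary already answered");
            }
            return backup.call();
        });
        for (int i = 0; i < 2; i++) {
            try {
                return ecs.take().get(); // first successful result wins
            } catch (ExecutionException e) {
                // that attempt failed or stood down; wait for the other one
            }
        }
        throw new IllegalStateException("both attempts failed");
    }
}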

From source file:test.java.com.spotify.docker.client.DefaultDockerClientTest.java

@Test
public void testBuildNoRm() throws Exception {
    final String dockerDirectory = Resources.getResource("dockerDirectory").getPath();
    final String removingContainers = "Removing intermediate container";

    // Test that intermediate containers are removed with FORCE_RM by parsing output. We must
    // set NO_CACHE so that docker will generate some containers to remove.
    final AtomicBoolean removedContainer = new AtomicBoolean(false);
    sut.build(Paths.get(dockerDirectory), "test", new ProgressHandler() {
        @Override
        public void progress(ProgressMessage message) throws DockerException {
            if (containsIgnoreCase(message.stream(), removingContainers)) {
                removedContainer.set(true);
            }
        }
    }, NO_CACHE, FORCE_RM);
    assertTrue(removedContainer.get());

    // Set NO_RM and verify we don't get message that containers were removed.
    sut.build(Paths.get(dockerDirectory), "test", new ProgressHandler() {
        @Override
        public void progress(ProgressMessage message) throws DockerException {
            assertThat(message.stream(), not(containsString(removingContainers)));
        }
    }, NO_CACHE, NO_RM);
}

From source file:org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.java

/**
 * Test to verify the race between finalizeBlock and Lease recovery
 *
 * @throws Exception
 */
@Test(timeout = 20000)
public void testRaceBetweenReplicaRecoveryAndFinalizeBlock() throws Exception {
    tearDown();// Stop the Mocked DN started in startup()

    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY, "1000");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
        cluster.waitClusterUp();
        DistributedFileSystem fs = cluster.getFileSystem();
        Path path = new Path("/test");
        FSDataOutputStream out = fs.create(path);
        out.writeBytes("data");
        out.hsync();

        List<LocatedBlock> blocks = DFSTestUtil.getAllBlocks(fs.open(path));
        final LocatedBlock block = blocks.get(0);
        final DataNode dataNode = cluster.getDataNodes().get(0);

        final AtomicBoolean recoveryInitResult = new AtomicBoolean(true);
        Thread recoveryThread = new Thread() {
            @Override
            public void run() {
                try {
                    DatanodeInfo[] locations = block.getLocations();
                    final RecoveringBlock recoveringBlock = new RecoveringBlock(block.getBlock(), locations,
                            block.getBlock().getGenerationStamp() + 1);
                    synchronized (dataNode.data) {
                        Thread.sleep(2000);
                        dataNode.initReplicaRecovery(recoveringBlock);
                    }
                } catch (Exception e) {
                    recoveryInitResult.set(false);
                }
            }
        };
        recoveryThread.start();
        try {
            out.close();
        } catch (IOException e) {
            Assert.assertTrue("Writing should fail", e.getMessage().contains("are bad. Aborting..."));
        } finally {
            recoveryThread.join();
        }
        Assert.assertTrue("Recovery should be initiated successfully", recoveryInitResult.get());

        dataNode.updateReplicaUnderRecovery(block.getBlock(), block.getBlock().getGenerationStamp() + 1,
                block.getBlock().getBlockId(), block.getBlockSize());
    } finally {
        if (null != cluster) {
            cluster.shutdown();
            cluster = null;
        }
    }
}
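
A JUnit assertion cannot fire usefully inside a spawned thread (the AssertionError would die with that thread), so the test records the outcome in an AtomicBoolean and asserts on get() after join(). The skeleton, reduced to its essentials:

import static org.junit.Assert.assertTrue;

import java.util.concurrent.atomic.AtomicBoolean;

class WorkerThreadAssertSketch {
    static void assertWorkerSucceeds(Runnable risky) throws InterruptedException {
        final AtomicBoolean ok = new AtomicBoolean(true);
        Thread worker = new Thread(() -> {
            try {
                risky.run();
            } catch (Exception e) {
                ok.set(false); // surface the failure to the test thread
            }
        });
        worker.start();
        worker.join(); // join() happens-after the worker's set(false), so get() below is accurate
        assertTrue("worker should not have thrown", ok.get());
    }
}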

From source file:org.piraso.server.service.ResponseLoggerServiceImplTest.java

@Test
public void testLogging() throws IOException, TransformerConfigurationException, ParserConfigurationException,
        ExecutionException, InterruptedException, SAXException {
    final AtomicBoolean fail = new AtomicBoolean(false);
    ExecutorService executor = Executors.newFixedThreadPool(2);

    final List<MessageEntry> expectedEntries = new ArrayList<MessageEntry>() {
        {
            for (int i = 0; i < 1000; i++) {
                add(new MessageEntry(1L, "test_" + (i + 1)));
            }
        }
    };

    // stop the service when the number of entries is reached.
    stopOnWriteTimes(expectedEntries.size());

    Runnable startServiceRunnable = new Runnable() {
        public void run() {
            try {
                service.start();
            } catch (Exception e) {
                fail.set(true);
                e.printStackTrace();
            }
        }
    };

    Runnable logMessagesRunnable = new Runnable() {
        public void run() {
            try {
                // this entry should be ignored since this will throw an exception
                service.log(new ExceptionThrowEntry(1L));

                // these entries should succeed
                for (MessageEntry entry : expectedEntries) {
                    service.log(entry);
                }
            } catch (IOException e) {
                fail.set(true);
                e.printStackTrace();
            }
        }
    };

    Future future = executor.submit(startServiceRunnable);
    executor.submit(logMessagesRunnable);

    future.get();
    executor.shutdown();

    if (fail.get()) {
        fail("failure see exception trace.");
    }

    final List<Entry> entriesRead = new ArrayList<Entry>();
    PirasoEntryReader reader = new PirasoEntryReader(
            new ByteArrayInputStream(response.getContentAsByteArray()));
    reader.addListener(new EntryReadAdapter() {
        @Override
        public void readEntry(EntryReadEvent evt) {
            entriesRead.add(evt.getEntry());
        }
    });

    // start reading
    reader.start();

    assertEquals(service.getId(), reader.getId());
    assertEquals(expectedEntries.size(), entriesRead.size());
}

From source file:org.apache.hadoop.hbase.security.visibility.TestVisibilityLabelsWithDefaultVisLabelService.java

@Test(timeout = 60 * 1000)
public void testAddVisibilityLabelsOnRSRestart() throws Exception {
    List<RegionServerThread> regionServerThreads = TEST_UTIL.getHBaseCluster().getRegionServerThreads();
    for (RegionServerThread rsThread : regionServerThreads) {
        rsThread.getRegionServer().abort("Aborting ");
    }
    // Start one new RS
    RegionServerThread rs = TEST_UTIL.getHBaseCluster().startRegionServer();
    waitForLabelsRegionAvailability(rs.getRegionServer());
    final AtomicBoolean vcInitialized = new AtomicBoolean(true);
    do {
        PrivilegedExceptionAction<VisibilityLabelsResponse> action = new PrivilegedExceptionAction<VisibilityLabelsResponse>() {
            public VisibilityLabelsResponse run() throws Exception {
                String[] labels = { SECRET, CONFIDENTIAL, PRIVATE, "ABC", "XYZ" };
                try (Connection conn = ConnectionFactory.createConnection(conf)) {
                    VisibilityLabelsResponse resp = VisibilityClient.addLabels(conn, labels);
                    List<RegionActionResult> results = resp.getResultList();
                    if (results.get(0).hasException()) {
                        NameBytesPair pair = results.get(0).getException();
                        Throwable t = ProtobufUtil.toException(pair);
                        LOG.debug("Got exception writing labels", t);
                        if (t instanceof VisibilityControllerNotReadyException) {
                            vcInitialized.set(false);
                            LOG.warn("VisibilityController was not yet initialized");
                            Threads.sleep(10);
                        } else {
                            vcInitialized.set(true);
                        }
                    } else
                        LOG.debug("new labels added: " + resp);
                } catch (Throwable t) {
                    throw new IOException(t);
                }
                return null;
            }
        };
        SUPERUSER.runAs(action);
    } while (!vcInitialized.get());
    // Scan the visibility label
    Scan s = new Scan();
    s.setAuthorizations(new Authorizations(VisibilityUtils.SYSTEM_LABEL));

    int i = 0;
    try (Table ht = TEST_UTIL.getConnection().getTable(LABELS_TABLE_NAME);
            ResultScanner scanner = ht.getScanner(s)) {
        while (true) {
            Result next = scanner.next();
            if (next == null) {
                break;
            }
            i++;
        }
    }
    // One label is the "system" label.
    Assert.assertEquals("The count should be 13", 13, i);
}
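
Here the AtomicBoolean drives a retry loop: the privileged action is an inner class, so it cannot assign a captured local directly, and the do/while spins until the flag reads true. Distilled (attempt is a hypothetical probe that reports readiness):

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BooleanSupplier;

class RetryUntilReadySketch {
    static void retryUntilReady(BooleanSupplier attempt, long sleepMs) throws InterruptedException {
        final AtomicBoolean ready = new AtomicBoolean(false);
        do {
            ready.set(attempt.getAsBoolean());
            if (!ready.get()) {
                Thread.sleep(sleepMs); // back off before retrying
            }
        } while (!ready.get());
    }
}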

From source file:com.spectralogic.ds3client.integration.Smoke_Test.java

@Test
public void testHelperMetadata() throws IOException, URISyntaxException, XmlProcessingException {
    final String bucketName = "helper_metadata";
    try {
        HELPERS.ensureBucketExists(bucketName, envDataPolicyId);

        final List<Ds3Object> objects = new ArrayList<>();
        for (final String book : BOOKS) {
            final Path objPath = ResourceUtils.loadFileResource(RESOURCE_BASE_NAME + book);
            final long bookSize = Files.size(objPath);
            final Ds3Object obj = new Ds3Object(book, bookSize);

            objects.add(obj);
        }

        final Ds3ClientHelpers.Job job = HELPERS.startWriteJob(bucketName, objects);

        final AtomicBoolean calledWithMetadata = new AtomicBoolean(false);

        job.withMetadata(new Ds3ClientHelpers.MetadataAccess() {
            @Override
            public Map<String, String> getMetadataValue(final String filename) {
                if (filename.equals("beowulf.txt")) {
                    calledWithMetadata.set(true);
                    return ImmutableMap.of("fileType", "text");
                }

                return null;
            }
        });

        job.transfer(new ResourceObjectPutter(RESOURCE_BASE_NAME));

        assertTrue(calledWithMetadata.get());

        final HeadObjectResponse response = client.headObject(new HeadObjectRequest(bucketName, "beowulf.txt"));
        final Metadata metadata = response.getMetadata();
        final List<String> values = metadata.get("fileType");
        assertThat(values.size(), is(1));
        assertThat(values.get(0), is("text"));

    } finally {
        deleteAllContents(client, bucketName);
    }
}

From source file:org.apache.hadoop.contrib.bkjournal.BookKeeperJournalManager.java

/**
 * Pre-creates the BookKeeper metadata path in ZooKeeper.
 */
private void prepareBookKeeperEnv() throws IOException {
    // create the bookie available path in ZooKeeper if it doesn't exist
    final String zkAvailablePath = conf.get(BKJM_ZK_LEDGERS_AVAILABLE_PATH,
            BKJM_ZK_LEDGERS_AVAILABLE_PATH_DEFAULT);
    final CountDownLatch zkPathLatch = new CountDownLatch(1);

    final AtomicBoolean success = new AtomicBoolean(false);
    StringCallback callback = new StringCallback() {
        @Override
        public void processResult(int rc, String path, Object ctx, String name) {
            if (KeeperException.Code.OK.intValue() == rc || KeeperException.Code.NODEEXISTS.intValue() == rc) {
                LOG.info("Successfully created bookie available path : " + zkAvailablePath);
                success.set(true);
            } else {
                KeeperException.Code code = KeeperException.Code.get(rc);
                LOG.error("Error : " + KeeperException.create(code, path).getMessage()
                        + ", failed to create bookie available path : " + zkAvailablePath);
            }
            zkPathLatch.countDown();
        }
    };
    ZkUtils.asyncCreateFullPathOptimistic(zkc, zkAvailablePath, new byte[0], Ids.OPEN_ACL_UNSAFE,
            CreateMode.PERSISTENT, callback, null);

    try {
        if (!zkPathLatch.await(zkc.getSessionTimeout(), TimeUnit.MILLISECONDS) || !success.get()) {
            throw new IOException("Couldn't create bookie available path :" + zkAvailablePath + ", timed out "
                    + zkc.getSessionTimeout() + " millis");
        }
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new IOException("Interrupted when creating the bookie available path : " + zkAvailablePath, e);
    }
}
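
This last example pairs a CountDownLatch ("the callback ran") with an AtomicBoolean ("and it succeeded"), because a latch alone cannot carry an outcome. The shape, with asyncOp standing in for any callback-style API such as the ZooKeeper create above:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;

class LatchPlusFlagSketch {
    static void await(Consumer<Consumer<Boolean>> asyncOp, long timeoutMs) throws Exception {
        final CountDownLatch done = new CountDownLatch(1);
        final AtomicBoolean success = new AtomicBoolean(false);
        asyncOp.accept(ok -> {
            success.set(ok);  // record the outcome first...
            done.countDown(); // ...then release the waiter
        });
        if (!done.await(timeoutMs, TimeUnit.MILLISECONDS) || !success.get()) {
            throw new IllegalStateException("operation failed or timed out");
        }
    }
}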