Example usage for the java.util.concurrent.atomic.AtomicBoolean(boolean) constructor

Introduction

On this page you can find example usages of the java.util.concurrent.atomic.AtomicBoolean(boolean) constructor.

Prototype

public AtomicBoolean(boolean initialValue) 

Document

Creates a new AtomicBoolean with the given initial value.
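
A minimal, self-contained sketch of the constructor in action (class and variable names here are illustrative, not taken from any of the sources below):

import java.util.concurrent.atomic.AtomicBoolean;

public class AtomicBooleanDemo {
    public static void main(String[] args) {
        AtomicBoolean flag = new AtomicBoolean(false); // starts out false

        // compareAndSet atomically updates the value only if it matches the expected one
        boolean first = flag.compareAndSet(false, true);  // true: the update was applied
        boolean second = flag.compareAndSet(false, true); // false: flag is already true

        System.out.println(flag.get() + " " + first + " " + second); // true true false
    }
}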

Usage

From source file:info.archinnov.achilles.it.TestCRUDSimpleEntity.java

@Test
public void should_delete_with_not_equal_condition() throws Exception {
    //Given
    final long id = RandomUtils.nextLong(0L, Long.MAX_VALUE);
    final Date date = buildDateKey();
    scriptExecutor.executeScriptTemplate("SimpleEntity/insert_single_row.cql",
            ImmutableMap.of("id", id, "table", "simple"));

    final AtomicBoolean success = new AtomicBoolean(false);
    final LWTResultListener lwtResultListener = new LWTResultListener() {

        @Override
        public void onSuccess() {
            success.getAndSet(true);
        }

        @Override
        public void onError(LWTResult lwtResult) {

        }
    };
    //When
    manager.dsl().delete().allColumns_FromBaseTable().where().id_Eq(id).date_Eq(date)
            .ifConsistencyList_NotEq(Arrays.asList(ALL)).withLwtResultListener(lwtResultListener).execute();

    //Then
    final Row row = session.execute("SELECT * FROM simple WHERE id = " + id).one();
    assertThat(row).isNull();
    assertThat(success.get()).isTrue();
}

From source file:com.mellanox.r4h.DFSClient.java

/**
 * Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
 * If HA is enabled and a positive value is set for {@link DFSConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} in the
 * configuration, the DFSClient will use {@link LossyRetryInvocationHandler} as its RetryInvocationHandler. Otherwise one of nameNodeUri or
 * rpcNamenode
 * must be null.
 */
@VisibleForTesting
public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode, Configuration conf, FileSystem.Statistics stats)
        throws IOException {
    SpanReceiverHost.get(conf, DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX);
    traceSampler = new SamplerBuilder(TraceUtils.wrapHadoopConf(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX, conf))
            .build();
    // Copy only the required DFSClient configuration
    this.dfsClientConf = new DFSClientConfBridge2_7(conf);
    if (this.dfsClientConf.isUseLegacyBlockReaderLocal()) {
        LOG.debug("Using legacy short-circuit local reads.");
    }
    this.conf = conf;
    this.stats = stats;
    this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
    this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);

    this.ugi = UserGroupInformation.getCurrentUser();

    this.authority = nameNodeUri == null ? "null" : nameNodeUri.getAuthority();
    this.clientName = "DFSClient_" + dfsClientConf.getTaskId() + "_" + DFSUtil.getRandom().nextInt() + "_"
            + Thread.currentThread().getId();
    provider = DFSUtil.createKeyProvider(conf);
    if (LOG.isDebugEnabled()) {
        if (provider == null) {
            LOG.debug("No KeyProvider found.");
        } else {
            LOG.debug("Found KeyProvider: " + provider.toString());
        }
    }
    int numResponseToDrop = conf.getInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
            DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
    NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyInfo = null;
    AtomicBoolean nnFallbackToSimpleAuth = new AtomicBoolean(false);
    if (numResponseToDrop > 0) {
        // This case is used for testing.
        LOG.warn(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY + " is set to "
                + numResponseToDrop + ", this hacked client will proactively drop responses");
        proxyInfo = NameNodeProxies.createProxyWithLossyRetryHandler(conf, nameNodeUri, ClientProtocol.class,
                numResponseToDrop, nnFallbackToSimpleAuth);
    }

    if (proxyInfo != null) {
        this.dtService = proxyInfo.getDelegationTokenService();
        this.namenode = proxyInfo.getProxy();
    } else if (rpcNamenode != null) {
        // This case is used for testing.
        Preconditions.checkArgument(nameNodeUri == null);
        this.namenode = rpcNamenode;
        dtService = null;
    } else {
        Preconditions.checkArgument(nameNodeUri != null, "null URI");
        proxyInfo = NameNodeProxies.createProxy(conf, nameNodeUri, ClientProtocol.class,
                nnFallbackToSimpleAuth);
        this.dtService = proxyInfo.getDelegationTokenService();
        this.namenode = proxyInfo.getProxy();
    }

    String localInterfaces[] = conf.getTrimmedStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
    localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
    if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
        LOG.debug("Using local interfaces [" + Joiner.on(',').join(localInterfaces) + "] with addresses ["
                + Joiner.on(',').join(localInterfaceAddrs) + "]");
    }

    Boolean readDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_READS) == null) ? null
            : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_READS, false);
    Long readahead = (conf.get(DFS_CLIENT_CACHE_READAHEAD) == null) ? null
            : conf.getLong(DFS_CLIENT_CACHE_READAHEAD, 0);
    Boolean writeDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES) == null) ? null
            : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES, false);
    this.defaultReadCachingStrategy = new CachingStrategy(readDropBehind, readahead);
    this.defaultWriteCachingStrategy = new CachingStrategy(writeDropBehind, readahead);
    this.clientContext = ClientContext.get(conf.get(DFS_CLIENT_CONTEXT, DFS_CLIENT_CONTEXT_DEFAULT),
            dfsClientConf);
    this.hedgedReadThresholdMillis = conf.getLong(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS,
            DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS);
    int numThreads = conf.getInt(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE,
            DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE);
    if (numThreads > 0) {
        this.initThreadsNumForHedgedReads(numThreads);
    }
    this.saslClient = new SaslDataTransferClient(conf, DataTransferSaslUtil.getSaslPropertiesResolver(conf),
            TrustedChannelResolver.getInstance(conf), nnFallbackToSimpleAuth);
}
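
In this constructor the AtomicBoolean nnFallbackToSimpleAuth acts as a mutable out-parameter: NameNodeProxies.createProxy can set it while building the proxy, and the same reference is later handed to the SaslDataTransferClient, which consults it during data-transfer security negotiation. A minimal sketch of that out-parameter pattern (all names below are illustrative, not HDFS API):

import java.util.concurrent.atomic.AtomicBoolean;

public class OutParameterDemo {
    // The callee reports a side condition through the flag, in addition to its return value.
    static String connect(AtomicBoolean fellBackToSimpleAuth) {
        boolean saslRejected = true; // stand-in for a real negotiation result
        if (saslRejected) {
            fellBackToSimpleAuth.set(true);
        }
        return "connection";
    }

    public static void main(String[] args) {
        AtomicBoolean fallback = new AtomicBoolean(false);
        String conn = connect(fallback);
        if (fallback.get()) {
            System.out.println(conn + " established with simple-auth fallback");
        }
    }
}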

From source file:com.netflix.spinnaker.front50.model.GcsStorageService.java

private AtomicBoolean updateLock(String daoTypeName) {
    return updateLockMap.computeIfAbsent(daoTypeName, (String s) -> new AtomicBoolean(false));
}
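
The flag returned here is typically consumed as a lightweight try-lock: whichever thread flips it from false to true proceeds, and everyone else skips the work. A hedged sketch of that pattern (only updateLock mirrors the snippet; updateIfIdle and the map initialization are assumptions, not the actual Front50 code):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;

class PerTypeLockDemo {
    private final Map<String, AtomicBoolean> updateLockMap = new ConcurrentHashMap<>();

    private AtomicBoolean updateLock(String daoTypeName) {
        return updateLockMap.computeIfAbsent(daoTypeName, s -> new AtomicBoolean(false));
    }

    void updateIfIdle(String daoTypeName, Runnable update) {
        AtomicBoolean lock = updateLock(daoTypeName);
        if (lock.compareAndSet(false, true)) { // acquire: exactly one thread wins
            try {
                update.run();
            } finally {
                lock.set(false);               // release for the next update
            }
        }
    }
}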

From source file:test.java.com.spotify.docker.client.DefaultDockerClientTest.java

@Test
public void testBuildNoCache() throws Exception {
    final String dockerDirectory = Resources.getResource("dockerDirectory").getPath();
    final String usingCache = "Using cache";

    // Build once to make sure we have cached images.
    sut.build(Paths.get(dockerDirectory));

    // Build again and make sure we used cached image by parsing output.
    final AtomicBoolean usedCache = new AtomicBoolean(false);
    sut.build(Paths.get(dockerDirectory), "test", new ProgressHandler() {
        @Override
        public void progress(ProgressMessage message) throws DockerException {
            if (message.stream().contains(usingCache)) {
                usedCache.set(true);
            }
        }
    });
    assertTrue(usedCache.get());

    // Build again with NO_CACHE set, and verify we don't use cache.
    sut.build(Paths.get(dockerDirectory), "test", new ProgressHandler() {
        @Override
        public void progress(ProgressMessage message) throws DockerException {
            assertThat(message.stream(), not(containsString(usingCache)));
        }
    }, NO_CACHE);
}

From source file:com.nridge.connector.common.con_com.crawl.CrawlQueue.java

/**
 * Clears the active state from the queue file system.  This involves
 * clearing the crawl id, extraction complete flag, crawl type and
 * date last modified.  Finally, this method will remove the queue
 * lock file.
 *
 * @return <i>true</i> if lock file is successfully removed or
 *          <i>false</i> otherwise.
 */
public boolean clear() {
    Logger appLogger = mAppMgr.getLogger(this, "clear");

    appLogger.trace(mAppMgr.LOGMSG_TRACE_ENTER);

    mCrawlId = 0L;
    mCrawlType = StringUtils.EMPTY;
    mPhaseComplete.put(Connector.PHASE_EXTRACT, new AtomicBoolean(false));
    mPhaseComplete.put(Connector.PHASE_TRANSFORM, new AtomicBoolean(false));
    mPhaseComplete.put(Connector.PHASE_PUBLISH, new AtomicBoolean(false));
    mCrawlLastModified = DateUtils.addYears(new Date(), CRAWL_BEGINNING_OF_TIME);

    appLogger.trace(mAppMgr.LOGMSG_TRACE_DEPART);

    return releaseLock();
}
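
Note that clear() resets each phase by installing a fresh AtomicBoolean(false) rather than calling set(false) on the existing instance, so any thread still holding the old reference will not observe the reset. A minimal sketch of the completion-flag pattern being reset here (the names and plain string keys are illustrative stand-ins for the Connector.PHASE_* constants):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;

class PhaseFlagDemo {
    private final Map<String, AtomicBoolean> phaseComplete = new ConcurrentHashMap<>();

    PhaseFlagDemo() {
        phaseComplete.put("extract", new AtomicBoolean(false));
    }

    void finishExtract() {
        phaseComplete.get("extract").set(true);    // worker thread signals completion
    }

    boolean isExtractDone() {
        return phaseComplete.get("extract").get(); // coordinator polls the flag
    }
}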

From source file:com.arpnetworking.metrics.impl.ApacheHttpSinkTest.java

@Test
public void testPostFailure() throws InterruptedException {
    _wireMockRule.stubFor(WireMock.requestMatching(new RequestValueMatcher(r -> {
        // Annotations
        Assert.assertEquals(0, r.getAnnotationsCount());

        // Dimensions
        Assert.assertEquals(0, r.getDimensionsCount());

        // Samples
        Assert.assertEquals(0, r.getTimersCount());
        Assert.assertEquals(0, r.getCountersCount());
        Assert.assertEquals(0, r.getGaugesCount());
    })).willReturn(WireMock.aResponse().withStatus(400)));

    final AtomicBoolean assertionResult = new AtomicBoolean(false);
    final Semaphore semaphore = new Semaphore(0);
    final org.slf4j.Logger logger = Mockito.mock(org.slf4j.Logger.class);
    final Sink sink = new ApacheHttpSink(
            new ApacheHttpSink.Builder().setUri(URI.create("http://localhost:" + _wireMockRule.port() + PATH))
                    .setEventHandler(new AttemptCompletedAssertionHandler(assertionResult, 1, 2, false,
                            new CompletionHandler(semaphore))),
            logger);

    final TsdEvent event = new TsdEvent(ANNOTATIONS, TEST_EMPTY_SERIALIZATION_TIMERS,
            TEST_EMPTY_SERIALIZATION_COUNTERS, TEST_EMPTY_SERIALIZATION_GAUGES);

    sink.record(event);
    semaphore.acquire();

    // Ensure expected handler was invoked
    Assert.assertTrue(assertionResult.get());

    // Request matcher
    final RequestPatternBuilder requestPattern = WireMock.postRequestedFor(WireMock.urlEqualTo(PATH))
            .withHeader("Content-Type", WireMock.equalTo("application/octet-stream"));

    // Assert that data was sent
    _wireMockRule.verify(1, requestPattern);
    Assert.assertTrue(_wireMockRule.findUnmatchedRequests().getRequests().isEmpty());

    // Assert that an IOException was captured
    Mockito.verify(logger)
            .error(Mockito.startsWith("Received failure response when sending metrics to HTTP endpoint; uri="));
}

From source file:com.hurence.logisland.service.elasticsearch.Elasticsearch_5_4_0_ClientService.java

@Override
public void copyIndex(String reindexScrollTimeout, String srcIndex, String dstIndex) throws IOException {

    SearchResponse scrollResp = esClient.prepareSearch(srcIndex).setSearchType(SearchType.QUERY_THEN_FETCH)
            .setScroll(reindexScrollTimeout).setQuery(QueryBuilders.matchAllQuery()) // Match all query
            .setSize(100) // 100 hits per shard will be returned for each scroll
            .execute().actionGet();

    AtomicBoolean failed = new AtomicBoolean(false);

    // A user of a BulkProcessor just keeps adding requests to it, and the BulkProcessor itself decides when
    // to send a request to the ES nodes, based on its configuration settings. Sends can be triggered by number
    // of queued requests, total size of queued requests, and time since previous request. The defaults for
    // these settings are all sensible, so are not overridden here. The BulkProcessor has an internal threadpool
    // which allows it to send multiple batches concurrently; the default is "1", meaning that a single completed
    // batch can be sent in the background while a new batch is being built. When the non-active batch is
    // "full", the add call blocks until the background batch completes.

    while (true) {
        if (scrollResp.getHits().getHits().length == 0) {
            // No more results
            break;
        }

        for (SearchHit hit : scrollResp.getHits()) {
            IndexRequest request = new IndexRequest(dstIndex, hit.type(), hit.id());
            Map<String, Object> source = hit.getSource();
            request.source(source);
            bulkProcessor.add(request);
        }

        String scrollId = scrollResp.getScrollId();
        scrollResp = esClient.prepareSearchScroll(scrollId).setScroll(reindexScrollTimeout).execute()
                .actionGet();
    }

    getLogger().info("Reindex completed");
}
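
The failed flag above is declared but never touched in this excerpt; in the full source it is presumably flipped by the BulkProcessor's listener when a batch fails. A hedged sketch of such wiring, using the standard Elasticsearch 5.x BulkProcessor API (the listener body is an assumption, not the actual Logisland code; esClient comes from the surrounding class):

import java.util.concurrent.atomic.AtomicBoolean;

import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;

// inside the client service class:
final AtomicBoolean failed = new AtomicBoolean(false);
BulkProcessor bulkProcessor = BulkProcessor.builder(esClient, new BulkProcessor.Listener() {
    @Override
    public void beforeBulk(long executionId, BulkRequest request) {
        // nothing to do before a batch is sent
    }

    @Override
    public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
        if (response.hasFailures()) {
            failed.set(true); // record per-item failures without throwing from the callback
        }
    }

    @Override
    public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
        failed.set(true);     // the whole batch failed to execute
    }
}).setConcurrentRequests(1).build();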

From source file:com.netflix.spinnaker.front50.model.GcsStorageService.java

private AtomicBoolean scheduledUpdateLock(String daoTypeName) {
    return scheduledUpdateLockMap.computeIfAbsent(daoTypeName, (String s) -> new AtomicBoolean(false));
}

From source file:info.archinnov.achilles.it.TestDSLSimpleEntity.java

@Test
public void should_dsl_delete_if_exists() throws Exception {
    //Given
    final long id = RandomUtils.nextLong(0L, Long.MAX_VALUE);
    final Date date = buildDateKey();

    final AtomicBoolean error = new AtomicBoolean(false);
    final LWTResultListener lwtResultListener = new LWTResultListener() {

        @Override
        public void onSuccess() {

        }

        @Override
        public void onError(LWTResult lwtResult) {
            error.getAndSet(true);
        }
    };

    //When
    manager.dsl().delete().value().fromBaseTable().where().id_Eq(id).date_Eq(date).ifExists()
            .withResultSetAsyncListener(rs -> {
                assertThat(rs.wasApplied()).isFalse();
                return rs;
            }).withLwtResultListener(lwtResultListener).execute();
    //Then
    assertThat(error.get()).isTrue();
}

From source file:test.java.com.spotify.docker.client.DefaultDockerClientTest.java

@Test
public void testBuildNoRm() throws Exception {
    final String dockerDirectory = Resources.getResource("dockerDirectory").getPath();
    final String removingContainers = "Removing intermediate container";

    // Test that intermediate containers are removed with FORCE_RM by parsing output. We must
    // set NO_CACHE so that docker will generate some containers to remove.
    final AtomicBoolean removedContainer = new AtomicBoolean(false);
    sut.build(Paths.get(dockerDirectory), "test", new ProgressHandler() {
        @Override
        public void progress(ProgressMessage message) throws DockerException {
            if (containsIgnoreCase(message.stream(), removingContainers)) {
                removedContainer.set(true);
            }
        }
    }, NO_CACHE, FORCE_RM);
    assertTrue(removedContainer.get());

    // Set NO_RM and verify we don't get message that containers were removed.
    sut.build(Paths.get(dockerDirectory), "test", new ProgressHandler() {
        @Override
        public void progress(ProgressMessage message) throws DockerException {
            assertThat(message.stream(), not(containsString(removingContainers)));
        }
    }, NO_CACHE, NO_RM);
}