Example usage for java.util.concurrent.atomic AtomicBoolean set

List of usage examples for java.util.concurrent.atomic AtomicBoolean set

Introduction

On this page you can find example usage for java.util.concurrent.atomic AtomicBoolean set.

Prototype

public final void set(boolean newValue) 

Document

Sets the value to newValue, with memory effects as specified by VarHandle#setVolatile.
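
As a quick illustration before the real-world examples below (a minimal sketch, not drawn from any of the projects listed), set performs a volatile write, so a flag set in one thread becomes visible to readers in other threads without extra synchronization:

import java.util.concurrent.atomic.AtomicBoolean;

public class AtomicBooleanSetDemo {
    public static void main(String[] args) {
        final AtomicBoolean done = new AtomicBoolean(false);

        // set(true) is a volatile write; the spinning reader below will see it.
        Thread worker = new Thread(() -> done.set(true));
        worker.start();

        while (!done.get()) {    // volatile read
            Thread.onSpinWait(); // Java 9+; a plain empty loop also works
        }
        System.out.println("flag observed as true");
    }
}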

Usage

From source file:com.spotify.docker.client.DefaultDockerClientTest.java

@Test
public void testBuildWithPull() throws Exception {
    assumeTrue("We need Docker API >= v1.19 to run this test. " + "This Docker API is "
            + sut.version().apiVersion(), compareVersion(sut.version().apiVersion(), "1.19") >= 0);

    final String dockerDirectory = Resources.getResource("dockerDirectory").getPath();
    final String pullMsg = "Pulling from";

    // Build once to make sure we have cached images.
    sut.build(Paths.get(dockerDirectory));

    // Build again with PULL set, and verify we pulled the base image
    final AtomicBoolean pulled = new AtomicBoolean(false);
    sut.build(Paths.get(dockerDirectory), "test", new ProgressHandler() {
        @Override
        public void progress(ProgressMessage message) throws DockerException {
            if (!isNullOrEmpty(message.status()) && message.status().contains(pullMsg)) {
                pulled.set(true);
            }
        }
    }, PULL_NEWER_IMAGE);
    assertTrue(pulled.get());
}
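
The test sets a flag from inside an anonymous callback and asserts it after the call returns; this is the most common use of AtomicBoolean#set in test code. A stripped-down sketch of the same pattern, with a hypothetical Callback interface standing in for ProgressHandler:

import java.util.concurrent.atomic.AtomicBoolean;

interface Callback {                                  // stand-in for ProgressHandler
    void onEvent(String message);
}

public class CallbackFlagSketch {
    static void runWithCallback(Callback cb) {
        cb.onEvent("Pulling from example/base");      // simulated progress event
    }

    public static void main(String[] args) {
        final AtomicBoolean pulled = new AtomicBoolean(false);
        runWithCallback(message -> {
            if (message.contains("Pulling from")) {
                pulled.set(true);                     // record that the event fired
            }
        });
        if (!pulled.get()) {                          // equivalent of assertTrue(pulled.get())
            throw new AssertionError("expected a pull event");
        }
    }
}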

From source file:com.spectralogic.ds3client.integration.GetJobManagement_Test.java

private void doReadJobWithJobStarter(final ReadJobStarter readJobStarter) throws IOException,
        URISyntaxException, NoSuchMethodException, IllegalAccessException, InvocationTargetException {
    final String tempPathPrefix = null;
    final Path tempDirectory = Files.createTempDirectory(Paths.get("."), tempPathPrefix);

    try {
        final String DIR_NAME = "largeFiles/";
        final String FILE_NAME = "lesmis.txt";

        final Path objPath = ResourceUtils.loadFileResource(DIR_NAME + FILE_NAME);
        final long bookSize = Files.size(objPath);
        final Ds3Object obj = new Ds3Object(FILE_NAME, bookSize);

        final Ds3ClientShim ds3ClientShim = new Ds3ClientShim((Ds3ClientImpl) client);

        final int maxNumBlockAllocationRetries = 1;
        final int maxNumObjectTransferAttempts = 3;
        final Ds3ClientHelpers ds3ClientHelpers = Ds3ClientHelpers.wrap(ds3ClientShim,
                maxNumBlockAllocationRetries, maxNumObjectTransferAttempts);

        final Ds3ClientHelpers.Job readJob = readJobStarter.startReadJob(ds3ClientHelpers, BUCKET_NAME,
                Arrays.asList(obj));

        final AtomicBoolean dataTransferredEventReceived = new AtomicBoolean(false);
        final AtomicBoolean objectCompletedEventReceived = new AtomicBoolean(false);
        final AtomicBoolean checksumEventReceived = new AtomicBoolean(false);
        final AtomicBoolean metadataEventReceived = new AtomicBoolean(false);
        final AtomicBoolean waitingForChunksEventReceived = new AtomicBoolean(false);
        final AtomicBoolean failureEventReceived = new AtomicBoolean(false);

        readJob.attachDataTransferredListener(new DataTransferredListener() {
            @Override
            public void dataTransferred(final long size) {
                dataTransferredEventReceived.set(true);
                assertEquals(bookSize, size);
            }
        });
        readJob.attachObjectCompletedListener(new ObjectCompletedListener() {
            @Override
            public void objectCompleted(final String name) {
                objectCompletedEventReceived.set(true);
            }
        });
        readJob.attachChecksumListener(new ChecksumListener() {
            @Override
            public void value(final BulkObject obj, final ChecksumType.Type type, final String checksum) {
                checksumEventReceived.set(true);
                assertEquals("69+JXWeZuzl2HFTM6Lbo8A==", checksum);
            }
        });
        readJob.attachMetadataReceivedListener(new MetadataReceivedListener() {
            @Override
            public void metadataReceived(final String filename, final Metadata metadata) {
                metadataEventReceived.set(true);
            }
        });
        readJob.attachWaitingForChunksListener(new WaitingForChunksListener() {
            @Override
            public void waiting(final int secondsToWait) {
                waitingForChunksEventReceived.set(true);
            }
        });
        readJob.attachFailureEventListener(new FailureEventListener() {
            @Override
            public void onFailure(final FailureEvent failureEvent) {
                failureEventReceived.set(true);
            }
        });

        readJob.transfer(new FileObjectGetter(tempDirectory));

        final File originalFile = ResourceUtils.loadFileResource(DIR_NAME + FILE_NAME).toFile();
        final File fileCopiedFromBP = Paths.get(tempDirectory.toString(), FILE_NAME).toFile();
        assertTrue(FileUtils.contentEquals(originalFile, fileCopiedFromBP));

        assertTrue(dataTransferredEventReceived.get());
        assertTrue(objectCompletedEventReceived.get());
        assertTrue(checksumEventReceived.get());
        assertTrue(metadataEventReceived.get());
        assertFalse(waitingForChunksEventReceived.get());
        assertFalse(failureEventReceived.get());
    } finally {
        FileUtils.deleteDirectory(tempDirectory.toFile());
    }
}
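
Two of the flags above are asserted false at the end: an AtomicBoolean initialized to false and only ever set(true) also works as a detector for events that must not happen. A minimal sketch of that negative check, with a hypothetical transfer method:

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;

public class NegativeFlagSketch {
    // Hypothetical operation that invokes the handler only on failure.
    static void transfer(boolean fail, Consumer<Exception> onFailure) {
        if (fail) {
            onFailure.accept(new RuntimeException("boom"));
        }
    }

    public static void main(String[] args) {
        final AtomicBoolean failureSeen = new AtomicBoolean(false);
        transfer(false, e -> failureSeen.set(true));
        if (failureSeen.get()) {                 // mirrors assertFalse(...) above
            throw new AssertionError("no failure event expected");
        }
    }
}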

From source file:com.spectralogic.ds3client.integration.GetJobManagement_Test.java

@Test
public void createReadJobWithBigFile() throws IOException, URISyntaxException, NoSuchMethodException,
        IllegalAccessException, InvocationTargetException {
    final String tempPathPrefix = null;
    final Path tempDirectory = Files.createTempDirectory(Paths.get("."), tempPathPrefix);

    try {
        final String DIR_NAME = "largeFiles/";
        final String FILE_NAME = "lesmis-copies.txt";

        final Path objPath = ResourceUtils.loadFileResource(DIR_NAME + FILE_NAME);
        final long bookSize = Files.size(objPath);
        final Ds3Object obj = new Ds3Object(FILE_NAME, bookSize);

        final Ds3ClientShim ds3ClientShim = new Ds3ClientShim((Ds3ClientImpl) client);

        final int maxNumBlockAllocationRetries = 1;
        final int maxNumObjectTransferAttempts = 3;
        final Ds3ClientHelpers ds3ClientHelpers = Ds3ClientHelpers.wrap(ds3ClientShim,
                maxNumBlockAllocationRetries, maxNumObjectTransferAttempts);

        final Ds3ClientHelpers.Job readJob = ds3ClientHelpers.startReadJob(BUCKET_NAME, Arrays.asList(obj));

        final AtomicBoolean dataTransferredEventReceived = new AtomicBoolean(false);
        final AtomicBoolean objectCompletedEventReceived = new AtomicBoolean(false);
        final AtomicBoolean checksumEventReceived = new AtomicBoolean(false);
        final AtomicBoolean metadataEventReceived = new AtomicBoolean(false);
        final AtomicBoolean waitingForChunksEventReceived = new AtomicBoolean(false);
        final AtomicBoolean failureEventReceived = new AtomicBoolean(false);

        readJob.attachDataTransferredListener(new DataTransferredListener() {
            @Override
            public void dataTransferred(final long size) {
                dataTransferredEventReceived.set(true);
                assertEquals(bookSize, size);
            }
        });
        readJob.attachObjectCompletedListener(new ObjectCompletedListener() {
            @Override
            public void objectCompleted(final String name) {
                objectCompletedEventReceived.set(true);
            }
        });
        readJob.attachChecksumListener(new ChecksumListener() {
            @Override
            public void value(final BulkObject obj, final ChecksumType.Type type, final String checksum) {
                checksumEventReceived.set(true);
                assertEquals("0feqCQBgdtmmgGs9pB/Huw==", checksum);
            }
        });
        readJob.attachMetadataReceivedListener(new MetadataReceivedListener() {
            @Override
            public void metadataReceived(final String filename, final Metadata metadata) {
                metadataEventReceived.set(true);
            }
        });
        readJob.attachWaitingForChunksListener(new WaitingForChunksListener() {
            @Override
            public void waiting(final int secondsToWait) {
                waitingForChunksEventReceived.set(true);
            }
        });
        readJob.attachFailureEventListener(new FailureEventListener() {
            @Override
            public void onFailure(final FailureEvent failureEvent) {
                failureEventReceived.set(true);
            }
        });

        final GetJobSpectraS3Response jobSpectraS3Response = ds3ClientShim
                .getJobSpectraS3(new GetJobSpectraS3Request(readJob.getJobId()));

        assertThat(jobSpectraS3Response.getMasterObjectListResult(), is(notNullValue()));

        readJob.transfer(new FileObjectGetter(tempDirectory));

        final File originalFile = ResourceUtils.loadFileResource(DIR_NAME + FILE_NAME).toFile();
        final File fileCopiedFromBP = Paths.get(tempDirectory.toString(), FILE_NAME).toFile();
        assertTrue(FileUtils.contentEquals(originalFile, fileCopiedFromBP));

        assertTrue(dataTransferredEventReceived.get());
        assertTrue(objectCompletedEventReceived.get());
        assertTrue(checksumEventReceived.get());
        assertTrue(metadataEventReceived.get());
        assertFalse(waitingForChunksEventReceived.get());
        assertFalse(failureEventReceived.get());
    } finally {
        FileUtils.deleteDirectory(tempDirectory.toFile());
    }
}

From source file:org.apache.hadoop.hbase.regionserver.TestHStore.java

@Test
public void testFlushBeforeCompletingScanWoFilter() throws IOException, InterruptedException {
    final AtomicBoolean timeToGoNextRow = new AtomicBoolean(false);
    final int expectedSize = 3;
    testFlushBeforeCompletingScan(new MyListHook() {
        @Override
        public void hook(int currentSize) {
            if (currentSize == expectedSize - 1) {
                try {
                    flushStore(store, id++);
                    timeToGoNextRow.set(true);
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            }
        }
    }, new FilterBase() {
        @Override
        public Filter.ReturnCode filterKeyValue(Cell v) throws IOException {
            return ReturnCode.INCLUDE;
        }
    }, expectedSize);
}
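
Here the hook calls set(true) once the flush has happened, and a separate code path (the filter, in the variants below) reads the flag to change behavior. A generic sketch of that producer/consumer handoff, with nothing HBase-specific assumed:

import java.util.concurrent.atomic.AtomicBoolean;

public class HookFlagSketch {
    public static void main(String[] args) {
        final AtomicBoolean advance = new AtomicBoolean(false);

        Runnable hook = () -> advance.set(true); // producer: runs after the flush

        hook.run();
        if (advance.get()) {                     // consumer: reacts to the flag
            System.out.println("state changed: move to the next row");
        }
    }
}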

From source file:com.streamsets.pipeline.lib.jdbc.multithread.TestMultithreadedTableProvider.java

@Test
public void tableAndSchemasFinished() throws InterruptedException, StageException {
    String schema1 = "schema1";
    String table1Name = "table1";
    String table2Name = "table2";
    String schema2 = "schema2";
    String table3Name = "table3";

    String offsetCol = null;
    final String partitionSize = null;
    int maxActivePartitions = 0;
    int threadNumber = 0;
    int numThreads = 1;

    TableContext tableContext1 = createTableContext(schema1, table1Name, offsetCol, partitionSize,
            maxActivePartitions, false);
    TableContext tableContext2 = createTableContext(schema1, table2Name, offsetCol, partitionSize,
            maxActivePartitions, false);
    TableContext tableContext3 = createTableContext(schema2, table3Name, offsetCol, partitionSize,
            maxActivePartitions, false);

    Map<String, TableContext> tableContextMap = new HashMap<>();

    tableContextMap.put(tableContext1.getQualifiedName(), tableContext1);
    tableContextMap.put(tableContext2.getQualifiedName(), tableContext2);
    tableContextMap.put(tableContext3.getQualifiedName(), tableContext3);
    Queue<String> sortedTableOrder = new LinkedList<>();

    sortedTableOrder.add(tableContext1.getQualifiedName());
    sortedTableOrder.add(tableContext2.getQualifiedName());
    sortedTableOrder.add(tableContext3.getQualifiedName());

    Map<Integer, Integer> threadNumToMaxTableSlots = new HashMap<>();

    BatchTableStrategy batchTableStrategy = BatchTableStrategy.PROCESS_ALL_AVAILABLE_ROWS_FROM_TABLE;
    MultithreadedTableProvider provider = new MultithreadedTableProvider(tableContextMap, sortedTableOrder,
            threadNumToMaxTableSlots, numThreads, batchTableStrategy);

    assertThat(provider.getRemainingSchemasToTableContexts().size(), equalTo(3));

    TableRuntimeContext table1 = provider.nextTable(threadNumber);
    assertEquals(table1Name, table1.getSourceTableContext().getTableName());

    assertThat(provider.getRemainingSchemasToTableContexts().size(), equalTo(3));
    // there should be two tables remaining in schema1 (table1 and table2)
    assertThat(provider.getRemainingSchemasToTableContexts().get(schema1).size(), equalTo(2));
    // and one remaining in schema2 (table3)
    assertThat(provider.getRemainingSchemasToTableContexts().get(schema2).size(), equalTo(1));

    final AtomicBoolean tableFinished = new AtomicBoolean(false);
    final AtomicBoolean schemaFinished = new AtomicBoolean(false);
    final List<String> schemaFinishedTables = new LinkedList<>();

    // finish table1
    provider.reportDataOrNoMoreData(table1, 10, 10, true, tableFinished, schemaFinished, schemaFinishedTables);

    // table should be finished
    assertTrue(tableFinished.get());

    // schema should not
    assertFalse(schemaFinished.get());
    assertThat(schemaFinishedTables, empty());
    assertThat(provider.getTablesWithNoMoreData().size(), equalTo(1));

    // there should be a total of two remaining entries in the map
    assertThat(provider.getRemainingSchemasToTableContexts().size(), equalTo(2));
    // one of which is in schema1
    assertThat(provider.getRemainingSchemasToTableContexts().get(schema1).size(), equalTo(1));
    // and one of which is in schema2
    assertThat(provider.getRemainingSchemasToTableContexts().get(schema2).size(), equalTo(1));

    provider.releaseOwnedTable(table1, 1);
    tableFinished.set(false);
    schemaFinished.set(false);
    schemaFinishedTables.clear();

    TableRuntimeContext table2 = provider.nextTable(threadNumber);
    assertEquals(table2Name, table2.getSourceTableContext().getTableName());

    // finish table2
    provider.reportDataOrNoMoreData(table2, 10, 10, true, tableFinished, schemaFinished, schemaFinishedTables);

    // table should be finished
    assertTrue(tableFinished.get());
    // as should the schema this time
    assertTrue(schemaFinished.get());
    assertThat(schemaFinishedTables, hasSize(2));
    assertThat(provider.getTablesWithNoMoreData().size(), equalTo(2));
    // there should only be one entry left now
    assertThat(provider.getRemainingSchemasToTableContexts().size(), equalTo(1));
    assertTrue(provider.getRemainingSchemasToTableContexts().get(schema1).isEmpty());
    // which is for schema2
    assertThat(provider.getRemainingSchemasToTableContexts().get(schema2).size(), equalTo(1));

    provider.releaseOwnedTable(table2, 1);
    tableFinished.set(false);
    schemaFinished.set(false);
    schemaFinishedTables.clear();

    TableRuntimeContext table3 = provider.nextTable(threadNumber);
    assertEquals(table3Name, table3.getSourceTableContext().getTableName());

    // suppose we did NOT actually reach the end of table3, in which case the conditions should be the same as above
    provider.reportDataOrNoMoreData(table3, 10, 10, false, tableFinished, schemaFinished, schemaFinishedTables);

    // now neither the table
    assertFalse(tableFinished.get());
    // nor schema should be finished
    assertFalse(schemaFinished.get());
    assertThat(schemaFinishedTables, empty());
    // and entries in the map should be the same as above
    assertThat(provider.getTablesWithNoMoreData().size(), equalTo(2));
    assertThat(provider.getRemainingSchemasToTableContexts().size(), equalTo(1));
    assertTrue(provider.getRemainingSchemasToTableContexts().get(schema1).isEmpty());

    provider.releaseOwnedTable(table3, 1);
    tableFinished.set(false);
    schemaFinished.set(false);
    schemaFinishedTables.clear();

    // cycle through table1 and table2 again
    table1 = provider.nextTable(threadNumber);
    provider.releaseOwnedTable(table1, 1);
    table2 = provider.nextTable(threadNumber);
    provider.releaseOwnedTable(table2, 1);

    // and get back to table3
    table3 = provider.nextTable(threadNumber);
    assertEquals(table3Name, table3.getSourceTableContext().getTableName());

    // now suppose we have finally finished table3
    provider.reportDataOrNoMoreData(table3, 3, 10, true, tableFinished, schemaFinished, schemaFinishedTables);

    // both table
    assertTrue(tableFinished.get());
    // and schema should be finished
    assertTrue(schemaFinished.get());
    assertThat(schemaFinishedTables, hasSize(1));
    assertThat(provider.getTablesWithNoMoreData().size(), equalTo(3));
    // there should now be no more entries in this map
    assertTrue(provider.getRemainingSchemasToTableContexts().isEmpty());

    provider.releaseOwnedTable(table3, 1);

    assertTrue(provider.shouldGenerateNoMoreDataEvent());

}
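
reportDataOrNoMoreData treats its AtomicBoolean arguments as out-parameters, and the test resets them with set(false) before each phase. A minimal sketch of that reporting style, using a hypothetical report method:

import java.util.concurrent.atomic.AtomicBoolean;

public class OutParamSketch {
    // Hypothetical reporter that writes its result into the caller's flag.
    static void report(boolean noMoreData, AtomicBoolean tableFinished) {
        if (noMoreData) {
            tableFinished.set(true);
        }
    }

    public static void main(String[] args) {
        final AtomicBoolean tableFinished = new AtomicBoolean(false);

        report(true, tableFinished);
        if (!tableFinished.get()) {
            throw new AssertionError("expected the table to finish");
        }

        tableFinished.set(false);               // reset before the next phase
        report(false, tableFinished);
        if (tableFinished.get()) {
            throw new AssertionError("table should not be finished");
        }
    }
}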

From source file:org.apache.hadoop.hbase.regionserver.TestHStore.java

@Test
public void testFlushBeforeCompletingScanWithFilter() throws IOException, InterruptedException {
    final AtomicBoolean timeToGoNextRow = new AtomicBoolean(false);
    final int expectedSize = 2;
    testFlushBeforeCompletingScan(new MyListHook() {
        @Override
        public void hook(int currentSize) {
            if (currentSize == expectedSize - 1) {
                try {
                    flushStore(store, id++);
                    timeToGoNextRow.set(true);
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            }
        }
    }, new FilterBase() {
        @Override
        public Filter.ReturnCode filterKeyValue(Cell v) throws IOException {
            if (timeToGoNextRow.get()) {
                timeToGoNextRow.set(false);
                return ReturnCode.NEXT_ROW;
            } else {
                return ReturnCode.INCLUDE;
            }
        }
    }, expectedSize);
}
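
The filter reads the flag and immediately clears it with set(false), so the signal is consumed exactly once; that is safe as long as only one thread touches the flag. If several threads could race on it, getAndSet(false) reads and clears in one atomic step instead. A sketch of both forms:

import java.util.concurrent.atomic.AtomicBoolean;

public class ConsumeOnceSketch {
    public static void main(String[] args) {
        final AtomicBoolean signal = new AtomicBoolean(true);

        // Two-step consume: fine when only one thread touches the flag.
        if (signal.get()) {
            signal.set(false);
            System.out.println("consumed (get + set)");
        }

        signal.set(true);

        // Atomic consume: read and clear in a single operation.
        if (signal.getAndSet(false)) {
            System.out.println("consumed (getAndSet)");
        }
    }
}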

From source file:org.apache.hadoop.hbase.regionserver.TestHStore.java

@Test
public void testFlushBeforeCompletingScanWithFilterHint() throws IOException, InterruptedException {
    final AtomicBoolean timeToGetHint = new AtomicBoolean(false);
    final int expectedSize = 2;
    testFlushBeforeCompletingScan(new MyListHook() {
        @Override
        public void hook(int currentSize) {
            if (currentSize == expectedSize - 1) {
                try {
                    flushStore(store, id++);
                    timeToGetHint.set(true);
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            }
        }
    }, new FilterBase() {
        @Override
        public Filter.ReturnCode filterKeyValue(Cell v) throws IOException {
            if (timeToGetHint.get()) {
                timeToGetHint.set(false);
                return Filter.ReturnCode.SEEK_NEXT_USING_HINT;
            } else {
                return Filter.ReturnCode.INCLUDE;
            }
        }

        @Override
        public Cell getNextCellHint(Cell currentCell) throws IOException {
            return currentCell;
        }
    }, expectedSize);
}

From source file:com.github.jackygurui.vertxredissonrepository.repository.Impl.RedisRepositoryImpl.java

private void persistBlocking(String id, JsonObject data, RBatch redissonBatch,
        Handler<AsyncResult<Boolean>> resultHandler) {
    RBatch batch = redissonBatch == null ? redissonWrite.createBatch() : redissonBatch;
    AtomicBoolean failed = new AtomicBoolean(false);
    try {
        BeanMap pMap = new BeanMap(cls.newInstance());
        //remove the indexes;
        if (isRedisEntity()) {
            AtomicBoolean finished = new AtomicBoolean(false);
            AtomicBoolean hasNested = new AtomicBoolean(false);
            AtomicLong stack = new AtomicLong();
            pMap.forEach((k, v) -> {
                if ("class".equals(k)) {
                    return;
                }
                Class<?> type = pMap.getType((String) k);
                if (!isRedisEntity(type)) {
                    //recreate the indexes;
                    if ("id".equals(k)) {
                        batch.getMap(getStorageKey(), StringCodec.INSTANCE).fastPutAsync(id, id);
                    } else {
                        batch.getMap(getStorageKey((String) k)).fastPutAsync(id, data.getValue((String) k));
                    }
                } else {
                    hasNested.set(true);
                    stack.incrementAndGet();
                    RedisRepositoryImpl<?> innerRepo;
                    try {
                        innerRepo = (RedisRepositoryImpl) factory.instance(type);
                    } catch (RepositoryException e) {
                        throw new RuntimeException(e);
                    }
                    JsonObject value = data.getJsonObject((String) k);
                    final boolean newOne = !value.containsKey("id") || value.getString("id") == null
                            || "null".equals(value.getString("id"));
                    final String ID = newOne ? id : value.getString("id");
                    // Make the nested entity share the parent's id in a 1:1 relation. This makes
                    // fetches much faster, since the reference does not need to be resolved when
                    // fetching 1:1 nested objects.
                    innerRepo.persist(ID, value, batch, c -> {
                        if (c.succeeded()) {
                            long s = stack.decrementAndGet();
                            if (newOne) {
                                // Unlike update, create needs to add the reference field to the batch.
                                batch.getMap(getStorageKey((String) k)).fastPutAsync(id, ID);
                            }
                            if (s == 0 && finished.get() && !failed.get()) { // finished iterating, no outstanding work
                                if (redissonBatch == null) { // not inside a nested process
                                    finishPersist(id, data, batch, resultHandler);
                                } else { // inside a nested process
                                    resultHandler.handle(Future.succeededFuture(true));
                                }
                            }
                            // else wait for the other nested persists to complete
                        } else {
                        } else {
                            boolean firstToFail = failed.compareAndSet(false, true);
                            if (firstToFail) {
                                resultHandler.handle(Future.failedFuture(c.cause()));
                            }
                        }
                    });
                }
            });
            batch.getAtomicLongAsync(getCounterKey()).incrementAndGetAsync();
            finished.set(true);
            if (!hasNested.get()) { // no nested RedissonEntity within
                if (redissonBatch == null) { // not inside a nested process
                    finishPersist(id, data, batch, resultHandler);
                } else { // inside a nested process
                    resultHandler.handle(Future.succeededFuture(true));
                }
            }
        } else { // not a RedissonEntity class; persist as a JSON string
            //recreate the indexes;
            batch.<String, String>getMap(getStorageKey(), StringCodec.INSTANCE).fastPutAsync(id,
                    Json.encode(data));
            batch.getAtomicLongAsync(getCounterKey()).incrementAndGetAsync();
            if (redissonBatch == null) { // not inside a nested process
                finishPersist(id, data, batch, resultHandler);
            } else { // inside a nested process
                resultHandler.handle(Future.succeededFuture(true));
            }
        }
    } catch (InstantiationException | IllegalAccessException | RuntimeException ex) {
        failed.set(true);
        resultHandler.handle(Future.failedFuture(ex));
    }
}
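
Note the failed.compareAndSet(false, true) call in the failure branch: it guarantees that exactly one of the concurrent callbacks reports the failure, whereas a plain failed.set(true) could not tell the first failure from the rest. A small sketch of that first-to-fail pattern:

import java.util.concurrent.atomic.AtomicBoolean;

public class FirstToFailSketch {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean failed = new AtomicBoolean(false);

        Runnable failingTask = () -> {
            // Only the first thread to flip the flag wins.
            if (failed.compareAndSet(false, true)) {
                System.out.println(Thread.currentThread().getName() + " reported the failure");
            }
        };

        Thread t1 = new Thread(failingTask, "t1");
        Thread t2 = new Thread(failingTask, "t2");
        t1.start();
        t2.start();
        t1.join();
        t2.join();
    }
}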

From source file:org.apache.pulsar.broker.service.persistent.PersistentTopic.java

protected boolean addReplicationCluster(String remoteCluster, PersistentTopic persistentTopic,
        ManagedCursor cursor, String localCluster) {
    AtomicBoolean isReplicatorStarted = new AtomicBoolean(true);
    replicators.computeIfAbsent(remoteCluster, r -> {
        try {
            return new PersistentReplicator(PersistentTopic.this, cursor, localCluster, remoteCluster,
                    brokerService);
        } catch (NamingException e) {
            isReplicatorStarted.set(false);
            log.error("[{}] Replicator startup failed due to partitioned-topic {}", topic, remoteCluster);
        }
        return null;
    });
    // clean up the replicator if startup failed
    if (!isReplicatorStarted.get()) {
        replicators.remove(remoteCluster);
    }
    return isReplicatorStarted.get();
}
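
Because a lambda can only capture effectively final locals, the method uses an AtomicBoolean as a mutable result slot that the computeIfAbsent lambda can write to. A generic sketch of that idiom (the map, key, and values are hypothetical):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;

public class LambdaResultFlagSketch {
    public static void main(String[] args) {
        ConcurrentHashMap<String, String> cache = new ConcurrentHashMap<>();
        AtomicBoolean created = new AtomicBoolean(true);  // assume success up front

        cache.computeIfAbsent("key", k -> {
            try {
                return "value-for-" + k;                  // may fail in real code
            } catch (RuntimeException e) {
                created.set(false);                       // report failure to the caller
                return null;                              // null leaves no mapping
            }
        });

        if (!created.get()) {
            cache.remove("key");                          // clean up, as the broker does
        }
        System.out.println("created = " + created.get());
    }
}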