Example usage for java.util.concurrent.atomic AtomicReference compareAndSet

List of usage examples for java.util.concurrent.atomic AtomicReference compareAndSet

Introduction

This page collects example usages of java.util.concurrent.atomic AtomicReference compareAndSet, taken from open-source projects.

Prototype

public final boolean compareAndSet(V expectedValue, V newValue) 

Document

Atomically sets the value to newValue if the current value == expectedValue, with memory effects as specified by VarHandle#compareAndSet. Returns true if the update succeeded, or false if the actual value was not equal to the expected value.
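
As a quick orientation before the project examples below, here is a minimal, self-contained sketch (the class and field names are illustrative) showing that compareAndSet applies the update only when the current value matches the expected value, and reports the outcome through its boolean return:

import java.util.concurrent.atomic.AtomicReference;

public class CompareAndSetSketch {
    private static final AtomicReference<String> OWNER = new AtomicReference<>();

    public static void main(String[] args) {
        boolean first = OWNER.compareAndSet(null, "worker-1");  // true: current value was null
        boolean second = OWNER.compareAndSet(null, "worker-2"); // false: current value is now "worker-1"
        System.out.println(first + " " + second + " owner=" + OWNER.get());
    }
}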

Usage

From source file:org.apache.bookkeeper.mledger.impl.ManagedCursorImpl.java

/**
 * Async replays the given positions:
 * a. filters out messages that are already acknowledged before reading,
 * b. reads the remaining entries asynchronously and hands them to the given ReadEntriesCallback,
 * c. returns all already-acknowledged positions that were not replayed, so the caller (Dispatcher) can
 *    remove them from its replay list and avoid replaying them again.
 */
@Override
public Set<? extends Position> asyncReplayEntries(final Set<? extends Position> positions,
        ReadEntriesCallback callback, Object ctx) {
    List<Entry> entries = Lists.newArrayListWithExpectedSize(positions.size());
    if (positions.isEmpty()) {
        callback.readEntriesComplete(entries, ctx);
    }

    // filters out messages which are already acknowledged
    Set<Position> alreadyAcknowledgedPositions = Sets.newHashSet();
    lock.readLock().lock();
    try {
        positions.stream()
                .filter(position -> individualDeletedMessages.contains((PositionImpl) position)
                        || ((PositionImpl) position).compareTo(markDeletePosition) < 0)
                .forEach(alreadyAcknowledgedPositions::add);
    } finally {
        lock.readLock().unlock();
    }

    final int totalValidPositions = positions.size() - alreadyAcknowledgedPositions.size();
    final AtomicReference<ManagedLedgerException> exception = new AtomicReference<>();
    ReadEntryCallback cb = new ReadEntryCallback() {
        int pendingCallbacks = totalValidPositions;

        @Override
        public synchronized void readEntryComplete(Entry entry, Object ctx) {
            if (exception.get() != null) {
                // if there is already a failure for a different position, we should release the entry straight away
                // and not add it to the list
                entry.release();
                if (--pendingCallbacks == 0) {
                    callback.readEntriesFailed(exception.get(), ctx);
                }
            } else {
                entries.add(entry);
                if (--pendingCallbacks == 0) {
                    callback.readEntriesComplete(entries, ctx);
                }
            }
        }

        @Override
        public synchronized void readEntryFailed(ManagedLedgerException mle, Object ctx) {
            log.warn("[{}][{}] Error while replaying entries", ledger.getName(), name, mle);
            if (exception.compareAndSet(null, mle)) {
                // release the entries just once, any further read success will release the entry straight away
                entries.forEach(Entry::release);
            }
            if (--pendingCallbacks == 0) {
                callback.readEntriesFailed(exception.get(), ctx);
            }
        }
    };

    positions.stream().filter(position -> !alreadyAcknowledgedPositions.contains(position))
            .forEach(p -> ledger.asyncReadEntry((PositionImpl) p, cb, ctx));

    return alreadyAcknowledgedPositions;
}
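
The readEntryFailed callback above uses a common idiom: exception.compareAndSet(null, mle) latches only the first failure observed by the concurrent read callbacks, and its boolean return tells exactly one callback that it "won" and may perform the one-time cleanup (releasing the entries collected so far). A stripped-down sketch of that idiom, with hypothetical names, might look like this:

import java.util.concurrent.atomic.AtomicReference;

class FirstErrorLatch {
    private final AtomicReference<Throwable> firstError = new AtomicReference<>();

    /** Records the error only if none has been recorded yet; returns true for the "winning" caller. */
    boolean record(Throwable t) {
        return firstError.compareAndSet(null, t);
    }

    Throwable get() {
        return firstError.get();
    }
}

Each failing callback calls record(t); whichever call returns true performs the one-time cleanup, and the callback that decrements the pending counter to zero reports get() to the caller.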

From source file:org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.java

@Override
public void asyncDelete(final DeleteLedgerCallback callback, final Object ctx) {
    // Delete the managed ledger without closing, since we are not interested in gracefully closing cursors and
    // ledgers
    STATE_UPDATER.set(this, State.Fenced);

    List<ManagedCursor> cursors = Lists.newArrayList(this.cursors);
    if (cursors.isEmpty()) {
        // No cursors to delete, proceed with next step
        deleteAllLedgers(callback, ctx);
        return;
    }

    AtomicReference<ManagedLedgerException> cursorDeleteException = new AtomicReference<>();
    AtomicInteger cursorsToDelete = new AtomicInteger(cursors.size());
    for (ManagedCursor cursor : cursors) {
        asyncDeleteCursor(cursor.getName(), new DeleteCursorCallback() {
            @Override
            public void deleteCursorComplete(Object ctx) {
                if (cursorsToDelete.decrementAndGet() == 0) {
                    if (cursorDeleteException.get() != null) {
                        // Some cursor failed to delete
                        callback.deleteLedgerFailed(cursorDeleteException.get(), ctx);
                        return;
                    }

                    // All cursors deleted, continue with deleting all ledgers
                    deleteAllLedgers(callback, ctx);
                }
            }

            @Override
            public void deleteCursorFailed(ManagedLedgerException exception, Object ctx) {
                log.warn("[{}] Failed to delete cursor {}", name, cursor, exception);
                cursorDeleteException.compareAndSet(null, exception);
                if (cursorsToDelete.decrementAndGet() == 0) {
                    // Trigger callback only once
                    callback.deleteLedgerFailed(exception, ctx);
                }
            }
        }, null);
    }
}
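
Here compareAndSet(null, exception) merely latches the first cursor-deletion failure, while the separate AtomicInteger countdown guarantees that exactly one callback, the last one to complete, either reports that failure via deleteLedgerFailed or moves on to deleteAllLedgers.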

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

private void checkAndGetTableName(byte[] encodeRegionName, AtomicReference<TableName> tableName,
        CompletableFuture<TableName> result) {
    addListener(getRegionLocation(encodeRegionName), (location, err) -> {
        if (err != null) {
            result.completeExceptionally(err);
            return;
        }
        RegionInfo regionInfo = location.getRegion();
        if (regionInfo.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
            result.completeExceptionally(
                    new IllegalArgumentException("Can't invoke merge on non-default regions directly"));
            return;
        }
        if (!tableName.compareAndSet(null, regionInfo.getTable())) {
            if (!tableName.get().equals(regionInfo.getTable())) {
                // the tables of the two regions should be the same
                result.completeExceptionally(
                        new IllegalArgumentException("Cannot merge regions from two different tables "
                                + tableName.get() + " and " + regionInfo.getTable()));
            } else {
                result.complete(tableName.get());
            }
        }
    });
}
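
This example is one of the few that acts on the boolean result directly: the call for the first of the two regions publishes the table name and returns without completing the future, while the call for the second region sees compareAndSet fail and then either rejects the merge (different tables) or completes the future with the shared table name.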

From source file:org.apache.hadoop.hbase.regionserver.TestRegionReplicaFailover.java

/**
 * Tests the case where there are 3 region replicas and the primary is continuously accepting
 * new writes while one of the secondaries is killed. Verification is done for both of the
 * secondary replicas.
 */
@Test(timeout = 120000)
public void testSecondaryRegionKillWhilePrimaryIsAcceptingWrites() throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
            Table table = connection.getTable(htd.getTableName());
            Admin admin = connection.getAdmin()) {
        // start a thread to do the loading of primary
        HTU.loadNumericRows(table, fam, 0, 1000); // start with some base
        admin.flush(table.getName());
        HTU.loadNumericRows(table, fam, 1000, 2000);

        final AtomicReference<Throwable> ex = new AtomicReference<Throwable>(null);
        final AtomicBoolean done = new AtomicBoolean(false);
        final AtomicInteger key = new AtomicInteger(2000);

        Thread loader = new Thread() {
            @Override
            public void run() {
                while (!done.get()) {
                    try {
                        HTU.loadNumericRows(table, fam, key.get(), key.get() + 1000);
                        key.addAndGet(1000);
                    } catch (Throwable e) {
                        ex.compareAndSet(null, e);
                    }
                }
            }
        };
        loader.start();

        Thread aborter = new Thread() {
            @Override
            public void run() {
                try {
                    boolean aborted = false;
                    for (RegionServerThread rs : HTU.getMiniHBaseCluster().getRegionServerThreads()) {
                        for (Region r : rs.getRegionServer().getOnlineRegions(htd.getTableName())) {
                            if (r.getRegionInfo().getReplicaId() == 1) {
                                LOG.info("Aborting region server hosting secondary region replica");
                                rs.getRegionServer().abort("for test");
                                aborted = true;
                            }
                        }
                    }
                    assertTrue(aborted);
                } catch (Throwable e) {
                    ex.compareAndSet(null, e);
                }
            };
        };

        aborter.start();
        aborter.join();
        done.set(true);
        loader.join();

        assertNull(ex.get());

        assertTrue(key.get() > 1000); // assert that the test is working as designed
        LOG.info("Loaded up to key :" + key.get());
        verifyNumericRowsWithTimeout(table, fam, 0, key.get(), 0, 30000);
        verifyNumericRowsWithTimeout(table, fam, 0, key.get(), 1, 30000);
        verifyNumericRowsWithTimeout(table, fam, 0, key.get(), 2, 30000);
    }

    // restart the region server
    HTU.getMiniHBaseCluster().startRegionServer();
}
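
Both the loader and the aborter thread funnel failures through ex.compareAndSet(null, e), so the assertNull(ex.get()) check at the end surfaces the first Throwable raised in either background thread; later failures cannot overwrite it.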

From source file:org.apache.hadoop.hdfs.server.blockmanagement.TestBlockReportRateLimiting.java

private static void setFailure(AtomicReference<String> failure, String what) {
    failure.compareAndSet("", what);
    LOG.error("Test error: " + what);
}
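
Note that this helper only records the message when the reference still holds the empty string it compares against, so it assumes the failure reference was initialized with "" rather than left at the default null, which would never match.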

From source file:org.apache.hadoop.hdfs.server.blockmanagement.TestBlockReportRateLimiting.java

/**
 * Start a 2-node cluster with only one block report lease.  When the
 * first datanode gets a lease, kill it.  Then wait for the lease to
 * expire, and the second datanode to send a full block report.
 */
@Test(timeout = 180000)
public void testLeaseExpiration() throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(DFS_NAMENODE_MAX_FULL_BLOCK_REPORT_LEASES, 1);
    conf.setLong(DFS_NAMENODE_FULL_BLOCK_REPORT_LEASE_LENGTH_MS, 100L);

    final Semaphore gotFbrSem = new Semaphore(0);
    final AtomicReference<String> failure = new AtomicReference<>();
    final AtomicReference<MiniDFSCluster> cluster = new AtomicReference<>();
    final AtomicReference<String> datanodeToStop = new AtomicReference<>();
    final BlockManagerFaultInjector injector = new BlockManagerFaultInjector() {

        @Override
        public void incomingBlockReportRpc(DatanodeID nodeID, BlockReportContext context) throws IOException {
            if (context.getLeaseId() == 0) {
                setFailure(failure,
                        "Got unexpected rate-limiting-" + "bypassing full block report RPC from " + nodeID);
            }
            if (nodeID.getXferAddr().equals(datanodeToStop.get())) {
                throw new IOException("Injecting failure into block " + "report RPC for " + nodeID);
            }
            gotFbrSem.release();
        }

        @Override
        public void requestBlockReportLease(DatanodeDescriptor node, long leaseId) {
            if (leaseId == 0) {
                return;
            }
            datanodeToStop.compareAndSet(null, node.getXferAddr());
        }

        @Override
        public void removeBlockReportLease(DatanodeDescriptor node, long leaseId) {
        }
    };
    try {
        BlockManagerFaultInjector.instance = injector;
        cluster.set(new MiniDFSCluster.Builder(conf).numDataNodes(2).build());
        cluster.get().waitActive();
        Assert.assertNotNull(cluster.get().stopDataNode(datanodeToStop.get()));
        gotFbrSem.acquire();
        Assert.assertNull(failure.get());
    } finally {
        if (cluster.get() != null) {
            cluster.get().shutdown();
        }
    }
}
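
In this test, datanodeToStop.compareAndSet(null, node.getXferAddr()) captures the address of the first datanode that is granted a block-report lease, so the test can stop exactly that datanode and then wait for the lease to expire and the remaining datanode to send its full block report.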

From source file:org.apache.hadoop.hdfs.TestDFSOpsCountStatistics.java

@Test
public void testCurrentAccess() throws InterruptedException {
    final int numThreads = 10;
    final ExecutorService threadPool = newFixedThreadPool(numThreads);

    try {
        final CountDownLatch allReady = new CountDownLatch(numThreads);
        final CountDownLatch startBlocker = new CountDownLatch(1);
        final CountDownLatch allDone = new CountDownLatch(numThreads);
        final AtomicReference<Throwable> childError = new AtomicReference<>();

        for (int i = 0; i < numThreads; i++) {
            threadPool.submit(new Runnable() {
                @Override
                public void run() {
                    allReady.countDown();
                    try {
                        startBlocker.await();
                        incrementOpsCountByRandomNumbers();
                    } catch (Throwable t) {
                        LOG.error("Child failed when calling mkdir", t);
                        childError.compareAndSet(null, t);
                    } finally {
                        allDone.countDown();
                    }
                }
            });
        }

        allReady.await(); // wait until all threads are ready
        startBlocker.countDown(); // all threads start making directories
        allDone.await(); // wait until all threads are done

        assertNull("Child failed with exception.", childError.get());
        verifyStatistics();
    } finally {
        threadPool.shutdownNow();
    }
}

From source file:org.apache.hadoop.mapred.TestJvmManager.java

/**
 * Create a bunch of tasks and use a special hash map to detect
 * racy access to the various internal data structures of JvmManager.
 * (Regression test for MAPREDUCE-2224)
 */
@Test
public void testForRaces() throws Exception {
    JvmManagerForType mapJvmManager = jvmManager.getJvmManagerForType(TaskType.MAP);

    // Sub out the HashMaps for maps that will detect racy access.
    mapJvmManager.jvmToRunningTask = new RaceHashMap<JVMId, TaskRunner>();
    mapJvmManager.runningTaskToJvm = new RaceHashMap<TaskRunner, JVMId>();
    mapJvmManager.jvmIdToRunner = new RaceHashMap<JVMId, JvmRunner>();

    // Launch a bunch of JVMs, but only allow MAP_SLOTS to run at once.
    final ExecutorService exec = Executors.newFixedThreadPool(MAP_SLOTS);
    final AtomicReference<Throwable> failed = new AtomicReference<Throwable>();

    for (int i = 0; i < MAP_SLOTS * 5; i++) {
        JobConf taskConf = new JobConf(ttConf);
        TaskAttemptID attemptID = new TaskAttemptID("test", 0, TaskType.MAP, i, 0);
        Task task = new MapTask(null, attemptID, i, null, 1);
        task.setConf(taskConf);
        TaskInProgress tip = tt.new TaskInProgress(task, taskConf);
        File pidFile = new File(TEST_DIR, "pid_" + i);
        final TaskRunner taskRunner = task.createRunner(tt, tip);
        // launch a jvm which runs a short script ("echo hi")
        final Vector<String> vargs = new Vector<String>(2);
        vargs.add(writeScript("script_" + i, "echo hi\n", pidFile).getAbsolutePath());
        final File workDir = new File(TEST_DIR, "work_" + i);
        workDir.mkdir();
        final File stdout = new File(TEST_DIR, "stdout_" + i);
        final File stderr = new File(TEST_DIR, "stderr_" + i);

        // launch the process and wait in a thread, till it finishes
        Runnable launcher = new Runnable() {
            public void run() {
                try {
                    taskRunner.launchJvmAndWait(null, vargs, stdout, stderr, 100, workDir, null);
                } catch (Throwable t) {
                    failed.compareAndSet(null, t);
                    exec.shutdownNow();
                    return;
                }
            }
        };
        exec.submit(launcher);
    }

    exec.shutdown();
    exec.awaitTermination(3, TimeUnit.MINUTES);
    if (failed.get() != null) {
        throw new RuntimeException(failed.get());
    }
}
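
As in the earlier examples, failed.compareAndSet(null, t) keeps only the first Throwable thrown by any of the launcher tasks, and the main thread rethrows it once the executor has drained.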

From source file:org.jmingo.query.QueryManager.java

/**
 * The entire method invocation is performed atomically.
 * Attempts by other threads to perform update operations on {@link #queries}
 * may block while reloading is in progress, so the reloading logic shouldn't take much time.
 * This method uses optimistic concurrency control: if the query set at the given path is changed again
 * while it is being reloaded, the current reload is cancelled and an appropriate message is logged.
 */
private void reload(Path path) {
    LOGGER.debug("reload query set: {}", path);
    AtomicReference<QuerySet> currentQuerySetRef = getQuerySetRef(path);
    QuerySet currQuerySet = currentQuerySetRef.get();
    String checksum = FileUtils.checksum(path.toFile());
    if (StringUtils.equals(checksum, currQuerySet.getChecksum())) {
        LOGGER.debug("query set: {} was edited but wasn't changed, content remains the same", path);
        return;
    }
    QuerySet newQuerySet = loadQuerySet(path);
    if (newQuerySet.getQueries().size() > currQuerySet.getQueries().size()) {
        LOGGER.warn(
                "{} queries was added in query set: {}. "
                        + "Operations 'add' and 'remove' on query set aren't supported.",
                newQuerySet.getQueries().size() - currQuerySet.getQueries().size(), path);
    }

    if (newQuerySet.getQueries().size() < currQuerySet.getQueries().size()) {
        LOGGER.warn(
                "{} queries was removed from query set: {}. "
                        + "Operations 'add' and 'remove' on query set aren't supported.",
                currQuerySet.getQueries().size() - newQuerySet.getQueries().size(), path);
    }

    if (currentQuerySetRef.compareAndSet(currQuerySet, newQuerySet)) {
        for (Query updatedQuery : newQuerySet.getQueries()) {
            String compositeId = QueryUtils.buildCompositeId(newQuerySet.getCollectionName(),
                    updatedQuery.getId());
            queries.computeIfPresent(compositeId, (key, currentQuery) -> {
                LOGGER.debug("query with composite id:'{}' was refreshed. query set: '{}'", compositeId,
                        newQuerySet.getPath());
                return updatedQuery;
            });
        }
        LOGGER.debug("query set: {} was successfully reloaded", path);
    } else {
        LOGGER.error("query set with path: {} was changed by someone before the actual update operation ended, "
                + "please refresh file {} and try to edit this query set again", path, path);
    }
}
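
The reload above is a single-shot optimistic update: it snapshots the current QuerySet, builds a replacement, and publishes it with compareAndSet, giving up (with a log message) if another thread swapped the reference in the meantime. When the recomputation is cheap and side-effect free, the same idea is often written as a retry loop; a generic sketch might look like this:

import java.util.concurrent.atomic.AtomicReference;
import java.util.function.UnaryOperator;

final class OptimisticUpdate {
    /** Repeatedly applies fn until compareAndSet publishes the new value atomically. */
    static <T> T update(AtomicReference<T> ref, UnaryOperator<T> fn) {
        while (true) {
            T current = ref.get();
            T next = fn.apply(current);
            if (ref.compareAndSet(current, next)) {
                return next;
            }
            // Another thread changed the value between get() and compareAndSet(); retry.
        }
    }
}

(AtomicReference#updateAndGet, added in Java 8, implements exactly this loop.)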

From source file:org.springframework.data.gemfire.IndexFactoryBeanTest.java

@Test
public void defineMultipleIndexesWithSeparateIndexFactoryBeansSameSpringContext() throws Exception {

    ConfigurableBeanFactory mockBeanFactory = mock(ConfigurableBeanFactory.class,
            "testDefineMultipleIndexesWithSeparateIndexFactoryBeansSameSpringContext.MockBeanFactory");

    Cache mockCacheOne = mock(Cache.class,
            "testDefineMultipleIndexesWithSeparateIndexFactoryBeansSameSpringContext.MockCacheOne");

    Cache mockCacheTwo = mock(Cache.class,
            "testDefineMultipleIndexesWithSeparateIndexFactoryBeansSameSpringContext.MockCacheTwo");

    AtomicReference<QueryService> queryServiceReference = new AtomicReference<>(null);

    doAnswer(invocation -> (queryServiceReference.get() != null)).when(mockBeanFactory)
            .containsBean(eq(GemfireConstants.DEFAULT_GEMFIRE_INDEX_DEFINITION_QUERY_SERVICE));

    doAnswer(invocation -> queryServiceReference.get()).when(mockBeanFactory).getBean(
            eq(GemfireConstants.DEFAULT_GEMFIRE_INDEX_DEFINITION_QUERY_SERVICE), eq(QueryService.class));

    doAnswer(invocation -> {

        assertEquals(GemfireConstants.DEFAULT_GEMFIRE_INDEX_DEFINITION_QUERY_SERVICE,
                invocation.getArgument(0));

        queryServiceReference.compareAndSet(null, invocation.getArgument(1));

        return null;
    }).when(mockBeanFactory).registerSingleton(
            eq(GemfireConstants.DEFAULT_GEMFIRE_INDEX_DEFINITION_QUERY_SERVICE), any(QueryService.class));

    when(mockCacheOne.getQueryService()).thenReturn(mockQueryService);

    IndexFactoryBean indexFactoryBeanOne = new IndexFactoryBean();

    indexFactoryBeanOne.setBeanFactory(mockBeanFactory);
    indexFactoryBeanOne.setCache(mockCacheOne);
    indexFactoryBeanOne.setDefine(true);
    indexFactoryBeanOne.setExpression("id");
    indexFactoryBeanOne.setFrom("/People");
    indexFactoryBeanOne.setName("PersonIdIndex");
    indexFactoryBeanOne.setType("Key");
    indexFactoryBeanOne.afterPropertiesSet();

    IndexFactoryBean indexFactoryBeanTwo = new IndexFactoryBean();

    indexFactoryBeanTwo.setBeanFactory(mockBeanFactory);
    indexFactoryBeanTwo.setCache(mockCacheTwo);
    indexFactoryBeanTwo.setDefine(true);
    indexFactoryBeanTwo.setExpression("purchaseDate");
    indexFactoryBeanTwo.setFrom("/Orders");
    indexFactoryBeanTwo.setImports("org.example.Order");
    indexFactoryBeanTwo.setName("PurchaseDateIndex");
    indexFactoryBeanTwo.setType("HASH");
    indexFactoryBeanTwo.afterPropertiesSet();

    verify(mockBeanFactory, times(2))
            .containsBean(eq(GemfireConstants.DEFAULT_GEMFIRE_INDEX_DEFINITION_QUERY_SERVICE));

    verify(mockBeanFactory, times(1)).getBean(
            eq(GemfireConstants.DEFAULT_GEMFIRE_INDEX_DEFINITION_QUERY_SERVICE), eq(QueryService.class));

    verify(mockBeanFactory, times(1)).registerSingleton(
            eq(GemfireConstants.DEFAULT_GEMFIRE_INDEX_DEFINITION_QUERY_SERVICE), same(mockQueryService));

    verify(mockCacheOne, times(1)).getQueryService();
    verify(mockCacheTwo, never()).getQueryService();

    verify(mockQueryService, times(1)).defineKeyIndex(eq("PersonIdIndex"), eq("id"), eq("/People"));

    verify(mockQueryService, times(1)).defineHashIndex(eq("PurchaseDateIndex"), eq("purchaseDate"),
            eq("/Orders"), eq("org.example.Order"));
}