Example usage for java.util.concurrent.atomic AtomicReference set

List of usage examples for java.util.concurrent.atomic AtomicReference set

Introduction

On this page you can find usage examples for java.util.concurrent.atomic AtomicReference set.

Prototype

public final void set(V newValue) 

Document

Sets the value to newValue, with memory effects as specified by VarHandle#setVolatile.
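As a quick orientation before the full examples below, here is a minimal, self-contained sketch (class and variable names are hypothetical, not taken from the sources listed under Usage) of set publishing a value from one thread so another thread can read it with get:

import java.util.concurrent.atomic.AtomicReference;

public class AtomicReferenceSetExample {
    public static void main(String[] args) throws InterruptedException {
        // Shared holder; set(...) has volatile-write memory effects, so the value
        // written by the worker thread is visible to the main thread after join().
        final AtomicReference<String> result = new AtomicReference<>();

        Thread worker = new Thread(() -> result.set("computed value"));
        worker.start();
        worker.join();

        System.out.println(result.get()); // prints "computed value"
    }
}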

Usage

From source file:org.apache.bookkeeper.mledger.impl.ManagedLedgerTest.java

@Test
public void testConcurrentOpenCursor() throws Exception {
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("testConcurrentOpenCursor");

    final AtomicReference<ManagedCursor> cursor1 = new AtomicReference<>(null);
    final AtomicReference<ManagedCursor> cursor2 = new AtomicReference<>(null);
    final CyclicBarrier barrier = new CyclicBarrier(2);
    final CountDownLatch latch = new CountDownLatch(2);

    cachedExecutor.execute(() -> {
        try {
            barrier.await();
        } catch (Exception e) {
        }
        ledger.asyncOpenCursor("c1", new OpenCursorCallback() {

            @Override
            public void openCursorFailed(ManagedLedgerException exception, Object ctx) {
                latch.countDown();
            }

            @Override
            public void openCursorComplete(ManagedCursor cursor, Object ctx) {
                cursor1.set(cursor);
                latch.countDown();
            }
        }, null);
    });

    cachedExecutor.execute(() -> {
        try {
            barrier.await();
        } catch (Exception e) {
        }
        ledger.asyncOpenCursor("c1", new OpenCursorCallback() {

            @Override
            public void openCursorFailed(ManagedLedgerException exception, Object ctx) {
                latch.countDown();
            }

            @Override
            public void openCursorComplete(ManagedCursor cursor, Object ctx) {
                cursor2.set(cursor);
                latch.countDown();
            }
        }, null);
    });

    latch.await();
    assertNotNull(cursor1.get());
    assertNotNull(cursor2.get());
    assertEquals(cursor1.get(), cursor2.get());

    ledger.close();
}

From source file:org.commonjava.maven.galley.cache.infinispan.FastLocalCacheProvider.java

@Override
public boolean delete(ConcreteResource resource) throws IOException {
    final File nfsFile = getNFSDetachedFile(resource);
    final String pathKey = getKeyForPath(nfsFile.getCanonicalPath());

    final AtomicReference<Exception> taskException = new AtomicReference<>();
    final Boolean deleteResult = tryLockAnd(resource, DEFAULT_WAIT_FOR_TRANSFER_LOCK_SECONDS, TimeUnit.SECONDS,
            r -> {
                boolean localDeleted = false;
                try {
                    // must make sure the local file is not in reading/writing status
                    if (!plCacheProvider.isWriteLocked(resource) && !plCacheProvider.isReadLocked(resource)) {
                        logger.debug("[galley] Local cache file is not locked, will be deleted now.");
                        localDeleted = plCacheProvider.delete(resource);
                    } else {
                        logger.warn(
                                "Resource {} is locked by other threads for waiting and writing, can not be deleted now",
                                resource);
                    }
                    if (!localDeleted) {
                        // if local deletion not success, no need to delete NFS to keep data consistency
                        logger.info("local file deletion failed for {}", resource);
                        return false;
                    }
                    lockByISPN(nfsOwnerCache, resource, LockLevel.delete);
                    nfsOwnerCache.remove(pathKey);
                    final boolean nfsDeleted = nfsFile.delete();
                    if (!nfsDeleted) {
                        logger.info("nfs file deletion failed for {}", nfsFile);
                    }
                    return nfsDeleted;
                } catch (NotSupportedException | SystemException | InterruptedException e) {
                    final String errorMsg = String
                            .format("[galley] Cache TransactionManager got error, locking key is %s", pathKey);
                    logger.error(errorMsg, e);
                    taskException.set(e);
                } catch (IOException e) {
                    taskException.set(e);
                } finally {
                    if (localDeleted) {
                        logger.info(
                                "Local file deleted and ISPN lock started for {}, need to release ISPN lock",
                                resource);
                        unlockByISPN(nfsOwnerCache, false, resource);
                        localFileCache.remove(resource.getPath());
                    }
                }
                return false;
            });

    propagateException(taskException.get());

    return deleteResult == null ? false : deleteResult;
}

From source file:org.apache.accumulo.tserver.replication.AccumuloReplicaSystem.java

protected Status replicateLogs(ClientContext peerContext, final HostAndPort peerTserver,
        final ReplicationTarget target, final Path p, final Status status, final long sizeLimit,
        final String remoteTableId, final TCredentials tcreds, final ReplicaSystemHelper helper,
        final UserGroupInformation accumuloUgi)
        throws TTransportException, AccumuloException, AccumuloSecurityException {

    log.debug("Replication WAL to peer tserver");
    final Set<Integer> tids;
    final DataInputStream input;
    Span span = Trace.start("Read WAL header");
    span.data("file", p.toString());
    try {
        input = getWalStream(p);
    } catch (LogHeaderIncompleteException e) {
        log.warn(
                "Could not read header from {}, assuming that there is no data present in the WAL, therefore replication is complete",
                p);
        Status newStatus;
        // Bump up the begin to the (infinite) end, trying to be accurate
        if (status.getInfiniteEnd()) {
            newStatus = Status.newBuilder(status).setBegin(Long.MAX_VALUE).build();
        } else {
            newStatus = Status.newBuilder(status).setBegin(status.getEnd()).build();
        }
        span = Trace.start("Update replication table");
        try {
            helper.recordNewStatus(p, newStatus, target);
        } catch (TableNotFoundException tnfe) {
            log.error("Tried to update status in replication table for {} as {}, but the table did not exist",
                    p, ProtobufUtil.toString(newStatus), e);
            throw new RuntimeException("Replication table did not exist, will retry", e);
        } finally {
            span.stop();
        }
        return newStatus;
    } catch (IOException e) {
        log.error("Could not create stream for WAL", e);
        // No data sent (bytes nor records) and no progress made
        return status;
    } finally {
        span.stop();
    }

    log.debug("Skipping unwanted data in WAL");
    span = Trace.start("Consume WAL prefix");
    span.data("file", p.toString());
    try {
        // We want to read all records in the WAL up to the "begin" offset contained in the Status message,
        // building a Set of tids from DEFINE_TABLET events which correspond to table ids for future mutations
        tids = consumeWalPrefix(target, input, p, status, sizeLimit);
    } catch (IOException e) {
        log.warn("Unexpected error consuming file.");
        return status;
    } finally {
        span.stop();
    }

    log.debug("Sending batches of data to peer tserver");

    Status lastStatus = status, currentStatus = status;
    final AtomicReference<Exception> exceptionRef = new AtomicReference<>();
    while (true) {
        // Set some trace info
        span = Trace.start("Replicate WAL batch");
        span.data("Batch size (bytes)", Long.toString(sizeLimit));
        span.data("File", p.toString());
        span.data("Peer instance name", peerContext.getInstance().getInstanceName());
        span.data("Peer tserver", peerTserver.toString());
        span.data("Remote table ID", remoteTableId);

        ReplicationStats replResult;
        try {
            // Read and send a batch of mutations
            replResult = ReplicationClient.executeServicerWithReturn(peerContext, peerTserver,
                    new WalClientExecReturn(target, input, p, currentStatus, sizeLimit, remoteTableId, tcreds,
                            tids));
        } catch (Exception e) {
            log.error("Caught exception replicating data to {} at {}",
                    peerContext.getInstance().getInstanceName(), peerTserver, e);
            throw e;
        } finally {
            span.stop();
        }

        // Catch the overflow
        long newBegin = currentStatus.getBegin() + replResult.entriesConsumed;
        if (newBegin < 0) {
            newBegin = Long.MAX_VALUE;
        }

        currentStatus = Status.newBuilder(currentStatus).setBegin(newBegin).build();

        log.debug("Sent batch for replication of {} to {}, with new Status {}", p, target,
                ProtobufUtil.toString(currentStatus));

        // If we got a different status
        if (!currentStatus.equals(lastStatus)) {
            span = Trace.start("Update replication table");
            try {
                if (null != accumuloUgi) {
                    final Status copy = currentStatus;
                    accumuloUgi.doAs(new PrivilegedAction<Void>() {
                        @Override
                        public Void run() {
                            try {
                                helper.recordNewStatus(p, copy, target);
                            } catch (Exception e) {
                                exceptionRef.set(e);
                            }
                            return null;
                        }
                    });
                    Exception e = exceptionRef.get();
                    if (null != e) {
                        if (e instanceof TableNotFoundException) {
                            throw (TableNotFoundException) e;
                        } else if (e instanceof AccumuloSecurityException) {
                            throw (AccumuloSecurityException) e;
                        } else if (e instanceof AccumuloException) {
                            throw (AccumuloException) e;
                        } else {
                            throw new RuntimeException("Received unexpected exception", e);
                        }
                    }
                } else {
                    helper.recordNewStatus(p, currentStatus, target);
                }
            } catch (TableNotFoundException e) {
                log.error(
                        "Tried to update status in replication table for {} as {}, but the table did not exist",
                        p, ProtobufUtil.toString(currentStatus), e);
                throw new RuntimeException("Replication table did not exist, will retry", e);
            } finally {
                span.stop();
            }

            log.debug("Recorded updated status for {}: {}", p, ProtobufUtil.toString(currentStatus));

            // If we don't have any more work, just quit
            if (!StatusUtil.isWorkRequired(currentStatus)) {
                return currentStatus;
            } else {
                // Otherwise, let it loop and replicate some more data
                lastStatus = currentStatus;
            }
        } else {
            log.debug("Did not replicate any new data for {} to {}, (state was {})", p, target,
                    ProtobufUtil.toString(lastStatus));

            // otherwise, we didn't actually replicate (likely because there was error sending the data)
            // we can just not record any updates, and it will be picked up again by the work assigner
            return status;
        }
    }
}

From source file:org.apache.flink.streaming.connectors.kafka.KafkaConsumerTestBase.java

/**
 * Tests that the source can be properly canceled when reading full partitions. 
 */
public void runCancelingOnFullInputTest() throws Exception {
    final String topic = "cancelingOnFullTopic";

    final int parallelism = 3;
    createTestTopic(topic, parallelism, 1);

    // launch a producer thread
    DataGenerators.InfiniteStringsGenerator generator = new DataGenerators.InfiniteStringsGenerator(kafkaServer,
            topic);
    generator.start();

    // launch a consumer asynchronously

    final AtomicReference<Throwable> jobError = new AtomicReference<>();

    final Runnable jobRunner = new Runnable() {
        @Override
        public void run() {
            try {
                final StreamExecutionEnvironment env = StreamExecutionEnvironment
                        .createRemoteEnvironment("localhost", flinkPort);
                env.setParallelism(parallelism);
                env.enableCheckpointing(100);
                env.getConfig().disableSysoutLogging();

                Properties props = new Properties();
                props.putAll(standardProps);
                props.putAll(secureProps);
                FlinkKafkaConsumerBase<String> source = kafkaServer.getConsumer(topic, new SimpleStringSchema(),
                        props);

                env.addSource(source).addSink(new DiscardingSink<String>());

                env.execute("Runner for CancelingOnFullInputTest");
            } catch (Throwable t) {
                jobError.set(t);
            }
        }
    };

    Thread runnerThread = new Thread(jobRunner, "program runner thread");
    runnerThread.start();

    // wait a bit before canceling
    Thread.sleep(2000);

    Throwable failureCause = jobError.get();
    if (failureCause != null) {
        failureCause.printStackTrace();
        Assert.fail("Test failed prematurely with: " + failureCause.getMessage());
    }
    }

    // cancel
    JobManagerCommunicationUtils.cancelCurrentJob(flink.getLeaderGateway(timeout),
            "Runner for CancelingOnFullInputTest");

    // wait for the program to be done and validate that we failed with the right exception
    runnerThread.join();

    failureCause = jobError.get();
    assertNotNull("program did not fail properly due to canceling", failureCause);
    assertTrue(failureCause.getMessage().contains("Job was cancelled"));

    if (generator.isAlive()) {
        generator.shutdown();
        generator.join();
    } else {
        Throwable t = generator.getError();
        if (t != null) {
            t.printStackTrace();
            fail("Generator failed: " + t.getMessage());
        } else {
            fail("Generator failed with no exception");
        }
    }

    deleteTestTopic(topic);
}

From source file:org.apache.flink.streaming.connectors.kafka.KafkaConsumerTestBase.java

/**
 * Tests that the source can be properly canceled when reading empty partitions. 
 */
public void runCancelingOnEmptyInputTest() throws Exception {
    final String topic = "cancelingOnEmptyInputTopic";

    final int parallelism = 3;
    createTestTopic(topic, parallelism, 1);

    final AtomicReference<Throwable> error = new AtomicReference<>();

    final Runnable jobRunner = new Runnable() {
        @Override
        public void run() {
            try {
                final StreamExecutionEnvironment env = StreamExecutionEnvironment
                        .createRemoteEnvironment("localhost", flinkPort);
                env.setParallelism(parallelism);
                env.enableCheckpointing(100);
                env.getConfig().disableSysoutLogging();

                Properties props = new Properties();
                props.putAll(standardProps);
                props.putAll(secureProps);
                FlinkKafkaConsumerBase<String> source = kafkaServer.getConsumer(topic, new SimpleStringSchema(),
                        props);

                env.addSource(source).addSink(new DiscardingSink<String>());

                env.execute("CancelingOnEmptyInputTest");
            } catch (Throwable t) {
                LOG.error("Job Runner failed with exception", t);
                error.set(t);
            }
        }
    };

    Thread runnerThread = new Thread(jobRunner, "program runner thread");
    runnerThread.start();

    // wait a bit before canceling
    Thread.sleep(2000);

    Throwable failureCause = error.get();
    if (failureCause != null) {
        failureCause.printStackTrace();
        Assert.fail("Test failed prematurely with: " + failureCause.getMessage());
    }
    }
    // cancel
    JobManagerCommunicationUtils.cancelCurrentJob(flink.getLeaderGateway(timeout));

    // wait for the program to be done and validate that we failed with the right exception
    runnerThread.join();

    failureCause = error.get();
    assertNotNull("program did not fail properly due to canceling", failureCause);
    assertTrue(failureCause.getMessage().contains("Job was cancelled"));

    deleteTestTopic(topic);
}

From source file:org.eclipse.equinox.http.servlet.tests.ServletTest.java

public void testServletContextUnsupportedOperations() {
    final AtomicReference<ServletContext> contextHolder = new AtomicReference<ServletContext>();
    Servlet unsupportedServlet = new HttpServlet() {
        private static final long serialVersionUID = 1L;

        @Override
        public void init(ServletConfig config) throws ServletException {
            contextHolder.set(config.getServletContext());
        }
    };

    ServiceRegistration<Servlet> servletReg = null;
    Dictionary<String, Object> servletProps = new Hashtable<String, Object>();
    servletProps.put(HttpWhiteboardConstants.HTTP_WHITEBOARD_SERVLET_PATTERN, "/sessions");
    try {
        servletReg = getBundleContext().registerService(Servlet.class, unsupportedServlet, servletProps);
    } catch (Exception e) {
        fail("Unexpected exception: " + e);
    } finally {
        if (servletReg != null) {
            servletReg.unregister();
        }
    }
    ServletContext context = contextHolder.get();
    assertNotNull("Null context.", context);
    for (Method m : getUnsupportedMethods()) {
        checkMethod(m, context);
    }
}

From source file:org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.java

/**
 * Delete this ManagedLedger completely from the system.
 *
 * @throws Exception
 */
@Override
public void delete() throws InterruptedException, ManagedLedgerException {
    final CountDownLatch counter = new CountDownLatch(1);
    final AtomicReference<ManagedLedgerException> exception = new AtomicReference<>();

    asyncDelete(new DeleteLedgerCallback() {
        @Override
        public void deleteLedgerComplete(Object ctx) {
            counter.countDown();
        }

        @Override
        public void deleteLedgerFailed(ManagedLedgerException e, Object ctx) {
            exception.set(e);
            counter.countDown();
        }

    }, null);

    if (!counter.await(AsyncOperationTimeoutSeconds, TimeUnit.SECONDS)) {
        throw new ManagedLedgerException("Timeout during managed ledger delete operation");
    }

    if (exception.get() != null) {
        log.error("[{}] Error deleting managed ledger", name, exception.get());
        throw exception.get();
    }
}

From source file:com.microsoft.tfs.core.clients.versioncontrol.internal.localworkspace.LocalDataAccessLayer.java

private static GetOperation[] sendToServer(final AtomicReference<Failure[]> failures,
        final AtomicBoolean onlineOperationRequired) {
    failures.set(new Failure[0]);
    onlineOperationRequired.set(true);
    return null;
}

From source file:de.schildbach.pte.AbstractEfaProvider.java

protected QueryTripsResult queryMoreTripsMobile(final QueryTripsContext contextObj, final boolean later)
        throws IOException {
    final Context context = (Context) contextObj;
    final HttpUrl commandUrl = HttpUrl.parse(context.context);
    final HttpUrl.Builder url = commandUrl.newBuilder();
    url.addEncodedQueryParameter("command", later ? "tripNext" : "tripPrev");
    final AtomicReference<QueryTripsResult> result = new AtomicReference<>();

    final HttpClient.Callback callback = new HttpClient.Callback() {
        @Override
        public void onSuccessful(final CharSequence bodyPeek, final ResponseBody body) throws IOException {
            try {
                result.set(queryTripsMobile(url.build(), null, null, null, body.byteStream()));
            } catch (final XmlPullParserException x) {
                throw new ParserException("cannot parse xml: " + bodyPeek, x);
            } catch (final RuntimeException x) {
                throw new RuntimeException("uncategorized problem while processing " + url, x);
            }
        }
    };

    httpClient.getInputStream(callback, url.build(), httpRefererTrip);

    return result.get();
}

From source file:de.schildbach.pte.AbstractEfaProvider.java

@Override
public QueryTripsResult queryMoreTrips(final QueryTripsContext contextObj, final boolean later)
        throws IOException {
    final Context context = (Context) contextObj;
    final HttpUrl commandUrl = HttpUrl.parse(context.context);
    final HttpUrl.Builder url = commandUrl.newBuilder();
    url.addEncodedQueryParameter("command", later ? "tripNext" : "tripPrev");
    final AtomicReference<QueryTripsResult> result = new AtomicReference<>();

    final HttpClient.Callback callback = new HttpClient.Callback() {
        @Override
        public void onSuccessful(final CharSequence bodyPeek, final ResponseBody body) throws IOException {
            try {
                result.set(queryTrips(url.build(), body.byteStream()));
            } catch (final XmlPullParserException x) {
                throw new ParserException("cannot parse xml: " + bodyPeek, x);
            } catch (final RuntimeException x) {
                throw new RuntimeException("uncategorized problem while processing " + url, x);
            }
        }
    };

    httpClient.getInputStream(callback, url.build(), httpRefererTrip);

    return result.get();
}