Example usage for java.util.concurrent.atomic AtomicReference set

Introduction

This page collects example usage of java.util.concurrent.atomic.AtomicReference#set, drawn from open-source projects.

Prototype

public final void set(V newValue) 

Document

Sets the value to newValue, with memory effects as specified by VarHandle#setVolatile.
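
A minimal, self-contained sketch of the call (class and variable names are illustrative): set performs a volatile write, so a value published by one thread is guaranteed to be visible to another thread that subsequently calls get.

import java.util.concurrent.atomic.AtomicReference;

public class SetExample {
    public static void main(String[] args) throws InterruptedException {
        final AtomicReference<String> message = new AtomicReference<>("initial");

        Thread reader = new Thread(() -> {
            // Spin until the writer publishes; set()'s volatile semantics
            // make the new value visible to this thread.
            while ("initial".equals(message.get())) {
                Thread.yield();
            }
            System.out.println("saw: " + message.get());
        });

        reader.start();
        message.set("updated"); // volatile write, visible to the reader
        reader.join();
    }
}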

Usage

From source file: org.commonjava.maven.galley.cache.infinispan.FastLocalCacheProvider.java

@Override
public void waitForReadUnlock(ConcreteResource resource) {
    // To avoid deadlock, use a lock with a timeout; if the timeout elapses, an exception is thrown
    try {
        final AtomicReference<IOException> taskException = new AtomicReference<>();
        tryLockAnd(resource, DEFAULT_WAIT_FOR_TRANSFER_LOCK_SECONDS, TimeUnit.SECONDS, r -> {
            plCacheProvider.waitForReadUnlock(resource);
            try {
                waitForISPNLock(resource, isReadLocked(resource), DEFAULT_WAIT_FOR_TRANSFER_LOCK_MILLIS);
            } catch (IOException e) {
                taskException.set(e);
            }
            return null;
        });
        propagateException(taskException.get());
    } catch (IOException e) {
        final String errorMsg = String.format(
                "[galley] Got I/O error while waiting for read unlock of resource: %s", resource.toString());
        logger.error(errorMsg, e);
        throw new IllegalStateException(errorMsg, e);
    }
}

From source file: org.commonjava.maven.galley.cache.infinispan.FastLocalCacheProvider.java

@Override
public void waitForWriteUnlock(ConcreteResource resource) {
    // To avoid deadlock, use a lock with a timeout; if the timeout elapses, an exception is thrown
    try {
        final AtomicReference<IOException> taskException = new AtomicReference<>();
        tryLockAnd(resource, DEFAULT_WAIT_FOR_TRANSFER_LOCK_SECONDS, TimeUnit.SECONDS, r -> {
            plCacheProvider.waitForWriteUnlock(resource);
            try {
                waitForISPNLock(resource, isWriteLocked(resource), DEFAULT_WAIT_FOR_TRANSFER_LOCK_MILLIS);
            } catch (IOException e) {
                taskException.set(e);
            }
            return null;
        });
        propagateException(taskException.get());
    } catch (IOException e) {
        final String errorMsg = String.format(
                "[galley] Got I/O error while waiting for write unlock of resource: %s", resource.toString());
        logger.error(errorMsg, e);
        throw new IllegalStateException(errorMsg, e);
    }
}
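
Both methods above rely on the same idiom: the task runs inside a lambda that cannot throw a checked exception, so it parks its IOException in an AtomicReference and the caller rethrows it after the lock is released. A minimal standalone sketch of that pattern (riskyIo is a hypothetical stand-in for real I/O work):

import java.io.IOException;
import java.util.concurrent.atomic.AtomicReference;

public class CheckedExceptionCapture {
    public static void main(String[] args) throws IOException {
        final AtomicReference<IOException> taskException = new AtomicReference<>();

        Runnable task = () -> {
            try {
                riskyIo();
            } catch (IOException e) {
                taskException.set(e); // smuggle the checked exception out of the lambda
            }
        };
        task.run();

        IOException captured = taskException.get();
        if (captured != null) {
            throw captured; // rethrow in the caller's context
        }
    }

    private static void riskyIo() throws IOException {
        // stand-in for real I/O work
    }
}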

From source file: org.elasticsearch.smoketest.SmokeTestWatcherTestSuiteIT.java

private ObjectPath getWatchHistoryEntry(String watchId) throws Exception {
    final AtomicReference<ObjectPath> objectPathReference = new AtomicReference<>();
    assertBusy(() -> {
        client().performRequest("POST", ".watcher-history-*/_refresh");

        try (XContentBuilder builder = jsonBuilder()) {
            builder.startObject();
            builder.startObject("query").startObject("bool").startArray("must");
            builder.startObject().startObject("term").startObject("watch_id").field("value", watchId)
                    .endObject().endObject().endObject();
            builder.endArray().endObject().endObject();
            builder.startArray("sort").startObject().startObject("trigger_event.triggered_time")
                    .field("order", "desc").endObject().endObject().endArray();
            builder.endObject();

            StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON);
            Response response = client().performRequest("POST", ".watcher-history-*/_search", emptyMap(),
                    entity);
            ObjectPath objectPath = ObjectPath.createFromResponse(response);
            int totalHits = objectPath.evaluate("hits.total");
            assertThat(totalHits, is(greaterThanOrEqualTo(1)));
            String watchid = objectPath.evaluate("hits.hits.0._source.watch_id");
            assertThat(watchid, is(watchId));
            objectPathReference.set(objectPath);
        }
    });
    return objectPathReference.get();
}
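
assertBusy retries the block until its assertions stop failing, but the block returns void, so the test exports its result through an AtomicReference. A generic sketch of the same idea, with a hypothetical retryUntilSuccess helper standing in for assertBusy:

import java.util.concurrent.atomic.AtomicReference;

public class ResultCapture {

    // Hypothetical stand-in for a retrying assertion such as assertBusy:
    // rerun the check until it stops throwing, up to a fixed number of tries.
    static void retryUntilSuccess(Runnable check) throws InterruptedException {
        for (int attempt = 0; ; attempt++) {
            try {
                check.run();
                return;
            } catch (AssertionError e) {
                if (attempt == 9) {
                    throw e;
                }
                Thread.sleep(100);
            }
        }
    }

    static String fetchWhenReady() throws InterruptedException {
        final AtomicReference<String> result = new AtomicReference<>();
        retryUntilSuccess(() -> {
            String value = System.getProperty("example.value"); // illustrative check
            if (value == null) {
                throw new AssertionError("not ready yet");
            }
            result.set(value); // export the value from the void lambda
        });
        return result.get();
    }
}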

From source file: org.elasticsearch.xpack.ml.integration.MlJobIT.java

public void testDelete_multipleRequest() throws Exception {
    String jobId = "delete-job-multiple-times";
    createFarequoteJob(jobId);

    ConcurrentMapLong<Response> responses = ConcurrentCollections.newConcurrentMapLong();
    ConcurrentMapLong<ResponseException> responseExceptions = ConcurrentCollections.newConcurrentMapLong();
    AtomicReference<IOException> ioe = new AtomicReference<>();
    AtomicInteger recreationGuard = new AtomicInteger(0);
    AtomicReference<Response> recreationResponse = new AtomicReference<>();
    AtomicReference<ResponseException> recreationException = new AtomicReference<>();

    Runnable deleteJob = () -> {
        try {
            boolean forceDelete = randomBoolean();
            String url = MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId;
            if (forceDelete) {
                url += "?force=true";
            }
            Response response = client().performRequest("delete", url);
            responses.put(Thread.currentThread().getId(), response);
        } catch (ResponseException re) {
            responseExceptions.put(Thread.currentThread().getId(), re);
        } catch (IOException e) {
            ioe.set(e);
        }

        // Immediately after the first deletion finishes, recreate the job.  This should pick up
        // race conditions where another delete request deletes part of the newly created job.
        if (recreationGuard.getAndIncrement() == 0) {
            try {
                recreationResponse.set(createFarequoteJob(jobId));
            } catch (ResponseException re) {
                recreationException.set(re);
            } catch (IOException e) {
                ioe.set(e);
            }
        }
    };

    // The idea is to hit the situation where one request waits for
    // the other to complete. This is difficult to schedule but
    // hopefully it will happen in CI
    int numThreads = 5;
    Thread[] threads = new Thread[numThreads];
    for (int i = 0; i < numThreads; i++) {
        threads[i] = new Thread(deleteJob);
    }
    for (int i = 0; i < numThreads; i++) {
        threads[i].start();
    }
    for (int i = 0; i < numThreads; i++) {
        threads[i].join();
    }

    if (ioe.get() != null) {
        // This looks redundant but the check is done so we can
        // print the exception's error message
        assertNull(ioe.get().getMessage(), ioe.get());
    }

    assertEquals(numThreads, responses.size() + responseExceptions.size());

    // 404s are ok as it means the job had already been deleted.
    for (ResponseException re : responseExceptions.values()) {
        assertEquals(re.getMessage(), 404, re.getResponse().getStatusLine().getStatusCode());
    }

    for (Response response : responses.values()) {
        assertEquals(responseEntityToString(response), 200, response.getStatusLine().getStatusCode());
    }

    assertNotNull(recreationResponse.get());
    assertEquals(responseEntityToString(recreationResponse.get()), 200,
            recreationResponse.get().getStatusLine().getStatusCode());

    if (recreationException.get() != null) {
        assertNull(recreationException.get().getMessage(), recreationException.get());
    }

    try {
        // The idea of the code above is that the deletion is sufficiently time-consuming that
        // all threads enter the deletion call before the first one exits it.  Usually this happens,
        // but if it does not, the job that was recreated may get deleted.
        // It is not an error if the job does not exist, but the following assertions
        // will fail in that case.
        client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId);

        // Check that the job aliases exist.  These are the last thing to be deleted when a job is deleted, so
        // if there's been a race between deletion and recreation these are what will be missing.
        String aliases = getAliases();

        assertThat(aliases, containsString("\"" + AnomalyDetectorsIndex.jobResultsAliasedName(jobId)
                + "\":{\"filter\":{\"term\":{\"job_id\":{\"value\":\"" + jobId + "\",\"boost\":1.0}}}}"));
        assertThat(aliases, containsString("\"" + AnomalyDetectorsIndex.resultsWriteAlias(jobId) + "\":{}"));

    } catch (ResponseException missingJobException) {
        // The job does not exist
        assertThat(missingJobException.getResponse().getStatusLine().getStatusCode(), equalTo(404));

        // The job aliases should be deleted
        String aliases = getAliases();
        assertThat(aliases, not(containsString("\"" + AnomalyDetectorsIndex.jobResultsAliasedName(jobId)
                + "\":{\"filter\":{\"term\":{\"job_id\":{\"value\":\"" + jobId + "\",\"boost\":1.0}}}}")));
        assertThat(aliases,
                not(containsString("\"" + AnomalyDetectorsIndex.resultsWriteAlias(jobId) + "\":{}")));
    }

    assertEquals(numThreads, recreationGuard.get());
}
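
Besides the shared IOException slot, the test uses recreationGuard.getAndIncrement() == 0 as a run-once gate: exactly one of the competing threads recreates the job, and AtomicReference holders carry that thread's response (or exception) back to the main thread. The gate in isolation, as a sketch (names are illustrative):

import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;

public class RunOnceGate {
    private static final AtomicInteger guard = new AtomicInteger(0);
    private static final AtomicReference<String> result = new AtomicReference<>();

    // Called concurrently by every worker thread.
    static void afterDelete() {
        if (guard.getAndIncrement() == 0) {
            // Only the first thread to reach this point runs the follow-up work;
            // the holder hands its outcome back to the main thread.
            result.set("recreated by " + Thread.currentThread().getName());
        }
    }
}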

From source file: io.fabric8.maven.core.service.openshift.OpenshiftBuildService.java

private void waitForOpenShiftBuildToComplete(OpenShiftClient client, Build build)
        throws MojoExecutionException, InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final CountDownLatch logTerminateLatch = new CountDownLatch(1);
    final String buildName = KubernetesHelper.getName(build);

    final AtomicReference<Build> buildHolder = new AtomicReference<>();

    // Don't query for logs directly; watch the build pod instead:
    waitUntilPodIsReady(buildName + "-build", 20, log);
    log.info("Waiting for build " + buildName + " to complete...");
    try (LogWatch logWatch = client.pods().withName(buildName + "-build").watchLog()) {
        KubernetesClientUtil.printLogsAsync(logWatch, "Failed to tail build log", logTerminateLatch, log);
        Watcher<Build> buildWatcher = getBuildWatcher(latch, buildName, buildHolder);
        try (Watch watcher = client.builds().withName(buildName).watch(buildWatcher)) {
            // Check if the build is already finished to avoid waiting indefinitely
            Build lastBuild = client.builds().withName(buildName).get();
            if (Builds.isFinished(KubernetesResourceUtil.getBuildStatusPhase(lastBuild))) {
                log.debug("Build %s is already finished", buildName);
                buildHolder.set(lastBuild);
                latch.countDown();
            }

            waitUntilBuildFinished(latch);
            logTerminateLatch.countDown();

            build = buildHolder.get();
            if (build == null) {
                log.debug("Build watcher on %s was closed prematurely", buildName);
                build = client.builds().withName(buildName).get();
            }
            String status = KubernetesResourceUtil.getBuildStatusPhase(build);
            if (Builds.isFailed(status) || Builds.isCancelled(status)) {
                throw new MojoExecutionException("OpenShift Build " + buildName + " error: "
                        + KubernetesResourceUtil.getBuildStatusReason(build));
            }

            if (!Builds.isFinished(status)) {
                log.warn(
                        "Could not wait for the completion of build %s. It may be  may be still running (status=%s)",
                        buildName, status);
            } else {
                log.info("Build %s in status %s", buildName, status);
            }
        }
    }
}
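
Here the AtomicReference is a hand-off slot between the watch callback and the waiting thread: the watcher stores the final Build and opens the latch, and the main thread reads the holder once the latch is released. Stripped of the OpenShift client, the pattern reduces to something like this (all names illustrative):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

public class CallbackHandoff {
    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final AtomicReference<String> resultHolder = new AtomicReference<>();

        Thread watcher = new Thread(() -> {
            // stand-in for an event callback delivering the final state
            resultHolder.set("Complete");
            latch.countDown();
        });
        watcher.start();

        if (latch.await(5, TimeUnit.SECONDS)) {
            System.out.println("final status: " + resultHolder.get());
        } else {
            System.out.println("timed out waiting for the callback");
        }
    }
}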

From source file: org.killbill.billing.plugin.meter.api.user.JsonSamplesOutputer.java

private void writeJsonForStoredChunks(final JsonGenerator generator, final List<Integer> hostIdsList,
        final List<Integer> sampleKindIdsList, final DateTime startTime, final DateTime endTime)
        throws IOException {
    final AtomicReference<Integer> lastHostId = new AtomicReference<Integer>(null);
    final AtomicReference<Integer> lastSampleKindId = new AtomicReference<Integer>(null);
    final List<TimelineChunk> chunksForHostAndSampleKind = new ArrayList<TimelineChunk>();

    timelineDao.getSamplesBySourceIdsAndMetricIds(hostIdsList, sampleKindIdsList, startTime, endTime,
            new TimelineChunkConsumer() {
                @Override
                public void processTimelineChunk(final TimelineChunk chunks) {
                    final Integer previousHostId = lastHostId.get();
                    final Integer previousSampleKindId = lastSampleKindId.get();
                    final Integer currentHostId = chunks.getSourceId();
                    final Integer currentSampleKindId = chunks.getMetricId();

                    chunksForHostAndSampleKind.add(chunks);
                    if (previousHostId != null && (!previousHostId.equals(currentHostId)
                            || !previousSampleKindId.equals(currentSampleKindId))) {
                        try {
                            writeJsonForChunks(generator, chunksForHostAndSampleKind);
                        } catch (IOException e) {
                            throw new RuntimeException(e);
                        }
                        chunksForHostAndSampleKind.clear();
                    }

                    lastHostId.set(currentHostId);
                    lastSampleKindId.set(currentSampleKindId);
                }
            }, context);

    if (chunksForHostAndSampleKind.size() > 0) {
        writeJsonForChunks(generator, chunksForHostAndSampleKind);
        chunksForHostAndSampleKind.clear();
    }
}
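
In this example AtomicReference serves mainly as a mutable holder, not as a concurrency tool: the anonymous TimelineChunkConsumer needs "last seen" state between invocations, and locals captured by an inner class must be (effectively) final, so single-element holders fill the gap. A condensed sketch of that grouping idiom:

import java.util.Arrays;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;

public class LastSeenHolder {
    public static void main(String[] args) {
        final AtomicReference<Integer> lastKey = new AtomicReference<>(null);

        // The holder gives this callback mutable state across invocations,
        // even though captured locals must be effectively final.
        Consumer<Integer> onItem = key -> {
            Integer previous = lastKey.get();
            if (previous != null && !previous.equals(key)) {
                System.out.println("group boundary: " + previous + " -> " + key);
            }
            lastKey.set(key);
        };

        for (int key : Arrays.asList(1, 1, 2, 3, 3)) {
            onItem.accept(key);
        }
    }
}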

From source file: com.wk.lodge.composite.web.tomcat.IntegrationCompositeTests.java

@Test
public void testInit() throws Exception {

    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicReference<Throwable> failure = new AtomicReference<Throwable>();

    URI uri = new URI("ws://localhost:" + port + "/composite");
    WebSocketStompClient stompClient = new WebSocketStompClient(uri, this.headers, sockJsClient);
    stompClient.setMessageConverter(new MappingJackson2MessageConverter());

    stompClient.connect(new StompMessageHandler() {

        private StompSession stompSession;

        @Override
        public void afterConnected(StompSession stompSession, StompHeaderAccessor headers) {
            this.stompSession = stompSession;
            this.stompSession.subscribe("/user/queue/device", null);

            try {
                this.stompSession.send("/app/init", "");
            } catch (Throwable t) {
                failure.set(t);
                latch.countDown();
            }
        }

        @Override
        public void handleMessage(Message<byte[]> message) {
            StompHeaderAccessor headers = StompHeaderAccessor.wrap(message);
            if (!"/user/queue/device".equals(headers.getDestination())) {
                failure.set(new IllegalStateException("Unexpected message: " + message));
            }
            logger.debug("Got " + new String((byte[]) message.getPayload()));
            try {
                String json = parseMessageJson(message);
                new JsonPathExpectationsHelper("type").assertValue(json, "init");
                new JsonPathExpectationsHelper("uuid").exists(json);
            } catch (Throwable t) {
                failure.set(t);
            } finally {
                this.stompSession.disconnect();
                latch.countDown();
            }
        }

        @Override
        public void handleError(Message<byte[]> message) {
            StompHeaderAccessor accessor = StompHeaderAccessor.wrap(message);
            String error = "[Producer] " + accessor.getShortLogMessage(message.getPayload());
            logger.error(error);
            failure.set(new Exception(error));
        }

        @Override
        public void handleReceipt(String receiptId) {
        }

        @Override
        public void afterDisconnected() {
        }
    });

    if (!latch.await(10, TimeUnit.SECONDS)) {
        fail("Init response not received");
    } else if (failure.get() != null) {
        throw new AssertionError("", failure.get());
    }
}
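
The failure reference collects a Throwable from whichever STOMP callback breaks, and the main thread converts it into a test failure only after the latch settles. Reduced to its core (a sketch, with the callback simulated by a plain thread):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

public class AsyncFailureCapture {
    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final AtomicReference<Throwable> failure = new AtomicReference<>();

        new Thread(() -> {
            try {
                // stand-in for asynchronous callback work that may fail
                throw new IllegalStateException("unexpected message");
            } catch (Throwable t) {
                failure.set(t); // record rather than throw: we are off the test thread
            } finally {
                latch.countDown();
            }
        }).start();

        if (!latch.await(10, TimeUnit.SECONDS)) {
            throw new AssertionError("response not received");
        } else if (failure.get() != null) {
            throw new AssertionError("async failure", failure.get());
        }
    }
}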

From source file: it.anyplace.sync.bep.BlockPuller.java

public FileDownloadObserver pullBlocks(FileBlocks fileBlocks) throws InterruptedException {
    logger.info("pulling file = {}", fileBlocks);
    checkArgument(connectionHandler.hasFolder(fileBlocks.getFolder()),
            "supplied connection handler %s will not share folder %s", connectionHandler,
            fileBlocks.getFolder());
    final Object lock = new Object();
    final AtomicReference<Exception> error = new AtomicReference<>();
    final Object listener = new Object() {
        @Subscribe
        public void handleResponseMessageReceivedEvent(ResponseMessageReceivedEvent event) {
            synchronized (lock) {
                try {
                    if (!requestIds.contains(event.getMessage().getId())) {
                        return;
                    }
                    checkArgument(equal(event.getMessage().getCode(), ErrorCode.NO_ERROR),
                            "received error response, code = %s", event.getMessage().getCode());
                    byte[] data = event.getMessage().getData().toByteArray();
                    String hash = BaseEncoding.base16().encode(Hashing.sha256().hashBytes(data).asBytes());
                    blockCache.pushBlock(data);
                    if (missingHashes.remove(hash)) {
                        blocksByHash.put(hash, data);
                        logger.debug("aquired block, hash = {}", hash);
                        lock.notify();
                    } else {
                        logger.warn("received not-needed block, hash = {}", hash);
                    }
                } catch (Exception ex) {
                    error.set(ex);
                    lock.notify();
                }
            }
        }
    };
    FileDownloadObserver fileDownloadObserver = new FileDownloadObserver() {

        private long getReceivedData() {
            return (long) blocksByHash.size() * BLOCK_SIZE; // widen before multiplying to avoid int overflow
        }

        private long getTotalData() {
            return (long) (blocksByHash.size() + missingHashes.size()) * BLOCK_SIZE; // widen before multiplying to avoid int overflow
        }

        @Override
        public double getProgress() {
            return isCompleted() ? 1d : getReceivedData() / ((double) getTotalData());
        }

        @Override
        public String getProgressMessage() {
            return (Math.round(getProgress() * 1000d) / 10d) + "% "
                    + FileUtils.byteCountToDisplaySize(getReceivedData()) + " / "
                    + FileUtils.byteCountToDisplaySize(getTotalData());
        }

        @Override
        public boolean isCompleted() {
            return missingHashes.isEmpty();
        }

        @Override
        public void checkError() {
            if (error.get() != null) {
                throw new RuntimeException(error.get());
            }
        }

        @Override
        public double waitForProgressUpdate() throws InterruptedException {
            if (!isCompleted()) {
                synchronized (lock) {
                    checkError();
                    lock.wait();
                    checkError();
                }
            }
            return getProgress();
        }

        @Override
        public InputStream getInputStream() {
            checkArgument(missingHashes.isEmpty(), "pull failed, some blocks are still missing");
            List<byte[]> blockList = Lists
                    .newArrayList(Lists.transform(hashList, Functions.forMap(blocksByHash)));
            return new SequenceInputStream(Collections
                    .enumeration(Lists.transform(blockList, new Function<byte[], ByteArrayInputStream>() {
                        @Override
                        public ByteArrayInputStream apply(byte[] data) {
                            return new ByteArrayInputStream(data);
                        }
                    })));
        }

        @Override
        public void close() {
            missingHashes.clear();
            hashList.clear();
            blocksByHash.clear();
            try {
                connectionHandler.getEventBus().unregister(listener);
            } catch (Exception ex) {
                // best-effort unregister; the listener may already be removed
            }
            if (closeConnection) {
                connectionHandler.close();
            }
        }
    };
    try {
        synchronized (lock) {
            hashList.addAll(Lists.transform(fileBlocks.getBlocks(), new Function<BlockInfo, String>() {
                @Override
                public String apply(BlockInfo block) {
                    return block.getHash();
                }
            }));
            missingHashes.addAll(hashList);
            for (String hash : missingHashes) {
                byte[] block = blockCache.pullBlock(hash);
                if (block != null) {
                    blocksByHash.put(hash, block);
                    missingHashes.remove(hash);
                }
            }
            connectionHandler.getEventBus().register(listener);
            for (BlockInfo block : fileBlocks.getBlocks()) {
                if (missingHashes.contains(block.getHash())) {
                    int requestId = Math.abs(new Random().nextInt());
                    requestIds.add(requestId);
                    connectionHandler.sendMessage(Request.newBuilder().setId(requestId)
                            .setFolder(fileBlocks.getFolder()).setName(fileBlocks.getPath())
                            .setOffset(block.getOffset()).setSize(block.getSize())
                            .setHash(ByteString.copyFrom(BaseEncoding.base16().decode(block.getHash())))
                            .build());
                    logger.debug("sent request for block, hash = {}", block.getHash());
                }
            }
            return fileDownloadObserver;
        }
    } catch (Exception ex) {
        fileDownloadObserver.close();
        throw ex;
    }
}
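
The combination of an error slot and lock.notify() lets the listener thread both wake the waiter and deliver the failure: waitForProgressUpdate checks the AtomicReference before and after lock.wait(), so an exception set by the event handler resurfaces as a RuntimeException in the polling thread. In miniature (illustrative names):

import java.util.concurrent.atomic.AtomicReference;

public class WaitWithErrorSlot {
    private final Object lock = new Object();
    private final AtomicReference<Exception> error = new AtomicReference<>();

    // Called from a worker/listener thread on failure.
    void fail(Exception ex) {
        synchronized (lock) {
            error.set(ex);
            lock.notify(); // wake the waiter so it can observe the error
        }
    }

    void checkError() {
        if (error.get() != null) {
            throw new RuntimeException(error.get());
        }
    }

    void awaitProgress() throws InterruptedException {
        synchronized (lock) {
            checkError(); // fail fast if an error already arrived
            lock.wait();
            checkError(); // surface an error delivered while waiting
        }
    }
}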

From source file: org.apache.hadoop.hdfs.TestFileConcurrentReader.java

@Test
public void testImmediateReadOfNewFile() throws IOException {
    final int blockSize = 64 * 1024;
    final int writeSize = 10 * blockSize;
    Configuration conf = new Configuration();

    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    init(conf);

    final int requiredSuccessfulOpens = 100;
    final Path file = new Path("/file1");
    final AtomicBoolean openerDone = new AtomicBoolean(false);
    final AtomicReference<String> errorMessage = new AtomicReference<>();
    final FSDataOutputStream out = fileSystem.create(file);

    final Thread writer = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                while (!openerDone.get()) {
                    out.write(DFSTestUtil.generateSequentialBytes(0, writeSize));
                    out.hflush();
                }
            } catch (IOException e) {
                LOG.warn("error in writer", e);
            } finally {
                try {
                    out.close();
                } catch (IOException e) {
                    LOG.error("unable to close file");
                }
            }
        }
    });

    Thread opener = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                for (int i = 0; i < requiredSuccessfulOpens; i++) {
                    fileSystem.open(file).close();
                }
                openerDone.set(true);
            } catch (IOException e) {
                openerDone.set(true);
                errorMessage.set(String.format("got exception : %s", StringUtils.stringifyException(e)));
            } catch (Exception e) {
                openerDone.set(true);
                errorMessage.set(String.format("got exception : %s", StringUtils.stringifyException(e)));
                writer.interrupt();
                fail("here");
            }
        }
    });

    writer.start();
    opener.start();

    try {
        writer.join();
        opener.join();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }

    assertNull(errorMessage.get(), errorMessage.get());
}