Example usage for java.util.concurrent.atomic AtomicReference AtomicReference()

Introduction

On this page you can find usage examples for the java.util.concurrent.atomic.AtomicReference no-argument constructor, AtomicReference().

Prototype

public AtomicReference() 

Document

Creates a new AtomicReference with null initial value.
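
A minimal, self-contained sketch of the no-argument constructor in use (the class and variable names here are illustrative, not taken from the examples below):

import java.util.concurrent.atomic.AtomicReference;

public class AtomicReferenceDemo {
    public static void main(String[] args) {
        // The no-arg constructor starts the reference out as null.
        AtomicReference<String> ref = new AtomicReference<>();
        System.out.println(ref.get()); // prints: null

        // compareAndSet succeeds only if the current value matches the expected one.
        boolean updated = ref.compareAndSet(null, "hello");
        System.out.println(updated + " -> " + ref.get()); // prints: true -> hello

        // A second attempt against a stale expected value fails.
        System.out.println(ref.compareAndSet(null, "world")); // prints: false
    }
}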

Usage

From source file:io.pravega.segmentstore.server.writer.SegmentAggregatorTests.java
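
This test seeds an initially empty AtomicReference with each injected IntentionalException, then inspects it after every flush() to decide whether a thrown exception was expected.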

/**
 * Tests the behavior of flush() with appends and storage errors (on the write() method).
 */
@Test
public void testFlushAppendWithStorageErrors() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    final int appendCount = config.getFlushThresholdBytes() * 10;
    final int failSyncEvery = 2;
    final int failAsyncEvery = 3;

    @Cleanup
    TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT, executorService()).join();

    // Have the writes fail every few attempts with a well-known exception.
    AtomicReference<IntentionalException> setException = new AtomicReference<>();
    Supplier<Exception> exceptionSupplier = () -> {
        IntentionalException ex = new IntentionalException(Long.toString(context.timer.getElapsedMillis()));
        setException.set(ex);
        return ex;
    };
    context.storage.setWriteSyncErrorInjector(
            new ErrorInjector<>(count -> count % failSyncEvery == 0, exceptionSupplier));
    context.storage.setWriteAsyncErrorInjector(
            new ErrorInjector<>(count -> count % failAsyncEvery == 0, exceptionSupplier));

    @Cleanup
    ByteArrayOutputStream writtenData = new ByteArrayOutputStream();

    // Part 1: flush triggered by accumulated size.
    int exceptionCount = 0;
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);

        // Call flush() and inspect the result.
        setException.set(null);
        context.increaseTime(config.getFlushThresholdTime().toMillis() + 1); // Force a flush by incrementing the time by a lot.
        FlushResult flushResult = null;

        try {
            flushResult = context.segmentAggregator.flush(TIMEOUT, executorService()).join();
            Assert.assertNull("An exception was expected, but none was thrown.", setException.get());
            Assert.assertNotNull("No FlushResult provided.", flushResult);
        } catch (Exception ex) {
            if (setException.get() != null) {
                Assert.assertEquals("Unexpected exception thrown.", setException.get(),
                        ExceptionHelpers.getRealException(ex));
                exceptionCount++;
            } else {
                // Not expecting any exception this time.
                throw ex;
            }
        }

        // Check flush result.
        if (flushResult != null) {
            AssertExtensions.assertGreaterThan("Not enough bytes were flushed (time threshold).", 0,
                    flushResult.getFlushedBytes());
            Assert.assertEquals("Not expecting any merged bytes in this test.", 0,
                    flushResult.getMergedBytes());
        }
    }

    // Do one last flush at the end to make sure we clear out all the buffers, if there's anything else left.
    context.increaseTime(config.getFlushThresholdTime().toMillis() + 1); // Force a flush by incrementing the time by a lot.
    context.storage.setWriteSyncErrorInjector(null);
    context.storage.setWriteAsyncErrorInjector(null);
    context.segmentAggregator.flush(TIMEOUT, executorService()).join();

    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    long storageLength = context.storage
            .getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join()
            .getLength();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageLength);
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0,
            actualData.length, TIMEOUT).join();

    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
    AssertExtensions.assertGreaterThan("Not enough errors injected.", 0, exceptionCount);
}

From source file:hudson.plugins.jobConfigHistory.FileHistoryDao.java
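
Here the empty AtomicReference serves as an out-parameter: getRootDir(...) fills in the timestamp, which createHistoryXmlFile(...) then reads back via get().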

File createNewHistoryEntry(Node node, final String operation) {
    try {
        final AtomicReference<Calendar> timestampHolder = new AtomicReference<Calendar>();
        final File timestampedDir = getRootDir(node, timestampHolder);
        LOG.log(Level.FINE, "{0} on {1}", new Object[] { this, timestampedDir });
        createHistoryXmlFile(timestampHolder.get(), timestampedDir, operation);
        assert timestampHolder.get() != null;
        return timestampedDir;
    } catch (IOException e) {
        // A known issue is that Hudson core fails to move the folders on rename,
        // but continues as if it did (see https://issues.jenkins-ci.org/browse/JENKINS-8318).
        // If the history entry cannot be created, fail fast instead of continuing
        // with inconsistent history.
        throw new RuntimeException(
                "Unable to create history entry for configuration file of node " + node.getDisplayName(), e);
    }
}

From source file:org.elasticsearch.xpack.ml.integration.MlJobIT.java
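
Several initially empty AtomicReference instances collect an IOException from any of the delete threads, plus the Response or ResponseException from the job recreation, so the main thread can assert on them after join().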

public void testDelete_multipleRequest() throws Exception {
    String jobId = "delete-job-multiple-times";
    createFarequoteJob(jobId);

    ConcurrentMapLong<Response> responses = ConcurrentCollections.newConcurrentMapLong();
    ConcurrentMapLong<ResponseException> responseExceptions = ConcurrentCollections.newConcurrentMapLong();
    AtomicReference<IOException> ioe = new AtomicReference<>();
    AtomicInteger recreationGuard = new AtomicInteger(0);
    AtomicReference<Response> recreationResponse = new AtomicReference<>();
    AtomicReference<ResponseException> recreationException = new AtomicReference<>();

    Runnable deleteJob = () -> {
        try {
            boolean forceDelete = randomBoolean();
            String url = MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId;
            if (forceDelete) {
                url += "?force=true";
            }
            Response response = client().performRequest("delete", url);
            responses.put(Thread.currentThread().getId(), response);
        } catch (ResponseException re) {
            responseExceptions.put(Thread.currentThread().getId(), re);
        } catch (IOException e) {
            ioe.set(e);
        }

        // Immediately after the first deletion finishes, recreate the job.  This should pick up
        // race conditions where another delete request deletes part of the newly created job.
        if (recreationGuard.getAndIncrement() == 0) {
            try {
                recreationResponse.set(createFarequoteJob(jobId));
            } catch (ResponseException re) {
                recreationException.set(re);
            } catch (IOException e) {
                ioe.set(e);
            }
        }
    };

    // The idea is to hit the situation where one request waits for
    // the other to complete. This is difficult to schedule but
    // hopefully it will happen in CI
    int numThreads = 5;
    Thread[] threads = new Thread[numThreads];
    for (int i = 0; i < numThreads; i++) {
        threads[i] = new Thread(deleteJob);
    }
    for (int i = 0; i < numThreads; i++) {
        threads[i].start();
    }
    for (int i = 0; i < numThreads; i++) {
        threads[i].join();
    }

    if (ioe.get() != null) {
        // This looks redundant but the check is done so we can
        // print the exception's error message
        assertNull(ioe.get().getMessage(), ioe.get());
    }

    assertEquals(numThreads, responses.size() + responseExceptions.size());

    // 404s are OK, as they mean the job had already been deleted.
    for (ResponseException re : responseExceptions.values()) {
        assertEquals(re.getMessage(), 404, re.getResponse().getStatusLine().getStatusCode());
    }

    for (Response response : responses.values()) {
        assertEquals(responseEntityToString(response), 200, response.getStatusLine().getStatusCode());
    }

    assertNotNull(recreationResponse.get());
    assertEquals(responseEntityToString(recreationResponse.get()), 200,
            recreationResponse.get().getStatusLine().getStatusCode());

    if (recreationException.get() != null) {
        assertNull(recreationException.get().getMessage(), recreationException.get());
    }

    try {
        // The idea of the code above is that the deletion is sufficiently time-consuming that
        // all threads enter the deletion call before the first one exits it.  Usually this happens,
        // but when it does not, the recreated job may itself get deleted.
        // It is not an error if the job does not exist, but in that case the request
        // below throws and the assertions in the catch block apply instead.
        client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId);

        // Check that the job aliases exist.  These are the last thing to be deleted when a job is deleted, so
        // if there's been a race between deletion and recreation these are what will be missing.
        String aliases = getAliases();

        assertThat(aliases, containsString("\"" + AnomalyDetectorsIndex.jobResultsAliasedName(jobId)
                + "\":{\"filter\":{\"term\":{\"job_id\":{\"value\":\"" + jobId + "\",\"boost\":1.0}}}}"));
        assertThat(aliases, containsString("\"" + AnomalyDetectorsIndex.resultsWriteAlias(jobId) + "\":{}"));

    } catch (ResponseException missingJobException) {
        // The job does not exist
        assertThat(missingJobException.getResponse().getStatusLine().getStatusCode(), equalTo(404));

        // The job aliases should be deleted
        String aliases = getAliases();
        assertThat(aliases, not(containsString("\"" + AnomalyDetectorsIndex.jobResultsAliasedName(jobId)
                + "\":{\"filter\":{\"term\":{\"job_id\":{\"value\":\"" + jobId + "\",\"boost\":1.0}}}}")));
        assertThat(aliases,
                not(containsString("\"" + AnomalyDetectorsIndex.resultsWriteAlias(jobId) + "\":{}")));
    }

    assertEquals(numThreads, recreationGuard.get());
}

From source file:io.cloudslang.lang.tools.build.SlangBuilderTest.java
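
An initially empty AtomicReference captures the IRunTestResults argument passed to the mocked splitTestCasesByRunState(...), so a later in-order verification can match against that exact instance.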

@Test
public void testProcessRunTestsParallel() {
    final Map<String, SlangTestCase> testCases = new LinkedHashMap<>();
    final SlangTestCase testCase1 = new SlangTestCase("test1", "testFlowPath", "desc", asList("abc", "new"),
            "mock", null, null, false, "SUCCESS");
    final SlangTestCase testCase2 = new SlangTestCase("test2", "testFlowPath", "desc", asList("efg", "new"),
            "mock", null, null, false, "SUCCESS");
    final SlangTestCase testCase3 = new SlangTestCase("test3", "testFlowPath", "desc", asList("new", "new2"),
            "mock", null, null, false, "SUCCESS");
    final SlangTestCase testCase4 = new SlangTestCase("test4", "testFlowPath", "desc", asList("jjj", "new2"),
            "mock", null, null, false, "SUCCESS");
    final SlangTestCase testCase5 = new SlangTestCase("test5", "testFlowPath", "desc",
            asList("hhh", "jjj", "abc"), "mock", null, null, false, "SUCCESS");

    testCases.put("test1", testCase1);
    testCases.put("test2", testCase2);
    testCases.put("test3", testCase3);
    testCases.put("test4", testCase4);
    testCases.put("test5", testCase5);

    final List<String> testSuites = newArrayList("abc");
    final Map<String, CompilationArtifact> compiledFlows = new HashMap<>();
    final String projectPath = "aaa";

    final AtomicReference<IRunTestResults> capturedArgument = new AtomicReference<>();
    doAnswer(getAnswer(capturedArgument)).when(slangTestRunner).splitTestCasesByRunState(any(BulkRunMode.class),
            anyMap(), anyList(), any(IRunTestResults.class), any(BuildModeConfig.class));
    doNothing().when(slangTestRunner).runTestsParallel(anyString(), anyMap(), anyMap(),
            any(ThreadSafeRunTestResults.class));

    // Tested call
    slangBuilder.processRunTests(projectPath, testSuites, ALL_PARALLEL, compiledFlows, testCases,
            buildModeConfig);

    InOrder inOrder = Mockito.inOrder(slangTestRunner);
    inOrder.verify(slangTestRunner).splitTestCasesByRunState(eq(ALL_PARALLEL), eq(testCases), eq(testSuites),
            isA(ThreadSafeRunTestResults.class), any(BuildModeConfig.class));
    inOrder.verify(slangTestRunner).runTestsParallel(eq(projectPath), anyMap(), eq(compiledFlows),
            eq((ThreadSafeRunTestResults) capturedArgument.get()));
    verifyNoMoreInteractions(slangTestRunner);
    verify(slangTestRunner, never()).runTestsSequential(anyString(), anyMap(), anyMap(),
            any(IRunTestResults.class));
}

From source file:io.cloudslang.lang.tools.build.SlangBuilderTest.java
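
The sequential variant uses the same capture pattern: the empty AtomicReference records the results object handed to the mock, which the in-order verification then matches with eq(...).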

@Test
public void testProcessRunTestsSequential() {
    final Map<String, SlangTestCase> testCases = new LinkedHashMap<>();
    final SlangTestCase testCase1 = new SlangTestCase("test1", "testFlowPath", "desc", asList("abc", "new"),
            "mock", null, null, false, "SUCCESS");
    final SlangTestCase testCase2 = new SlangTestCase("test2", "testFlowPath", "desc", asList("efg", "new"),
            "mock", null, null, false, "SUCCESS");
    final SlangTestCase testCase3 = new SlangTestCase("test3", "testFlowPath", "desc", asList("new", "new2"),
            "mock", null, null, false, "SUCCESS");

    testCases.put("test1", testCase1);
    testCases.put("test2", testCase2);
    testCases.put("test3", testCase3);

    final List<String> testSuites = newArrayList("abc");
    final Map<String, CompilationArtifact> compiledFlows = new HashMap<>();
    final String projectPath = "aaa";

    final AtomicReference<IRunTestResults> theCapturedArgument = new AtomicReference<>();
    doAnswer(getAnswer(theCapturedArgument)).when(slangTestRunner).splitTestCasesByRunState(
            any(BulkRunMode.class), anyMap(), anyList(), any(IRunTestResults.class),
            any(BuildModeConfig.class));
    doNothing().when(slangTestRunner).runTestsSequential(anyString(), anyMap(), anyMap(),
            any(IRunTestResults.class));

    BuildModeConfig basic = BuildModeConfig.createBasicBuildModeConfig();
    // Tested call
    slangBuilder.processRunTests(projectPath, testSuites, ALL_SEQUENTIAL, compiledFlows, testCases, basic);

    InOrder inOrder = Mockito.inOrder(slangTestRunner);
    inOrder.verify(slangTestRunner).splitTestCasesByRunState(eq(ALL_SEQUENTIAL), eq(testCases), eq(testSuites),
            isA(RunTestsResults.class), eq(basic));
    inOrder.verify(slangTestRunner).runTestsSequential(eq(projectPath), anyMap(), eq(compiledFlows),
            eq((RunTestsResults) theCapturedArgument.get()));
    inOrder.verify(slangTestRunner, never()).runTestsParallel(anyString(), anyMap(), anyMap(),
            any(ThreadSafeRunTestResults.class));
    verifyNoMoreInteractions(slangTestRunner);
}

From source file:com.couchbase.client.core.endpoint.query.QueryHandlerTest.java
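
Here compareAndSet(null, ctx) stores the first ChannelHandlerContext observed during channel registration into an initially null AtomicReference, for use when firing the idle event by hand.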

@Test
public void shouldFireKeepAlive() throws Exception {
    final AtomicInteger keepAliveEventCounter = new AtomicInteger();
    final AtomicReference<ChannelHandlerContext> ctxRef = new AtomicReference<>();

    QueryHandler testHandler = new QueryHandler(endpoint, responseRingBuffer, queue, false) {
        @Override
        public void channelRegistered(ChannelHandlerContext ctx) throws Exception {
            super.channelRegistered(ctx);
            ctxRef.compareAndSet(null, ctx);
        }

        @Override
        protected void onKeepAliveFired(ChannelHandlerContext ctx, CouchbaseRequest keepAliveRequest) {
            assertEquals(1, keepAliveEventCounter.incrementAndGet());
        }

        @Override
        protected void onKeepAliveResponse(ChannelHandlerContext ctx, CouchbaseResponse keepAliveResponse) {
            assertEquals(2, keepAliveEventCounter.incrementAndGet());
        }
    };
    EmbeddedChannel channel = new EmbeddedChannel(testHandler);

    // Test that an idle event triggers a query keepAlive request and that the hook is called.
    testHandler.userEventTriggered(ctxRef.get(), IdleStateEvent.FIRST_ALL_IDLE_STATE_EVENT);

    assertEquals(1, keepAliveEventCounter.get());
    assertTrue(queue.peek() instanceof QueryHandler.KeepAliveRequest);
    QueryHandler.KeepAliveRequest keepAliveRequest = (QueryHandler.KeepAliveRequest) queue.peek();

    // Test that an HTTP response to the request is interpreted as a KeepAliveResponse and that the hook is called.
    HttpResponse response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.NOT_FOUND);
    LastHttpContent responseEnd = new DefaultLastHttpContent();
    channel.writeInbound(response, responseEnd);
    QueryHandler.KeepAliveResponse keepAliveResponse = keepAliveRequest.observable()
            .cast(QueryHandler.KeepAliveResponse.class).timeout(1, TimeUnit.SECONDS).toBlocking().single();

    ReferenceCountUtil.releaseLater(response);
    ReferenceCountUtil.releaseLater(responseEnd);

    assertEquals(2, keepAliveEventCounter.get());
    assertEquals(ResponseStatus.NOT_EXISTS, keepAliveResponse.status());
}

From source file:com.igormaznitsa.zxpoly.MainForm.java
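
The empty AtomicReference<FileFilter> acts as an out-parameter for the file chooser: after the dialog closes, get() reports which file filter the user selected, which determines how the disk image is parsed.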

private void loadDiskIntoDrive(final int drive) {
    this.stepSemaphor.lock();
    try {
        final char diskName;
        switch (drive) {
        case BetaDiscInterface.DRIVE_A:
            diskName = 'A';
            break;
        case BetaDiscInterface.DRIVE_B:
            diskName = 'B';
            break;
        case BetaDiscInterface.DRIVE_C:
            diskName = 'C';
            break;
        case BetaDiscInterface.DRIVE_D:
            diskName = 'D';
            break;
        default:
            throw new Error("Unexpected drive index");
        }
        final AtomicReference<FileFilter> filter = new AtomicReference<>();
        final File selectedFile = chooseFileForOpen("Select Disk " + diskName, this.lastFloppyFolder, filter,
                new SCLFileFilter(), new TRDFileFilter());
        if (selectedFile != null) {
            this.lastFloppyFolder = selectedFile.getParentFile();
            try {
                final TRDOSDisk floppy = new TRDOSDisk(
                        filter.get().getClass() == SCLFileFilter.class ? TRDOSDisk.Source.SCL
                                : TRDOSDisk.Source.TRD,
                        FileUtils.readFileToByteArray(selectedFile), false);
                this.board.getBetaDiskInterface().insertDiskIntoDrive(drive, floppy);
                log.info("Loaded drive " + diskName + " by floppy image file " + selectedFile);
            } catch (IOException ex) {
                log.log(Level.WARNING, "Can't read Floppy image file [" + selectedFile + ']', ex);
                JOptionPane.showMessageDialog(this, "Can't read Floppy image file", "Error",
                        JOptionPane.ERROR_MESSAGE);
            }
        }
    } finally {
        this.stepSemaphor.unlock();
    }
}