Example usage for java.util.concurrent.atomic.AtomicReference AtomicReference()

Introduction

On this page you can find usage examples for the java.util.concurrent.atomic.AtomicReference no-argument constructor, AtomicReference().

Prototype

public AtomicReference() 

Document

Creates a new AtomicReference with a null initial value.
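
For orientation, here is a minimal sketch of the pattern the examples below share (this snippet is illustrative only and not taken from any of the listed projects; the class name AtomicReferenceSketch is made up): construct the reference with no initial value, publish a value with set or compareAndSet, and read it elsewhere with get.

import java.util.concurrent.atomic.AtomicReference;

public class AtomicReferenceSketch {
    public static void main(String[] args) {
        // The no-argument constructor leaves the reference holding null.
        AtomicReference<String> ref = new AtomicReference<>();

        // Publish a value only if nothing has been published yet (first writer wins).
        boolean published = ref.compareAndSet(null, "hello");

        // Read the current value; it is "hello" once the CAS above has succeeded.
        System.out.println(published + " / " + ref.get());
    }
}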

Usage

From source file:org.keycloak.testsuite.admin.concurrency.ConcurrentLoginTest.java

@Test
public void concurrentLoginMultipleUsers() throws Throwable {
    log.info("*********************************************");
    long start = System.currentTimeMillis();

    AtomicReference<String> userSessionId = new AtomicReference<>();
    LoginTask loginTask = null;

    try (CloseableHttpClient httpClient = HttpClientBuilder.create()
            .setRedirectStrategy(new LaxRedirectStrategy()).build()) {
        loginTask = new LoginTask(httpClient, userSessionId, 100, 1, false,
                Arrays.asList(createHttpClientContextForUser(httpClient, "test-user@localhost", "password"),
                        createHttpClientContextForUser(httpClient, "john-doh@localhost", "password"),
                        createHttpClientContextForUser(httpClient, "roleRichUser", "password")));

        run(DEFAULT_THREADS, DEFAULT_CLIENTS_COUNT, loginTask);
        int clientSessionsCount = testingClient.testing().getClientSessionsCountInUserSession("test",
                userSessionId.get());
        Assert.assertEquals(1 + DEFAULT_CLIENTS_COUNT / 3 + (DEFAULT_CLIENTS_COUNT % 3 <= 0 ? 0 : 1),
                clientSessionsCount);
    } finally {
        long end = System.currentTimeMillis() - start;
        log.infof("Statistics: %s", loginTask == null ? "??" : loginTask.getHistogram());
        log.info("concurrentLoginMultipleUsers took " + (end / 1000) + "s");
        log.info("*********************************************");
    }
}

From source file:info.archinnov.achilles.test.integration.tests.AsyncQueryIT.java

@Test
public void should_excecute_DML_native_query_with_async_listeners() throws Exception {
    //Given
    Long id = RandomUtils.nextLong(0, Long.MAX_VALUE);

    final CountDownLatch latch = new CountDownLatch(2);
    final AtomicReference<Object> successSpy = new AtomicReference<>();
    final AtomicReference<Throwable> exceptionSpy = new AtomicReference<>();

    FutureCallback<Object> successCallBack = new FutureCallback<Object>() {
        @Override
        public void onSuccess(Object result) {
            successSpy.getAndSet(result);
            latch.countDown();
        }

        @Override
        public void onFailure(Throwable t) {
            latch.countDown();
        }
    };

    FutureCallback<Object> exceptionCallBack = new FutureCallback<Object>() {
        @Override
        public void onSuccess(Object result) {
            latch.countDown();
        }

        @Override
        public void onFailure(Throwable t) {
            exceptionSpy.getAndSet(t);
            latch.countDown();
        }
    };

    final RegularStatement insert = insertInto("completebean").value("id", bindMarker("id"));
    final RegularStatement delete = delete().from("completebean").where(eq("name", "test"));

    //When
    asyncManager.nativeQuery(insert, id).execute(successCallBack);
    asyncManager.nativeQuery(delete).execute(exceptionCallBack);

    latch.await();

    //Then
    assertThat(successSpy.get()).isNotNull().isSameAs(Empty.INSTANCE);
    assertThat(exceptionSpy.get()).isNotNull().isInstanceOf(InvalidQueryException.class);
}

From source file:com.blacklocus.jres.request.index.JresUpdateDocumentScriptTest.java

@Test(expected = ExecutionException.class)
public void testRetryOnConflictExpectError() throws InterruptedException, ExecutionException {
    final String index = "JresUpdateDocumentScriptTest.testRetryOnConflictExpectError".toLowerCase();
    final String type = "test";
    final String id = "warzone";

    final AtomicReference<String> error = new AtomicReference<String>();
    final int numThreads = 16, numIterations = 100;

    ExecutorService x = Executors.newFixedThreadPool(numThreads);
    List<Future<?>> futures = new ArrayList<Future<?>>(numThreads);
    for (int i = 0; i < numThreads; i++) {
        futures.add(x.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                for (int j = 0; j < numIterations; j++) {
                    jres.quest(new JresUpdateDocumentScript(index, type, id, "ctx._source.value += 1", null,
                            ImmutableMap.of("value", 0), null));
                }
                return null;
            }
        }));
    }
    x.shutdown();
    x.awaitTermination(1, TimeUnit.MINUTES);

    for (Future<?> future : futures) {
        // expecting a conflict exception from ElasticSearch
        future.get();
    }
}

From source file:com.fiadot.springjsoncrypt.json.CryptMappingJacson2HttpMessageConverter.java

@Override
public boolean canWrite(Class<?> clazz, MediaType mediaType) {
    if (!jackson23Available || !logger.isWarnEnabled()) {
        return (this.objectMapper.canSerialize(clazz) && canWrite(mediaType));
    }
    AtomicReference<Throwable> causeRef = new AtomicReference<Throwable>();
    // passing causeRef lets Jackson report why serialization failed, for the logging below
    if (this.objectMapper.canSerialize(clazz, causeRef) && canWrite(mediaType)) {
        return true;
    }
    Throwable cause = causeRef.get();
    if (cause != null) {
        String msg = "Failed to evaluate serialization for type [" + clazz + "]";
        if (logger.isDebugEnabled()) {
            logger.warn(msg, cause);
        } else {
            logger.warn(msg + ": " + cause);
        }
    }
    return false;
}

From source file:edu.rit.flick.genetics.FastFileInflator.java

@Override
public synchronized File inflate(final Configuration configuration, final File fileIn, final File fileOut) {
    assert fileIn.exists();

    try {
        // Inflate to Directory
        final String outputDirectoryPath = fileOut.getPath()
                .replaceAll("." + Files.getFileExtension(fileOut.getPath()), FLICK_FAST_FILE_TMP_DIR_SUFFIX);

        final File tmpOutputDirectory = new File(outputDirectoryPath);
        if (tmpOutputDirectory.exists())
            FileUtils.deleteDirectory(tmpOutputDirectory);

        final AtomicReference<Thread> cleanHookAtomic = new AtomicReference<Thread>();

        final Thread inflateToDirectoryThread = new Thread(() -> {
            try {
                // Inflate Fast file to a temporary directory
                inflateFromFile(fileIn, tmpOutputDirectory);

                // Inflate Directory to a zip file
                inflateFromDirectory(tmpOutputDirectory, fileOut);

                // Clean up IO
                close();
                System.gc();
                Thread.sleep(100);

                // Clean up temporary directory
                FileUtils.deleteDirectory(tmpOutputDirectory);

                Runtime.getRuntime().removeShutdownHook(cleanHookAtomic.get());
            } catch (final Exception e) {
                if (!interrupted)
                    System.err.println(e.getMessage());
            }
        }, "Default_Inflation_Thread");

        // Make cleaning hook
        final Thread cleanHook = new Thread(() -> {
            interrupted = true;
            configuration.setFlag(VERBOSE_FLAG, false);
            configuration.setFlag(DELETE_FLAG, false);
            try {
                if (inflateToDirectoryThread.isAlive())
                    inflateToDirectoryThread.interrupt();

                // Clean up IO
                close();
                System.gc();
                Thread.sleep(100);

                synchronized (this) {
                    while (inflateToDirectoryThread.isAlive())
                        this.wait();
                }

            } catch (final IOException | InterruptedException e) {
                e.printStackTrace();
            } finally {
                // Clean up temporary directory
                FileUtils.deleteQuietly(tmpOutputDirectory);
                // Clean up INCOMPLETE output file
                FileUtils.deleteQuietly(fileOut);
                System.out.println();
            }

        }, "Inflation_Cleaning_Thread");

        cleanHookAtomic.set(cleanHook);

        Runtime.getRuntime().addShutdownHook(cleanHook);

        inflateToDirectoryThread.start();
        inflateToDirectoryThread.join();

    } catch (final IOException | InterruptedException e) {
        e.printStackTrace();
    }

    return fileOut;
}

From source file:org.elasticsearch.client.sniff.SnifferTests.java

/**
 * Test multiple sniffing rounds by mocking the {@link Scheduler} as well as the {@link HostsSniffer}.
 * Simulates the ordinary behaviour of {@link Sniffer} when sniffing on failure is not enabled.
 * The {@link CountingHostsSniffer} doesn't make any network connection but may throw an exception or return no hosts, which makes
 * it possible to verify that errors are properly handled and don't affect subsequent runs and their scheduling.
 * The {@link Scheduler} implementation submits tasks rather than scheduling them, meaning that it doesn't respect the requested sniff
 * delays, while still allowing the test to assert that the delay requested for each run and for the following one matches the expected value.
 */
public void testOrdinarySniffRounds() throws Exception {
    final long sniffInterval = randomLongBetween(1, Long.MAX_VALUE);
    long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE);
    RestClient restClient = mock(RestClient.class);
    CountingHostsSniffer hostsSniffer = new CountingHostsSniffer();
    final int iters = randomIntBetween(30, 100);
    final Set<Future<?>> futures = new CopyOnWriteArraySet<>();
    final CountDownLatch completionLatch = new CountDownLatch(1);
    final AtomicInteger runs = new AtomicInteger(iters);
    final ExecutorService executor = Executors.newSingleThreadExecutor();
    final AtomicReference<Future<?>> lastFuture = new AtomicReference<>();
    final AtomicReference<Sniffer.Task> lastTask = new AtomicReference<>();
    Scheduler scheduler = new Scheduler() {
        @Override
        public Future<?> schedule(Sniffer.Task task, long delayMillis) {
            assertEquals(sniffInterval, task.nextTaskDelay);
            int numberOfRuns = runs.getAndDecrement();
            if (numberOfRuns == iters) {
                //the first call is to schedule the first sniff round from the Sniffer constructor, with delay 0
                assertEquals(0L, delayMillis);
                assertEquals(sniffInterval, task.nextTaskDelay);
            } else {
                //all of the subsequent times "schedule" is called with delay set to the configured sniff interval
                assertEquals(sniffInterval, delayMillis);
                assertEquals(sniffInterval, task.nextTaskDelay);
                if (numberOfRuns == 0) {
                    completionLatch.countDown();
                    return null;
                }
            }
            //we submit rather than scheduling to make the test quick and not depend on time
            Future<?> future = executor.submit(task);
            futures.add(future);
            if (numberOfRuns == 1) {
                lastFuture.set(future);
                lastTask.set(task);
            }
            return future;
        }

        @Override
        public void shutdown() {
            //the executor is closed externally, shutdown is tested separately
        }
    };
    try {
        new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval, sniffAfterFailureDelay);
        assertTrue("timeout waiting for sniffing rounds to be completed",
                completionLatch.await(1000, TimeUnit.MILLISECONDS));
        assertEquals(iters, futures.size());
        //the last future is the only one that may not be completed yet, as the count down happens
        //while scheduling the next round which is still part of the execution of the runnable itself.
        assertTrue(lastTask.get().hasStarted());
        lastFuture.get().get();
        for (Future<?> future : futures) {
            assertTrue(future.isDone());
            future.get();
        }
    } finally {
        executor.shutdown();
        assertTrue(executor.awaitTermination(1000, TimeUnit.MILLISECONDS));
    }
    int totalRuns = hostsSniffer.runs.get();
    assertEquals(iters, totalRuns);
    int setHostsRuns = totalRuns - hostsSniffer.failures.get() - hostsSniffer.emptyList.get();
    verify(restClient, times(setHostsRuns)).setHosts(Matchers.<HttpHost>anyVararg());
    verifyNoMoreInteractions(restClient);
}

From source file:de.hybris.platform.test.TransactionTest.java

@Test
public void testIneffectiveRollbackWithActivateFalse() throws Exception {
    try {
        final Transaction tx = Transaction.current();
        // simulate config setting with threadlocal flag -> has the same semantics!
        Transaction.enableUserTransactionForThread(false);

        final AtomicReference<PK> titlePKref = new AtomicReference<PK>();
        try {
            tx.execute(new TransactionBody() {
                @Override
                public Object execute() throws Exception {
                    titlePKref.set(UserManager.getInstance().createTitle("TTT").getPK());
                    throw new RuntimeException("rollback please");
                }
            });
            fail("RuntimeException expected");
        } catch (final RuntimeException e) {
            assertEquals("rollback please", e.getMessage());
        }

        assertNotSame(tx, Transaction.current());
        assertNotNull(titlePKref.get());
        final Title title = jaloSession.getItem(titlePKref.get());
        assertNotNull(title);
        assertTrue(title.isAlive());
    } finally {
        Transaction.enableUserTransactionForThread(true);
    }
}

From source file:eu.eubrazilcc.lvl.storage.ResourceOwnerCollectionTest.java

@Test
public void test() {
    System.out.println("ResourceOwnerCollectionTest.test()");
    try {
        final Collection<String> roles = newArrayList("role1", "role2");
        final Collection<String> permissions = newArrayList("sequences:leishmania:public:*:view",
                "sequences:sandflies:public:*:view");
        // insert (no salt)
        final ResourceOwner resourceOwner = ResourceOwner.builder()
                .user(User.builder().userid("username").password("password").email("username@example.com")
                        .firstname("firstname").lastname("lastname").roles(roles).permissions(permissions)
                        .build())
                .build();
        WriteResult<ResourceOwner> result = RESOURCE_OWNER_DAO.insert(resourceOwner);
        assertThat("insert resource owner result is not null", result, notNullValue());
        assertThat("insert resource owner result id is not null", result.getId(), notNullValue());
        assertThat("insert resource owner result id is not empty", isNotBlank(result.getId()), equalTo(true));
        assertThat("insert resource owner result element is not null", result.getElement(), notNullValue());
        assertThat("insert resource owner result user is not null", result.getElement().getUser(),
                notNullValue());
        assertThat("insert resource owner result hashed password", result.getElement().getUser().getPassword(),
                notNullValue());
        assertThat("insert resource owner result hashed password",
                isNotBlank(result.getElement().getUser().getPassword()), equalTo(true));
        assertThat("insert resource owner result salt", result.getElement().getUser().getSalt(),
                notNullValue());
        assertThat("insert resource owner result salt", isNotBlank(result.getElement().getUser().getSalt()),
                equalTo(true));
        assertThat("inserted resource owner coincides with original (ignoring password & salt)",
                resourceOwner.equalsToUnprotected(result.getElement()), equalTo(true));
        final ResourceOwner hashed = result.getElement();

        // find (no salt)
        ResourceOwner resourceOwner2 = RESOURCE_OWNER_DAO.find(resourceOwner.getOwnerId());
        assertThat("resource owner is not null", resourceOwner2, notNullValue());
        assertThat("resource owner coincides with original", resourceOwner2, equalTo(hashed));
        System.out.println(resourceOwner2.toString());

        // find (no salt) with volatile values
        resourceOwner2 = RESOURCE_OWNER_DAO.useGravatar(true).find(resourceOwner.getOwnerId());
        assertThat("resource owner with volatile values is not null", resourceOwner2, notNullValue());
        assertThat("resource owner links are null", resourceOwner2.getUser().getLinks(), nullValue());
        assertThat("resource owner picture URL is not null", resourceOwner2.getUser().getPictureUrl(),
                notNullValue());
        assertThat("resource owner picture URL is not empty",
                isNotBlank(resourceOwner2.getUser().getPictureUrl()));
        assertThat("resource owner with volatile values coincides with original",
                resourceOwner2.getUser().equalsIgnoringVolatile(hashed.getUser()));
        System.out.println(resourceOwner2.toString());

        // insert element with hard link
        final ResourceOwner resourceOwner1 = ResourceOwner.builder()
                .user(User.builder()
                        .links(newArrayList(Link.fromUri("http://example.com/users/username1").rel(SELF)
                                .type(APPLICATION_JSON).build()))
                        .userid("username1").password("password1").email("username1@example.com")
                        .firstname("Firstname 1").lastname("Lastname 1").roles(roles).permissions(permissions)
                        .build())
                .build();

        result = RESOURCE_OWNER_DAO.insert(resourceOwner1);
        resourceOwner1.getUser().setLinks(null);
        assertThat("resource owner result inserted with hard link is not null", result, notNullValue());
        assertThat("resource owner inserted with hard link is not null", result.getElement(), notNullValue());
        assertThat("resource owner inserted with hard link coincides with original (ignoring password & salt)",
                resourceOwner1.equalsToUnprotected(result.getElement()), equalTo(true));
        System.out.println(resourceOwner2.toString());

        RESOURCE_OWNER_DAO.delete(resourceOwner1.getOwnerId());

        // update
        final String plainPassword = "new_password";
        updatePassword(hashed, plainPassword);
        RESOURCE_OWNER_DAO.update(hashed);

        // find after update
        resourceOwner2 = RESOURCE_OWNER_DAO.reset().find(resourceOwner.getOwnerId());
        assertThat("resource owner is not null", resourceOwner2, notNullValue());
        assertThat("resource owner coincides with original", resourceOwner2, equalTo(hashed));
        System.out.println(resourceOwner2.toString());

        // check validity using owner Id and username
        AtomicReference<String> ownerIdRef = new AtomicReference<String>();
        boolean validity = RESOURCE_OWNER_DAO.isValid(hashed.getOwnerId(), hashed.getUser().getUserid(),
                plainPassword, false, null, ownerIdRef);
        assertThat("resource owner is valid (using owner Id & username)", validity, equalTo(true));
        assertThat("resource owner Id passed as reference coincides with expected", ownerIdRef.get(),
                equalTo(hashed.getOwnerId()));

        // check validity using email address
        ownerIdRef = new AtomicReference<String>();
        validity = RESOURCE_OWNER_DAO.isValid(null, hashed.getUser().getEmail(), plainPassword, true, null,
                ownerIdRef);
        assertThat("resource owner is valid (using email)", validity, equalTo(true));
        assertThat("resource owner Id passed as reference coincides with expected", ownerIdRef.get(),
                equalTo(hashed.getOwnerId()));

        // add roles
        RESOURCE_OWNER_DAO.addRoles(resourceOwner.getOwnerId(), "role3");

        // remove roles
        RESOURCE_OWNER_DAO.removeRoles(resourceOwner.getOwnerId(), "role2");

        // test listing non-existing shared datasets
        List<DatasetShare> shares = RESOURCE_OWNER_DAO.listDatashares("otheruser@lvl", "mysequences.xml", 0,
                Integer.MAX_VALUE, null, null, null);
        assertThat("dataset shares is null", shares, notNullValue());
        assertThat("dataset shares is empty", shares.isEmpty(), equalTo(true));
        // uncomment for additional output
        System.out.println(" >> Dataset shares (before permissions are granted): " + shares.toString());

        // share dataset by adding permissions to resource owner
        RESOURCE_OWNER_DAO.addPermissions(resourceOwner.getOwnerId(),
                "datasets:files:otheruser@lvl:mysequences.xml:view");
        resourceOwner2 = RESOURCE_OWNER_DAO.reset().find(resourceOwner.getOwnerId());
        assertThat("resource owner is not null", resourceOwner2, notNullValue());
        // uncomment for additional output
        System.out.println(" >> Owner with permissions to view shared dataset: " + resourceOwner2.toString());

        // test listing shared datasets
        shares = RESOURCE_OWNER_DAO.listDatashares("otheruser@lvl", "mysequences.xml", 0, Integer.MAX_VALUE,
                null, null, null);
        assertThat("dataset shares is not null", shares, notNullValue());
        assertThat("number of dataset shares coincides with expected", shares.size(), equalTo(1));
        // uncomment for additional output
        System.out.println(" >> Dataset shares (after permissions are granted): " + shares.toString());

        // insert redundant permissions
        RESOURCE_OWNER_DAO.addPermissions(resourceOwner.getOwnerId(),
                "datasets:files:otheruser@lvl:mysequences.xml:view,edit");

        // test getting information about a specific share
        DatasetShare share = RESOURCE_OWNER_DAO.findDatashare("otheruser@lvl", "mysequences.xml",
                resourceOwner.getOwnerId());
        assertThat("dataset share is not null", share, notNullValue());
        // uncomment for additional output
        System.out.println(" >> Dataset share (after adding redundant permissions): " + share.toString());

        // test modifying a share
        // not available

        // remove all permissions and stop sharing
        RESOURCE_OWNER_DAO.removePermissions(resourceOwner.getOwnerId(),
                "datasets:files:otheruser@lvl:mysequences.xml:view",
                "datasets:files:otheruser@lvl:mysequences.xml:view,edit");
        shares = RESOURCE_OWNER_DAO.listDatashares("otheruser@lvl", "mysequences.xml", 0, Integer.MAX_VALUE,
                null, null, null);
        assertThat("dataset shares is null", shares, notNullValue());
        assertThat("dataset shares is empty", shares.isEmpty(), equalTo(true));
        // uncomment for additional output
        System.out.println(" >> Dataset shares (after permissions are removed): " + shares.toString());

        // get OAuth scope
        resourceOwner2 = RESOURCE_OWNER_DAO.find(resourceOwner.getOwnerId());
        final String oauthScope = ResourceOwnerDAO.oauthScope(resourceOwner2, true);
        assertThat("resource owner OAuth scope is not null", oauthScope, notNullValue());
        assertThat("resource owner OAuth scope is not blank", isNotBlank(oauthScope));
        assertThat("resource owner OAuth scope coincided with expected", oauthScope, equalTo("role1 role3"));
        System.out.println("OAuth scope: '" + oauthScope + "'");

        // remove (default LVL administrator is not removed)
        RESOURCE_OWNER_DAO.delete(resourceOwner.getOwnerId());
        final long numRecords = RESOURCE_OWNER_DAO.count();
        assertThat("number of resource owners stored in the database coincides with expected", numRecords,
                equalTo(1l));

        // insert (with salt)
        result = RESOURCE_OWNER_DAO.insert(hashed);
        assertThat("insert resource owner result (with salt) is not null", result, notNullValue());
        assertThat("insert resource owner result (with salt) id is not null", result.getId(), notNullValue());
        assertThat("insert resource owner result (with salt) id is not empty", isNotBlank(result.getId()),
                equalTo(true));

        // find (with salt)
        resourceOwner2 = RESOURCE_OWNER_DAO.find(hashed.getOwnerId());
        assertThat("resource owner (with salt) is not null", resourceOwner2, notNullValue());
        assertThat("resource owner (with salt) coincides with original", resourceOwner2, equalTo(hashed));
        System.out.println(resourceOwner2.toString());

        // pagination
        final List<String> ids = newArrayList();
        for (int i = 0; i < 11; i++) {
            final ResourceOwner resourceOwner3 = ResourceOwner.builder()
                    .user(User.builder().userid(Integer.toString(i)).password("password")
                            .email("username" + i + "@example.com").firstname("Firstname").lastname("Lastname")
                            .roles(roles).permissions(permissions).build())
                    .build();
            ids.add(resourceOwner3.getOwnerId());
            RESOURCE_OWNER_DAO.insert(resourceOwner3);
        }
        final int size = 3;
        int start = 0;
        List<ResourceOwner> resourceOwners = null;
        final MutableLong count = new MutableLong(0l);
        do {
            resourceOwners = RESOURCE_OWNER_DAO.list(start, size, null, null, null, count);
            if (resourceOwners.size() != 0) {
                System.out.println("Paging: first item " + start + ", showing " + resourceOwners.size() + " of "
                        + count.getValue() + " items");
            }
            start += resourceOwners.size();
        } while (!resourceOwners.isEmpty());
        for (final String id2 : ids) {
            RESOURCE_OWNER_DAO.delete(id2);
        }
        RESOURCE_OWNER_DAO.stats(System.out);
    } catch (Exception e) {
        e.printStackTrace(System.err);
        fail("ResourceOwnerCollectionTest.test() failed: " + e.getMessage());
    } finally {
        System.out.println("ResourceOwnerCollectionTest.test() has finished");
    }
}

From source file:com.microsoft.tfs.client.common.ui.console.ConsoleCoreEventListener.java

private void onGet(final GetEvent e) {
    e.getStatus();

    final AtomicReference<String> errorHolder = new AtomicReference<String>();
    final String messageString = e.getMessage(null, errorHolder);

    Message message;
    if (errorHolder.get() != null) {
        message = new Message(MessageType.ERROR, errorHolder.get());
    } else {
        message = new Message(MessageType.INFO, messageString);
    }

    printMessageToConsole(message);
}

From source file:hudson.plugins.jobConfigHistory.FileHistoryDaoTest.java

/**
 * Test of createNewHistoryDir method, of class FileHistoryDao.
 */
@Test
public void testCreateNewHistoryDir() throws IOException {
    final AtomicReference<Calendar> timestampHolder = new AtomicReference<Calendar>();
    final File first = FileHistoryDao.createNewHistoryDir(historyRoot, timestampHolder);
    assertTrue(first.exists());
    assertTrue(first.isDirectory());
    // Should provoke clash
    final File second = FileHistoryDao.createNewHistoryDir(historyRoot, timestampHolder);
    assertTrue(second.exists());
    assertTrue(second.isDirectory());
    assertNotEquals(first.getAbsolutePath(), second.getAbsolutePath());
}