Example usage for java.util.concurrent.atomic AtomicBoolean get

Introduction

On this page you can find example usages of java.util.concurrent.atomic.AtomicBoolean.get(), drawn from open-source projects.

Prototype

public final boolean get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
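
Before the project examples below, here is a minimal, self-contained sketch of the most common pattern (the class name AtomicBooleanGetExample is made up for illustration): one thread loops on get() as a stop flag while another thread flips it, relying on get()'s volatile-read semantics for cross-thread visibility.

import java.util.concurrent.atomic.AtomicBoolean;

public class AtomicBooleanGetExample {
    public static void main(String[] args) throws InterruptedException {
        // Shared flag: get() performs a volatile read, so the worker is
        // guaranteed to observe the write made below by the main thread.
        final AtomicBoolean running = new AtomicBoolean(true);

        Thread worker = new Thread(() -> {
            while (running.get()) {  // volatile-read visibility, no locking needed
                Thread.onSpinWait(); // stand-in for a unit of work
            }
            System.out.println("worker observed running == false, exiting");
        });
        worker.start();

        Thread.sleep(100);  // let the worker spin briefly
        running.set(false); // this write becomes visible to the worker's get()
        worker.join();
    }
}

A plain boolean field would need to be declared volatile to give the same visibility guarantee; AtomicBoolean couples that guarantee with atomic read-modify-write methods such as compareAndSet and getAndSet.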

Usage

From source file:info.archinnov.achilles.it.TestDSLSimpleEntity.java

@Test
public void should_dsl_delete_if_exists() throws Exception {
    //Given
    final long id = RandomUtils.nextLong(0L, Long.MAX_VALUE);
    final Date date = buildDateKey();

    final AtomicBoolean error = new AtomicBoolean(false);
    final LWTResultListener lwtResultListener = new LWTResultListener() {

        @Override
        public void onSuccess() {
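            // No-op: this test only exercises the error path.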

        }

        @Override
        public void onError(LWTResult lwtResult) {
            error.getAndSet(true);
        }
    };

    //When
    manager.dsl().delete().value().fromBaseTable().where().id_Eq(id).date_Eq(date).ifExists()
            .withResultSetAsyncListener(rs -> {
                assertThat(rs.wasApplied()).isFalse();
                return rs;
            }).withLwtResultListener(lwtResultListener).execute();
    //Then
    assertThat(error.get()).isTrue();
}

From source file:com.vmware.admiral.compute.container.HostContainerListDataCollection.java

private void checkIfSystemContainerStateExistsBeforeStartIt(ContainerState containerState,
        ContainerDescription containerDesc, String containerHostLink) {

    QueryTask containerQuery = QueryUtil.buildPropertyQuery(ContainerState.class,
            ContainerState.FIELD_NAME_DESCRIPTION_LINK,
            SystemContainerDescriptions.AGENT_CONTAINER_DESCRIPTION_LINK, ContainerState.FIELD_NAME_PARENT_LINK,
            containerState.parentLink);

    QueryUtil.addExpandOption(containerQuery);
    AtomicBoolean stateExists = new AtomicBoolean(false);
    new ServiceDocumentQuery<ContainerState>(getHost(), ContainerState.class).query(containerQuery, (r) -> {
        if (r.hasException()) {
            logWarning("Failed to retrieve system container state: %s", containerState.documentSelfLink);
        } else if (r.hasResult()) {
            // If the System ContainerState exists, all supported container
            // operations (start/stop) will work.
            stateExists.set(true);
        } else {
            if (stateExists.get()) {
                // The System ContainerState exists and the version is valid;
                // although we don't know the power state, start the
                // containers anyway, as start is idempotent.
                logFine("start existing system container %s", containerState.documentSelfLink);
                startSystemContainer(containerState, null);
            } else {
                // If the System ContainerState does not exist, we create it before
                // start operation.
                final ContainerState systemContainerState = createSystemContainerState(containerState,
                        containerDesc, containerHostLink);

                sendRequest(OperationUtil.createForcedPost(this, ContainerFactoryService.SELF_LINK)
                        .setBody(systemContainerState).setCompletion((o, e) -> {
                            if (e != null) {
                                logWarning("Failure creating system container: " + Utils.toString(e));
                                return;
                            }
                            ContainerState body = o.getBody(ContainerState.class);
                            logInfo("Created system ContainerState: %s ", body.documentSelfLink);
                            createSystemContainerInstanceRequest(body, null);
                            updateNumberOfContainers(containerHostLink);
                            startSystemContainer(containerState, null);
                        }));

            }
        }
    });
}

From source file:ch.cyberduck.core.pool.DefaultSessionPoolTest.java

@Test
public void testCheckReconnectSocketFailure() throws Exception {
    final AtomicBoolean interrupt = new AtomicBoolean();
    final Host bookmark = new Host(new TestProtocol());
    final TestLoginConnectionService connect = new TestLoginConnectionService() {
        @Override
        public boolean check(final Session<?> session, final Cache<Path> cache, final CancelCallback callback)
                throws BackgroundException {
            return true;
        }
    };
    final DefaultSessionPool pool = new DefaultSessionPool(connect,
            new DefaultVaultRegistry(new DisabledPasswordCallback()), PathCache.empty(),
            new DisabledTranscriptListener(), bookmark,
            new GenericObjectPool<Session>(new PooledSessionFactory(connect, new DisabledX509TrustManager(),
                    new DefaultX509KeyManager(), PathCache.empty(), bookmark,
                    new DefaultVaultRegistry(new DisabledPasswordCallback())) {
                @Override
                public Session create() {
                    return new NullSession(bookmark) {
                        @Override
                        public void interrupt() throws BackgroundException {
                            interrupt.set(true);
                            super.interrupt();
                        }
                    };
                }
            }));
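    // Borrow a session, then release it with a socket failure; the pool is expected to interrupt the session.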
    final Session<?> session = pool.borrow(BackgroundActionState.running);
    pool.release(session, new BackgroundException("m", new SocketException("m")));
    assertTrue(interrupt.get());
}

From source file:org.apache.hadoop.hbase.regionserver.wal.TestHLogSplit.java

@Test(timeout = 300000)
public void testIOEOnOutputThread() throws Exception {
    conf.setBoolean(HBASE_SKIP_ERRORS, false);

    generateHLogs(-1);
    fs.initialize(fs.getUri(), conf);
    FileStatus[] logfiles = fs.listStatus(HLOGDIR);
    assertTrue("There should be some log file", logfiles != null && logfiles.length > 0);
    // Set up a splitter that will throw an IOE on the output side
    HLogSplitter logSplitter = new HLogSplitter(conf, HBASEDIR, fs, null, null, null) {
        protected HLog.Writer createWriter(FileSystem fs, Path logfile, Configuration conf) throws IOException {
            HLog.Writer mockWriter = Mockito.mock(HLog.Writer.class);
            Mockito.doThrow(new IOException("Injected")).when(mockWriter).append(Mockito.<HLog.Entry>any());
            return mockWriter;
        }
    };
    // Set up a background thread dumper.  Needs a thread to depend on and then we need to run
    // the thread dumping in a background thread so it does not hold up the test.
    final AtomicBoolean stop = new AtomicBoolean(false);
    final Thread someOldThread = new Thread("Some-old-thread") {
        @Override
        public void run() {
            while (!stop.get())
                Threads.sleep(10);
        }
    };
    someOldThread.setDaemon(true);
    someOldThread.start();
    final Thread t = new Thread("Background-thread-dumper") {
        public void run() {
            try {
                Threads.threadDumpingIsAlive(someOldThread);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
    };
    t.setDaemon(true);
    t.start();
    try {
        logSplitter.splitLogFile(logfiles[0], null);
        fail("Didn't throw!");
    } catch (IOException ioe) {
        assertTrue(ioe.toString().contains("Injected"));
    } finally {
        // Setting this to true will turn off the background thread dumper.
        stop.set(true);
    }
}

From source file:org.apache.tinkerpop.gremlin.groovy.jsr223.GremlinGroovyScriptEngineTest.java

@Test
public void shouldAllowVariableReuseAcrossThreads() throws Exception {
    final BasicThreadFactory testingThreadFactory = new BasicThreadFactory.Builder()
            .namingPattern("test-gremlin-scriptengine-%d").build();
    final ExecutorService service = Executors.newFixedThreadPool(8, testingThreadFactory);
    final GremlinGroovyScriptEngine scriptEngine = new GremlinGroovyScriptEngine();

    final AtomicBoolean failed = new AtomicBoolean(false);
    final int max = 512;
    final List<Pair<Integer, List<Integer>>> futures = Collections.synchronizedList(new ArrayList<>(max));
    IntStream.range(0, max).forEach(i -> {
        final int yValue = i * 2;
        final int zValue = i * -1;
        final Bindings b = new SimpleBindings();
        b.put("x", i);
        b.put("y", yValue);

        final String script = "z=" + zValue + ";[x,y,z]";
        try {
            service.submit(() -> {
                try {
                    final List<Integer> result = (List<Integer>) scriptEngine.eval(script, b);
                    futures.add(Pair.with(i, result));
                } catch (Exception ex) {
                    failed.set(true);
                }
            });
        } catch (Exception ex) {
            throw new RuntimeException(ex);
        }
    });

    service.shutdown();
    assertThat(service.awaitTermination(120000, TimeUnit.MILLISECONDS), is(true));

    // likely a concurrency exception if it occurs - and if it does then we've messed up, because that's what this
    // test is partially designed to protect against.
    assertThat(failed.get(), is(false));
    assertEquals(max, futures.size());
    futures.forEach(t -> {
        assertEquals(t.getValue0(), t.getValue1().get(0));
        assertEquals(t.getValue0() * 2, t.getValue1().get(1).intValue());
        assertEquals(t.getValue0() * -1, t.getValue1().get(2).intValue());
    });
}

From source file:com.impetus.client.kudu.schemamanager.KuduDBSchemaManager.java

@Override
protected void update(List<TableInfo> tableInfos) {
    for (TableInfo tableInfo : tableInfos) {
        try {
            if (!client.tableExists(tableInfo.getTableName())) {
                createKuduTable(tableInfo);
            } else {
                List<String> entityColumns = new ArrayList<String>();
                KuduTable table = client.openTable(tableInfo.getTableName());
                AlterTableOptions alterTableOptions = new AlterTableOptions();
                AtomicBoolean updated = new AtomicBoolean(false);
                Schema schema = table.getSchema();
                // add modify columns
                for (ColumnInfo columnInfo : tableInfo.getColumnMetadatas()) {
                    entityColumns.add(columnInfo.getColumnName());
                    alterColumn(alterTableOptions, schema, columnInfo, updated);
                }
                // update for embeddables logic
                for (EmbeddedColumnInfo embColumnInfo : tableInfo.getEmbeddedColumnMetadatas()) {
                    for (ColumnInfo columnInfo : embColumnInfo.getColumns()) {
                        entityColumns.add(columnInfo.getColumnName());
                        alterColumn(alterTableOptions, schema, columnInfo, updated);
                    }
                }

                // delete columns
                for (ColumnSchema columnSchema : schema.getColumns()) {
                    // if not in tableInfo and not a key then delete
                    if (!entityColumns.contains(columnSchema.getName()) && !columnSchema.isKey()) {
                        alterTableOptions.dropColumn(columnSchema.getName());
                        updated.set(true);
                    }
                }

                if (updated.get()) {
                    client.alterTable(tableInfo.getTableName(), alterTableOptions);
                }
            }
        } catch (Exception e) {
            logger.error("Error while updating tables, Caused by: " + e.getMessage());
            throw new KunderaException("Error while updating tables, Caused by: " + e.getMessage());
        }
    }
}

From source file:org.apache.hadoop.yarn.applications.mapred.TestDistributedShell.java

@Test(timeout = 90000)
public void testDSShell() throws Exception {

    String[] args = { "--jar", APPMASTER_JAR, "--num_containers", "2", "--shell_command",
            Shell.WINDOWS ? "dir" : "ls", "--master_memory", "512", "--master_vcores", "2",
            "--container_memory", "128", "--container_vcores", "1" };

    LOG.info("Initializing DS Client");
    final Client client = new Client(new Configuration(yarnCluster.getConfig()));
    boolean initSuccess = client.init(args);
    Assert.assertTrue(initSuccess);
    LOG.info("Running DS Client");
    final AtomicBoolean result = new AtomicBoolean(false);
    Thread t = new Thread() {
        public void run() {
            try {
                result.set(client.run());
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    };
    t.start();

    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new Configuration(yarnCluster.getConfig()));
    yarnClient.start();
    String hostName = NetUtils.getHostname();
    boolean verified = false;
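    // Poll the RM until an application report shows the AM registered on this host with RPC port -1, or the application finishes.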
    while (!verified) {
        List<ApplicationReport> apps = yarnClient.getApplications();
        if (apps.size() == 0) {
            Thread.sleep(10);
            continue;
        }
        ApplicationReport appReport = apps.get(0);
        if (appReport.getHost().startsWith(hostName) && appReport.getRpcPort() == -1) {
            verified = true;
        }
        if (appReport.getYarnApplicationState() == YarnApplicationState.FINISHED) {
            break;
        }
    }
    Assert.assertTrue(verified);
    t.join();
    LOG.info("Client run completed. Result=" + result);
    Assert.assertTrue(result.get());

    TimelineEntities entitiesAttempts = yarnCluster.getApplicationHistoryServer().getTimelineStore()
            .getEntities(ApplicationMaster.DSEntity.DS_APP_ATTEMPT.toString(), null, null, null, null, null,
                    null, null, null);
    Assert.assertNotNull(entitiesAttempts);
    Assert.assertEquals(1, entitiesAttempts.getEntities().size());
    Assert.assertEquals(2, entitiesAttempts.getEntities().get(0).getEvents().size());
    Assert.assertEquals(entitiesAttempts.getEntities().get(0).getEntityType().toString(),
            ApplicationMaster.DSEntity.DS_APP_ATTEMPT.toString());
    TimelineEntities entities = yarnCluster.getApplicationHistoryServer().getTimelineStore().getEntities(
            ApplicationMaster.DSEntity.DS_CONTAINER.toString(), null, null, null, null, null, null, null, null);
    Assert.assertNotNull(entities);
    Assert.assertEquals(2, entities.getEntities().size());
    Assert.assertEquals(entities.getEntities().get(0).getEntityType().toString(),
            ApplicationMaster.DSEntity.DS_CONTAINER.toString());
}

From source file:org.apache.hadoop.net.unix.TestDomainSocket.java

@Test(timeout = 180000)
public void testShutdown() throws Exception {
    final AtomicInteger bytesRead = new AtomicInteger(0);
    final AtomicBoolean failed = new AtomicBoolean(false);
    final DomainSocket[] socks = DomainSocket.socketpair();
    Runnable reader = new Runnable() {
        @Override
        public void run() {
            while (true) {
                try {
                    int ret = socks[1].getInputStream().read();
                    if (ret == -1)
                        return;
                    bytesRead.addAndGet(1);
                } catch (IOException e) {
                    DomainSocket.LOG.error("reader error", e);
                    failed.set(true);
                    return;
                }
            }
        }
    };
    Thread readerThread = new Thread(reader);
    readerThread.start();
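    // Write three bytes, then shut down one side of the pair; the blocked reader should see EOF (-1) rather than an error.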
    socks[0].getOutputStream().write(1);
    socks[0].getOutputStream().write(2);
    socks[0].getOutputStream().write(3);
    Assert.assertTrue(readerThread.isAlive());
    socks[0].shutdown();
    readerThread.join();
    Assert.assertFalse(failed.get());
    Assert.assertEquals(3, bytesRead.get());
    IOUtils.cleanup(null, socks);
}

From source file:org.alfresco.repo.activities.feed.cleanup.FeedCleaner.java

/**
 * Does the actual cleanup, expecting the lock to be maintained
 *
 * @param keepGoing <tt>true</tt> to continue; will be switched to <tt>false</tt> to stop
 * @return          number of entries deleted through whatever means
 */
private int executeWithLock(final AtomicBoolean keepGoing) throws JobExecutionException {
    int maxIdRangeDeletedCount = 0;
    int maxAgeDeletedCount = 0;
    int maxSizeDeletedCount = 0;

    try {
        /*
         * ALF-15383 (DH 15/08/2012)
         * Previously, we allowed maxFeedSize entries per user per site per format.
         * This scaled badly because some users (especially under test conditions)
         * were able to perform actions across many thousands of sites.  If the size
         * limit was 100 and the user belonged to 50K sites, we allowed 5M feed entries
         * for that user.  This may have been OK but for the fact that the queries
         * doing the work are not covered by appropriate indexes to support the where
         * and sort by clauses.
         * In fact, given the current state of indexes, it is necessary to limit the absolute
         * number of feed entries.  We can't use count() queries (they are poor) and cannot
         * reasonably sort by date and trim by count.  Therefore I have introduced an
         * absolute ID range trim that runs before everything else.
         */

        if (maxIdRange > 0 && keepGoing.get()) {
            maxIdRangeDeletedCount = feedDAO.deleteFeedEntries(maxIdRange);
            if (logger.isTraceEnabled()) {
                logger.trace("Cleaned " + maxIdRangeDeletedCount + " entries to keep ID range of " + maxIdRange
                        + ".");
            }
        }

        if (maxAgeMins > 0 && keepGoing.get()) {
            // clean old entries based on maxAgeMins

            long nowTimeOffset = new Date().getTime();
            long keepTimeOffset = nowTimeOffset - ((long) maxAgeMins * 60000L); // millisecs = mins * 60 secs * 1000 msecs
            Date keepDate = new Date(keepTimeOffset);

            maxAgeDeletedCount = feedDAO.deleteFeedEntries(keepDate);
            if (logger.isTraceEnabled()) {
                logger.trace("Cleaned " + maxAgeDeletedCount + " entries (upto " + keepDate + ", max age "
                        + maxAgeMins + " mins)");
            }
        }

        // TODO:    ALF-15511
        if (maxFeedSize > 0 && keepGoing.get()) {
            // Get user+format feeds exceeding the required maximum
            if (logger.isTraceEnabled()) {
                logger.trace("Selecting user+format feeds exceeding the required maximum of " + maxFeedSize
                        + " entries.");
            }
            List<ActivityFeedEntity> userFeedsTooMany = feedDAO.selectUserFeedsToClean(maxFeedSize);
            for (ActivityFeedEntity userFeedTooMany : userFeedsTooMany) {
                if (!keepGoing.get()) {
                    if (logger.isTraceEnabled()) {
                        logger.trace("Stopping cleaning the feeds.");
                    }
                    break;
                }
                if (logger.isTraceEnabled()) {
                    logger.trace("Found user activity feed entity: " + userFeedTooMany.toString());
                }
                String feedUserId = userFeedTooMany.getFeedUserId();
                // Feed entries recorded under the special username indicate site-specific
                // entries; rather than filter them out in the query, we just skip them here.
                if (ActivitiesDAO.KEY_ACTIVITY_NULL_VALUE.equals(feedUserId)) {
                    if (logger.isTraceEnabled()) {
                        logger.trace("Found site-specific feed entries, filtering.");
                    }
                    continue;
                }
                // Get the feeds to keep
                if (logger.isTraceEnabled()) {
                    logger.trace("Get the feeds to keep for user for all sites, not exluding users.");
                }
                List<ActivityFeedEntity> feedsToKeep = feedDAO.selectUserFeedEntries(feedUserId, null, false,
                        false, -1L, maxFeedSize);
                if (logger.isTraceEnabled()) {
                    for (ActivityFeedEntity feedToKeep : feedsToKeep) {
                        logger.trace("Found user activity feed entity to keep: " + feedToKeep.toString());
                    }
                }
                // If the feeds have been removed, then ignore
                if (feedsToKeep.size() < maxFeedSize) {
                    if (logger.isTraceEnabled()) {
                        logger.trace("Found less then " + maxFeedSize + " .The feeds were removed, ignoring.");
                    }
                    continue;
                }
                // Get the last one
                Date oldestFeedEntry = feedsToKeep.get(maxFeedSize - 1).getPostDate();

                if (logger.isTraceEnabled()) {
                    logger.trace("Deleting the oldest feed entry: " + oldestFeedEntry.toString());
                }
                int deletedCount = feedDAO.deleteUserFeedEntries(feedUserId, oldestFeedEntry);
                if (logger.isTraceEnabled()) {
                    logger.trace("Cleaned " + deletedCount + " entries for user '" + feedUserId + "'.");
                }
                maxSizeDeletedCount += deletedCount;
            }

            // Get site+format feeds exceeding the required maximum
            if (logger.isTraceEnabled()) {
                logger.trace("Selecting site+format feeds exceeding the required maximum of " + maxFeedSize
                        + " entries.");
            }
            List<ActivityFeedEntity> siteFeedsTooMany = feedDAO.selectSiteFeedsToClean(maxFeedSize);
            for (ActivityFeedEntity siteFeedTooMany : siteFeedsTooMany) {
                if (!keepGoing.get()) {
                    if (logger.isTraceEnabled()) {
                        logger.trace("Stopping cleaning the feeds.");
                    }
                    break;
                }
                if (logger.isTraceEnabled()) {
                    logger.trace("Found site activity feed entity: " + siteFeedTooMany.toString());
                }
                String siteId = siteFeedTooMany.getSiteNetwork();
                // Get the feeds to keep
                if (logger.isTraceEnabled()) {
                    logger.trace("Get the feeds to keep for site.");
                }
                List<ActivityFeedEntity> feedsToKeep = feedDAO.selectSiteFeedEntries(siteId, maxFeedSize);
                if (logger.isTraceEnabled()) {
                    for (ActivityFeedEntity feedToKeep : feedsToKeep) {
                        logger.trace("Found site activity feed entity to keep: " + feedToKeep.toString());
                    }
                }
                // If the feeds have been removed, then ignore
                if (feedsToKeep.size() < maxFeedSize) {
                    continue;
                }
                // Get the last one
                Date oldestFeedEntry = feedsToKeep.get(maxFeedSize - 1).getPostDate();
                if (logger.isTraceEnabled()) {
                    logger.trace("Deleting the oldest feed entry: " + oldestFeedEntry.toString());
                }
                int deletedCount = feedDAO.deleteSiteFeedEntries(siteId, oldestFeedEntry);
                if (logger.isTraceEnabled()) {
                    logger.trace("Cleaned " + deletedCount + " entries for site '" + siteId + "'.");
                }
                maxSizeDeletedCount += deletedCount;
            }
        }
    } catch (SQLException e) {
        logger.error("Exception during cleanup of feeds", e);
        throw new JobExecutionException(e);
    } catch (Throwable e) {
        // We were told to stop, which is also what will happen if the VM shuts down
        if (!keepGoing.get()) {
            // Ignore
        } else {
            logger.error("Exception during cleanup of feeds", e);
        }
    }

    return (maxIdRangeDeletedCount + maxAgeDeletedCount + maxSizeDeletedCount);
}

From source file:fr.mby.saml2.sp.opensaml.core.OpenSaml20SpProcessorTest.java

@Test
public void testTryAuthenticationPropagation() throws Exception {

    final IIncomingSaml incomingSaml = Mockito.mock(IIncomingSaml.class);
    final QueryAuthnResponse queryAuthnResponse = Mockito.mock(QueryAuthnResponse.class);
    final List<IAuthentication> authns = new ArrayList<IAuthentication>();
    final BasicSamlAuthentication basicAuth = new BasicSamlAuthentication();
    basicAuth.addAttribute(AUTH_ATTR_KEY, AUTH_ATTR_VALUES);
    authns.add(basicAuth);

    Mockito.when(incomingSaml.getSamlQuery()).thenReturn(queryAuthnResponse);
    Mockito.when(queryAuthnResponse.getSamlAuthentications()).thenReturn(authns);

    final AtomicBoolean authPropagated = new AtomicBoolean(false);

    this.spProcessor.setAuthenticationHandler(new IAuthenticationHandler() {

        @Override
        public void propagateAuthentications(List<IAuthentication> authentications) {
            Assert.assertNotNull("No authentications propagated !", authentications);
            Assert.assertEquals("Bad authentications list size !", authns.size(), authentications.size());

            final IAuthentication authn = authentications.iterator().next();
            Assert.assertNotNull("Null authentication attributes list !", authn.getAttributes());
            Assert.assertEquals("Bad authentication attributes list size !", basicAuth.getAttributes().size(),
                    authn.getAttributes().size());

            final List<String> values = authn.getAttribute(AUTH_ATTR_KEY);
            Assert.assertNotNull("No attribute values found in propagated authentications !", values);
            Assert.assertEquals("Bad values list size !", AUTH_ATTR_VALUES.size(), values.size());

            final Iterator<String> valuesIt = values.iterator();
            Assert.assertEquals("Bad first propagated authentication attibutes !", AUTH_ATTR_VALUE_1,
                    valuesIt.next());
            Assert.assertEquals("Bad second propagated authentication attribute value !", AUTH_ATTR_VALUE_2,
                    valuesIt.next());

            authPropagated.set(true);
        }
    });

    this.spProcessor.tryAuthenticationPropagation(incomingSaml);

    Assert.assertTrue("Authentication wasn't propagated !", authPropagated.get());
}