Example usage for java.util.concurrent.atomic AtomicBoolean set

Introduction

This page collects example usages of java.util.concurrent.atomic.AtomicBoolean.set, drawn from open-source projects.

Prototype

public final void set(boolean newValue) 

Document

Sets the value to newValue, with memory effects as specified by VarHandle#setVolatile.
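
As a quick illustration of the prototype above, here is a minimal, self-contained sketch (not taken from the examples below; class and variable names are my own) showing set(boolean) publishing a new value to another thread via its volatile memory effects:

import java.util.concurrent.atomic.AtomicBoolean;

public class AtomicBooleanSetDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean running = new AtomicBoolean(true);

        Thread worker = new Thread(() -> {
            // get() reads the volatile value written by set(false) below,
            // so this loop is guaranteed to observe the update.
            while (running.get()) {
                Thread.onSpinWait();
            }
            System.out.println("worker observed running == false");
        });
        worker.start();

        Thread.sleep(100);
        running.set(false); // volatile write: immediately visible to the worker
        worker.join();
    }
}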

Usage

From source file: org.apache.nifi.processors.standard.SplitXml.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    final FlowFile original = session.get();
    if (original == null) {
        return;
    }

    final int depth = context.getProperty(SPLIT_DEPTH).asInteger();
    final ComponentLog logger = getLogger();

    final List<FlowFile> splits = new ArrayList<>();
    final String fragmentIdentifier = UUID.randomUUID().toString();
    final AtomicInteger numberOfRecords = new AtomicInteger(0);
    final XmlSplitterSaxParser parser = new XmlSplitterSaxParser(xmlTree -> {
        FlowFile split = session.create(original);
        split = session.write(split, out -> out.write(xmlTree.getBytes("UTF-8")));
        split = session.putAttribute(split, FRAGMENT_ID.key(), fragmentIdentifier);
        split = session.putAttribute(split, FRAGMENT_INDEX.key(),
                Integer.toString(numberOfRecords.getAndIncrement()));
        split = session.putAttribute(split, SEGMENT_ORIGINAL_FILENAME.key(),
                split.getAttribute(CoreAttributes.FILENAME.key()));
        splits.add(split);
    }, depth);

    final AtomicBoolean failed = new AtomicBoolean(false);
    session.read(original, rawIn -> {
        try (final InputStream in = new BufferedInputStream(rawIn)) {
            SAXParser saxParser = null;
            try {
                saxParser = saxParserFactory.newSAXParser();
                final XMLReader reader = saxParser.getXMLReader();
                reader.setContentHandler(parser);
                reader.parse(new InputSource(in));
            } catch (final ParserConfigurationException | SAXException e) {
                logger.error("Unable to parse {} due to {}", new Object[] { original, e });
                failed.set(true);
            }
        }
    });

    if (failed.get()) {
        session.transfer(original, REL_FAILURE);
        session.remove(splits);
    } else {
        splits.forEach((split) -> {
            split = session.putAttribute(split, FRAGMENT_COUNT.key(), Integer.toString(numberOfRecords.get()));
            session.transfer(split, REL_SPLIT);
        });

        final FlowFile originalToTransfer = copyAttributesToOriginal(session, original, fragmentIdentifier,
                numberOfRecords.get());
        session.transfer(originalToTransfer, REL_ORIGINAL);
        logger.info("Split {} into {} FlowFiles", new Object[] { originalToTransfer, splits.size() });
    }
}
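
A pattern worth noting here, and in most of the examples that follow: a lambda or anonymous class may only capture effectively final local variables, so the code declares a final AtomicBoolean and calls set(true) inside the callback to carry a failure flag out to the enclosing method. A minimal sketch of the idiom (hypothetical names, not the NiFi API):

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Stream;

public class FailureFlagSketch {
    public static void main(String[] args) {
        // The reference is final, as lambda capture requires; the contents stay mutable.
        final AtomicBoolean failed = new AtomicBoolean(false);
        Stream.of("1", "2", "oops").forEach(s -> {
            try {
                Integer.parseInt(s);
            } catch (NumberFormatException e) {
                failed.set(true); // record the failure without breaking effective finality
            }
        });
        System.out.println("failed = " + failed.get()); // prints: failed = true
    }
}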

From source file: org.apache.tinkerpop.gremlin.groovy.engine.GremlinExecutorTest.java

@Test
public void shouldInterruptWhile() throws Exception {
    final Map<String, List<Object>> compilerCustomizerConfig = new HashMap<>();
    compilerCustomizerConfig.put(ThreadInterruptCustomizerProvider.class.getName(), new ArrayList<>());

    final Map<String, Object> config = new HashMap<>();
    config.put("compilerCustomizerProviders", compilerCustomizerConfig);

    final GremlinExecutor gremlinExecutor = GremlinExecutor.build()
            .addEngineSettings("gremlin-groovy", Collections.emptyList(), Collections.emptyList(),
                    Arrays.asList(PATHS.get("GremlinExecutorInit.groovy")), config)
            .create();
    final AtomicBoolean asserted = new AtomicBoolean(false);

    final Thread t = new Thread(() -> {
        try {
            gremlinExecutor
                    .eval("s = System.currentTimeMillis();\nwhile((System.currentTimeMillis() - s) < 10000) {}")
                    .get();
        } catch (Exception se) {
            asserted.set(se instanceof InterruptedException);
        }
    });

    t.start();
    Thread.sleep(100);
    t.interrupt();
    while (t.isAlive()) {
    }

    assertTrue(asserted.get());
}

From source file: org.apache.hadoop.hbase.master.cleaner.TestLogsCleaner.java

@Test
public void testZooKeeperAbortDuringGetListOfReplicators() throws Exception {
    ReplicationLogCleaner cleaner = new ReplicationLogCleaner();

    List<FileStatus> dummyFiles = Arrays.asList(
            new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path("log1")),
            new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path("log2")));

    FaultyZooKeeperWatcher faultyZK = new FaultyZooKeeperWatcher(conf, "testZooKeeperAbort-faulty", null);
    final AtomicBoolean getListOfReplicatorsFailed = new AtomicBoolean(false);

    try {
        faultyZK.init();
        ReplicationQueueStorage queueStorage = spy(
                ReplicationStorageFactory.getReplicationQueueStorage(faultyZK, conf));
        doAnswer(new Answer<Object>() {
            @Override
            public Object answer(InvocationOnMock invocation) throws Throwable {
                try {
                    return invocation.callRealMethod();
                } catch (ReplicationException e) {
                    LOG.debug("Caught Exception", e);
                    getListOfReplicatorsFailed.set(true);
                    throw e;
                }
            }
        }).when(queueStorage).getAllWALs();

        cleaner.setConf(conf, faultyZK, queueStorage);
        // should keep all files due to a ConnectionLossException getting the queues znodes
        cleaner.preClean();
        Iterable<FileStatus> toDelete = cleaner.getDeletableFiles(dummyFiles);

        assertTrue(getListOfReplicatorsFailed.get());
        assertFalse(toDelete.iterator().hasNext());
        assertFalse(cleaner.isStopped());
    } finally {
        faultyZK.close();
    }
}

From source file: io.druid.segment.realtime.plumber.RealtimePlumberSchoolTest.java

@Test(timeout = 60000)
public void testPersistFails() throws Exception {
    final AtomicBoolean committed = new AtomicBoolean(false);
    plumber.getSinks().put(0L, new Sink(new Interval(0, TimeUnit.HOURS.toMillis(1)), schema, tuningConfig,
            new DateTime("2014-12-01T12:34:56.789").toString()));
    plumber.startJob();
    final InputRow row = EasyMock.createNiceMock(InputRow.class);
    EasyMock.expect(row.getTimestampFromEpoch()).andReturn(0L);
    EasyMock.expect(row.getDimensions()).andReturn(new ArrayList<String>());
    EasyMock.replay(row);
    plumber.add(row, Committers.supplierOf(Committers.nil()));
    plumber.persist(Committers.supplierFromRunnable(new Runnable() {
        @Override
        public void run() {
            committed.set(true);
            throw new RuntimeException();
        }
    }).get());
    while (!committed.get()) {
        Thread.sleep(100);
    }

    // Exception may need time to propagate
    while (metrics.failedPersists() < 1) {
        Thread.sleep(100);
    }

    Assert.assertEquals(1, metrics.failedPersists());
}

From source file: io.nats.client.ITClusterTest.java

@Test
public void testBasicClusterReconnect() throws Exception {
    try (NatsServer s1 = runServerOnPort(1222)) {
        try (NatsServer s2 = runServerOnPort(1224)) {

            Options opts = new Options.Builder(Nats.defaultOptions()).dontRandomize().build();

            final AtomicBoolean dcbCalled = new AtomicBoolean(false);
            final CountDownLatch dcLatch = new CountDownLatch(1);
            opts.disconnectedCb = new DisconnectedCallback() {
                public void onDisconnect(ConnectionEvent event) {
                    // Suppress any additional calls
                    if (dcbCalled.get()) {
                        return;
                    }
                    dcbCalled.set(true);
                    dcLatch.countDown();
                }
            };

            final CountDownLatch rcLatch = new CountDownLatch(1);
            opts.reconnectedCb = new ReconnectedCallback() {
                public void onReconnect(ConnectionEvent event) {
                    logger.info("rcb called");
                    rcLatch.countDown();
                }
            };

            try (Connection c = Nats.connect(servers, opts)) {
                assertNotNull(c.getConnectedUrl());

                s1.shutdown();

                // wait for disconnect
                assertTrue("Did not receive a disconnect callback message",
                        await(dcLatch, 2, TimeUnit.SECONDS));

                long reconnectTimeStart = System.nanoTime();

                assertTrue("Did not receive a reconnect callback message: ",
                        await(rcLatch, 2, TimeUnit.SECONDS));

                assertTrue(c.getConnectedUrl().equals(testServers[2]));

                // Make sure we did not wait on reconnect for default time.
                // Reconnect should be fast since it will be a switch to the
                // second server and not be dependent on server restart time.
                // assertTrue(reconElapsed.get() <= cf.getReconnectWait());

                long maxDuration = 100;
                long reconnectTime = System.nanoTime() - reconnectTimeStart;
                assertFalse(
                        String.format("Took longer than expected to reconnect: %dms\n",
                                TimeUnit.NANOSECONDS.toMillis(reconnectTime)),
                        TimeUnit.NANOSECONDS.toMillis(reconnectTime) > maxDuration);
            }
        }
    }
}

From source file: org.apache.nifi.controller.scheduling.TestStandardProcessScheduler.java

/**
 * Validates the atomic nature of ControllerServiceNode.enable() method
 * which must only trigger @OnEnabled once, regardless of how many threads
 * may have a reference to the underlying ProcessScheduler and
 * ControllerServiceNode.
 */
@Test
public void validateServiceEnablementLogicHappensOnlyOnce() throws Exception {
    final ProcessScheduler scheduler = createScheduler();
    final StandardControllerServiceProvider provider = new StandardControllerServiceProvider(controller,
            scheduler, null, stateMgrProvider, variableRegistry, nifiProperties);
    final ControllerServiceNode serviceNode = provider.createControllerService(
            SimpleTestService.class.getName(), "1", systemBundle.getBundleDetails().getCoordinate(), null,
            false);
    assertFalse(serviceNode.isActive());
    final SimpleTestService ts = (SimpleTestService) serviceNode.getControllerServiceImplementation();
    final ExecutorService executor = Executors.newCachedThreadPool();

    final AtomicBoolean asyncFailed = new AtomicBoolean();
    for (int i = 0; i < 1000; i++) {
        executor.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    scheduler.enableControllerService(serviceNode);
                    assertTrue(serviceNode.isActive());
                } catch (final Exception e) {
                    e.printStackTrace();
                    asyncFailed.set(true);
                }
            }
        });
    }
    // need to sleep a while since we are emulating async invocations on
    // method that is also internally async
    Thread.sleep(500);
    executor.shutdown();
    assertFalse(asyncFailed.get());
    assertEquals(1, ts.enableInvocationCount());
}

From source file: org.apache.nifi.controller.scheduling.TestStandardProcessScheduler.java

/**
 * Validates the atomic nature of ControllerServiceNode.disable(..) method
 * which must never trigger @OnDisabled, regardless of how many threads may
 * have a reference to the underlying ProcessScheduler and
 * ControllerServiceNode.
 */
@Test
public void validateDisabledServiceCantBeDisabled() throws Exception {
    final ProcessScheduler scheduler = createScheduler();
    final StandardControllerServiceProvider provider = new StandardControllerServiceProvider(controller,
            scheduler, null, stateMgrProvider, variableRegistry, nifiProperties);
    final ControllerServiceNode serviceNode = provider.createControllerService(
            SimpleTestService.class.getName(), "1", systemBundle.getBundleDetails().getCoordinate(), null,
            false);
    final SimpleTestService ts = (SimpleTestService) serviceNode.getControllerServiceImplementation();
    final ExecutorService executor = Executors.newCachedThreadPool();

    final AtomicBoolean asyncFailed = new AtomicBoolean();
    for (int i = 0; i < 1000; i++) {
        executor.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    scheduler.disableControllerService(serviceNode);
                    assertFalse(serviceNode.isActive());
                } catch (final Exception e) {
                    e.printStackTrace();
                    asyncFailed.set(true);
                }
            }
        });
    }
    // need to sleep a while since we are emulating async invocations on
    // method that is also internally async
    Thread.sleep(500);
    executor.shutdown();
    assertFalse(asyncFailed.get());
    assertEquals(0, ts.disableInvocationCount());
}

From source file: org.apache.nifi.controller.scheduling.TestStandardProcessScheduler.java

/**
 * Validates the atomic nature of ControllerServiceNode.disable() method
 * which must only trigger @OnDisabled once, regardless of how many threads
 * may have a reference to the underlying ProcessScheduler and
 * ControllerServiceNode.
 */
@Test
public void validateEnabledServiceCanOnlyBeDisabledOnce() throws Exception {
    final ProcessScheduler scheduler = createScheduler();
    final StandardControllerServiceProvider provider = new StandardControllerServiceProvider(controller,
            scheduler, null, stateMgrProvider, variableRegistry, nifiProperties);
    final ControllerServiceNode serviceNode = provider.createControllerService(
            SimpleTestService.class.getName(), "1", systemBundle.getBundleDetails().getCoordinate(), null,
            false);
    final SimpleTestService ts = (SimpleTestService) serviceNode.getControllerServiceImplementation();
    scheduler.enableControllerService(serviceNode);
    assertTrue(serviceNode.isActive());
    final ExecutorService executor = Executors.newCachedThreadPool();

    final AtomicBoolean asyncFailed = new AtomicBoolean();
    for (int i = 0; i < 1000; i++) {
        executor.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    scheduler.disableControllerService(serviceNode);
                    assertFalse(serviceNode.isActive());
                } catch (final Exception e) {
                    e.printStackTrace();
                    asyncFailed.set(true);
                }
            }
        });
    }
    // need to sleep a while since we are emulating async invocations on
    // method that is also internally async
    Thread.sleep(500);
    executor.shutdown();
    assertFalse(asyncFailed.get());
    assertEquals(1, ts.disableInvocationCount());
}

From source file: org.lendingclub.mercator.docker.SwarmScanner.java

public void scan() {
    WebTarget t = extractWebTarget(dockerScanner.getDockerClient());
    logger.info("Scanning {}", t);
    JsonNode response = t.path("/info").request().buildGet().invoke(JsonNode.class);

    JsonNode swarm = response.path("Swarm");
    JsonNode cluster = swarm.path("Cluster");
    String swarmClusterId = cluster.path("ID").asText();

    // need to parse these dates
    String createdAt = cluster.path("CreatedAt").asText();
    String updatedAt = cluster.path("UpdatedAt").asText();
    ObjectNode props = mapper.createObjectNode();
    props.put("swarmClusterId", swarmClusterId);
    props.put("createdAt", createdAt);
    props.put("updatedAt", updatedAt);

    JsonNode swarmNode = dockerScanner.getNeoRxClient().execCypher(
            "merge (c:DockerSwarm {swarmClusterId:{id}}) set c+={props},c.updateTs=timestamp() return c", "id",
            swarmClusterId, "props", props).blockingFirst(MissingNode.getInstance());

    if (isUnixDomainScoket(t.getUri().toString())) {
        // Only set managerApiUrl to a unix domain socket if it has not
        // already been set.
        // This is useful for trident
        if (!isUnixDomainScoket(swarmNode.path("managerApiUrl").asText())) {

            String LOCAL_DOCKER_DAEMON_SOCKET_URL = "unix:///var/run/docker.sock";
            logger.info("setting mangerApiUrl to {} for swarm {}", LOCAL_DOCKER_DAEMON_SOCKET_URL,
                    swarmClusterId);

            String name = "local";
            dockerScanner.getNeoRxClient()
                    .execCypher("match (c:DockerSwarm {name:{name}}) return c", "name", name).forEach(it -> {
                        String oldSwarmClusterId = it.path("swarmClusterId").asText();
                        if (!swarmClusterId.equals(oldSwarmClusterId)) {
                            dockerScanner.getNeoRxClient().execCypher(
                                    "match (c:DockerSwarm {swarmClusterId:{swarmClusterId}}) detach delete c",
                                    "swarmClusterId", oldSwarmClusterId);
                        }
                    });

            dockerScanner.getNeoRxClient().execCypher(
                    "match (c:DockerSwarm {swarmClusterId:{id}}) set c.managerApiUrl={managerApiUrl},c.name={name},c.tridentClusterId={name} return c",
                    "id", swarmClusterId, "managerApiUrl", LOCAL_DOCKER_DAEMON_SOCKET_URL, "name", name);

        }
    }

    AtomicBoolean fail = new AtomicBoolean(false);
    response = t.path("/nodes").request().buildGet().invoke(JsonNode.class);
    AtomicLong earliestTimestamp = new AtomicLong(Long.MAX_VALUE);
    response.elements().forEachRemaining(it -> {
        try {
            earliestTimestamp.set(
                    Math.min(earliestTimestamp.get(), saveDockerNode(swarmClusterId, flattenSwarmNode(it))));
        } catch (RuntimeException e) {
            logger.warn("problem", e);
            fail.set(true);
        }
    });

    if (!fail.get()) {
        if (earliestTimestamp.get() < System.currentTimeMillis()) {
            logger.info("deleting DockerHost nodes before with updateTs<{}", earliestTimestamp.get());
            dockerScanner.getNeoRxClient().execCypher(
                    "match (s:DockerSwarm {swarmClusterId:{id}})--(x:DockerHost) where s.updateTs>x.updateTs detach delete x",
                    "id", swarmClusterId);
        }
    }
    scanServicesForSwarm(swarmClusterId);
    scanTasksForSwarm(swarmClusterId);
}

From source file: ch.cyberduck.core.b2.B2LargeUploadServiceTest.java

@Test
public void testAppendNoPartCompleted() throws Exception {
    final B2Session session = new B2Session(new Host(new B2Protocol(), new B2Protocol().getDefaultHostname(),
            new Credentials(System.getProperties().getProperty("b2.user"),
                    System.getProperties().getProperty("b2.key"))));
    session.open(new DisabledHostKeyCallback());
    session.login(new DisabledPasswordStore(), new DisabledLoginCallback(), new DisabledCancelCallback());
    final Path bucket = new Path("test-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final Path test = new Path(bucket, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    final Local local = new Local(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());
    final int length = 102 * 1024 * 1024;
    final byte[] content = RandomUtils.nextBytes(length);
    IOUtils.write(content, local.getOutputStream(false));
    final TransferStatus status = new TransferStatus();
    status.setLength(content.length);
    final AtomicBoolean interrupt = new AtomicBoolean();
    final B2LargeUploadService service = new B2LargeUploadService(session, new B2WriteFeature(session),
            100 * 1024L * 1024L, 1);
    try {
        service.upload(test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED),
                new DisabledStreamListener() {
                    long count;

                    @Override
                    public void sent(final long bytes) {
                        count += bytes;
                        if (count >= 5 * 1024L * 1024L) {
                            throw new RuntimeException();
                        }
                    }
                }, status, new DisabledLoginCallback());
    } catch (BackgroundException e) {
        // Expected
        interrupt.set(true);
    }
    assertTrue(interrupt.get());
    assertEquals(0L, status.getOffset(), 0L);
    assertFalse(status.isComplete());

    final TransferStatus append = new TransferStatus().append(true).length(content.length);
    service.upload(test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED),
            new DisabledStreamListener(), append, new DisabledLoginCallback());
    assertTrue(new B2FindFeature(session).find(test));
    assertEquals(content.length, new B2AttributesFinderFeature(session).find(test).getSize());
    assertEquals(content.length, append.getOffset(), 0L);
    assertTrue(append.isComplete());
    final byte[] buffer = new byte[content.length];
    final InputStream in = new B2ReadFeature(session).read(test, new TransferStatus(),
            new DisabledConnectionCallback());
    IOUtils.readFully(in, buffer);
    in.close();
    assertArrayEquals(content, buffer);
    new B2DeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(),
            new Delete.DisabledCallback());
    local.delete();
    session.close();
}