Usage examples for java.util.concurrent.atomic.AtomicBoolean.get()
public final boolean get()
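get() returns the current value; because the field it reads is volatile, a value written by one thread through set(...) is visible to any thread that later calls get(). The real-world examples below all build on one idiom: a flag flipped in one thread and observed in another. As a warm-up, here is a minimal, self-contained sketch of that idiom; the class and names are illustrative and do not come from any of the projects cited below.

import java.util.concurrent.atomic.AtomicBoolean;

public class AtomicBooleanGetExample {

    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean done = new AtomicBoolean(false);

        Thread worker = new Thread(() -> {
            try {
                Thread.sleep(50); // simulate some work
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            done.set(true); // publish completion to other threads
        });
        worker.start();

        // Spin until the worker publishes the flag. The short sleep keeps the
        // busy-wait cheap; real code would usually prefer a CountDownLatch.
        while (!done.get()) {
            Thread.sleep(10);
        }
        System.out.println("worker finished: " + done.get()); // prints "worker finished: true"
    }
}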
From source file: org.apache.nifi.processors.standard.SplitXml.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    final FlowFile original = session.get();
    if (original == null) {
        return;
    }

    final int depth = context.getProperty(SPLIT_DEPTH).asInteger();
    final ComponentLog logger = getLogger();

    final List<FlowFile> splits = new ArrayList<>();
    final String fragmentIdentifier = UUID.randomUUID().toString();
    final AtomicInteger numberOfRecords = new AtomicInteger(0);
    final XmlSplitterSaxParser parser = new XmlSplitterSaxParser(xmlTree -> {
        FlowFile split = session.create(original);
        split = session.write(split, out -> out.write(xmlTree.getBytes("UTF-8")));
        split = session.putAttribute(split, FRAGMENT_ID.key(), fragmentIdentifier);
        split = session.putAttribute(split, FRAGMENT_INDEX.key(), Integer.toString(numberOfRecords.getAndIncrement()));
        split = session.putAttribute(split, SEGMENT_ORIGINAL_FILENAME.key(), split.getAttribute(CoreAttributes.FILENAME.key()));
        splits.add(split);
    }, depth);

    // The failure flag is set inside the read callback and checked with get() afterwards.
    final AtomicBoolean failed = new AtomicBoolean(false);
    session.read(original, rawIn -> {
        try (final InputStream in = new BufferedInputStream(rawIn)) {
            SAXParser saxParser = null;
            try {
                saxParser = saxParserFactory.newSAXParser();
                final XMLReader reader = saxParser.getXMLReader();
                reader.setContentHandler(parser);
                reader.parse(new InputSource(in));
            } catch (final ParserConfigurationException | SAXException e) {
                logger.error("Unable to parse {} due to {}", new Object[] { original, e });
                failed.set(true);
            }
        }
    });

    if (failed.get()) {
        session.transfer(original, REL_FAILURE);
        session.remove(splits);
    } else {
        splits.forEach((split) -> {
            split = session.putAttribute(split, FRAGMENT_COUNT.key(), Integer.toString(numberOfRecords.get()));
            session.transfer(split, REL_SPLIT);
        });
        final FlowFile originalToTransfer = copyAttributesToOriginal(session, original, fragmentIdentifier, numberOfRecords.get());
        session.transfer(originalToTransfer, REL_ORIGINAL);
        logger.info("Split {} into {} FlowFiles", new Object[] { originalToTransfer, splits.size() });
    }
}
From source file: org.apache.nifi.processors.standard.ValidateJson.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final Schema schema = schemaRef.get();
    final ComponentLog logger = getLogger();
    final AtomicBoolean valid = new AtomicBoolean(true);
    session.read(flowFile, new InputStreamCallback() {
        @Override
        public void process(final InputStream in) {
            try {
                String str = IOUtils.toString(in, StandardCharsets.UTF_8);
                if (str.startsWith("[")) {
                    schema.validate(new JSONArray(str)); // throws a ValidationException if this object is invalid
                } else {
                    schema.validate(new JSONObject(str)); // throws a ValidationException if this object is invalid
                }
            } catch (final IllegalArgumentException | ValidationException | IOException e) {
                valid.set(false);
                logger.debug("Failed to validate {} against schema due to {}", new Object[] { flowFile, e });
            }
        }
    });

    if (valid.get()) {
        logger.debug("Successfully validated {} against schema; routing to 'valid'", new Object[] { flowFile });
        session.getProvenanceReporter().route(flowFile, REL_VALID);
        session.transfer(flowFile, REL_VALID);
    } else {
        logger.debug("Failed to validate {} against schema; routing to 'invalid'", new Object[] { flowFile });
        session.getProvenanceReporter().route(flowFile, REL_INVALID);
        session.transfer(flowFile, REL_INVALID);
    }
}
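Both NiFi examples above share the same shape: an AtomicBoolean starts at a known value, an anonymous callback flips it when parsing or validation fails, and get() drives the routing decision once the callback returns. Stripped of the NiFi API, the pattern reduces to the following sketch; the names and the validation logic are illustrative only.

import java.util.concurrent.atomic.AtomicBoolean;

public class CallbackFlagExample {

    interface Callback {
        void process(String input) throws Exception;
    }

    static void runGuarded(String input, Callback callback) {
        final AtomicBoolean failed = new AtomicBoolean(false);
        try {
            callback.process(input);
        } catch (Exception e) {
            failed.set(true); // record the failure inside the handler
        }
        // Route based on the flag, as onTrigger() does with REL_FAILURE vs. REL_SPLIT/REL_VALID.
        System.out.println(failed.get() ? "route to failure" : "route to success");
    }

    public static void main(String[] args) {
        runGuarded("{]", in -> {
            if (!in.startsWith("{") || !in.endsWith("}")) {
                throw new IllegalArgumentException("not a JSON object");
            }
        });
    }
}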
From source file: org.apache.hadoop.hdfs.TestAutoEditRollWhenAvatarFailover.java

/**
 * Test if we can get block locations after killing the primary avatar,
 * failing over to the standby avatar (making it the new primary),
 * restarting a new standby avatar, killing the new primary avatar and
 * failing over to the restarted standby.
 *
 * Write logs for a while to make sure automatic rolling is triggered.
 */
@Test
public void testDoubleFailOverWithAutomaticRoll() throws Exception {
    setUp(false, "testDoubleFailOverWithAutomaticRoll");

    // To make sure it's never the case that both primary and standby
    // issue rolling, we use an injection handler.
    final AtomicBoolean startKeepThread = new AtomicBoolean(true);
    final AtomicInteger countAutoRolled = new AtomicInteger(0);
    final AtomicBoolean needFail = new AtomicBoolean(false);
    final AtomicLong currentThreadId = new AtomicLong(-1);
    final Object waitFor10Rolls = new Object();
    InjectionHandler.set(new InjectionHandler() {
        @Override
        protected void _processEvent(InjectionEventI event, Object... args) {
            if (event == InjectionEvent.FSEDIT_AFTER_AUTOMATIC_ROLL) {
                countAutoRolled.incrementAndGet();
                if (countAutoRolled.get() >= 10) {
                    synchronized (waitFor10Rolls) {
                        waitFor10Rolls.notifyAll();
                    }
                }
                if (!startKeepThread.get()) {
                    currentThreadId.set(-1);
                } else if (currentThreadId.get() == -1) {
                    currentThreadId.set(Thread.currentThread().getId());
                } else if (currentThreadId.get() != Thread.currentThread().getId()) {
                    LOG.warn("[Thread " + Thread.currentThread().getId() + "] expected: " + currentThreadId);
                    needFail.set(true);
                }
                LOG.info("[Thread " + Thread.currentThread().getId()
                        + "] finish automatic log rolling, count " + countAutoRolled.get());

                // Increase the rolling time a little bit once after 7 auto rolls.
                if (countAutoRolled.get() % 7 == 3) {
                    DFSTestUtil.waitNMilliSecond(75);
                }
            }
        }
    });

    FileSystem fs = cluster.getFileSystem();

    // Add some transactions during a period of time before failing over.
    long startTime = System.currentTimeMillis();
    for (int i = 0; i < 100; i++) {
        fs.setTimes(new Path("/"), 0, 0);
        DFSTestUtil.waitNMilliSecond(100);
        if (i % 10 == 0) {
            LOG.info("================== executed " + i + " queries");
        }
        if (countAutoRolled.get() >= 10) {
            LOG.info("Automatic rolled 10 times.");
            long duration = System.currentTimeMillis() - startTime;
            TestCase.assertTrue("Automatic rolled 10 times in just " + duration
                    + " msecs, which is too short", duration > 4500);
            break;
        }
    }
    TestCase.assertTrue("Only " + countAutoRolled + " automatic rolls triggered, which is lower than expected.",
            countAutoRolled.get() >= 10);

    // Tune the rolling timeout temporarily to avoid race conditions
    // only triggered in tests.
    cluster.getPrimaryAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(5000);
    cluster.getStandbyAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(5000);

    LOG.info("================== killing primary 1");
    cluster.killPrimary();

    // Fail over and make sure that after fail over, automatic edits roll
    // will still happen.
    countAutoRolled.set(0);
    startKeepThread.set(false);
    currentThreadId.set(-1);
    LOG.info("================== failing over 1");
    cluster.failOver();
    cluster.getPrimaryAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(1000);

    LOG.info("================== restarting standby");
    cluster.restartStandby();
    cluster.getStandbyAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(1000);
    LOG.info("================== Finish restarting standby");

    // Wait for automatic rolling to happen if there is no new transaction.
    startKeepThread.set(true);
    startTime = System.currentTimeMillis();
    long waitDeadLine = startTime + 20000;
    synchronized (waitFor10Rolls) {
        while (System.currentTimeMillis() < waitDeadLine && countAutoRolled.get() < 10) {
            waitFor10Rolls.wait(waitDeadLine - System.currentTimeMillis());
        }
    }
    TestCase.assertTrue("Only " + countAutoRolled + " automatic rolls triggered, which is lower than expected.",
            countAutoRolled.get() >= 10);
    long duration = System.currentTimeMillis() - startTime;
    TestCase.assertTrue("Automatic rolled 10 times in just " + duration + " msecs", duration > 9000);

    // Fail over back.
    countAutoRolled.set(0);
    startKeepThread.set(false);
    currentThreadId.set(-1);
    cluster.getPrimaryAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(6000);
    cluster.getStandbyAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(6000);
    LOG.info("================== killing primary 2");
    cluster.killPrimary();
    LOG.info("================== failing over 2");
    cluster.failOver();
    cluster.getPrimaryAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(1000);

    // Make sure that after failing back, automatic rolling can still happen.
    startKeepThread.set(true);
    for (int i = 0; i < 100; i++) {
        fs.setTimes(new Path("/"), 0, 0);
        DFSTestUtil.waitNMilliSecond(200);
        if (i % 10 == 0) {
            LOG.info("================== executed " + i + " queries");
        }
        if (countAutoRolled.get() > 10) {
            LOG.info("Automatic rolled 10 times.");
            duration = System.currentTimeMillis() - startTime;
            TestCase.assertTrue("Automatic rolled 10 times in just " + duration
                    + " msecs, which is too short", duration > 9000);
            break;
        }
    }
    TestCase.assertTrue("Only " + countAutoRolled + " automatic rolls triggered, which is lower than expected.",
            countAutoRolled.get() >= 10);

    InjectionHandler.clear();
    if (needFail.get()) {
        TestCase.fail("Automatic rolling doesn't happen in the same thread when it should.");
    }
}
From source file: net.dempsy.container.TestContainer.java

@Test
public void testEvictCollisionWithBlocking() throws Throwable {
    final TestProcessor mp = createAndGet("foo");

    // now we're going to cause the passivate to be held up.
    mp.blockPassivate = new CountDownLatch(1);
    mp.evict.set(true); // allow eviction

    // now kick off the evict in a separate thread since we expect it to hang
    // until the mp becomes unstuck.
    final AtomicBoolean evictIsComplete = new AtomicBoolean(false); // this will allow us to see the evict pass complete
    final Thread thread = new Thread(new Runnable() {
        @Override
        public void run() {
            container.evict();
            evictIsComplete.set(true);
        }
    });
    thread.start();

    Thread.sleep(500); // let it get going.
    assertFalse(evictIsComplete.get()); // check to see we're hung.

    final ClusterMetricGetters sc = (ClusterMetricGetters) statsCollector;
    assertEquals(0, sc.getMessageCollisionCount());

    // sending it a message will now cause it to have the collision tick up
    final TestAdaptor adaptor = context.getBean(TestAdaptor.class);
    adaptor.dispatcher.dispatchAnnotated(new ContainerTestMessage("foo"));

    // give it some time.
    Thread.sleep(100);

    // make sure there's no collision
    assertEquals(0, sc.getMessageCollisionCount());

    // make sure no message got handled
    assertEquals(1, mp.invocationCount); // 1 is the initial invocation that caused the instantiation.

    // now let the evict finish
    mp.blockPassivate.countDown();

    // wait until the eviction completes
    assertTrue(poll(evictIsComplete, o -> o.get()));

    // Once the poll finishes a new Mp is instantiated and handling messages.
    assertTrue(poll(cache, c -> c.get("foo") != null));
    final TestProcessor mp2 = cache.get("foo");
    assertNotNull("MP not associated with expected key", mp);

    // invocationCount should be 1 from the initial invocation that caused the clone, and no more
    assertEquals(1, mp.invocationCount);
    assertEquals(1, mp2.invocationCount);
    assertTrue(mp != mp2);

    // send a message that should go through
    adaptor.dispatcher.dispatchAnnotated(new ContainerTestMessage("foo"));
    assertTrue(poll(o -> mp2.invocationCount > 1));
    Thread.sleep(100);
    assertEquals(1, mp.invocationCount);
    assertEquals(2, mp2.invocationCount);
}
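The poll(...) helper used above comes from the project's own test harness, not the JDK; its essential behavior is to re-evaluate a condition such as evictIsComplete.get() until it turns true or a timeout expires. A minimal stand-in, with hypothetical names and timings, might look like this:

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Predicate;

final class PollUtil {

    // Re-evaluate the condition every 10 ms for up to ~5 s; return its final value.
    static <T> boolean poll(T subject, Predicate<T> condition) throws InterruptedException {
        final long deadline = System.currentTimeMillis() + 5_000;
        while (System.currentTimeMillis() < deadline) {
            if (condition.test(subject)) {
                return true;
            }
            Thread.sleep(10);
        }
        return condition.test(subject);
    }

    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean flag = new AtomicBoolean(false);
        new Thread(() -> flag.set(true)).start();
        System.out.println(poll(flag, AtomicBoolean::get)); // true once the writer thread runs
    }
}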
From source file: com.datatorrent.stram.engine.GenericNodeTest.java

@Test
@SuppressWarnings("SleepWhileInLoop")
public void testSynchingLogic() throws InterruptedException {
    long sleeptime = 25L;
    final ArrayList<Object> list = new ArrayList<Object>();
    GenericOperator go = new GenericOperator();
    final GenericNode gn = new GenericNode(go,
            new com.datatorrent.stram.engine.OperatorContext(0, new DefaultAttributeMap(), null));
    gn.setId(1);
    AbstractReservoir reservoir1 = AbstractReservoir.newReservoir("ip1Res", 1024);
    AbstractReservoir reservoir2 = AbstractReservoir.newReservoir("ip2Res", 1024);
    Sink<Object> output = new Sink<Object>() {
        @Override
        public void put(Object tuple) {
            list.add(tuple);
        }

        @Override
        public int getCount(boolean reset) {
            return 0;
        }
    };
    gn.connectInputPort("ip1", reservoir1);
    gn.connectInputPort("ip2", reservoir2);
    gn.connectOutputPort("op", output);
    gn.firstWindowMillis = 0;
    gn.windowWidthMillis = 100;

    final AtomicBoolean ab = new AtomicBoolean(false);
    Thread t = new Thread() {
        @Override
        public void run() {
            ab.set(true);
            gn.activate();
            gn.run();
            gn.deactivate();
        }
    };
    t.start();

    // Spin until the node thread has started, as published through the AtomicBoolean.
    do {
        Thread.sleep(sleeptime);
    } while (ab.get() == false);

    Tuple beginWindow1 = new Tuple(MessageType.BEGIN_WINDOW, 0x1L);
    reservoir1.add(beginWindow1);
    Thread.sleep(sleeptime);
    Assert.assertEquals(1, list.size());

    reservoir2.add(beginWindow1);
    Thread.sleep(sleeptime);
    Assert.assertEquals(1, list.size());

    Tuple endWindow1 = new EndWindowTuple(0x1L);
    reservoir1.add(endWindow1);
    Thread.sleep(sleeptime);
    Assert.assertEquals(1, list.size());

    Tuple beginWindow2 = new Tuple(MessageType.BEGIN_WINDOW, 0x2L);
    reservoir1.add(beginWindow2);
    Thread.sleep(sleeptime);
    Assert.assertEquals(1, list.size());

    reservoir2.add(endWindow1);
    Thread.sleep(sleeptime);
    Assert.assertEquals(3, list.size());

    reservoir2.add(beginWindow2);
    Thread.sleep(sleeptime);
    Assert.assertEquals(3, list.size());

    Tuple endWindow2 = new EndWindowTuple(0x2L);
    reservoir2.add(endWindow2);
    Thread.sleep(sleeptime);
    Assert.assertEquals(3, list.size());

    reservoir1.add(endWindow2);
    Thread.sleep(sleeptime);
    Assert.assertEquals(4, list.size());

    EndStreamTuple est = new EndStreamTuple(0L);
    reservoir1.add(est);
    Thread.sleep(sleeptime);
    Assert.assertEquals(4, list.size());

    Tuple beginWindow3 = new Tuple(MessageType.BEGIN_WINDOW, 0x3L);
    reservoir2.add(beginWindow3);
    Thread.sleep(sleeptime);
    Assert.assertEquals(5, list.size());

    Tuple endWindow3 = new EndWindowTuple(0x3L);
    reservoir2.add(endWindow3);
    Thread.sleep(sleeptime);
    Assert.assertEquals(6, list.size());

    Assert.assertNotSame(Thread.State.TERMINATED, t.getState());

    reservoir2.add(est);
    Thread.sleep(sleeptime);
    Assert.assertEquals(7, list.size());

    Thread.sleep(sleeptime);
    Assert.assertEquals(Thread.State.TERMINATED, t.getState());
}
From source file: org.apache.hadoop.corona.TestMiniCoronaRunJob.java

private void runMultipleSleepJobs(final JobConf conf, final int maps, final int reduces, int numJobs)
        throws Exception {
    final CountDownLatch startSignal = new CountDownLatch(1);
    final CountDownLatch endSignal = new CountDownLatch(numJobs);
    final AtomicBoolean failed = new AtomicBoolean(false);
    for (int i = 0; i < numJobs; ++i) {
        Runnable action = new Runnable() {
            @Override
            public void run() {
                try {
                    startSignal.await();
                    runSleepJob(conf, maps, reduces);
                    LOG.info("Sleep Job finished");
                    endSignal.countDown();
                } catch (Exception e) {
                    LOG.error("Exception in running SleepJob", e);
                    failed.set(true);
                    endSignal.countDown();
                }
            }
        };
        new Thread(action).start();
    }
    // Starting all jobs at the same time
    startSignal.countDown();
    // Waiting for all jobs to finish
    endSignal.await();
    if (failed.get()) {
        fail("Some of the Sleepjobs failed");
    }
}
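A detail worth noting in this example: several job threads may call failed.set(true) concurrently, which is harmless since the flag only ever moves from false to true. And because the main thread reads the flag only after endSignal.await() returns, the latch's happens-before edge, on top of AtomicBoolean's own volatile semantics, guarantees that any such write is visible to failed.get().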
From source file: ch.cyberduck.core.b2.B2LargeUploadServiceTest.java

@Test
public void testAppendNoPartCompleted() throws Exception {
    final B2Session session = new B2Session(new Host(new B2Protocol(), new B2Protocol().getDefaultHostname(),
            new Credentials(System.getProperties().getProperty("b2.user"),
                    System.getProperties().getProperty("b2.key"))));
    session.open(new DisabledHostKeyCallback());
    session.login(new DisabledPasswordStore(), new DisabledLoginCallback(), new DisabledCancelCallback());
    final Path bucket = new Path("test-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final Path test = new Path(bucket, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    final Local local = new Local(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());
    final int length = 102 * 1024 * 1024;
    final byte[] content = RandomUtils.nextBytes(length);
    IOUtils.write(content, local.getOutputStream(false));
    final TransferStatus status = new TransferStatus();
    status.setLength(content.length);
    final AtomicBoolean interrupt = new AtomicBoolean();
    final B2LargeUploadService service = new B2LargeUploadService(session, new B2WriteFeature(session),
            100 * 1024L * 1024L, 1);
    try {
        service.upload(test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED),
                new DisabledStreamListener() {
                    long count;

                    @Override
                    public void sent(final long bytes) {
                        count += bytes;
                        if (count >= 5 * 1024L * 1024L) {
                            throw new RuntimeException();
                        }
                    }
                }, status, new DisabledLoginCallback());
    } catch (BackgroundException e) {
        // Expected
        interrupt.set(true);
    }
    assertTrue(interrupt.get());
    assertEquals(0L, status.getOffset(), 0L);
    assertFalse(status.isComplete());

    final TransferStatus append = new TransferStatus().append(true).length(content.length);
    service.upload(test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED),
            new DisabledStreamListener(), append, new DisabledLoginCallback());
    assertTrue(new B2FindFeature(session).find(test));
    assertEquals(content.length, new B2AttributesFinderFeature(session).find(test).getSize());
    assertEquals(content.length, append.getOffset(), 0L);
    assertTrue(append.isComplete());
    final byte[] buffer = new byte[content.length];
    final InputStream in = new B2ReadFeature(session).read(test, new TransferStatus(), new DisabledConnectionCallback());
    IOUtils.readFully(in, buffer);
    in.close();
    assertArrayEquals(content, buffer);
    new B2DeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(),
            new Delete.DisabledCallback());
    local.delete();
    session.close();
}
From source file: org.lendingclub.mercator.docker.SwarmScanner.java

public void scan() {
    WebTarget t = extractWebTarget(dockerScanner.getDockerClient());
    logger.info("Scanning {}", t);
    JsonNode response = t.path("/info").request().buildGet().invoke(JsonNode.class);
    JsonNode swarm = response.path("Swarm");
    JsonNode cluster = swarm.path("Cluster");
    String swarmClusterId = cluster.path("ID").asText();
    // need to parse these dates
    String createdAt = cluster.path("CreatedAt").asText();
    String updatedAt = cluster.path("UpdatedAt").asText();
    ObjectNode props = mapper.createObjectNode();
    props.put("swarmClusterId", swarmClusterId);
    props.put("createdAt", createdAt);
    props.put("updatedAt", updatedAt);
    JsonNode swarmNode = dockerScanner.getNeoRxClient().execCypher(
            "merge (c:DockerSwarm {swarmClusterId:{id}}) set c+={props},c.updateTs=timestamp() return c",
            "id", swarmClusterId, "props", props).blockingFirst(MissingNode.getInstance());

    if (isUnixDomainScoket(t.getUri().toString())) {
        // Only set managerApiUrl to a unix domain socket if it has not
        // already been set. This is useful for trident.
        if (!isUnixDomainScoket(swarmNode.path("managerApiUrl").asText())) {
            String LOCAL_DOCKER_DAEMON_SOCKET_URL = "unix:///var/run/docker.sock";
            logger.info("setting managerApiUrl to {} for swarm {}", LOCAL_DOCKER_DAEMON_SOCKET_URL, swarmClusterId);
            String name = "local";
            dockerScanner.getNeoRxClient()
                    .execCypher("match (c:DockerSwarm {name:{name}}) return c", "name", name).forEach(it -> {
                        String oldSwarmClusterId = it.path("swarmClusterId").asText();
                        if (!swarmClusterId.equals(oldSwarmClusterId)) {
                            dockerScanner.getNeoRxClient().execCypher(
                                    "match (c:DockerSwarm {swarmClusterId:{swarmClusterId}}) detach delete c",
                                    "swarmClusterId", oldSwarmClusterId);
                        }
                    });
            dockerScanner.getNeoRxClient().execCypher(
                    "match (c:DockerSwarm {swarmClusterId:{id}}) set c.managerApiUrl={managerApiUrl},c.name={name},c.tridentClusterId={name} return c",
                    "id", swarmClusterId, "managerApiUrl", LOCAL_DOCKER_DAEMON_SOCKET_URL, "name", name);
        }
    }

    // The fail flag is set from inside the per-node lambda and checked with get() afterwards.
    AtomicBoolean fail = new AtomicBoolean(false);
    response = t.path("/nodes").request().buildGet().invoke(JsonNode.class);
    AtomicLong earliestTimestamp = new AtomicLong(Long.MAX_VALUE);
    response.elements().forEachRemaining(it -> {
        try {
            earliestTimestamp.set(
                    Math.min(earliestTimestamp.get(), saveDockerNode(swarmClusterId, flattenSwarmNode(it))));
        } catch (RuntimeException e) {
            logger.warn("problem", e);
            fail.set(true);
        }
    });
    if (!fail.get()) {
        if (earliestTimestamp.get() < System.currentTimeMillis()) {
            logger.info("deleting DockerHost nodes with updateTs<{}", earliestTimestamp.get());
            dockerScanner.getNeoRxClient().execCypher(
                    "match (s:DockerSwarm {swarmClusterId:{id}})--(x:DockerHost) where s.updateTs>x.updateTs detach delete x",
                    "id", swarmClusterId);
        }
    }
    scanServicesForSwarm(swarmClusterId);
    scanTasksForSwarm(swarmClusterId);
}
From source file: com.jayway.restassured.path.json.JsonPathObjectDeserializationTest.java

@Test
public void json_path_supports_custom_deserializer_with_static_configuration() {
    // Given
    final AtomicBoolean customDeserializerUsed = new AtomicBoolean(false);
    JsonPath.config = new JsonPathConfig().defaultObjectDeserializer(new JsonPathObjectDeserializer() {
        public <T> T deserialize(ObjectDeserializationContext ctx) {
            customDeserializerUsed.set(true);
            final String json = ctx.getDataToDeserialize().asString();
            final Greeting greeting = new Greeting();
            greeting.setFirstName(StringUtils.substringBetween(json, "\"firstName\":\"", "\""));
            greeting.setLastName(StringUtils.substringBetween(json, "\"lastName\":\"", "\""));
            return (T) greeting;
        }
    });
    final JsonPath jsonPath = new JsonPath(GREETING);

    // When
    try {
        final Greeting greeting = jsonPath.getObject("", Greeting.class);

        // Then
        assertThat(greeting.getFirstName(), equalTo("John"));
        assertThat(greeting.getLastName(), equalTo("Doe"));
        assertThat(customDeserializerUsed.get(), is(true));
    } finally {
        JsonPath.reset();
    }
}
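Here the AtomicBoolean is less about thread safety than about capture: an anonymous class may only reference local variables that are effectively final, so a mutable flag object is the standard way for the deserializer to report that it actually ran, which the test then asserts through customDeserializerUsed.get().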
From source file: com.navercorp.pinpoint.profiler.sender.UdpDataSenderTest.java

private boolean sendMessage_getLimit(TBase tbase, long waitTimeMillis) throws InterruptedException {
    final AtomicBoolean limitCounter = new AtomicBoolean(false);
    final CountDownLatch latch = new CountDownLatch(1);

    final MessageConverter<TBase<?, ?>> messageConverter = new BypassMessageConverter<TBase<?, ?>>();
    final MessageSerializer<ByteMessage> thriftMessageSerializer = new ThriftUdpMessageSerializer(
            messageConverter, ThriftUdpMessageSerializer.UDP_MAX_PACKET_LENGTH) {
        @Override
        protected boolean isLimit(int interBufferSize) {
            final boolean limit = super.isLimit(interBufferSize);
            limitCounter.set(limit);
            latch.countDown();
            return limit;
        }
    };

    UdpDataSender sender = new UdpDataSender("localhost", PORT, "test", 128, 1000, 1024 * 64 * 100,
            thriftMessageSerializer);
    try {
        sender.send(tbase);
        latch.await(waitTimeMillis, TimeUnit.MILLISECONDS);
    } finally {
        sender.stop();
    }
    return limitCounter.get();
}