List of usage examples for java.util.concurrent.atomic.AtomicBoolean.set
public final void set(boolean newValue)
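set(boolean) unconditionally writes the new value with volatile semantics, so the update is immediately visible to other threads. The examples below share a common pattern: a worker thread or callback flips the flag, and the calling code reads it afterwards with get(). A minimal, self-contained sketch of that pattern (the class and variable names here are illustrative, not taken from any of the projects below):

    import java.util.concurrent.atomic.AtomicBoolean;

    public class AtomicBooleanSetExample {
        public static void main(String[] args) throws InterruptedException {
            final AtomicBoolean done = new AtomicBoolean(false);

            // A worker thread signals completion by setting the flag;
            // set(true) is a volatile write, so it is visible to the main thread.
            Thread worker = new Thread(() -> {
                // ... do some work ...
                done.set(true);
            });
            worker.start();
            worker.join();

            System.out.println("done = " + done.get()); // prints "done = true"
        }
    }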
From source file:org.apache.nifi.processors.standard.ValidateJson.java
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    final Schema schema = schemaRef.get();
    final ComponentLog logger = getLogger();
    final AtomicBoolean valid = new AtomicBoolean(true);
    session.read(flowFile, new InputStreamCallback() {
        @Override
        public void process(final InputStream in) {
            try {
                String str = IOUtils.toString(in, StandardCharsets.UTF_8);
                if (str.startsWith("[")) {
                    schema.validate(new JSONArray(str)); // throws a ValidationException if this object is invalid
                } else {
                    schema.validate(new JSONObject(str)); // throws a ValidationException if this object is invalid
                }
            } catch (final IllegalArgumentException | ValidationException | IOException e) {
                valid.set(false);
                logger.debug("Failed to validate {} against schema due to {}", new Object[] { flowFile, e });
            }
        }
    });

    if (valid.get()) {
        logger.debug("Successfully validated {} against schema; routing to 'valid'", new Object[] { flowFile });
        session.getProvenanceReporter().route(flowFile, REL_VALID);
        session.transfer(flowFile, REL_VALID);
    } else {
        logger.debug("Failed to validate {} against schema; routing to 'invalid'", new Object[] { flowFile });
        session.getProvenanceReporter().route(flowFile, REL_INVALID);
        session.transfer(flowFile, REL_INVALID);
    }
}
From source file:org.eclipse.hono.deviceregistry.FileBasedCredentialsService.java
/**
 * Get the credentials associated with the authId and the given type.
 * If type is null, all credentials associated with the authId are returned (as JsonArray inside the return value).
 *
 * @param tenantId The id of the tenant the credentials belong to.
 * @param authId The authentication identifier to look up credentials for.
 * @param type The type of credentials to look up.
 * @return The credentials object of the given type or {@code null} if no matching credentials exist.
 */
private JsonObject getSingleCredentials(final String tenantId, final String authId, final String type,
        final JsonObject clientContext) {
    Objects.requireNonNull(tenantId);
    Objects.requireNonNull(authId);
    Objects.requireNonNull(type);

    final Map<String, JsonArray> credentialsForTenant = credentials.get(tenantId);
    if (credentialsForTenant != null) {
        final JsonArray authIdCredentials = credentialsForTenant.get(authId);
        if (authIdCredentials != null) {
            for (final Object authIdCredentialEntry : authIdCredentials) {
                final JsonObject authIdCredential = (JsonObject) authIdCredentialEntry;
                // return the first matching type entry for this authId
                if (type.equals(authIdCredential.getString(CredentialsConstants.FIELD_TYPE))) {
                    if (clientContext != null) {
                        final AtomicBoolean match = new AtomicBoolean(true);
                        clientContext.forEach(field -> {
                            if (authIdCredential.containsKey(field.getKey())) {
                                if (!authIdCredential.getString(field.getKey()).equals(field.getValue())) {
                                    match.set(false);
                                }
                            } else {
                                match.set(false);
                            }
                        });
                        if (!match.get()) {
                            continue;
                        }
                    }
                    return authIdCredential;
                }
            }
        }
    }
    return null;
}
From source file:org.apache.tinkerpop.gremlin.groovy.engine.GremlinExecutorTest.java
@Test
public void shouldRaiseExceptionInWithResultOfLifeCycle() throws Exception {
    final GremlinExecutor gremlinExecutor = GremlinExecutor.build().create();
    final GremlinExecutor.LifeCycle lc = GremlinExecutor.LifeCycle.build().withResult(r -> {
        throw new RuntimeException("no worky");
    }).create();

    final AtomicBoolean exceptionRaised = new AtomicBoolean(false);

    final CompletableFuture<Object> future = gremlinExecutor.eval("1+1", "gremlin-groovy", new SimpleBindings(), lc);
    future.handle((r, t) -> {
        exceptionRaised.set(t != null && t instanceof RuntimeException && t.getMessage().equals("no worky"));
        return null;
    }).get();

    assertThat(exceptionRaised.get(), is(true));

    gremlinExecutor.close();
}
From source file:org.apache.jackrabbit.oak.plugins.segment.CompactionAndCleanupIT.java
/**
 * Regression test for OAK-2192 testing for mixed segments. This test does not
 * cover OAK-3348. I.e. it does not assert the segment graph is free of cross
 * gc generation references.
 */
@Test
public void testMixedSegments() throws Exception {
    FileStore store = FileStore.builder(getFileStoreFolder()).withMaxFileSize(2).withMemoryMapping(true)
            .build();
    final SegmentNodeStore nodeStore = SegmentNodeStore.builder(store).build();
    final AtomicBoolean compactionSuccess = new AtomicBoolean(true);
    CompactionStrategy strategy = new CompactionStrategy(true, false, CLEAN_NONE, 0, (byte) 5) {
        @Override
        public boolean compacted(Callable<Boolean> setHead) throws Exception {
            compactionSuccess.set(nodeStore.locked(setHead, 1, MINUTES));
            return compactionSuccess.get();
        }
    };
    strategy.setForceAfterFail(true);
    store.setCompactionStrategy(strategy);

    NodeBuilder root = nodeStore.getRoot().builder();
    createNodes(root.setChildNode("test"), 10, 3);
    nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY);

    final Set<UUID> beforeSegments = new HashSet<UUID>();
    collectSegments(store.getHead(), beforeSegments);

    final AtomicReference<Boolean> run = new AtomicReference<Boolean>(true);
    final List<String> failedCommits = newArrayList();
    Thread[] threads = new Thread[10];
    for (int k = 0; k < threads.length; k++) {
        final int threadId = k;
        threads[k] = new Thread(new Runnable() {
            @Override
            public void run() {
                for (int j = 0; run.get(); j++) {
                    String nodeName = "b-" + threadId + "," + j;
                    try {
                        NodeBuilder root = nodeStore.getRoot().builder();
                        root.setChildNode(nodeName);
                        nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY);
                        Thread.sleep(5);
                    } catch (CommitFailedException e) {
                        failedCommits.add(nodeName);
                    } catch (InterruptedException e) {
                        Thread.interrupted();
                        break;
                    }
                }
            }
        });
        threads[k].start();
    }

    store.compact();
    run.set(false);

    for (Thread t : threads) {
        t.join();
    }

    store.flush();

    assumeTrue("Failed to acquire compaction lock", compactionSuccess.get());
    assertTrue("Failed commits: " + failedCommits, failedCommits.isEmpty());

    Set<UUID> afterSegments = new HashSet<UUID>();
    collectSegments(store.getHead(), afterSegments);
    try {
        for (UUID u : beforeSegments) {
            assertFalse("Mixed segments found: " + u, afterSegments.contains(u));
        }
    } finally {
        store.close();
    }
}
From source file:com.barchart.netty.server.http.TestHttpServer.java
@Test
public void testShutdown() throws Exception {
    final ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
    final AtomicBoolean pass = new AtomicBoolean(false);

    final Thread t = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                Thread.sleep(1000);
                server.shutdown();
            } catch (final InterruptedException e1) {
                e1.printStackTrace();
            }
            try {
                client.execute(new HttpGet("http://localhost:" + port + "/basic"));
            } catch (final HttpHostConnectException hhce) {
                pass.set(true);
            } catch (final Exception e) {
                e.printStackTrace();
            }
        }
    });
    t.start();

    final HttpGet get = new HttpGet("http://localhost:" + port + "/client-disconnect");
    final HttpResponse response = client.execute(get);
    EntityUtils.consume(response.getEntity());

    assertEquals(200, response.getStatusLine().getStatusCode());

    t.join();

    assertTrue(pass.get());
}
From source file:br.com.ingenieux.mojo.jbake.WatchMojo.java
public void executeInternal() throws MojoExecutionException {
    reRender();
    Long lastProcessed = Long.valueOf(System.currentTimeMillis());
    getLog().info("Now listening for changes on path " + inputDirectory.getPath());
    initServer();
    DirWatcher dirWatcher = null;
    try {
        dirWatcher = new DirWatcher(inputDirectory);
        final AtomicBoolean done = new AtomicBoolean(false);
        final BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));

        (new Thread() {
            @Override
            public void run() {
                try {
                    getLog().info("Running. Enter a blank line to finish. Anything else forces re-rendering.");
                    while (true) {
                        String line = reader.readLine();
                        if (isBlank(line)) {
                            break;
                        }
                        reRender();
                    }
                } catch (Exception exc) {
                    getLog().info("Ooops", exc);
                } finally {
                    done.set(true);
                }
            }
        }).start();

        dirWatcher.start();
        do {
            Long result = dirWatcher.processEvents();
            if (null == result) {
                // do nothing on purpose
            } else if (result >= lastProcessed) {
                getLog().info("Refreshing");
                super.reRender();
                lastProcessed = Long.valueOf(System.currentTimeMillis());
            }
        } while (!done.get());
    } catch (Exception exc) {
        getLog().info("Oops", exc);
        throw new MojoExecutionException("Oops", exc);
    } finally {
        getLog().info("Finishing");
        if (null != dirWatcher)
            dirWatcher.stop();
        stopServer();
    }
}
From source file:org.apache.hadoop.yarn.server.resourcemanager.recovery.TestFSRMStateStore.java
@Test(timeout = 30000)
public void testFSRMStateStoreClientRetry() throws Exception {
    HdfsConfiguration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitActive();
    try {
        TestFSRMStateStoreTester fsTester = new TestFSRMStateStoreTester(cluster, false);
        final RMStateStore store = fsTester.getRMStateStore();
        store.setRMDispatcher(new TestDispatcher());
        final AtomicBoolean assertionFailedInThread = new AtomicBoolean(false);
        cluster.shutdownNameNodes();

        Thread clientThread = new Thread() {
            @Override
            public void run() {
                try {
                    store.storeApplicationStateInternal(ApplicationId.newInstance(100L, 1),
                            ApplicationStateData.newInstance(111, 111, "user", null, RMAppState.ACCEPTED,
                                    "diagnostics", 333, null));
                } catch (Exception e) {
                    assertionFailedInThread.set(true);
                    e.printStackTrace();
                }
            }
        };
        Thread.sleep(2000);
        clientThread.start();
        cluster.restartNameNode();
        clientThread.join();
        Assert.assertFalse(assertionFailedInThread.get());
    } finally {
        cluster.shutdown();
    }
}
From source file:com.streamsets.pipeline.stage.origin.jdbc.AbstractTableJdbcSource.java
private synchronized boolean shutdownExecutorIfNeeded() {
    AtomicBoolean interrupted = new AtomicBoolean(false);
    Optional.ofNullable(executorService).ifPresent(executor -> {
        if (!executor.isTerminated()) {
            LOG.info("Shutting down executor service");
            executor.shutdown();
            try {
                executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
            } catch (InterruptedException e) {
                LOG.warn("Shutdown interrupted");
                interrupted.set(true);
            }
        }
    });
    return interrupted.get();
}
From source file:org.apache.hadoop.hbase.client.SpeculativeMutater.java
public Boolean mutate(final long waitToSendFailover, final long waitToSendFailoverWithException,
        final HBaseTableFunction<Void> function, final HTableInterface primaryTable,
        final Collection<HTableInterface> failoverTables, final AtomicLong lastPrimaryFail,
        final int waitTimeFromLastPrimaryFail) {
    ExecutorCompletionService<Boolean> exeS = new ExecutorCompletionService<Boolean>(exe);
    ArrayList<Callable<Boolean>> callables = new ArrayList<Callable<Boolean>>();

    final AtomicBoolean isPrimarySuccess = new AtomicBoolean(false);
    final long startTime = System.currentTimeMillis();
    final long lastPrimaryFinalFail = lastPrimaryFail.get();

    if (System.currentTimeMillis() - lastPrimaryFinalFail > 5000) {
        callables.add(new Callable<Boolean>() {
            public Boolean call() throws Exception {
                try {
                    LOG.info(" --- CallingPrimary.1:" + isPrimarySuccess.get() + ", "
                            + (System.currentTimeMillis() - startTime));
                    function.call(primaryTable);
                    LOG.info(" --- CallingPrimary.2:" + isPrimarySuccess.get() + ", "
                            + (System.currentTimeMillis() - startTime));
                    isPrimarySuccess.set(true);
                    return true;
                } catch (java.io.InterruptedIOException e) {
                    Thread.currentThread().interrupt();
                } catch (Exception e) {
                    lastPrimaryFail.set(System.currentTimeMillis());
                    Thread.currentThread().interrupt();
                }
                return null;
            }
        });
    }

    for (final HTableInterface failoverTable : failoverTables) {
        callables.add(new Callable<Boolean>() {
            public Boolean call() throws Exception {
                long waitToRequest = (System.currentTimeMillis() - lastPrimaryFinalFail > 5000)
                        ? waitToSendFailover - (System.currentTimeMillis() - startTime)
                        : waitToSendFailoverWithException - (System.currentTimeMillis() - startTime);

                LOG.info(" --- waitToRequest:" + waitToRequest + ","
                        + (System.currentTimeMillis() - lastPrimaryFinalFail) + ","
                        + (waitToSendFailover - (System.currentTimeMillis() - startTime)) + ","
                        + (waitToSendFailoverWithException - (System.currentTimeMillis() - startTime)));

                if (waitToRequest > 0) {
                    Thread.sleep(waitToRequest);
                }
                LOG.info(" --- isPrimarySuccess.get():" + isPrimarySuccess.get());
                if (isPrimarySuccess.get() == false) {
                    LOG.info(" --- CallingFailOver.1:" + isPrimarySuccess.get() + ", "
                            + (System.currentTimeMillis() - startTime));
                    function.call(failoverTable);
                    LOG.info(" --- CallingFailOver.2:" + isPrimarySuccess.get() + ", "
                            + (System.currentTimeMillis() - startTime));
                }
                return false;
            }
        });
    }

    try {
        for (Callable<Boolean> call : callables) {
            exeS.submit(call);
        }
        Boolean result = exeS.take().get();
        return result;
    } catch (InterruptedException e) {
        e.printStackTrace();
        LOG.error(e);
    } catch (ExecutionException e) {
        e.printStackTrace();
        LOG.error(e);
    }
    return null;
}
From source file:net.dempsy.container.TestContainer.java
@Test
public void testEvictableWithBusyMp() throws Throwable {
    final TestProcessor mp = createAndGet("foo");

    // now we're going to cause the processing to be held up.
    mp.latch = new CountDownLatch(1);
    mp.evict.set(true); // allow eviction

    // sending it a message will now cause it to hang up while processing
    final TestAdaptor adaptor = context.getBean(TestAdaptor.class);
    adaptor.dispatcher.dispatchAnnotated(new ContainerTestMessage("foo"));

    final TestProcessor prototype = context.getBean(TestProcessor.class);

    // keep track of the cloneCount for later checking
    final int tmpCloneCount = prototype.cloneCount.intValue();

    // invocation count should go to 2
    assertTrue(poll(mp, o -> o.invocationCount == 2));

    // now kick off the evict in a separate thread since we expect it to hang
    // until the mp becomes unstuck.
    final AtomicBoolean evictIsComplete = new AtomicBoolean(false); // this will allow us to see the evict pass complete
    final Thread thread = new Thread(new Runnable() {
        @Override
        public void run() {
            container.evict();
            evictIsComplete.set(true);
        }
    });
    thread.start();

    // now check to make sure eviction doesn't complete.
    Thread.sleep(100); // just a little to give any mistakes a chance to work themselves through
    assertFalse(evictIsComplete.get()); // make sure eviction didn't finish

    mp.latch.countDown(); // this lets it go

    // wait until the eviction completes
    assertTrue(poll(evictIsComplete, o -> o.get()));
    Thread.sleep(100);

    assertEquals("activation count, 2nd message", 1, mp.activationCount);
    assertEquals("invocation count, 2nd message", 2, mp.invocationCount);

    adaptor.dispatcher.dispatchAnnotated(new ContainerTestMessage("foo"));
    assertTrue(poll(o -> prototype.cloneCount.intValue() > tmpCloneCount));
    Thread.sleep(1000);

    assertEquals("Clone count, 2nd message", tmpCloneCount + 1, prototype.cloneCount.intValue());
}