List of usage examples for java.util.concurrent.atomic.AtomicBoolean.set()
public final void set(boolean newValue)
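Before the full examples below, a minimal, self-contained sketch of the pattern most of them share: one thread publishes a condition by calling set(true) on a shared AtomicBoolean, and another thread observes it with get(). This sketch is illustrative only; the class name AtomicBooleanSetExample does not come from any of the source files listed here.

import java.util.concurrent.atomic.AtomicBoolean;

public class AtomicBooleanSetExample {

    public static void main(String[] args) throws InterruptedException {
        // Shared flag, initially false.
        final AtomicBoolean done = new AtomicBoolean(false);

        Thread worker = new Thread(() -> {
            // ... perform some work ...
            // set() performs a volatile write, so the new value is
            // immediately visible to any thread that calls get().
            done.set(true);
        });

        worker.start();
        worker.join();

        // Prints "done = true" once the worker has finished.
        System.out.println("done = " + done.get());
    }
}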
From source file:io.nats.client.ITClusterTest.java
@Test
public void testProperReconnectDelay() throws Exception {
    try (NatsServer s1 = runServerOnPort(1222)) {
        Options opts = new Options.Builder(defaultOptions()).dontRandomize().build();
        opts.servers = Nats.processUrlArray(testServers);

        final CountDownLatch latch = new CountDownLatch(1);
        opts.disconnectedCb = new DisconnectedCallback() {
            public void onDisconnect(ConnectionEvent event) {
                event.getConnection().setDisconnectedCallback(null);
                latch.countDown();
            }
        };

        final AtomicBoolean ccbCalled = new AtomicBoolean(false);
        opts.closedCb = new ClosedCallback() {
            public void onClose(ConnectionEvent event) {
                ccbCalled.set(true);
            }
        };

        try (Connection c = opts.connect()) {
            assertFalse(c.isClosed());

            s1.shutdown();

            // wait for disconnect
            assertTrue("Did not receive a disconnect callback message",
                    await(latch, 2, TimeUnit.SECONDS));

            // Wait, want to make sure we don't spin on reconnect to non-existent servers.
            sleep(1, TimeUnit.SECONDS);

            assertFalse("Closed CB was triggered, should not have been.", ccbCalled.get());
            assertEquals("Wrong state: " + c.getState(), c.getState(), RECONNECTING);
        }
    }
}
From source file:ca.rmen.android.networkmonitor.app.dialog.ChoiceDialogFragment.java
/**
 * @return a Dialog with a list of items, one of them possibly pre-selected.
 */
@Override
@NonNull
public Dialog onCreateDialog(Bundle savedInstanceState) {
    Log.v(TAG, "onCreateDialog: savedInstanceState = " + savedInstanceState);
    Context context = getActivity();
    AlertDialog.Builder builder = new AlertDialog.Builder(context);
    Bundle arguments = getArguments();
    builder.setTitle(arguments.getString(DialogFragmentFactory.EXTRA_TITLE));
    final int actionId = arguments.getInt(DialogFragmentFactory.EXTRA_ACTION_ID);
    int selectedItem = arguments.getInt(DialogFragmentFactory.EXTRA_SELECTED_ITEM);
    final CharSequence[] choices = arguments.getCharSequenceArray(DialogFragmentFactory.EXTRA_CHOICES);
    OnClickListener listener = null;
    final AtomicBoolean hasClicked = new AtomicBoolean(false);
    if (getActivity() instanceof DialogItemListener) {
        listener = new OnClickListener() {
            @Override
            public void onClick(DialogInterface dialog, int which) {
                FragmentActivity activity = getActivity();
                if (activity == null) {
                    Log.w(TAG, "User clicked on dialog after it was detached from activity. Monkey?");
                } else if (hasClicked.get()) {
                    Log.w(TAG, "User already clicked once on this dialog! Monkey?");
                } else {
                    hasClicked.set(true);
                    ((DialogItemListener) activity).onItemSelected(actionId, choices, which);
                }
            }
        };
    }
    // If one item is to be pre-selected, use the single choice items layout.
    if (selectedItem >= 0)
        builder.setSingleChoiceItems(choices, selectedItem, listener);
    // If no particular item is to be pre-selected, use the default list item layout.
    else
        builder.setItems(choices, listener);
    if (getActivity() instanceof OnCancelListener)
        builder.setOnCancelListener((OnCancelListener) getActivity());
    final Dialog dialog = builder.create();
    if (getActivity() instanceof OnDismissListener)
        dialog.setOnDismissListener((OnDismissListener) getActivity());
    return dialog;
}
From source file:ch.cyberduck.core.cryptomator.impl.CryptoVaultTest.java
@Test
public void testFind() throws Exception {
    final NullSession session = new NullSession(new Host(new TestProtocol())) {
        @Override
        @SuppressWarnings("unchecked")
        public <T> T _getFeature(final Class<T> type) {
            if (type == Read.class) {
                return (T) new Read() {
                    @Override
                    public InputStream read(final Path file, final TransferStatus status,
                            final ConnectionCallback callback) throws BackgroundException {
                        final String masterKey = "{\n"
                                + " \"scryptSalt\": \"NrC7QGG/ouc=\",\n"
                                + " \"scryptCostParam\": 16384,\n"
                                + " \"scryptBlockSize\": 8,\n"
                                + " \"primaryMasterKey\": \"Q7pGo1l0jmZssoQh9rXFPKJE9NIXvPbL+HcnVSR9CHdkeR8AwgFtcw==\",\n"
                                + " \"hmacMasterKey\": \"xzBqT4/7uEcQbhHFLC0YmMy4ykVKbuvJEA46p1Xm25mJNuTc20nCbw==\",\n"
                                + " \"versionMac\": \"hlNr3dz/CmuVajhaiGyCem9lcVIUjDfSMLhjppcXOrM=\",\n"
                                + " \"version\": 5\n"
                                + "}";
                        return IOUtils.toInputStream(masterKey, Charset.defaultCharset());
                    }

                    @Override
                    public boolean offset(final Path file) throws BackgroundException {
                        return false;
                    }
                };
            }
            return super._getFeature(type);
        }
    };
    final Path home = new Path("/", EnumSet.of(Path.Type.directory));
    final CryptoVault vault = new CryptoVault(home, new DisabledPasswordStore());
    assertEquals(home, vault.load(session, new DisabledPasswordCallback() {
        @Override
        public Credentials prompt(final Host bookmark, final String title, final String reason,
                final LoginOptions options) throws LoginCanceledException {
            return new VaultCredentials("vault");
        }
    }).getHome());
    assertEquals(Vault.State.open, vault.getState());
    final AtomicBoolean found = new AtomicBoolean();
    assertEquals(vault, new DefaultVaultRegistry(new DisabledPasswordCallback()) {
        protected Vault find(final Session<?> session, final Path directory,
                final LoadingVaultLookupListener listener) throws VaultUnlockCancelException {
            found.set(true);
            return vault;
        }
    }.find(session, home));
    assertTrue(found.get());
    vault.close();
}
From source file:org.apache.nifi.processors.csv.ExtractCSVHeader.java
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final FlowFile original = session.get();
    if (original == null) {
        return;
    }

    final AtomicBoolean lineFound = new AtomicBoolean(false);
    final Map<String, String> attrs = new HashMap<>();
    final AtomicInteger headerLength = new AtomicInteger(0);

    session.read(original, new InputStreamCallback() {
        @Override
        public void process(InputStream inputStream) throws IOException {
            // TODO expose the charset property?
            LineIterator iterator = IOUtils.lineIterator(inputStream, UTF_8);
            if (iterator.hasNext()) {
                lineFound.set(true);
                final String header = iterator.nextLine();

                final String format = context.getProperty(PROP_FORMAT).getValue();
                final String delimiter = context.getProperty(PROP_DELIMITER)
                        .evaluateAttributeExpressions(original).getValue();
                final String prefix = context.getProperty(PROP_SCHEMA_ATTR_PREFIX)
                        .evaluateAttributeExpressions(original).getValue();

                attrs.put(prefix + ATTR_HEADER_ORIGINAL, header);

                // TODO validate delimiter in the callback first
                final CSVFormat csvFormat = buildFormat(format, delimiter,
                        true, // we assume first line is the header
                        null); // no custom header
                final CSVParser parser = csvFormat.parse(new StringReader(header));
                final Map<String, Integer> headers = parser.getHeaderMap();
                final int columnCount = headers.size();
                attrs.put(prefix + ATTR_HEADER_COLUMN_COUNT, String.valueOf(columnCount));
                for (Map.Entry<String, Integer> h : headers.entrySet()) {
                    // CSV columns are 1-based in Excel
                    attrs.put(prefix + (h.getValue() + 1), h.getKey());
                }

                // strip the header and send to the 'content' relationship
                if (StringUtils.isNotBlank(header)) {
                    int hLength = header.length();
                    // move past the new line if there are more lines
                    if (original.getSize() > hLength + 1) {
                        hLength++;
                    }
                    headerLength.set(hLength);
                }
            }
        }
    });

    if (lineFound.get()) {
        FlowFile ff = session.putAllAttributes(original, attrs);
        int offset = headerLength.get();
        if (offset > 0) {
            FlowFile contentOnly = session.clone(ff, offset, original.getSize() - offset);
            session.transfer(contentOnly, REL_CONTENT);
        }
        session.transfer(ff, REL_ORIGINAL);
    } else {
        session.transfer(original, REL_FAILURE);
    }
}
From source file:com.vmware.admiral.request.ContainerControlLoopServiceTest.java
@SuppressWarnings("unchecked")
@Test
public void redeploymentOfSingleContainers() throws Throwable {
    containerDescription2 = createContainerDescription(false);

    // provision 3 single containers, 2 of them in ERROR state
    ContainerState state = null;
    for (int i = 0; i < SINGLE_CONTAINERS_TO_BE_PROVISIONED; i++) {
        state = provisionContainer(containerDescription2.documentSelfLink);
        if (i < SINGLE_CONTAINERS_TO_BE_PROVISIONED - 1) {
            state.powerState = PowerState.ERROR;
            doPut(state);
        }
    }

    Map<String, List<String>> containersPerContextId = new HashMap<>();
    retrieveContainerStates(containerDescription2.documentSelfLink).thenAccept(containerStates -> {
        containerStates.stream().forEach(cs -> {
            containersPerContextId.put(cs.customProperties.get(RequestUtils.FIELD_NAME_CONTEXT_ID_KEY),
                    Arrays.asList(cs.documentSelfLink));
        });
    });

    doOperation(new ContainerControlLoopState(),
            UriUtils.buildUri(host, ContainerControlLoopService.CONTROL_LOOP_INFO_LINK), false,
            Service.Action.PATCH);

    Map<String, List<String>> redeployedContainersPerContextId = new HashMap<>();
    AtomicBoolean containerFromDesc2Redeployed = new AtomicBoolean(false);

    waitFor(() -> {
        // get all containers from containerDescription2
        retrieveContainerStates(containerDescription2.documentSelfLink).thenAccept(containerStates -> {
            long healthyContainers = containerStates.stream()
                    .filter(cs -> PowerState.RUNNING.equals(cs.powerState)).count();
            host.log("Healthy containers from %s : %d", containerDescription2.documentSelfLink,
                    healthyContainers);

            containerFromDesc2Redeployed.set(SINGLE_CONTAINERS_TO_BE_PROVISIONED == healthyContainers
                    && SINGLE_CONTAINERS_TO_BE_PROVISIONED == containerStates.size());

            containerStates.stream().forEach(cs -> {
                redeployedContainersPerContextId.put(
                        cs.customProperties.get(RequestUtils.FIELD_NAME_CONTEXT_ID_KEY),
                        Arrays.asList(cs.documentSelfLink));
            });
        });

        if (containerFromDesc2Redeployed.get()) {
            containersPerContextId.entrySet().stream().forEach(m -> {
                String contextId = m.getKey();
                List<String> redeployedContainers = redeployedContainersPerContextId.get(contextId);
                host.log("Redeployed container: %s -> %s", StringUtils.join(m.getValue()),
                        StringUtils.join(redeployedContainers));
            });
        }

        return containerFromDesc2Redeployed.get();
    });
}
From source file:com.quartzdesk.executor.dao.AbstractDao.java
/**
 * Checks if the specified table exists in the specified schema and returns true if
 * it exists, false otherwise. This method tries to look up the table using both
 * lower-case and upper-case schema and table names because some databases seem to
 * require the names to be in upper-case (DB2, Oracle), whereas other databases require
 * the names to be in lower-case.
 *
 * @param session a Hibernate session.
 * @param schemaName an optional schema name where to look for the table name.
 * @param tableName a table name.
 * @return true if the table exists, false otherwise.
 */
public boolean tableExists(Session session, final String schemaName, final String tableName) {
    final AtomicBoolean tableExists = new AtomicBoolean(false);

    session.doWork(new Work() {
        @Override
        public void execute(Connection connection) throws SQLException {
            log.debug("Checking if table '{}' exists.", tableName);

            DatabaseMetaData metaData = connection.getMetaData();

            // 1. attempt - try schema and table name in lower-case (does not work in DB2 and Oracle)
            ResultSet res = metaData.getTables(null,
                    schemaName == null ? null : schemaName.toLowerCase(Locale.US),
                    tableName.toLowerCase(Locale.US), new String[] { "TABLE" });

            tableExists.set(res.next());
            DbUtils.close(res);

            if (tableExists.get()) {
                log.debug("Table '{}' exists.", tableName);
            } else {
                // 2. attempt - try schema and table name in upper-case (required for DB2 and Oracle)
                res = metaData.getTables(null,
                        schemaName == null ? null : schemaName.toUpperCase(Locale.US),
                        tableName.toUpperCase(Locale.US), new String[] { "TABLE" });

                tableExists.set(res.next());
                DbUtils.close(res);

                if (tableExists.get()) {
                    log.debug("Table '{}' exists.", tableName);
                } else {
                    log.debug("Table '{}' does not exist.", tableName);
                }
            }
        }
    });

    return tableExists.get();
}
From source file:org.apache.nifi.processors.msgpack.MessagePackPack.java
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final ObjectMapper reader = new ObjectMapper();
    final ObjectMapper writer = new ObjectMapper(new MessagePackFactory());
    writer.setAnnotationIntrospector(new JsonArrayFormat());

    final AtomicBoolean failed = new AtomicBoolean(false);
    flowFile = session.write(flowFile, new StreamCallback() {
        @Override
        public void process(InputStream is, OutputStream os) throws IOException {
            try (final OutputStream msgpack = new BufferedOutputStream(os)) {
                final JsonNode json = reader.readTree(is);
                final byte[] bytes = writer.writeValueAsBytes(json);
                msgpack.write(bytes);
                msgpack.flush();
            } catch (JsonProcessingException e) {
                getLogger().error(e.getMessage(), e);
                failed.set(true);
            }
        }
    });

    if (failed.get()) {
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    flowFile = session.putAttribute(flowFile, CoreAttributes.MIME_TYPE.key(), MIME_TYPE);
    flowFile = session.putAttribute(flowFile, MIME_EXT_KEY, MIME_EXT);
    session.transfer(flowFile, REL_SUCCESS);
}
From source file:com.jkoolcloud.tnt4j.streams.inputs.TNTParseableInputStream.java
/**
 * {@inheritDoc}
 * <p>
 * Performs parsing of raw activity data to {@link ActivityInfo} data package, which can be transformed to
 * {@link com.jkoolcloud.tnt4j.core.Trackable} object and sent to JKool Cloud using TNT4J and JESL APIs.
 */
@Override
protected void processActivityItem(T item, AtomicBoolean failureFlag) throws Exception {
    notifyProgressUpdate(incrementCurrentActivitiesCount(), getTotalActivities());

    ActivityInfo ai = makeActivityInfo(item);
    if (ai == null) {
        logger().log(OpLevel.WARNING,
                StreamsResources.getString(StreamsResources.RESOURCE_BUNDLE_NAME, "TNTInputStream.no.parser"),
                item);
        incrementSkippedActivitiesCount();
        if (haltIfNoParser) {
            failureFlag.set(true);
            notifyFailed(StreamsResources.getStringFormatted(StreamsResources.RESOURCE_BUNDLE_NAME,
                    "TNTInputStream.no.parser", item), null, null);
            halt(false);
        } else {
            notifyStreamEvent(OpLevel.WARNING,
                    StreamsResources.getStringFormatted(StreamsResources.RESOURCE_BUNDLE_NAME,
                            "TNTInputStream.could.not.parse.activity", item),
                    item);
        }
    } else {
        if (!ai.isFilteredOut()) {
            getOutput().logItem(ai);
        } else {
            logger().log(OpLevel.DEBUG,
                    StreamsResources.getString(StreamsResources.RESOURCE_BUNDLE_NAME,
                            "TNTInputStream.activity.filtered.out"),
                    ai);
        }
    }
}
From source file:com.vmware.admiral.request.ContainerControlLoopServiceTest.java
@SuppressWarnings("unchecked")
@Test
public void testRedeploymentOfAContainerInCluster() throws Throwable {
    containerDescription1 = createContainerDescription(false);
    containerDescription1._cluster = 2;
    doPut(containerDescription1);

    // provision 2 containers in cluster
    ContainerState state = provisionContainer(containerDescription1.documentSelfLink);

    // change the power state of one of them
    state.powerState = PowerState.ERROR;
    doPut(state);

    Map<String, List<String>> containersPerContextId = new HashMap<>();
    retrieveContainerStates(containerDescription1.documentSelfLink).thenAccept(containerStates -> {
        List<String> containersFromDesc1 = containerStates.stream().map(cs -> cs.documentSelfLink)
                .collect(Collectors.toList());
        assertEquals(2, containersFromDesc1.size());
        // clustered containers have same context_id
        containersPerContextId.put(
                containerStates.get(0).customProperties.get(RequestUtils.FIELD_NAME_CONTEXT_ID_KEY),
                containersFromDesc1);
    });

    doOperation(new ContainerControlLoopState(),
            UriUtils.buildUri(host, ContainerControlLoopService.CONTROL_LOOP_INFO_LINK), false,
            Service.Action.PATCH);

    Map<String, List<String>> redeployedContainersPerContextId = new HashMap<>();
    AtomicBoolean containerFromDesc1Redeployed = new AtomicBoolean(false);

    waitFor(() -> {
        // get all containers from containerDescription1
        retrieveContainerStates(containerDescription1.documentSelfLink).thenAccept(containerStates -> {
            long healthyContainers = containerStates.stream()
                    .filter(cs -> PowerState.RUNNING.equals(cs.powerState)).count();
            host.log("Healthy containers from %s : %d", containerDescription1.documentSelfLink,
                    healthyContainers);

            containerFromDesc1Redeployed.set(containerDescription1._cluster == healthyContainers
                    && containerDescription1._cluster == containerStates.size());

            List<String> containersFromDesc1 = containerStates.stream().map(cs -> cs.documentSelfLink)
                    .collect(Collectors.toList());
            redeployedContainersPerContextId.put(
                    containerStates.get(0).customProperties.get(RequestUtils.FIELD_NAME_CONTEXT_ID_KEY),
                    containersFromDesc1);
        });

        if (containerFromDesc1Redeployed.get()) {
            containersPerContextId.entrySet().stream().forEach(m -> {
                String contextId = m.getKey();
                List<String> redeployedContainers = redeployedContainersPerContextId.get(contextId);
                host.log("Redeployed container: %s -> %s", StringUtils.join(m.getValue()),
                        StringUtils.join(redeployedContainers));
            });
        }

        return containerFromDesc1Redeployed.get();
    });
}
From source file:org.apache.hadoop.corona.TestMiniCoronaRunJob.java
private void runMultipleSleepJobs(final JobConf conf, final int maps, final int reduces, int numJobs)
        throws Exception {
    final CountDownLatch startSignal = new CountDownLatch(1);
    final CountDownLatch endSignal = new CountDownLatch(numJobs);
    final AtomicBoolean failed = new AtomicBoolean(false);
    for (int i = 0; i < numJobs; ++i) {
        Runnable action = new Runnable() {
            @Override
            public void run() {
                try {
                    startSignal.await();
                    runSleepJob(conf, maps, reduces);
                    LOG.info("Sleep Job finished");
                    endSignal.countDown();
                } catch (Exception e) {
                    LOG.error("Exception in running SleepJob", e);
                    failed.set(true);
                    endSignal.countDown();
                }
            }
        };
        new Thread(action).start();
    }
    // Starting all jobs at the same time
    startSignal.countDown();
    // Waiting for all jobs to finish
    endSignal.await();
    if (failed.get()) {
        fail("Some of the Sleepjobs failed");
    }
}