List of usage examples for java.util.concurrent.atomic AtomicReference set
public final void set(V newValue)
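Every example below ultimately relies on the same contract: set(V newValue) replaces the stored reference unconditionally, with volatile-write memory semantics, so a subsequent get() in any thread observes the new value. A minimal sketch of the method in isolation, before the real-world uses:

import java.util.concurrent.atomic.AtomicReference;

public class SetBasics {
    public static void main(String[] args) {
        AtomicReference<String> ref = new AtomicReference<>("initial");
        ref.set("updated"); // unconditional replace, immediately visible to other threads
        System.out.println(ref.get()); // updated
    }
}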
From source file:org.commonjava.maven.galley.cache.infinispan.FastLocalCacheProvider.java
/**
 * For file reading, first check whether the local cache already has the file. If yes, read
 * directly from the local cache. If not, check the NFS volume for the file, copy it to the
 * local cache if found, then read from the local cache again.
 *
 * @param resource - the resource to be read
 * @return - the input stream for further reading
 * @throws IOException
 */
@Override
public InputStream openInputStream(final ConcreteResource resource) throws IOException {
    final String pathKey = getKeyForResource(resource);

    // This lock ensures the local resource can still be opened successfully when the local copy is
    // missing but the NFS copy is not, which means an NFS->local copy will be done.
    final Object copyLock = new Object();
    // A flag to mark whether the local resource can be opened now, or whether we need to wait
    // for the copy thread to complete its work.
    final AtomicBoolean canStreamOpen = new AtomicBoolean(false);
    // A second flag to indicate whether the copy task failed.
    final AtomicBoolean copyExceOccurs = new AtomicBoolean(false);

    // This copy task is responsible for the NFS->local copy and will run in another thread,
    // which can use PartyLine's concurrent read/write support on the local cache to boost
    // the i/o operation.
    final Runnable copyNFSTask = () -> {
        InputStream nfsIn = null;
        OutputStream localOut = null;
        try {
            lockByISPN(nfsOwnerCache, resource, LockLevel.write);
            File nfsFile = getNFSDetachedFile(resource);
            if (!nfsFile.exists()) {
                logger.trace("NFS file does not exist too.");
                copyExceOccurs.set(true);
                return;
            }
            nfsIn = new FileInputStream(nfsFile);
            localOut = plCacheProvider.openOutputStream(resource);
            canStreamOpen.set(true); // set it ASAP so the readers can start reading before copy completes
            synchronized (copyLock) {
                copyLock.notifyAll();
            }
            IOUtils.copy(nfsIn, localOut);
            logger.trace("NFS copy to local cache done.");
        } catch (NotSupportedException | SystemException | IOException | InterruptedException e) {
            copyExceOccurs.set(true);
            if (e instanceof IOException) {
                final String errorMsg = String.format(
                        "[galley] got i/o error when doing the NFS->Local copy for resource %s",
                        resource.toString());
                logger.warn(errorMsg, e);
            } else if (e instanceof InterruptedException) {
                final String errorMsg = String.format(
                        "[galley] got thread interrupted error for partyline file locking when doing the NFS->Local copy for resource %s",
                        resource.toString());
                throw new IllegalStateException(errorMsg, e);
            } else {
                final String errorMsg = String.format(
                        "[galley] Cache TransactionManager got error, locking key is %s, resource is %s",
                        pathKey, resource.toString());
                logger.error(errorMsg, e);
                throw new IllegalStateException(errorMsg, e);
            }
        } finally {
            unlockByISPN(nfsOwnerCache, false, resource);
            IOUtils.closeQuietly(nfsIn);
            IOUtils.closeQuietly(localOut);
            cacheLocalFilePath(resource);
            synchronized (copyLock) {
                copyLock.notifyAll();
            }
        }
    };

    // This lock is used to control concurrent operations on the resource, like concurrent delete and read/write.
    // Using "this" as the lock is heavy; consider using the transfer for the resource as the lock for each thread.
    final AtomicReference<IOException> taskException = new AtomicReference<>();
    final InputStream stream = tryLockAnd(resource, DEFAULT_WAIT_FOR_TRANSFER_LOCK_SECONDS, TimeUnit.SECONDS,
            r -> {
                boolean localExisted = plCacheProvider.exists(r);
                if (localExisted) {
                    logger.trace("local cache already exists, will directly get input stream from it.");
                    try {
                        return plCacheProvider.openInputStream(r);
                    } catch (IOException e) {
                        taskException.set(e);
                        return null;
                    }
                } else {
                    logger.trace("local cache does not exist, will start to copy from NFS cache");
                    executor.execute(copyNFSTask);
                }
                synchronized (copyLock) {
                    while (!canStreamOpen.get()) {
                        if (copyExceOccurs.get()) {
                            return null;
                        }
                        try {
                            copyLock.wait();
                        } catch (InterruptedException e) {
                            logger.warn("[galley] NFS copy thread is interrupted by other threads", e);
                        }
                    }
                    logger.trace("the NFS->local copy completed, will get the input stream from local cache");
                    try {
                        return plCacheProvider.openInputStream(r);
                    } catch (IOException e) {
                        taskException.set(e);
                        return null;
                    }
                }
            });
    propagateException(taskException.get());
    return stream;
}
From source file:org.apache.nifi.processors.standard.Wait.java
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final ComponentLog logger = getLogger();

    // Signal id is computed from attribute 'RELEASE_SIGNAL_IDENTIFIER' with expression language support
    final PropertyValue signalIdProperty = context.getProperty(RELEASE_SIGNAL_IDENTIFIER);
    final Integer bufferCount = context.getProperty(WAIT_BUFFER_COUNT).asInteger();

    final Map<Relationship, List<FlowFile>> processedFlowFiles = new HashMap<>();
    final Function<Relationship, List<FlowFile>> getFlowFilesFor = r -> processedFlowFiles.computeIfAbsent(r,
            k -> new ArrayList<>());

    final AtomicReference<String> targetSignalId = new AtomicReference<>();
    final AtomicInteger bufferedCount = new AtomicInteger(0);
    final List<FlowFile> failedFilteringFlowFiles = new ArrayList<>();
    final Supplier<FlowFileFilter.FlowFileFilterResult> acceptResultSupplier = () -> bufferedCount
            .incrementAndGet() == bufferCount ? ACCEPT_AND_TERMINATE : ACCEPT_AND_CONTINUE;

    final List<FlowFile> flowFiles = session.get(f -> {
        final String fSignalId = signalIdProperty.evaluateAttributeExpressions(f).getValue();

        // if the computed value is null or empty, we transfer the FlowFile to the failure relationship
        if (StringUtils.isBlank(fSignalId)) {
            // We can't penalize f before getting it from the session, so keep it in a temporary list.
            logger.error("FlowFile {} has no attribute for given Release Signal Identifier", new Object[] { f });
            failedFilteringFlowFiles.add(f);
            return ACCEPT_AND_CONTINUE;
        }

        final String targetSignalIdStr = targetSignalId.get();
        if (targetSignalIdStr == null) {
            // This is the first one.
            targetSignalId.set(fSignalId);
            return acceptResultSupplier.get();
        }

        if (targetSignalIdStr.equals(fSignalId)) {
            return acceptResultSupplier.get();
        }

        return REJECT_AND_CONTINUE;
    });

    final String attributeCopyMode = context.getProperty(ATTRIBUTE_COPY_MODE).getValue();
    final boolean replaceOriginalAttributes = ATTRIBUTE_COPY_REPLACE.getValue().equals(attributeCopyMode);
    final AtomicReference<Signal> signalRef = new AtomicReference<>();

    final Consumer<FlowFile> transferToFailure = flowFile -> {
        flowFile = session.penalize(flowFile);
        getFlowFilesFor.apply(REL_FAILURE).add(flowFile);
    };

    final Consumer<Entry<Relationship, List<FlowFile>>> transferFlowFiles = routedFlowFiles -> {
        Relationship relationship = routedFlowFiles.getKey();
        if (REL_WAIT.equals(relationship)) {
            final String waitMode = context.getProperty(WAIT_MODE).getValue();
            if (WAIT_MODE_KEEP_IN_UPSTREAM.getValue().equals(waitMode)) {
                // Transfer to self.
                relationship = Relationship.SELF;
            }
        }
        final List<FlowFile> flowFilesWithSignalAttributes = routedFlowFiles.getValue().stream()
                .map(f -> copySignalAttributes(session, f, signalRef.get(), replaceOriginalAttributes))
                .collect(Collectors.toList());
        session.transfer(flowFilesWithSignalAttributes, relationship);
    };

    failedFilteringFlowFiles.forEach(f -> {
        flowFiles.remove(f);
        transferToFailure.accept(f);
    });

    if (flowFiles.isEmpty()) {
        // If there was nothing but failed FlowFiles while filtering, transfer those and end immediately.
        processedFlowFiles.entrySet().forEach(transferFlowFiles);
        return;
    }

    // the cache client used to interact with the distributed cache
    final AtomicDistributedMapCacheClient cache = context.getProperty(DISTRIBUTED_CACHE_SERVICE)
            .asControllerService(AtomicDistributedMapCacheClient.class);
    final WaitNotifyProtocol protocol = new WaitNotifyProtocol(cache);

    final String signalId = targetSignalId.get();
    final Signal signal;

    // get notifying signal
    try {
        signal = protocol.getSignal(signalId);
        signalRef.set(signal);
    } catch (final IOException e) {
        throw new ProcessException(String.format("Failed to get signal for %s due to %s", signalId, e), e);
    }

    String targetCounterName = null;
    long targetCount = 1;
    int releasableFlowFileCount = 1;

    final List<FlowFile> candidates = new ArrayList<>();
    for (FlowFile flowFile : flowFiles) {
        // Set wait start timestamp if it's not set yet
        String waitStartTimestamp = flowFile.getAttribute(WAIT_START_TIMESTAMP);
        if (waitStartTimestamp == null) {
            waitStartTimestamp = String.valueOf(System.currentTimeMillis());
            flowFile = session.putAttribute(flowFile, WAIT_START_TIMESTAMP, waitStartTimestamp);
        }

        long lWaitStartTimestamp;
        try {
            lWaitStartTimestamp = Long.parseLong(waitStartTimestamp);
        } catch (NumberFormatException nfe) {
            logger.error("{} has an invalid value '{}' on FlowFile {}",
                    new Object[] { WAIT_START_TIMESTAMP, waitStartTimestamp, flowFile });
            transferToFailure.accept(flowFile);
            continue;
        }

        // check for expiration
        long expirationDuration = context.getProperty(EXPIRATION_DURATION).asTimePeriod(TimeUnit.MILLISECONDS);
        long now = System.currentTimeMillis();
        if (now > (lWaitStartTimestamp + expirationDuration)) {
            logger.info("FlowFile {} expired after {}ms", new Object[] { flowFile, (now - lWaitStartTimestamp) });
            getFlowFilesFor.apply(REL_EXPIRED).add(flowFile);
            continue;
        }

        // If there's no signal yet, then we don't have to evaluate target counts. Return immediately.
        if (signal == null) {
            if (logger.isDebugEnabled()) {
                logger.debug("No release signal found for {} on FlowFile {} yet",
                        new Object[] { signalId, flowFile });
            }
            getFlowFilesFor.apply(REL_WAIT).add(flowFile);
            continue;
        }

        // Fix target counter name and count from current FlowFile, if those are not set yet.
        if (candidates.isEmpty()) {
            targetCounterName = context.getProperty(SIGNAL_COUNTER_NAME).evaluateAttributeExpressions(flowFile)
                    .getValue();
            try {
                targetCount = Long.valueOf(context.getProperty(TARGET_SIGNAL_COUNT)
                        .evaluateAttributeExpressions(flowFile).getValue());
            } catch (final NumberFormatException e) {
                transferToFailure.accept(flowFile);
                logger.error("Failed to parse targetCount when processing {} due to {}",
                        new Object[] { flowFile, e }, e);
                continue;
            }
            try {
                releasableFlowFileCount = Integer.valueOf(context.getProperty(RELEASABLE_FLOWFILE_COUNT)
                        .evaluateAttributeExpressions(flowFile).getValue());
            } catch (final NumberFormatException e) {
                transferToFailure.accept(flowFile);
                logger.error("Failed to parse releasableFlowFileCount when processing {} due to {}",
                        new Object[] { flowFile, e }, e);
                continue;
            }
        }

        // FlowFile is now validated and added to candidates.
        candidates.add(flowFile);
    }

    boolean waitCompleted = false;
    boolean waitProgressed = false;
    if (signal != null && !candidates.isEmpty()) {

        if (releasableFlowFileCount > 1) {
            signal.releaseCandidatese(targetCounterName, targetCount, releasableFlowFileCount, candidates,
                    released -> getFlowFilesFor.apply(REL_SUCCESS).addAll(released),
                    waiting -> getFlowFilesFor.apply(REL_WAIT).addAll(waiting));
            waitProgressed = !getFlowFilesFor.apply(REL_SUCCESS).isEmpty();

        } else {
            // releasableFlowFileCount = 0 or 1
            boolean reachedTargetCount = StringUtils.isBlank(targetCounterName)
                    ? signal.isTotalCountReached(targetCount)
                    : signal.isCountReached(targetCounterName, targetCount);
            if (reachedTargetCount) {
                if (releasableFlowFileCount == 0) {
                    getFlowFilesFor.apply(REL_SUCCESS).addAll(candidates);
                } else {
                    // releasableFlowFileCount = 1
                    getFlowFilesFor.apply(REL_SUCCESS).add(candidates.remove(0));
                    getFlowFilesFor.apply(REL_WAIT).addAll(candidates);
                    // If releasableFlowFileCount == 0, leave signal as it is,
                    // so that any number of FlowFiles can be released as long as the target count condition matches.
                    waitCompleted = true;
                }
            } else {
                getFlowFilesFor.apply(REL_WAIT).addAll(candidates);
            }
        }
    }

    // Transfer FlowFiles.
    processedFlowFiles.entrySet().forEach(transferFlowFiles);

    // Update signal if needed.
    try {
        if (waitCompleted) {
            protocol.complete(signalId);
        } else if (waitProgressed) {
            protocol.replace(signal);
        }
    } catch (final IOException e) {
        session.rollback();
        throw new ProcessException(
                String.format("Unable to communicate with cache while updating %s due to %s", signalId, e), e);
    }
}
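Here targetSignalId is a mutable slot inside the FlowFile filter lambda, which may only capture effectively final variables: the first accepted FlowFile's signal id is recorded once, and every later FlowFile is compared against it. A toy version of that first-seen grouping, assuming made-up "group:value" strings:

import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;

public class FirstSeenFilter {
    public static void main(String[] args) {
        final AtomicReference<String> firstGroup = new AtomicReference<>();

        // Keep only the items that share a group with the first item seen.
        List<String> batch = List.of("a:1", "a:2", "b:3", "a:4").stream().filter(item -> {
            String group = item.split(":")[0];
            // compareAndSet records the group of the first item exactly once;
            // calling set(...) here is legal because the reference itself is effectively final.
            firstGroup.compareAndSet(null, group);
            return group.equals(firstGroup.get());
        }).collect(Collectors.toList());

        System.out.println(batch); // [a:1, a:2, a:4]
    }
}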
From source file:com.igormaznitsa.jhexed.swing.editor.ui.MainForm.java
@Override
public HexLayer makeHexLayer(final String name, final String comment) {
    if (name == null) {
        throw new NullPointerException("Name must not be null");
    }
    if (comment == null) {
        throw new NullPointerException("Comments must not be null");
    }

    final AtomicReference<HexLayer> result = new AtomicReference<HexLayer>();

    final Runnable run = new Runnable() {
        @Override
        public void run() {
            result.set(layers.addLayer(layers.makeNewLayerField(name, comment)));
        }
    };

    if (SwingUtilities.isEventDispatchThread()) {
        run.run();
    } else {
        try {
            SwingUtilities.invokeAndWait(run);
        } catch (Exception ex) {
            throw new RuntimeException(ex);
        }
    }
    return result.get();
}
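The AtomicReference ferries a result out of a Runnable that must execute on the Swing event dispatch thread, since invokeAndWait gives the caller no return value. A stripped-down sketch of the same idiom, with a plain JLabel standing in for the editor's HexLayer:

import java.util.concurrent.atomic.AtomicReference;
import javax.swing.JLabel;
import javax.swing.SwingUtilities;

public class EdtResult {
    // Builds a Swing component on the EDT and hands it back to the calling thread.
    static JLabel makeLabelOnEdt(final String text) {
        final AtomicReference<JLabel> result = new AtomicReference<>();
        final Runnable run = () -> result.set(new JLabel(text));
        if (SwingUtilities.isEventDispatchThread()) {
            run.run();
        } else {
            try {
                SwingUtilities.invokeAndWait(run); // blocks until the EDT has run the task
            } catch (Exception ex) {
                throw new RuntimeException(ex);
            }
        }
        return result.get();
    }

    public static void main(String[] args) {
        System.out.println(makeLabelOnEdt("hello").getText());
    }
}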
From source file:gda.util.persistence.LocalParametersTest.java
public void testThreadSafety() throws Exception {
    final File testScratchDir = TestUtils.createClassScratchDirectory(LocalParametersTest.class);
    final String configDir = testScratchDir.getAbsolutePath();
    final String configName = "threadsafety";

    // Delete config from disk, if it exists
    final File configFile = new File(configDir, configName + ".xml");
    configFile.delete();

    final AtomicReference<Exception> error = new AtomicReference<Exception>();

    final int numThreads = 4;
    final long threadRunTimeInMs = 5000;

    final CountDownLatch startLatch = new CountDownLatch(1);
    final CountDownLatch finishLatch = new CountDownLatch(numThreads);

    for (int i = 0; i < numThreads; i++) {
        Thread t = new Thread() {
            @Override
            public void run() {
                try {
                    final FileConfiguration config = LocalParameters.getThreadSafeXmlConfiguration(configDir,
                            configName, true);

                    // Wait for signal to start
                    startLatch.await();

                    final String propertyName = Thread.currentThread().getName();
                    final long startTime = System.currentTimeMillis();

                    while (true) {
                        // Finish if we've exceeded the run time
                        long elapsedTime = System.currentTimeMillis() - startTime;
                        if (elapsedTime >= threadRunTimeInMs) {
                            break;
                        }

                        // Finish if another thread has generated an exception
                        if (error.get() != null) {
                            break;
                        }

                        config.setProperty(propertyName, System.currentTimeMillis());
                        config.save();
                    }
                } catch (Exception e) {
                    e.printStackTrace(System.err);
                    error.set(e);
                }

                finishLatch.countDown();
            }
        };
        t.start();
    }

    // Start all threads
    final long startTime = System.currentTimeMillis();
    startLatch.countDown();

    // Wait for all threads to finish
    finishLatch.await();
    final long endTime = System.currentTimeMillis();
    final long elapsedTime = (endTime - startTime);
    System.out.printf("Finished after %dms%n", elapsedTime);

    // No error should have been thrown
    assertNull("An exception was thrown by one of the threads", error.get());
}
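error is a cross-thread failure flag: any worker that throws records its exception, the other workers notice and stop, and the main thread asserts after the finish latch opens. The skeleton of that test idiom, with the real workload replaced by a placeholder:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;

public class WorkerErrorCapture {
    public static void main(String[] args) throws InterruptedException {
        final AtomicReference<Exception> error = new AtomicReference<>();
        final CountDownLatch done = new CountDownLatch(2);

        Runnable work = () -> {
            try {
                // ... exercise the code under test here (placeholder, never actually fails) ...
                if (System.nanoTime() % 2 == -1) {
                    throw new IllegalStateException("boom");
                }
            } catch (Exception e) {
                error.set(e); // make the failure visible to the main thread
            } finally {
                done.countDown();
            }
        };

        new Thread(work).start();
        new Thread(work).start();
        done.await();

        if (error.get() != null) {
            throw new AssertionError("a worker thread failed", error.get());
        }
        System.out.println("all workers finished cleanly");
    }
}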
From source file:com.igormaznitsa.zxpoly.MainForm.java
private File chooseFileForOpen(final String title, final File initial,
        final AtomicReference<FileFilter> selectedFilter, final FileFilter... filter) {
    final JFileChooser chooser = new JFileChooser(initial);
    for (final FileFilter f : filter) {
        chooser.addChoosableFileFilter(f);
    }
    chooser.setAcceptAllFileFilterUsed(false);
    chooser.setMultiSelectionEnabled(false);
    chooser.setDialogTitle(title);
    chooser.setFileFilter(filter[0]);
    chooser.setFileSelectionMode(JFileChooser.FILES_ONLY);

    final File result;
    if (chooser.showOpenDialog(this) == JFileChooser.APPROVE_OPTION) {
        result = chooser.getSelectedFile();
        if (selectedFilter != null) {
            selectedFilter.set(chooser.getFileFilter());
        }
    } else {
        result = null;
    }
    return result;
}
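Passed as a parameter, the AtomicReference serves as an "out parameter": the method returns the chosen file while reporting the selected filter on the side. The same shape in miniature, using invented key=value parsing instead of a file chooser:

import java.util.concurrent.atomic.AtomicReference;

public class OutParameter {
    // Parses "key=value"; returns the key and, when the caller asks for it,
    // reports the value through the AtomicReference "out parameter".
    static String parseKey(String pair, AtomicReference<String> valueOut) {
        int eq = pair.indexOf('=');
        if (valueOut != null) {
            valueOut.set(pair.substring(eq + 1));
        }
        return pair.substring(0, eq);
    }

    public static void main(String[] args) {
        AtomicReference<String> value = new AtomicReference<>();
        String key = parseKey("color=red", value);
        System.out.println(key + " -> " + value.get()); // color -> red
    }
}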
From source file:org.hawkular.metrics.core.service.MetricsServiceITest.java
@Test
public void createAndFindMetrics() throws Exception {
    Metric<Double> em1 = new Metric<>(new MetricId<>("t1", GAUGE, "em1"));
    metricsService.createMetric(em1).toBlocking().lastOrDefault(null);
    Metric<Double> actual = metricsService.<Double>findMetric(em1.getMetricId()).toBlocking()
            .lastOrDefault(null);
    assertNotNull(actual);
    Metric<Double> em2 = new Metric<>(em1.getMetricId(), 7);
    assertEquals(actual, em2, "The metric does not match the expected value");

    Metric<Double> m1 = new Metric<>(new MetricId<>("t1", GAUGE, "m1"), ImmutableMap.of("a1", "1", "a2", "2"),
            24);
    metricsService.createMetric(m1).toBlocking().lastOrDefault(null);

    actual = metricsService.<Double>findMetric(m1.getMetricId()).toBlocking().last();
    assertEquals(actual, m1, "The metric does not match the expected value");

    Metric<AvailabilityType> m2 = new Metric<>(new MetricId<>("t1", AVAILABILITY, "m2"),
            ImmutableMap.of("a3", "3", "a4", "3"), DEFAULT_TTL);
    metricsService.createMetric(m2).toBlocking().lastOrDefault(null);

    // Find definitions with given tags
    Map<String, String> tagMap = new HashMap<>();
    tagMap.putAll(ImmutableMap.of("a1", "1", "a2", "2"));
    tagMap.putAll(ImmutableMap.of("a3", "3", "a4", "3"));

    // Test that distinct filtering does not remove same name from different types
    Metric<Double> gm2 = new Metric<>(new MetricId<>("t1", GAUGE, "m2"), ImmutableMap.of("a3", "3", "a4", "3"),
            null);
    metricsService.createMetric(gm2).toBlocking().lastOrDefault(null);

    Metric<AvailabilityType> actualAvail = metricsService.<AvailabilityType>findMetric(m2.getMetricId())
            .toBlocking().last();
    assertEquals(actualAvail, m2, "The metric does not match the expected value");

    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicReference<Throwable> exceptionRef = new AtomicReference<>();
    metricsService.createMetric(m1).subscribe(nullArg -> {
    }, t -> {
        exceptionRef.set(t);
        latch.countDown();
    }, latch::countDown);
    latch.await(10, TimeUnit.SECONDS);
    assertTrue(exceptionRef.get() != null && exceptionRef.get() instanceof MetricAlreadyExistsException,
            "Expected a " + MetricAlreadyExistsException.class.getSimpleName() + " to be thrown");

    Metric<Double> m3 = new Metric<>(new MetricId<>("t1", GAUGE, "m3"), emptyMap(), 24);
    metricsService.createMetric(m3).toBlocking().lastOrDefault(null);

    Metric<Double> m4 = new Metric<>(new MetricId<>("t1", GAUGE, "m4"), ImmutableMap.of("a1", "A", "a2", ""),
            null);
    metricsService.createMetric(m4).toBlocking().lastOrDefault(null);

    assertMetricIndexMatches("t1", GAUGE,
            asList(new Metric<>(em1.getMetricId(), 7), m1, new Metric<>(gm2.getMetricId(), gm2.getTags(), 7),
                    m3, new Metric<>(m4.getMetricId(), m4.getTags(), 7)));
    assertMetricIndexMatches("t1", AVAILABILITY, singletonList(m2));

    assertDataRetentionsIndexMatches("t1", GAUGE, ImmutableSet.of(new Retention(m3.getMetricId(), 24),
            new Retention(m1.getMetricId(), 24)));

    assertMetricsTagsIndexMatches("t1", "a1", asList(new MetricsTagsIndexEntry("1", m1.getMetricId()),
            new MetricsTagsIndexEntry("A", m4.getMetricId())));
}
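The middle of this test shows a recurring idiom: an AtomicReference<Throwable> plus a CountDownLatch turn an asynchronous onError callback into something a blocking test can assert on. A generic sketch of it, with CompletableFuture standing in for the Rx subscription and the failure itself simulated:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

public class AsyncErrorAssertion {
    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final AtomicReference<Throwable> exceptionRef = new AtomicReference<>();

        // An async operation that is expected to fail.
        CompletableFuture.runAsync(() -> {
            throw new IllegalStateException("already exists");
        }).whenComplete((v, t) -> {
            exceptionRef.set(t); // null on success, the failure otherwise
            latch.countDown();
        });

        latch.await(10, TimeUnit.SECONDS);
        if (exceptionRef.get() == null) {
            throw new AssertionError("expected the operation to fail");
        }
        System.out.println("failed as expected: " + exceptionRef.get());
    }
}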
From source file:io.atomix.protocols.gossip.map.AntiEntropyMapDelegate.java
/**
 * Requests all updates from each peer in the provided list of peers.
 * <p>
 * The returned future will be completed once at least one peer bootstraps this map or bootstrap requests to
 * all peers fail.
 *
 * @param peers the list of peers from which to request updates
 * @return a future to be completed once updates have been received from at least one peer
 */
private CompletableFuture<Void> requestBootstrapFromPeers(List<MemberId> peers) {
    if (peers.isEmpty()) {
        return CompletableFuture.completedFuture(null);
    }
    CompletableFuture<Void> future = new CompletableFuture<>();
    final int totalPeers = peers.size();
    AtomicBoolean successful = new AtomicBoolean();
    AtomicInteger totalCount = new AtomicInteger();
    AtomicReference<Throwable> lastError = new AtomicReference<>();

    // Iterate through all of the peers and send a bootstrap request. On the first peer that returns
    // a successful bootstrap response, complete the future. Otherwise, if no peers respond with any
    // successful bootstrap response, the future will be completed with the last exception.
    for (MemberId peer : peers) {
        requestBootstrapFromPeer(peer).whenComplete((result, error) -> {
            if (error == null) {
                if (successful.compareAndSet(false, true)) {
                    future.complete(null);
                } else if (totalCount.incrementAndGet() == totalPeers) {
                    Throwable e = lastError.get();
                    if (e != null) {
                        future.completeExceptionally(e);
                    }
                }
            } else {
                if (!successful.get() && totalCount.incrementAndGet() == totalPeers) {
                    future.completeExceptionally(error);
                } else {
                    lastError.set(error);
                }
            }
        });
    }
    return future;
}
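The combination above implements first-success-wins fan-out: an AtomicBoolean guards single completion, an AtomicInteger counts finished peers, and the AtomicReference holds the most recent failure in case every peer fails. A condensed sketch of the same shape, with already-completed CompletableFutures standing in for the peer requests:

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;

public class FirstSuccessFanOut {
    static CompletableFuture<Void> firstSuccess(List<CompletableFuture<String>> attempts) {
        CompletableFuture<Void> future = new CompletableFuture<>();
        final int total = attempts.size();
        AtomicBoolean successful = new AtomicBoolean();
        AtomicInteger finished = new AtomicInteger();
        AtomicReference<Throwable> lastError = new AtomicReference<>();

        for (CompletableFuture<String> attempt : attempts) {
            attempt.whenComplete((result, error) -> {
                if (error == null) {
                    // Complete on the first success only.
                    if (successful.compareAndSet(false, true)) {
                        future.complete(null);
                    }
                } else {
                    lastError.set(error);
                    // If every attempt has failed, surface the last recorded error.
                    if (!successful.get() && finished.incrementAndGet() == total) {
                        future.completeExceptionally(lastError.get());
                    }
                }
            });
        }
        return future;
    }

    public static void main(String[] args) {
        firstSuccess(List.of(
                CompletableFuture.failedFuture(new RuntimeException("peer 1 down")),
                CompletableFuture.completedFuture("peer 2 ok")))
            .whenComplete((v, e) -> System.out.println(e == null ? "bootstrapped" : "all failed: " + e));
    }
}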
From source file:com.wk.lodge.composite.web.tomcat.IntegrationCompositeTests.java
@Test
public void testStop() throws Exception {
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicReference<Throwable> failure = new AtomicReference<>();

    URI uri = new URI("ws://localhost:" + port + "/composite");
    WebSocketStompClient stompClient = new WebSocketStompClient(uri, this.headers, sockJsClient);
    stompClient.setMessageConverter(new MappingJackson2MessageConverter());

    stompClient.connect(new StompMessageHandler() {

        private StompSession stompSession;

        @Override
        public void afterConnected(StompSession stompSession, StompHeaderAccessor headers) {
            this.stompSession = stompSession;

            String topicUuid = simulateJoinEvent();

            this.stompSession.subscribe("/user/queue/device", null);
            this.stompSession.subscribe(String.format("/topic/%s", topicUuid), null);

            try {
                HashMap<String, Object> stop = new HashMap<String, Object>();
                stop.put("type", "stop");
                this.stompSession.send(String.format("/app/%s", topicUuid), stop);
            } catch (Throwable t) {
                failure.set(t);
                latch.countDown();
            }
        }

        @Override
        public void handleMessage(Message<byte[]> message) {
            try {
                String json = parseMessageJson(message);
                new JsonPathExpectationsHelper("type").exists(json);
                new JsonPathExpectationsHelper("type").assertValue(json, "stop");
                new JsonPathExpectationsHelper("serverTime").exists(json);
            } catch (Throwable t) {
                failure.set(t);
            } finally {
                this.stompSession.disconnect();
                latch.countDown();
            }
        }

        @Override
        public void handleError(Message<byte[]> message) {
            StompHeaderAccessor accessor = StompHeaderAccessor.wrap(message);
            String error = "[Producer] " + accessor.getShortLogMessage(message.getPayload());
            logger.error(error);
            failure.set(new Exception(error));
        }

        @Override
        public void handleReceipt(String receiptId) {
        }

        @Override
        public void afterDisconnected() {
        }
    });

    if (!latch.await(10, TimeUnit.SECONDS)) {
        fail("Stop response not received");
    } else if (failure.get() != null) {
        throw new AssertionError("", failure.get());
    }
}
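This test bridges an event-driven STOMP handler back to the blocking test thread: the latch signals completion and AtomicReferences carry the outcome across threads. The same bridge in miniature, with a fake asynchronous source in place of the STOMP client:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

public class CallbackToBlocking {
    interface Handler {
        void onMessage(String message);
        void onError(Throwable error);
    }

    // Simulated async source that calls back on another thread.
    static void subscribe(Handler handler) {
        new Thread(() -> handler.onMessage("stop")).start();
    }

    public static void main(String[] args) throws Exception {
        final CountDownLatch latch = new CountDownLatch(1);
        final AtomicReference<String> result = new AtomicReference<>();
        final AtomicReference<Throwable> failure = new AtomicReference<>();

        subscribe(new Handler() {
            @Override
            public void onMessage(String message) {
                result.set(message);   // either the result...
                latch.countDown();
            }

            @Override
            public void onError(Throwable error) {
                failure.set(error);    // ...or the failure is recorded
                latch.countDown();
            }
        });

        if (!latch.await(10, TimeUnit.SECONDS)) {
            throw new AssertionError("no response received");
        } else if (failure.get() != null) {
            throw new AssertionError("handler failed", failure.get());
        }
        System.out.println("received: " + result.get());
    }
}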
From source file:org.opennms.newts.gsod.ImportRunner.java
public void execute(String... args) throws Exception {
    CmdLineParser parser = new CmdLineParser(this);

    try {
        parser.parseArgument(args);
    } catch (CmdLineException e) {
        // handling of wrong arguments
        System.err.println(e.getMessage());
        parser.printUsage(System.err);
        return;
    }

    // Setup the slf4j metrics reporter
    MetricRegistry metrics = new MetricRegistry();

    final long start = System.currentTimeMillis();
    metrics.register("elapsed-seconds", new Gauge<Double>() {

        @Override
        public Double getValue() {
            return (System.currentTimeMillis() - start) / 1000.0;
        }

    });

    final ConsoleReporter reporter = ConsoleReporter.forRegistry(metrics).outputTo(System.err)
            .convertRatesTo(SECONDS).convertDurationsTo(MILLISECONDS).build();

    reporter.start(10, SECONDS);

    if (m_restUrl == null) {
        // we are using a direct importer so use a NewtsReporter for storing metrics
        NewtsReporter newtsReporter = NewtsReporter.forRegistry(metrics).name("importer")
                .convertRatesTo(SECONDS).convertDurationsTo(MILLISECONDS).build(repository());
        newtsReporter.start(1, SECONDS);
    }

    LOG.debug("Scanning {} for GSOD data files...", m_source);

    // walk the files in the directory given
    Observable<Sample> samples = fileTreeWalker(m_source.toPath()).subscribeOn(Schedulers.io())

            // set up a meter for each file processed
            .map(meter(metrics.meter("files"), Path.class))

            // report file
            .map(reportFile())

            // read all the files and convert them into lines
            .mergeMap(lines())

            // excluding the header lines
            .filter(exclude("YEARMODA"))

            // turn each line into a list of samples
            .mergeMap(samples())

            // adjust time on samples according to arguments
            .map(adjustTime())

            // meter the samples
            .map(meter(metrics.meter("samples"), Sample.class));

    Observable<List<Sample>> batches = samples
            // create batches each second or of size m_samplesPerBatch whichever comes first
            .buffer(m_samplesPerBatch);

    Observable<Boolean> doImport = m_restUrl != null ? restPoster(batches, metrics)
            : directPoster(batches, metrics);

    System.err.println("doImport = " + doImport);

    // GO!!!
    final AtomicReference<Subscription> subscription = new AtomicReference<>();
    final AtomicBoolean failed = new AtomicBoolean(false);
    final CountDownLatch latch = new CountDownLatch(1);

    Subscription s = doImport.subscribe(new Observer<Boolean>() {

        @Override
        public void onCompleted() {
            System.err.println("Finished Importing Everything!");
            reporter.report();
            latch.countDown();
            System.exit(0);
        }

        @Override
        public void onError(Throwable e) {
            failed.set(true);
            System.err.println("Error importing!");
            e.printStackTrace();
            try {
                //latch.await();
                Subscription s = subscription.get();
                if (s != null)
                    s.unsubscribe();
            } catch (Exception ex) {
                System.err.println("Failed to close httpClient!");
                ex.printStackTrace();
            } finally {
                //dumpThreads();
            }
        }

        @Override
        public void onNext(Boolean t) {
            System.err.println("Received a boolen: " + t);
        }

    });

    subscription.set(s);

    if (failed.get()) {
        s.unsubscribe();
    }

    //latch.countDown();
    System.err.println("Return from Subscribe!");

    latch.await();

    //dumpThreads();
}
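subscription solves a chicken-and-egg problem: the Observer wants to unsubscribe on error, but the Subscription it needs only exists once subscribe(...) returns, so it is published through an AtomicReference after the fact. The same shape with a ScheduledExecutorService, where a task cancels its own ScheduledFuture (the period and tick count are arbitrary):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;

public class SelfCancellingTask {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        final AtomicReference<ScheduledFuture<?>> handle = new AtomicReference<>();
        final AtomicInteger ticks = new AtomicInteger();

        Runnable task = () -> {
            System.out.println("tick " + ticks.incrementAndGet());
            if (ticks.get() >= 3) {
                ScheduledFuture<?> f = handle.get(); // published by the scheduling thread below
                if (f != null) {
                    f.cancel(false);
                }
            }
        };

        // The task object is created before the future exists; the AtomicReference bridges the gap.
        handle.set(scheduler.scheduleAtFixedRate(task, 0, 50, TimeUnit.MILLISECONDS));

        Thread.sleep(500);
        scheduler.shutdown();
    }
}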
From source file:test.java.com.spotify.docker.client.DefaultDockerClientTest.java
@Test
public void testBuildImageIdWithAuth() throws Exception {
    final String dockerDirectory = Resources.getResource("dockerDirectory").getPath();
    final AtomicReference<String> imageIdFromMessage = new AtomicReference<>();

    final DefaultDockerClient sut2 = DefaultDockerClient.builder().uri(dockerEndpoint).authConfig(authConfig)
            .build();

    final String returnedImageId = sut2.build(Paths.get(dockerDirectory), "test", new ProgressHandler() {
        @Override
        public void progress(ProgressMessage message) throws DockerException {
            final String imageId = message.buildImageId();
            if (imageId != null) {
                imageIdFromMessage.set(imageId);
            }
        }
    });

    assertThat(returnedImageId, is(imageIdFromMessage.get()));
}
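imageIdFromMessage captures a value observed inside the progress callback so it can be compared with the method's return value once the build finishes. Reduced to a skeleton, with an invented pipeline in place of the Docker client:

import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;

public class CallbackCapture {
    // Runs a pipeline that reports each stage to the handler and returns the last stage.
    static String run(Consumer<String> progressHandler) {
        String[] stages = { "pull", "build", "tag" };
        for (String stage : stages) {
            progressHandler.accept(stage);
        }
        return stages[stages.length - 1];
    }

    public static void main(String[] args) {
        final AtomicReference<String> lastSeen = new AtomicReference<>();
        String returned = run(lastSeen::set); // the callback stores what it saw
        // The value captured inside the callback can now be compared with the return value.
        System.out.println(returned.equals(lastSeen.get())); // true
    }
}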