List of usage examples for the java.util.concurrent.CountDownLatch constructor
public CountDownLatch(int count)
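The constructor creates a latch initialized to the given count; each call to countDown() decrements it, and await() blocks until the count reaches zero. Before the project-specific examples below, here is a minimal, self-contained sketch of that shared pattern (the class name, worker count, and messages are hypothetical, not taken from any of the projects):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class CountDownLatchSketch {
    public static void main(String[] args) throws InterruptedException {
        final int workers = 3; // hypothetical number of tasks
        final CountDownLatch done = new CountDownLatch(workers);
        for (int i = 0; i < workers; i++) {
            final int id = i;
            new Thread(() -> {
                // ... do some work ...
                System.out.println("worker " + id + " finished");
                done.countDown(); // signal that this worker is done
            }).start();
        }
        // Block until every worker has counted down, or give up after 10 seconds.
        if (!done.await(10, TimeUnit.SECONDS)) {
            System.err.println("timed out waiting for workers");
        }
    }
}

Each example that follows is a variation on this pattern: the count is either 1 (a one-shot completion signal) or the number of outstanding tasks, and await() is often given a timeout so the caller can fail instead of blocking forever.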
From source file:com.vmware.photon.controller.deployer.dcp.DeployerXenonServiceHostTest.java
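Here the latch is sized to the number of factory service self-links; a completion handler counts it down once per service-availability notification, and the test waits up to 10 seconds for all services to start before throwing a TimeoutException.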
private void waitForServicesStartup(DeployerXenonServiceHost host)
        throws TimeoutException, InterruptedException, NoSuchFieldException, IllegalAccessException {
    serviceSelfLinks = ServiceHostUtils.getServiceSelfLinks(
            DeployerXenonServiceHost.FACTORY_SERVICE_FIELD_NAME_SELF_LINK,
            DeployerXenonServiceHost.FACTORY_SERVICES);
    serviceSelfLinks.add(DeployerXenonServiceHost.UPLOAD_VIB_SCHEDULER_SERVICE);

    final CountDownLatch latch = new CountDownLatch(serviceSelfLinks.size());
    Operation.CompletionHandler handler = new Operation.CompletionHandler() {
        @Override
        public void handle(Operation completedOp, Throwable failure) {
            latch.countDown();
        }
    };

    String[] links = new String[serviceSelfLinks.size()];
    host.registerForServiceAvailability(handler, serviceSelfLinks.toArray(links));
    if (!latch.await(10, TimeUnit.SECONDS)) {
        throw new TimeoutException();
    }
}
From source file:com.astexample.Recognize.java
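A single-count latch keeps the calling thread alive until the gRPC streaming response observer reports completion or an error (both callbacks count down), with the final await bounded to one minute.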
/** Send streaming recognize requests to server. */
public void recognize() throws InterruptedException, IOException {
    final CountDownLatch finishLatch = new CountDownLatch(1);
    StreamObserver<RecognizeResponse> responseObserver = new StreamObserver<RecognizeResponse>() {
        @Override
        public void onNext(RecognizeResponse response) {
            logger.info("Received response: " + TextFormat.printToString(response));
        }

        @Override
        public void onError(Throwable error) {
            Status status = Status.fromThrowable(error);
            logger.log(Level.WARNING, "recognize failed: {0}", status);
            finishLatch.countDown();
        }

        @Override
        public void onCompleted() {
            logger.info("recognize completed.");
            finishLatch.countDown();
        }
    };

    StreamObserver<RecognizeRequest> requestObserver = stub.recognize(responseObserver);
    try {
        // Build and send a RecognizeRequest containing the parameters for processing the audio.
        InitialRecognizeRequest initial = InitialRecognizeRequest.newBuilder()
                .setEncoding(AudioEncoding.LINEAR16)
                .setSampleRate(samplingRate)
                .setInterimResults(true)
                .build();
        RecognizeRequest firstRequest = RecognizeRequest.newBuilder().setInitialRequest(initial).build();
        requestObserver.onNext(firstRequest);

        // Open audio file. Read and send sequential buffers of audio as additional RecognizeRequests.
        FileInputStream in = new FileInputStream(new File(file));
        // For LINEAR16 at 16000 Hz sample rate, 3200 bytes corresponds to 100 milliseconds of audio.
        byte[] buffer = new byte[3200];
        int bytesRead;
        int totalBytes = 0;
        while ((bytesRead = in.read(buffer)) != -1) {
            totalBytes += bytesRead;
            AudioRequest audio = AudioRequest.newBuilder()
                    .setContent(ByteString.copyFrom(buffer, 0, bytesRead))
                    .build();
            RecognizeRequest request = RecognizeRequest.newBuilder().setAudioRequest(audio).build();
            requestObserver.onNext(request);
            // To simulate real-time audio, sleep after sending each audio buffer.
            // For 16000 Hz sample rate, sleep 100 milliseconds.
            Thread.sleep(samplingRate / 160);
        }
        logger.info("Sent " + totalBytes + " bytes from audio file: " + file);
    } catch (RuntimeException e) {
        // Cancel RPC.
        requestObserver.onError(e);
        throw e;
    }
    // Mark the end of requests.
    requestObserver.onCompleted();

    // Receiving happens asynchronously.
    finishLatch.await(1, TimeUnit.MINUTES);
}
From source file:com.twitter.hbc.httpclient.ClientBase.java
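The constructor creates a one-count isRunning latch; it remains at one while the client is running, giving the rest of the class a simple way to signal shutdown and callers a way to await it.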
ClientBase(String name, HttpClient client, Hosts hosts, StreamingEndpoint endpoint, Authentication auth,
        HosebirdMessageProcessor processor, ReconnectionManager manager, RateTracker rateTracker,
        @Nullable BlockingQueue<Event> eventsQueue) {
    this.client = Preconditions.checkNotNull(client);
    this.name = Preconditions.checkNotNull(name);
    this.endpoint = Preconditions.checkNotNull(endpoint);
    this.hosts = Preconditions.checkNotNull(hosts);
    this.auth = Preconditions.checkNotNull(auth);
    this.processor = Preconditions.checkNotNull(processor);
    this.reconnectionManager = Preconditions.checkNotNull(manager);
    this.rateTracker = Preconditions.checkNotNull(rateTracker);

    this.eventsQueue = eventsQueue;
    this.exitEvent = new AtomicReference<Event>();

    this.isRunning = new CountDownLatch(1);
    this.statsReporter = new StatsReporter();

    this.connectionEstablished = new AtomicBoolean(false);
    this.reconnect = new AtomicBoolean(false);
}
From source file:at.salzburgresearch.kmt.zkconfig.ZookeeperConfiguration.java
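A one-count latch is passed to the ZooKeeper watcher, which is expected to count it down once the connection is established; zkInit awaits it for at most zkTimeout milliseconds before creating the root nodes, and converts a timeout into an IOException.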
private void zkInit() throws IOException {
    sync.raiseBarrier();
    final CountDownLatch connected = new CountDownLatch(1);
    log.debug("zkInit - connecting");
    // if (zk != null) zk.close();
    zk = new ZooKeeper(zkConnectionString, zkTimeout, new ZKWatcher(connected, sync));

    log.info("zkInit - ensure root node exists");
    try {
        if (connected.await(zkTimeout, TimeUnit.MILLISECONDS)) {
            for (int i = zkRoot.indexOf('/', 1); i > 0; i = zkRoot.indexOf('/', i + 1)) {
                final String path = zkRoot.substring(0, i);
                log.trace("zkInit - checking existence of {}", path);
                if (zk.exists(path, false) == null) {
                    zk.create(path, new byte[] {}, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
                }
            }
            log.debug("zkInit - zkRoot {} exists", zkRoot);
        } else {
            throw new IOException("Timeout while establishing ZooKeeper connection");
        }
    } catch (InterruptedException e) {
        throw new IOException("Could not connect", e);
    } catch (KeeperException e) {
        throw new IOException("Initial Connection failed - is zookeeper available?", e);
    }
    log.info("zkInit - connected");
    sync.lowerBarrier();
}
From source file:com.datatorrent.demos.dimensions.generic.GenericAppTest.java
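The test hands a latch of 100 to a Kafka consumer thread and then awaits it for at most 15 seconds, so the latch acts as a bounded wait for query-result messages before the local cluster is shut down and the results are asserted.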
@Test
public void testApplication() throws Exception {
    LocalMode lma = LocalMode.newInstance();
    Configuration conf = new Configuration(false);
    conf.addResource("META-INF/properties.xml");
    conf.set("dt.operator.DimensionsComputation.attr.APPLICATION_WINDOW_COUNT", "1");
    conf.set("dt.operator.QueryResult.prop.configProperties(metadata.broker.list)", "localhost:9092");
    conf.set("dt.operator.DimensionsStore.fileStore.basePath", "target/HDSApplicationTestStore");
    conf.set("dt.operator.Query.brokerSet", "localhost:9092");
    conf.set("dt.operator.Query.topic", kafkaQueryTopic);
    conf.set("dt.operator.QueryResult.topic", kafkaQueryResultTopic);
    conf.set("dt.operator.DimensionsComputation.attr.APPLICATION_WINDOW_COUNT", "2");
    conf.set("dt.operator.InputGenerator.numPublishers", "2");
    conf.set("dt.loggers.level", "server.*:INFO");

    GenericDimensionsApplication app = new GenericDimensionsApplication();
    lma.prepareDAG(app, conf);
    LocalMode.Controller lc = lma.getController();
    lc.setHeartbeatMonitoringEnabled(false);
    lc.runAsync();

    //Write messages to kafkaQueryTopic
    KafkaTestProducer kafkaQuery = new KafkaTestProducer(kafkaQueryTopic);
    // Query should be able to support keys of String or Number type
    String testQuery = "{\n" + " \"id\": \"query1\",\n" + " \"keys\": {\"publisherId\": \"1\"},\n"
            + " \"kafka\": {\"queryTopic\":\"GenericDimensionsQuery\",\"resultTopic\":\"GenericDimensionsQueryResult\"}"
            + "}";

    List<String> testQueryMessages = new ArrayList<String>();
    testQueryMessages.add(testQuery);
    kafkaQuery.setMessages(testQueryMessages);
    kafkaQuery.run();

    // Setup a message listener to receive the query results
    CountDownLatch latch = new CountDownLatch(100);
    KafkaTestConsumer queryResultsListener = new KafkaTestConsumer(kafkaQueryResultTopic);
    queryResultsListener.setLatch(latch);
    new Thread(queryResultsListener).start();

    // Wait to receive messages
    latch.await(15, TimeUnit.SECONDS);
    lc.shutdown();

    // Evaluate results
    String lastMessage;
    LOG.info("Sent " + kafkaQuery.getSendCount() + " messages to " + kafkaQueryTopic);
    LOG.info("Received " + queryResultsListener.holdingBuffer.size() + " messages from Kafka on "
            + kafkaQueryResultTopic + " topic");
    Assert.assertTrue("Minimum messages received from Kafka " + queryResultsListener.holdingBuffer,
            queryResultsListener.holdingBuffer.size() >= 1);

    while (!queryResultsListener.holdingBuffer.isEmpty()) {
        lastMessage = queryResultsListener.getMessage(queryResultsListener.holdingBuffer.poll());
        Assert.assertNotNull("Did not receive message from Kafka", lastMessage);
        LOG.info("received:\n{}", lastMessage);
    }
}
From source file:gov.va.isaac.workflow.engine.RemoteSynchronizer.java
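A one-count latch turns the asynchronous synchronize(callback) API into a blocking call: the callback stores the result and counts down, while the caller awaits indefinitely and then returns the captured SynchronizeResult.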
/**
 * Request a remote synchronization. This call blocks until the operation is complete,
 * or the thread is interrupted.
 *
 * @throws InterruptedException
 */
public SynchronizeResult blockingSynchronize() throws InterruptedException {
    log.info("Queuing a blocking sync request");
    final MutableObject<SynchronizeResult> result = new MutableObject<SynchronizeResult>();
    final CountDownLatch cdl = new CountDownLatch(1);
    Consumer<SynchronizeResult> callback = new Consumer<SynchronizeResult>() {
        @Override
        public void accept(SynchronizeResult t) {
            result.setValue(t);
            cdl.countDown();
        }
    };

    synchronize(callback);
    cdl.await();
    return result.getValue();
}
From source file:org.zodiark.publisher.PublisherTest.java
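Three one-count latches (connected, latch, tlatch) sequence the asynchronous handshake: each envelope handler counts its latch down when the corresponding response arrives, and the test awaits each latch before sending the next request and asserting the result.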
@Test(enabled = false)
public void startStreamingSession() throws IOException, InterruptedException {
    final ZodiarkClient wowzaClient = new ZodiarkClient.Builder().path("http://127.0.0.1:" + port).build();
    final CountDownLatch connected = new CountDownLatch(1);
    final AtomicReference<String> uuid = new AtomicReference<>();

    // Fake Wowza Client
    wowzaClient.handler(new OnEnvelopHandler() {
        @Override
        public boolean onEnvelop(Envelope e) throws IOException {
            Message m = e.getMessage();
            switch (m.getPath()) {
            case WOWZA_CONNECT:
                // Connected. Listen
                uuid.set(e.getUuid());
                break;
            case SERVER_VALIDATE_OK:
                Envelope publisherOk = Envelope
                        .newClientToServerRequest(new Message(new Path(""), e.getMessage().getData()));
                wowzaClient.send(publisherOk);
                break;
            default:
                // ERROR
            }
            connected.countDown();
            return false;
        }
    }).open();

    Envelope wowzaConnect = Envelope.newClientToServerRequest(
            new Message(new Path(WOWZA_CONNECT), mapper.writeValueAsString(new UserPassword("wowza", "bar"))));
    wowzaClient.send(wowzaConnect);
    connected.await();

    // Publisher
    final AtomicReference<PublisherResults> answer = new AtomicReference<>();
    final ZodiarkClient publisherClient = new ZodiarkClient.Builder().path("http://127.0.0.1:" + port).build();
    final CountDownLatch latch = new CountDownLatch(1);

    publisherClient.handler(new OnEnvelopHandler() {
        @Override
        public boolean onEnvelop(Envelope e) throws IOException {
            answer.set(mapper.readValue(e.getMessage().getData(), PublisherResults.class));
            latch.countDown();
            return true;
        }
    }).open();

    Envelope createSessionMessage = Envelope
            .newClientToServerRequest(new Message(new Path(DB_POST_PUBLISHER_SESSION_CREATE),
                    mapper.writeValueAsString(new UserPassword("publisherex", "bar"))));
    createSessionMessage.setFrom(new From(ActorValue.PUBLISHER));
    publisherClient.send(createSessionMessage);
    latch.await();
    assertEquals("OK", answer.get().getResults());
    answer.set(null);

    final CountDownLatch tlatch = new CountDownLatch(1);
    publisherClient.handler(new OnEnvelopHandler() {
        @Override
        public boolean onEnvelop(Envelope e) throws IOException {
            answer.set(mapper.readValue(e.getMessage().getData(), PublisherResults.class));
            tlatch.countDown();
            return true;
        }
    });

    Envelope startStreamingSession = Envelope
            .newClientToServerRequest(new Message(new Path(VALIDATE_PUBLISHER_STREAMING_SESSION),
                    mapper.writeValueAsString(new WowzaUUID(uuid.get()))));
    createSessionMessage.setFrom(new From(ActorValue.PUBLISHER));
    publisherClient.send(startStreamingSession);
    tlatch.await();
    assertEquals("OK", answer.get().getResults());
}
From source file:com.google.api.ads.adwords.jaxws.extensions.processors.onfile.ReportProcessorOnFile.java
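The latch is sized to the number of local report files; each RunnableProcessorOnFile receives the latch via setLatch (and is expected to count it down when its file is parsed), so the method only shuts the executor down and logs the elapsed time after every file has been processed.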
private <R extends Report> void processFiles(String userId, String mccAccountId, Class<R> reportBeanClass,
        Collection<File> localFiles, ReportDefinitionDateRangeType dateRangeType, String dateStart,
        String dateEnd) {

    final CountDownLatch latch = new CountDownLatch(localFiles.size());
    ExecutorService executorService = Executors.newFixedThreadPool(numberOfReportProcessors);

    // Processing Report Local Files
    LOGGER.info(" Procesing reports...");

    Stopwatch stopwatch = Stopwatch.createStarted();

    for (File file : localFiles) {
        LOGGER.trace(".");
        try {
            ModifiedCsvToBean<R> csvToBean = new ModifiedCsvToBean<R>();
            MappingStrategy<R> mappingStrategy = new AnnotationBasedMappingStrategy<R>(reportBeanClass);

            LOGGER.debug("Parsing file: " + file.getAbsolutePath());
            RunnableProcessorOnFile<R> runnableProcesor = new RunnableProcessorOnFile<R>(file, csvToBean,
                    mappingStrategy, dateRangeType, dateStart, dateEnd, mccAccountId, persister,
                    reportRowsSetSize);
            runnableProcesor.setLatch(latch);
            executorService.execute(runnableProcesor);
        } catch (Exception e) {
            LOGGER.error("Ignoring file (Error when processing): " + file.getAbsolutePath());
            e.printStackTrace();
        }
    }

    try {
        latch.await();
    } catch (InterruptedException e) {
        LOGGER.error(e.getMessage());
        e.printStackTrace();
    }
    executorService.shutdown();
    stopwatch.stop();
    LOGGER.info("*** Finished processing all reports in "
            + (stopwatch.elapsed(TimeUnit.MILLISECONDS) / 1000) + " seconds ***\n");
}
From source file:com.frostwire.AzureusStarter.java
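To make the Azureus core start-up synchronous, a one-count latch is counted down in the lifecycle listener's started() callback; after core.start(), the initializer blocks on signal.await() until the core reports that it is up.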
private static synchronized void azureusInit() {
    try {
        if (isAzureusCoreStarted()) {
            LOG.debug("azureusInit(): core already started. skipping.");
            return;
        }
    } catch (Exception ignore) {
    }

    Application.setApplication(
            CommonUtils.getUserSettingsDir().getAbsolutePath() + File.separator + "appwork" + File.separator);

    File jdHome = new File(
            CommonUtils.getUserSettingsDir().getAbsolutePath() + File.separator + "jd_home" + File.separator);
    if (!jdHome.exists()) {
        jdHome.mkdir();
    }

    JDUtilities.setJDHomeDirectory(jdHome);
    JDUtilities.getConfiguration().setProperty("DOWNLOAD_DIRECTORY",
            SharingSettings.TORRENT_DATA_DIR_SETTING.getValue().getAbsolutePath());

    File azureusUserPath = new File(
            CommonUtils.getUserSettingsDir() + File.separator + "azureus" + File.separator);
    if (!azureusUserPath.exists()) {
        azureusUserPath.mkdirs();
    }

    System.setProperty("azureus.loadplugins", "0"); // disable third party azureus plugins
    System.setProperty("azureus.config.path", azureusUserPath.getAbsolutePath());
    System.setProperty("azureus.install.path", azureusUserPath.getAbsolutePath());

    if (!AzureusCoreFactory.isCoreAvailable()) {
        //This does work
        org.gudy.azureus2.core3.util.SystemProperties.APPLICATION_NAME = "azureus";
        org.gudy.azureus2.core3.util.SystemProperties.setUserPath(azureusUserPath.getAbsolutePath());

        if (!SharingSettings.TORRENTS_DIR_SETTING.getValue().exists()) {
            SharingSettings.TORRENTS_DIR_SETTING.getValue().mkdirs();
        }

        COConfigurationManager.setParameter("Auto Adjust Transfer Defaults", false);
        COConfigurationManager.setParameter("General_sDefaultTorrent_Directory",
                SharingSettings.TORRENTS_DIR_SETTING.getValue().getAbsolutePath());

        try {
            AZUREUS_CORE = AzureusCoreFactory.create();
        } catch (AzureusCoreException coreException) {
            //so we already had one eh...
            if (AZUREUS_CORE == null) {
                AZUREUS_CORE = AzureusCoreFactory.getSingleton();
            }
        }

        //to guarantee a synchronous start
        final CountDownLatch signal = new CountDownLatch(1);

        AZUREUS_CORE.addLifecycleListener(new AzureusCoreLifecycleListener() {

            @Override
            public boolean syncInvokeRequired() {
                return false;
            }

            @Override
            public void stopping(AzureusCore core) {
                core.getGlobalManager().pauseDownloads();
            }

            @Override
            public void stopped(AzureusCore core) {
            }

            @Override
            public boolean stopRequested(AzureusCore core) throws AzureusCoreException {
                return false;
            }

            @Override
            public void started(AzureusCore core) {
                signal.countDown();
            }

            @Override
            public boolean restartRequested(AzureusCore core) throws AzureusCoreException {
                return false;
            }

            @Override
            public boolean requiresPluginInitCompleteBeforeStartedEvent() {
                return false;
            }

            @Override
            public void componentCreated(AzureusCore core, AzureusCoreComponent component) {
            }
        });

        if (!AZUREUS_CORE.isStarted() && !AZUREUS_CORE.isRestarting()) {
            AZUREUS_CORE.start();
        }

        AZUREUS_CORE.getGlobalManager().resumeDownloads();

        LOG.debug("azureusInit(): core.start() waiting...");
        try {
            signal.await();
            LOG.debug("azureusInit(): core started...");
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
}