List of usage examples for org.apache.commons.lang3.time StopWatch isStarted
public boolean isStarted()
The method returns whether the StopWatch has been started (i.e., is currently running or suspended, as opposed to unstarted or stopped).
From source file:net.kmycode.javaspeechserver.cloud.StreamingRecognizeClient.java
/**
 * Sends streaming recognize requests to the server and prints transcription results.
 *
 * <p>Flow: registers a response observer, sends the initial configuration request
 * ({@code LINEAR16}, sample rate from the recorder, language {@code "ja-JP"}), flushes any
 * previously buffered audio from {@code byteStringQueue}, then reads audio buffers from the
 * recorder and streams them until either the server returns a final result (observer counts
 * down {@code finishLatch}), silence is detected 3 times, or more than 2000&nbsp;ms elapse on
 * {@code stopwatch} without a response (the observer resets the stopwatch on every response).
 *
 * <p>NOTE(review): {@code byteStringQueue}, {@code notSoundCount}, {@code requestObserver},
 * {@code speechClient}, {@code logger}, and the {@code request(ByteString)} helper are declared
 * elsewhere in this class — presumably {@code byteStringQueue} buffers unacknowledged audio for
 * resend and {@code notSoundCount} persists across calls; confirm against the full class.
 *
 * @throws InterruptedException if interrupted while awaiting the final response
 * @throws IOException if reading from the audio recorder fails
 */
public void recognize() throws InterruptedException, IOException {
    final AudioRecorder recorder = AudioRecorder.getDefault();
    // Measures time since the last server response; started lazily on the first sound chunk
    // and reset by onNext() below. Used as a 2-second response timeout in the send loop.
    final StopWatch stopwatch = new StopWatch();
    // Released by the observer on a final result, an error, or stream completion.
    final CountDownLatch finishLatch = new CountDownLatch(1);
    StreamObserver<StreamingRecognizeResponse> responseObserver = new StreamObserver<StreamingRecognizeResponse>() {
        // Length of the previous interim transcript; used to pad the format string so a
        // shorter subsequent transcript fully overwrites the old one on the same line.
        private int sentenceLength = 1;

        /**
         * Prints transcription results. Interim results are overwritten by subsequent
         * results (carriage return) until a final one is returned, at which point a
         * newline is printed and {@code finishLatch} is released.
         */
        @Override
        public void onNext(StreamingRecognizeResponse response) {
            // A response arrived: drop audio buffered for resend and restart the timeout clock.
            byteStringQueue.clear();
            stopwatch.reset();
            List<StreamingRecognitionResult> results = response.getResultsList();
            if (results.size() < 1) {
                return;
            }
            StreamingRecognitionResult result = results.get(0);
            String transcript = result.getAlternatives(0).getTranscript();
            // Pad to the previous transcript's length so interim output is fully overwritten;
            // append the confidence score in parentheses.
            String format = "%-" + this.sentenceLength + 's';
            format += " (" + result.getAlternatives(0).getConfidence() + ") ";
            if (result.getIsFinal()) {
                // Final result: terminate the line and signal the send loop to stop.
                format += '\n';
                this.sentenceLength = 1;
                finishLatch.countDown();
            } else {
                // Interim result: return to start of line so the next print overwrites it.
                format += '\r';
                this.sentenceLength = transcript.length();
            }
            System.out.print(String.format(format, transcript));
        }

        @Override
        public void onError(Throwable error) {
            logger.log(Level.ERROR, "recognize failed: {0}", error);
            finishLatch.countDown();
        }

        @Override
        public void onCompleted() {
            logger.info("recognize completed.");
            finishLatch.countDown();
        }
    };
    this.requestObserver = this.speechClient.streamingRecognize(responseObserver);
    try {
        // Build and send a StreamingRecognizeRequest containing the parameters for
        // processing the audio.
        RecognitionConfig config = RecognitionConfig.newBuilder()
                .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16).setSampleRate(recorder.getSamplingRate())
                .setLanguageCode("ja-JP").build();
        StreamingRecognitionConfig streamingConfig = StreamingRecognitionConfig.newBuilder().setConfig(config)
                .setInterimResults(true).setSingleUtterance(false).build();
        StreamingRecognizeRequest initial = StreamingRecognizeRequest.newBuilder()
                .setStreamingConfig(streamingConfig).build();
        requestObserver.onNext(initial);
        // Re-send any audio left over in the queue (presumably buffered by a previous,
        // unanswered streaming attempt — TODO confirm against the rest of the class).
        while (this.byteStringQueue.size() > 0) {
            ByteString data = this.byteStringQueue.poll();
            this.request(data);
        }
        // Read and send sequential buffers of audio as additional RecognizeRequests,
        // until a final result arrives or the recorder stops producing data.
        while (finishLatch.getCount() > 0 && recorder.read()) {
            if (recorder.isSound()) {
                ByteString data = this.recorder.getBufferAsByteString();
                // Keep a copy so it can be re-sent if this stream yields no response.
                this.byteStringQueue.add(data);
                if (!stopwatch.isStarted()) {
                    // First sound chunk since the last response: start the timeout clock.
                    stopwatch.start();
                } else if (stopwatch.getTime() > 2000) {
                    // No response for over 2 seconds: give up on this stream and discard
                    // the buffered audio.
                    this.byteStringQueue.clear();
                    break;
                }
                this.request(data);
            } else {
                // Silence detected; after 3 silent reads, stop recognition.
                // NOTE(review): notSoundCount is a field and is never reset here —
                // presumably reset elsewhere; verify.
                this.notSoundCount++;
                if (this.notSoundCount >= 3) {
                    // stop recognition
                    break;
                }
            }
        }
    } catch (RuntimeException e) {
        // Cancel RPC on any local failure, then propagate to the caller.
        requestObserver.onError(e);
        throw e;
    }
    // Mark the end of requests.
    requestObserver.onCompleted();
    // Receiving happens asynchronously; wait up to one minute for the final result.
    // NOTE(review): the boolean return of await() (timeout vs. completion) is ignored.
    finishLatch.await(1, TimeUnit.MINUTES);
}