Example usage for java.util.concurrent.atomic AtomicBoolean AtomicBoolean

Introduction

On this page you can find example usages of the java.util.concurrent.atomic.AtomicBoolean constructor AtomicBoolean(boolean).

Prototype

public AtomicBoolean(boolean initialValue) 

Document

Creates a new AtomicBoolean with the given initial value.
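A minimal sketch of the constructor in use (names are illustrative):

AtomicBoolean ready = new AtomicBoolean(false); // starts false
ready.set(true);                                // plain write, visible to other threads
boolean wasReady = ready.getAndSet(false);      // atomically read the old value and replace it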

Usage

From source file:net.dempsy.container.TestContainer.java

@Test
public void testEvictableWithBusyMp() throws Throwable {
    final TestProcessor mp = createAndGet("foo");

    // now we're going to cause the processing to be held up.
    mp.latch = new CountDownLatch(1);
    mp.evict.set(true); // allow eviction

    // sending it a message will now cause it to hang up while processing
    final TestAdaptor adaptor = context.getBean(TestAdaptor.class);
    adaptor.dispatcher.dispatchAnnotated(new ContainerTestMessage("foo"));

    final TestProcessor prototype = context.getBean(TestProcessor.class);

    // keep track of the cloneCount for later checking
    final int tmpCloneCount = prototype.cloneCount.intValue();

    // invocation count should go to 2
    assertTrue(poll(mp, o -> o.invocationCount == 2));

    // now kick off the evict in a separate thread since we expect it to hang
    // until the mp becomes unstuck.
    final AtomicBoolean evictIsComplete = new AtomicBoolean(false); // this will allow us to see the evict pass complete
    final Thread thread = new Thread(new Runnable() {
        @Override
        public void run() {
            container.evict();
            evictIsComplete.set(true);
        }
    });
    thread.start();

    // now check to make sure eviction doesn't complete.
    Thread.sleep(100); // just a little to give any mistakes a chance to work themselves through
    assertFalse(evictIsComplete.get()); // make sure eviction didn't finish

    mp.latch.countDown(); // this lets it go

    // wait until the eviction completes
    assertTrue(poll(evictIsComplete, o -> o.get()));
    Thread.sleep(100);
    assertEquals("activation count, 2nd message", 1, mp.activationCount);
    assertEquals("invocation count, 2nd message", 2, mp.invocationCount);

    adaptor.dispatcher.dispatchAnnotated(new ContainerTestMessage("foo"));
    assertTrue(poll(o -> prototype.cloneCount.intValue() > tmpCloneCount));
    Thread.sleep(1000);
    assertEquals("Clone count, 2nd message", tmpCloneCount + 1, prototype.cloneCount.intValue());
}
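The evictIsComplete flag above is a common AtomicBoolean idiom: a worker thread publishes completion by setting the flag, and the test thread polls it until it flips. A minimal, self-contained sketch of the same pattern (names are illustrative, not part of the test above):

static void waitForWorker() throws InterruptedException {
    final AtomicBoolean done = new AtomicBoolean(false);
    final Thread worker = new Thread(() -> {
        // ... do the blocking work here ...
        done.set(true); // publish completion to the polling thread
    });
    worker.start();
    while (!done.get()) {
        Thread.sleep(10); // poll until the worker sets the flag
    }
}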

From source file:io.pravega.segmentstore.server.containers.StreamSegmentMapperTests.java

/**
 * Tests the ability of the StreamSegmentMapper to generate/return the Id of an existing StreamSegment, with concurrent requests.
 */
@Test
public void testGetOrAssignStreamSegmentIdWithConcurrency() throws Exception {
    // We set up a delay in the OperationLog process. We only do this for a stand-alone StreamSegment because the process
    // is driven by the same code for Transactions as well.
    final String segmentName = "Segment";
    final long segmentId = 12345;

    HashSet<String> storageSegments = new HashSet<>();
    storageSegments.add(segmentName);

    @Cleanup
    TestContext context = new TestContext();
    setupStorageGetHandler(context, storageSegments,
            sn -> new StreamSegmentInformation(sn, 0, false, false, new ImmutableDate()));
    CompletableFuture<Long> initialAddFuture = new CompletableFuture<>();
    AtomicBoolean operationLogInvoked = new AtomicBoolean(false);
    context.operationLog.addHandler = op -> {
        if (!(op instanceof StreamSegmentMapOperation)) {
            return FutureHelpers.failedFuture(new IllegalArgumentException("unexpected operation"));
        }
        if (operationLogInvoked.getAndSet(true)) {
            return FutureHelpers.failedFuture(new IllegalStateException("multiple calls to OperationLog.add"));
        }

        // Need to set SegmentId on operation.
        ((StreamSegmentMapOperation) op).setStreamSegmentId(segmentId);
        return initialAddFuture;
    };

    CompletableFuture<Long> firstCall = context.mapper.getOrAssignStreamSegmentId(segmentName, TIMEOUT);
    CompletableFuture<Long> secondCall = context.mapper.getOrAssignStreamSegmentId(segmentName, TIMEOUT);
    Thread.sleep(20);
    Assert.assertFalse("getOrAssignStreamSegmentId (first call) returned before OperationLog finished.",
            firstCall.isDone());
    Assert.assertFalse("getOrAssignStreamSegmentId (second call) returned before OperationLog finished.",
            secondCall.isDone());
    initialAddFuture.complete(1L);
    long firstCallResult = firstCall.get(100, TimeUnit.MILLISECONDS);
    long secondCallResult = secondCall.get(100, TimeUnit.MILLISECONDS);

    Assert.assertEquals(
            "Two concurrent calls to getOrAssignStreamSegmentId for the same StreamSegment returned different ids.",
            firstCallResult, secondCallResult);
}
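The operationLogInvoked.getAndSet(true) call above is the standard run-once guard: every caller atomically swaps in true, but only the first caller sees the previous value false. A minimal sketch of the idiom (initialized and doInit are illustrative):

final AtomicBoolean initialized = new AtomicBoolean(false);

void initOnce() {
    if (!initialized.getAndSet(true)) {
        doInit(); // only the first thread to arrive runs the one-time work
    }
}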

From source file:com.hurence.logisland.service.elasticsearch.Elasticsearch_2_4_0_ClientService.java

@Override
public void copyIndex(String reindexScrollTimeout, String srcIndex, String dstIndex) throws IOException {

    SearchResponse scrollResp = esClient.prepareSearch(srcIndex).setSearchType(SearchType.QUERY_AND_FETCH)
            .setScroll(reindexScrollTimeout).setQuery(QueryBuilders.matchAllQuery()) // Match all query
            .setSize(100) // 100 hits per shard will be returned for each scroll
            .execute().actionGet();

    AtomicBoolean failed = new AtomicBoolean(false);

    // A user of a BulkProcessor just keeps adding requests to it, and the BulkProcessor itself decides when
    // to send a request to the ES nodes, based on its configuration settings. Sends can be triggered by the number
    // of queued requests, the total size of queued requests, and the time since the previous request. The defaults for
    // these settings are all sensible, so they are not overridden here. The BulkProcessor has an internal threadpool
    // which allows it to send multiple batches concurrently; the default is "1", meaning that a single completed
    // batch can be sent in the background while a new batch is being built. When the non-active batch is
    // "full", the add call blocks until the background batch completes.

    while (true) {
        if (scrollResp.getHits().getHits().length == 0) {
            // No more results
            break;
        }

        for (SearchHit hit : scrollResp.getHits()) {
            IndexRequest request = new IndexRequest(dstIndex, hit.type(), hit.id());
            Map<String, Object> source = hit.getSource();
            request.source(source);
            bulkProcessor.add(request);
        }

        String scrollId = scrollResp.getScrollId();
        scrollResp = esClient.prepareSearchScroll(scrollId).setScroll(reindexScrollTimeout).execute()
                .actionGet();
    }

    getLogger().info("Reindex completed");
}
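The bulkProcessor used above is built elsewhere in the service. Assuming the standard org.elasticsearch.action.bulk.BulkProcessor builder API from the 2.x client, a sketch of how such a processor might be configured, and how the failed flag declared above would get set:

BulkProcessor bulkProcessor = BulkProcessor.builder(esClient, new BulkProcessor.Listener() {
    @Override
    public void beforeBulk(long executionId, BulkRequest request) {
    }

    @Override
    public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
        if (response.hasFailures()) {
            failed.set(true); // record partial failures on the shared flag
        }
    }

    @Override
    public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
        failed.set(true); // record a hard failure on the shared flag
    }
}).setConcurrentRequests(1) // one batch may send in the background while the next is built
        .build();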

From source file:io.openvidu.server.recording.service.RecordingManager.java

private void checkRecordingPaths(String openviduRecordingPath, String openviduRecordingCustomLayout)
        throws OpenViduException {
    log.info("Initializing recording paths");

    Path recordingPath = null;
    try {
        recordingPath = Files.createDirectories(Paths.get(openviduRecordingPath));
    } catch (IOException e) {
        String errorMessage = "The recording path \"" + openviduRecordingPath
                + "\" is not valid. Reason: OpenVidu Server cannot find path \"" + openviduRecordingPath
                + "\" and doesn't have permissions to create it";
        log.error(errorMessage);
        throw new OpenViduException(Code.RECORDING_PATH_NOT_VALID, errorMessage);
    }

    // Check OpenVidu Server write permissions in recording path
    if (!Files.isWritable(recordingPath)) {
        String errorMessage = "The recording path \"" + openviduRecordingPath
                + "\" is not valid. Reason: OpenVidu Server needs write permissions. Try running command \"sudo chmod 777 "
                + openviduRecordingPath + "\"";
        log.error(errorMessage);
        throw new OpenViduException(Code.RECORDING_PATH_NOT_VALID, errorMessage);
    } else {
        log.info("OpenVidu Server has write permissions on recording path: {}", openviduRecordingPath);
    }

    final String testFolderPath = openviduRecordingPath + "/TEST_RECORDING_PATH_" + System.currentTimeMillis();
    final String testFilePath = testFolderPath + "/TEST_RECORDING_PATH.webm";

    // Check Kurento Media Server write permissions in recording path
    KurentoClientSessionInfo kcSessionInfo = new OpenViduKurentoClientSessionInfo("TEST_RECORDING_PATH",
            "TEST_RECORDING_PATH");
    MediaPipeline pipeline = this.kcProvider.getKurentoClient(kcSessionInfo).createMediaPipeline();
    RecorderEndpoint recorder = new RecorderEndpoint.Builder(pipeline, "file://" + testFilePath).build();

    final AtomicBoolean kurentoRecorderError = new AtomicBoolean(false);

    recorder.addErrorListener(new EventListener<ErrorEvent>() {
        @Override
        public void onEvent(ErrorEvent event) {
            if (event.getErrorCode() == 6) {
                // KMS write permissions error
                kurentoRecorderError.compareAndSet(false, true);
            }
        }
    });

    recorder.record();

    try {
        // Give the error event some time to trigger if necessary
        Thread.sleep(500);
    } catch (InterruptedException e1) {
        e1.printStackTrace();
    }

    if (kurentoRecorderError.get()) {
        String errorMessage = "The recording path \"" + openviduRecordingPath
                + "\" is not valid. Reason: Kurento Media Server needs write permissions. Try running command \"sudo chmod 777 "
                + openviduRecordingPath + "\"";
        log.error(errorMessage);
        throw new OpenViduException(Code.RECORDING_PATH_NOT_VALID, errorMessage);
    }

    recorder.stop();
    recorder.release();
    pipeline.release();

    log.info("Kurento Media Server has write permissions on recording path: {}", openviduRecordingPath);

    try {
        new CustomFileManager().deleteFolder(testFolderPath);
        log.info("OpenVidu Server has write permissions over files created by Kurento Media Server");
    } catch (IOException e) {
        String errorMessage = "The recording path \"" + openviduRecordingPath
                + "\" is not valid. Reason: OpenVidu Server does not have write permissions over files created by Kurento Media Server. "
                + "Try running Kurento Media Server as user \"" + System.getProperty("user.name")
                + "\" or run OpenVidu Server as superuser";
        log.error(errorMessage);
        log.error(
                "Be aware that a folder \"{}\" was created and should be manually deleted (\"sudo rm -rf {}\")",
                testFolderPath, testFolderPath);
        throw new OpenViduException(Code.RECORDING_PATH_NOT_VALID, errorMessage);
    }

    if (openviduConfig.openviduRecordingCustomLayoutChanged(openviduRecordingCustomLayout)) {
        // Property openvidu.recording.custom-layout changed
        File dir = new File(openviduRecordingCustomLayout);
        if (dir.exists()) {
            if (!dir.isDirectory()) {
                String errorMessage = "The custom layouts path \"" + openviduRecordingCustomLayout
                        + "\" is not valid. Reason: path already exists but it is not a directory";
                log.error(errorMessage);
                throw new OpenViduException(Code.RECORDING_FILE_EMPTY_ERROR, errorMessage);
            } else {
                if (dir.listFiles() == null) {
                    String errorMessage = "The custom layouts path \"" + openviduRecordingCustomLayout
                            + "\" is not valid. Reason: OpenVidu Server needs read permissions. Try running command \"sudo chmod 755 "
                            + openviduRecordingCustomLayout + "\"";
                    log.error(errorMessage);
                    throw new OpenViduException(Code.RECORDING_FILE_EMPTY_ERROR, errorMessage);
                } else {
                    log.info("OpenVidu Server has read permissions on custom layout path: {}",
                            openviduRecordingCustomLayout);
                    log.info("Custom layouts path successfully initialized at {}",
                            openviduRecordingCustomLayout);
                }
            }
        } else {
            try {
                Files.createDirectories(dir.toPath());
                log.warn(
                        "OpenVidu custom layouts path (system property 'openvidu.recording.custom-layout') has been created, being folder {}. "
                                + "It is an empty folder, so no custom layout is currently present",
                        dir.getAbsolutePath());
            } catch (IOException e) {
                String errorMessage = "The custom layouts path \"" + openviduRecordingCustomLayout
                        + "\" is not valid. Reason: OpenVidu Server cannot find path \""
                        + openviduRecordingCustomLayout + "\" and doesn't have permissions to create it";
                log.error(errorMessage);
                throw new OpenViduException(Code.RECORDING_FILE_EMPTY_ERROR, errorMessage);
            }
        }
    }

    log.info("Recording path successfully initialized at {}", openviduRecordingPath);
}
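The error listener above uses kurentoRecorderError.compareAndSet(false, true) to latch the flag on its first transition; a plain set(true) would read the same way here, but compareAndSet also reports whether the caller performed the flip, which is useful when only the first error should trigger a side effect. A minimal sketch (names are illustrative):

final AtomicBoolean errorSeen = new AtomicBoolean(false);

void onError(Exception e) {
    if (errorSeen.compareAndSet(false, true)) {
        // only the first error reaches this branch; report it once
        System.err.println("first error: " + e.getMessage());
    }
}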

From source file:com.streamsets.pipeline.lib.jdbc.multithread.TestMultithreadedTableProvider.java

@Test
public void tableAndSchemasFinished() throws InterruptedException, StageException {
    String schema1 = "schema1";
    String table1Name = "table1";
    String table2Name = "table2";
    String schema2 = "schema2";
    String table3Name = "table3";

    String offsetCol = null;
    final String partitionSize = null;
    int maxActivePartitions = 0;
    int threadNumber = 0;
    int numThreads = 1;

    TableContext tableContext1 = createTableContext(schema1, table1Name, offsetCol, partitionSize,
            maxActivePartitions, false);
    TableContext tableContext2 = createTableContext(schema1, table2Name, offsetCol, partitionSize,
            maxActivePartitions, false);
    TableContext tableContext3 = createTableContext(schema2, table3Name, offsetCol, partitionSize,
            maxActivePartitions, false);

    Map<String, TableContext> tableContextMap = new HashMap<>();

    tableContextMap.put(tableContext1.getQualifiedName(), tableContext1);
    tableContextMap.put(tableContext2.getQualifiedName(), tableContext2);
    tableContextMap.put(tableContext3.getQualifiedName(), tableContext3);
    Queue<String> sortedTableOrder = new LinkedList<>();

    sortedTableOrder.add(tableContext1.getQualifiedName());
    sortedTableOrder.add(tableContext2.getQualifiedName());
    sortedTableOrder.add(tableContext3.getQualifiedName());

    Map<Integer, Integer> threadNumToMaxTableSlots = new HashMap<>();

    BatchTableStrategy batchTableStrategy = BatchTableStrategy.PROCESS_ALL_AVAILABLE_ROWS_FROM_TABLE;
    MultithreadedTableProvider provider = new MultithreadedTableProvider(tableContextMap, sortedTableOrder,
            threadNumToMaxTableSlots, numThreads, batchTableStrategy);

    assertThat(provider.getRemainingSchemasToTableContexts().size(), equalTo(3));

    TableRuntimeContext table1 = provider.nextTable(threadNumber);
    assertEquals(table1Name, table1.getSourceTableContext().getTableName());

    assertThat(provider.getRemainingSchemasToTableContexts().size(), equalTo(3));
    // there should be two tables remaining in schema1 (table1 and table2)
    assertThat(provider.getRemainingSchemasToTableContexts().get(schema1).size(), equalTo(2));
    // and one remaining in schema2 (table3)
    assertThat(provider.getRemainingSchemasToTableContexts().get(schema2).size(), equalTo(1));

    final AtomicBoolean tableFinished = new AtomicBoolean(false);
    final AtomicBoolean schemaFinished = new AtomicBoolean(false);
    final List<String> schemaFinishedTables = new LinkedList<>();

    // finish table1
    provider.reportDataOrNoMoreData(table1, 10, 10, true, tableFinished, schemaFinished, schemaFinishedTables);

    // table should be finished
    assertTrue(tableFinished.get());

    // schema should not
    assertFalse(schemaFinished.get());
    assertThat(schemaFinishedTables, empty());
    assertThat(provider.getTablesWithNoMoreData().size(), equalTo(1));

    // there should be a total of two remaining entries in the map
    assertThat(provider.getRemainingSchemasToTableContexts().size(), equalTo(2));
    // one of which is in schema1
    assertThat(provider.getRemainingSchemasToTableContexts().get(schema1).size(), equalTo(1));
    // and one of which is in schema2
    assertThat(provider.getRemainingSchemasToTableContexts().get(schema2).size(), equalTo(1));

    provider.releaseOwnedTable(table1, 1);
    tableFinished.set(false);
    schemaFinished.set(false);
    schemaFinishedTables.clear();

    TableRuntimeContext table2 = provider.nextTable(threadNumber);
    assertEquals(table2Name, table2.getSourceTableContext().getTableName());

    // finish table2
    provider.reportDataOrNoMoreData(table2, 10, 10, true, tableFinished, schemaFinished, schemaFinishedTables);

    // table should be finished
    assertTrue(tableFinished.get());
    // as should the schema this time
    assertTrue(schemaFinished.get());
    assertThat(schemaFinishedTables, hasSize(2));
    assertThat(provider.getTablesWithNoMoreData().size(), equalTo(2));
    // there should only be one entry left now
    assertThat(provider.getRemainingSchemasToTableContexts().size(), equalTo(1));
    assertTrue(provider.getRemainingSchemasToTableContexts().get(schema1).isEmpty());
    // which is for schema2
    assertThat(provider.getRemainingSchemasToTableContexts().get(schema2).size(), equalTo(1));

    provider.releaseOwnedTable(table2, 1);
    tableFinished.set(false);
    schemaFinished.set(false);
    schemaFinishedTables.clear();

    TableRuntimeContext table3 = provider.nextTable(threadNumber);
    assertEquals(table3Name, table3.getSourceTableContext().getTableName());

    // suppose we did NOT actually reach the end of table3, in which case the conditions should be the same as above
    provider.reportDataOrNoMoreData(table3, 10, 10, false, tableFinished, schemaFinished, schemaFinishedTables);

    // now neither the table
    assertFalse(tableFinished.get());
    // nor schema should be finished
    assertFalse(schemaFinished.get());
    assertThat(schemaFinishedTables, empty());
    // and entries in the map should be the same as above
    assertThat(provider.getTablesWithNoMoreData().size(), equalTo(2));
    assertThat(provider.getRemainingSchemasToTableContexts().size(), equalTo(1));
    assertTrue(provider.getRemainingSchemasToTableContexts().get(schema1).isEmpty());

    provider.releaseOwnedTable(table3, 1);
    tableFinished.set(false);
    schemaFinished.set(false);
    schemaFinishedTables.clear();

    // cycle through table1 and table2 again
    table1 = provider.nextTable(threadNumber);
    provider.releaseOwnedTable(table1, 1);
    table2 = provider.nextTable(threadNumber);
    provider.releaseOwnedTable(table2, 1);

    // and get back to table3
    table3 = provider.nextTable(threadNumber);
    assertEquals(table3Name, table3.getSourceTableContext().getTableName());

    // now suppose we have finally finished table3
    provider.reportDataOrNoMoreData(table3, 3, 10, true, tableFinished, schemaFinished, schemaFinishedTables);

    // both table
    assertTrue(tableFinished.get());
    // and schema should be finished
    assertTrue(schemaFinished.get());
    assertThat(schemaFinishedTables, hasSize(1));
    assertThat(provider.getTablesWithNoMoreData().size(), equalTo(3));
    // there should now be no more entries in this map
    assertTrue(provider.getRemainingSchemasToTableContexts().isEmpty());

    provider.releaseOwnedTable(table3, 1);

    assertTrue(provider.shouldGenerateNoMoreDataEvent());

}
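The tableFinished and schemaFinished arguments above show AtomicBoolean used as an out-parameter: Java passes references by value, so a callee cannot reassign a caller's boolean, but it can mutate a shared AtomicBoolean holder. A minimal sketch of the idiom (names are illustrative):

static String takeOne(Queue<String> queue, AtomicBoolean nowEmpty) {
    final String item = queue.poll();
    nowEmpty.set(queue.isEmpty()); // secondary result handed back through the holder
    return item;
}

// Caller:
AtomicBoolean nowEmpty = new AtomicBoolean(false);
String item = takeOne(queue, nowEmpty);
if (nowEmpty.get()) {
    // react to the out-parameter
}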

From source file:gov.nasa.arc.geocam.geocam.GeoCamService.java

@Override
public void onCreate() {
    Log.d(GeoCamMobile.DEBUG_ID, "GeoCamService::onCreate called");
    super.onCreate();

    // Prevent this service from being prematurely killed to reclaim memory
    buildNotification("GeoCam uploader starting...", "Starting...");
    Reflect.Service.startForeground(this, NOTIFICATION_ID, mNotification);

    // Location Manager
    mLocationManager = (LocationManager) getSystemService(Context.LOCATION_SERVICE);
    registerListener();

    mLocationManager.addGpsStatusListener(mGpsStatusListener);

    // Notification Manager
    mNotificationManager = (NotificationManager) getSystemService(Context.NOTIFICATION_SERVICE);

    // Upload queue and thread
    // Initialize with mCv open so we immediately try to upload when the thread is spawned
    // This is important on service restart with non-zero length queue
    // The thread will close mCv if the queue is empty
    mCv = new ConditionVariable(true);
    mIsUploading = new AtomicBoolean(false);
    mLastStatus = new AtomicInteger(0);

    if (mUploadQueue == null) {
        mUploadQueue = new GeoCamDbAdapter(this);
        mUploadQueue.open();
    }

    if (mGpsLog == null) {
        mGpsLog = new GpsDbAdapter(this);
        mGpsLog.open();
    }

    mPrefListener = new SharedPreferences.OnSharedPreferenceChangeListener() {
        public void onSharedPreferenceChanged(SharedPreferences prefs, String key) {
            // wake up upload thread if upload was just enabled
            boolean isUploadEnabled = prefs.getBoolean(GeoCamMobile.SETTINGS_SERVER_UPLOAD_ENABLED, true);
            Log.d(GeoCamMobile.DEBUG_ID, "GeoCamService.mPrefListener.onSharedPreferenceChanged" + " key=" + key
                    + " isUploadEnabled=" + Boolean.toString(isUploadEnabled));
            if (key.equals(GeoCamMobile.SETTINGS_SERVER_UPLOAD_ENABLED) && isUploadEnabled) {
                Log.d(GeoCamMobile.DEBUG_ID,
                        "GeoCamService.mPrefListener.onSharedPreferenceChanged" + " waking up upload thread");
                mCv.open();
            }
        }
    };
    SharedPreferences settings = PreferenceManager.getDefaultSharedPreferences(this);
    settings.registerOnSharedPreferenceChangeListener(mPrefListener);

    mUploadThread = new Thread(null, uploadTask, "UploadThread");
    mUploadThread.start();
}
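The comment above explains why mCv starts open: a worker loop that blocks on the ConditionVariable runs one pass immediately after start, which matters when the service restarts with a non-empty queue. A sketch of that gate pattern, assuming Android's android.os.ConditionVariable:

final ConditionVariable cv = new ConditionVariable(true); // start open: first pass runs immediately
final Thread worker = new Thread(() -> {
    while (true) {
        cv.block(); // wait until the gate is open
        cv.close(); // close it so the next iteration waits for new work
        // ... drain the upload queue here ...
    }
});
worker.start();
// producers call cv.open() whenever work is queued or uploading is re-enabled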

From source file:com.spotify.docker.client.DefaultDockerClientTest.java

@Test
public void testBuildWithPull() throws Exception {
    assumeTrue("We need Docker API >= v1.19 to run this test." + "This Docker API is "
            + sut.version().apiVersion(), compareVersion(sut.version().apiVersion(), "1.19") >= 0);

    final String dockerDirectory = Resources.getResource("dockerDirectory").getPath();
    final String pullMsg = "Pulling from";

    // Build once to make sure we have cached images.
    sut.build(Paths.get(dockerDirectory));

    // Build again with PULL set, and verify we pulled the base image
    final AtomicBoolean pulled = new AtomicBoolean(false);
    sut.build(Paths.get(dockerDirectory), "test", new ProgressHandler() {
        @Override
        public void progress(ProgressMessage message) throws DockerException {
            if (!isNullOrEmpty(message.status()) && message.status().contains(pullMsg)) {
                pulled.set(true);
            }
        }
    }, PULL_NEWER_IMAGE);
    assertTrue(pulled.get());
}

From source file:net.dempsy.container.TestContainer.java

@Test
public void testEvictCollisionWithBlocking() throws Throwable {
    final TestProcessor mp = createAndGet("foo");

    // now we're going to cause the passivate to be held up.
    mp.blockPassivate = new CountDownLatch(1);
    mp.evict.set(true); // allow eviction

    // now kick off the evict in a separate thread since we expect it to hang
    // until the mp becomes unstuck.
    final AtomicBoolean evictIsComplete = new AtomicBoolean(false); // this will allow us to see the evict pass complete
    final Thread thread = new Thread(new Runnable() {
        @Override
        public void run() {
            container.evict();
            evictIsComplete.set(true);
        }
    });
    thread.start();

    Thread.sleep(500); // let it get going.
    assertFalse(evictIsComplete.get()); // check to see we're hung.

    final ClusterMetricGetters sc = (ClusterMetricGetters) statsCollector;
    assertEquals(0, sc.getMessageCollisionCount());

    // sending it a message now should block behind the eviction rather than count as a collision
    final TestAdaptor adaptor = context.getBean(TestAdaptor.class);
    adaptor.dispatcher.dispatchAnnotated(new ContainerTestMessage("foo"));

    // give it some time.
    Thread.sleep(100);

    // make sure there's no collision
    assertEquals(0, sc.getMessageCollisionCount());

    // make sure no message got handled
    assertEquals(1, mp.invocationCount); // 1 is the initial invocation that caused the instantiation.

    // now let the evict finish
    mp.blockPassivate.countDown();

    // wait until the eviction completes
    assertTrue(poll(evictIsComplete, o -> o.get()));

    // Once the poll finishes a new Mp is instantiated and handling messages.
    assertTrue(poll(cache, c -> c.get("foo") != null));
    final TestProcessor mp2 = cache.get("foo");
    assertNotNull("MP not associated with expected key", mp2);

    // invocationCount should be 1 from the initial invocation that caused the clone, and no more
    assertEquals(1, mp.invocationCount);
    assertEquals(1, mp2.invocationCount);
    assertTrue(mp != mp2);

    // send a message that should go through
    adaptor.dispatcher.dispatchAnnotated(new ContainerTestMessage("foo"));
    assertTrue(poll(o -> mp2.invocationCount > 1));
    Thread.sleep(100);
    assertEquals(1, mp.invocationCount);
    assertEquals(2, mp2.invocationCount);
}

From source file:com.lenovo.tensorhusky.common.utils.Shell.java

/**
 * Run a command.
 */
private void runCommand() throws IOException {
    ProcessBuilder builder = new ProcessBuilder(getExecString());
    Timer timeOutTimer = null;
    ShellTimeoutTimerTask timeoutTimerTask = null;
    timedOut = new AtomicBoolean(false);
    completed = new AtomicBoolean(false);

    if (environment != null) {
        builder.environment().putAll(this.environment);
    }
    if (dir != null) {
        builder.directory(this.dir);
    }

    builder.redirectErrorStream(redirectErrorStream);

    if (Shell.WINDOWS) {
        synchronized (WindowsProcessLaunchLock) {
            // To workaround the race condition issue with child processes
            // inheriting unintended handles during process launch that can
            // lead to hangs on reading output and error streams, we
            // serialize process creation. More info available at:
            // http://support.microsoft.com/kb/315939
            process = builder.start();
        }
    } else {
        process = builder.start();
    }

    if (timeOutInterval > 0) {
        timeOutTimer = new Timer("Shell command timeout");
        timeoutTimerTask = new ShellTimeoutTimerTask(this);
        // One time scheduling.
        timeOutTimer.schedule(timeoutTimerTask, timeOutInterval);
    }
    final BufferedReader errReader = new BufferedReader(
            new InputStreamReader(process.getErrorStream(), Charset.defaultCharset()));
    final BufferedReader inReader = new BufferedReader(
            new InputStreamReader(process.getInputStream(), Charset.defaultCharset()));
    final StringBuffer errMsg = new StringBuffer();

    // read error and input streams as this would free up the buffers
    // free the error stream buffer
    Thread errThread = new Thread() {
        @Override
        public void run() {
            try {
                String line = errReader.readLine();
                while ((line != null) && !isInterrupted()) {
                    errMsg.append(line);
                    errMsg.append(System.getProperty("line.separator"));
                    line = errReader.readLine();
                }
            } catch (IOException ioe) {
                LOG.warn("Error reading the error stream", ioe);
            }
        }
    };
    try {
        errThread.start();
    } catch (IllegalStateException ise) {
        // ignored; Thread.start() throws this only if the thread was already started
    } catch (OutOfMemoryError oe) {
        LOG.error("Caught " + oe + ". One possible reason is that ulimit"
                + " setting of 'max user processes' is too low. If so, do"
                + " 'ulimit -u <largerNum>' and try again.");
        throw oe;
    }
    try {
        parseExecResult(inReader); // parse the output
        // clear the input stream buffer
        String line = inReader.readLine();
        while (line != null) {
            line = inReader.readLine();
        }
        // wait for the process to finish and check the exit code
        exitCode = process.waitFor();
        // make sure that the error thread exits
        joinThread(errThread);
        completed.set(true);
        // the timeout thread handling
        // taken care in finally block
        if (exitCode != 0) {
            throw new ExitCodeException(exitCode, errMsg.toString());
        }
    } catch (InterruptedException ie) {
        throw new IOException(ie.toString());
    } finally {
        if (timeOutTimer != null) {
            timeOutTimer.cancel();
        }
        // close the input stream
        try {
            // JDK 7 tries to automatically drain the input streams for us
            // when the process exits, but since close is not synchronized,
            // it creates a race if we close the stream first and the same
            // fd is recycled. the stream draining thread will attempt to
            // drain that fd!! it may block, OOM, or cause bizarre behavior
            // see: https://bugs.openjdk.java.net/browse/JDK-8024521
            // issue is fixed in build 7u60
            InputStream stdout = process.getInputStream();
            synchronized (stdout) {
                inReader.close();
            }
        } catch (IOException ioe) {
            LOG.warn("Error while closing the input stream", ioe);
        }
        if (!completed.get()) {
            errThread.interrupt();
            joinThread(errThread);
        }
        try {
            InputStream stderr = process.getErrorStream();
            synchronized (stderr) {
                errReader.close();
            }
        } catch (IOException ioe) {
            LOG.warn("Error while closing the error stream", ioe);
        }
        process.destroy();
        lastTime = Time.monotonicNow();
    }
}
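The timedOut and completed flags above cooperate across the main flow and the timeout timer: the timer task is expected to act only if completed is still false, while the main flow sets completed before cancelling the timer. A minimal sketch of that handshake (illustrative names, not the Hadoop-derived classes above):

final AtomicBoolean completed = new AtomicBoolean(false);
final AtomicBoolean timedOut = new AtomicBoolean(false);

final Timer timer = new Timer("command timeout", true);
timer.schedule(new TimerTask() {
    @Override
    public void run() {
        if (!completed.get()) { // act only if the work has not finished yet
            timedOut.set(true);
            // ... interrupt or destroy the long-running work here ...
        }
    }
}, 5000L);

// ... run the command ...
completed.set(true); // mark done before cancelling, so a racing timer task sees it
timer.cancel();
if (timedOut.get()) {
    // the work was cut short by the timeout
}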

From source file:com.taobao.gecko.service.impl.BaseRemotingController.java

public void sendToGroups(final Map<String, RequestCommand> groupObjects,
        final MultiGroupCallBackListener listener, final long timeout, final TimeUnit timeUnit,
        final Object... args) throws NotifyRemotingException {
    if (groupObjects == null || groupObjects.size() == 0) {
        throw new NotifyRemotingException("Null or empty groupObjects");
    }
    if (listener == null) {
        throw new NotifyRemotingException("Null GroupCallBackListener");
    }
    if (timeUnit == null) {
        throw new NotifyRemotingException("Null TimeUnit");
    }
    // One latch count per target group
    final CountDownLatch countDownLatch = new CountDownLatch(groupObjects.size());
    final ConcurrentHashMap<String/* group */, ResponseCommand/* response */> resultMap = new ConcurrentHashMap<String, ResponseCommand>();
    // Connection chosen for each group
    final Map<String/* group */, Connection> connectionMap = new HashMap<String, Connection>();
    // Request header sent to each group
    final Map<String/* group */, CommandHeader> headerMap = new HashMap<String, CommandHeader>();
    // Whether the listener has already been notified (set once by completion or timeout)
    final AtomicBoolean responsed = new AtomicBoolean(false);

    InetSocketAddress remoteAddr = null;
    // Record the start time for timeout accounting
    final long now = System.currentTimeMillis();

    final long timeoutInMillis = TimeUnit.MILLISECONDS.convert(timeout, timeUnit);
    // Shared callback: collects per-group responses and fires the listener once
    final MultiGroupRequestCallBack groupRequestCallBack = new MultiGroupRequestCallBack(listener,
            countDownLatch, timeoutInMillis, now, resultMap, responsed, args);
    // Timer that drives the timeout path if some groups never respond
    final TimerRef timerRef = new TimerRef(timeoutInMillis,
            new GroupCallBackRunner(connectionMap, groupRequestCallBack, headerMap, resultMap, remoteAddr));
    groupRequestCallBack.setTimerRef(timerRef);

    for (final Map.Entry<String, RequestCommand> entry : groupObjects.entrySet()) {
        final RequestCommand requestCommand = entry.getValue();
        final String group = entry.getKey();
        final DefaultConnection conn = (DefaultConnection) this.selectConnectionForGroup(group,
                this.connectionSelector, requestCommand);

        if (conn != null) {
            try {
                // Remember which connection serves this group
                connectionMap.put(group, conn);
                // Map the request's opaque id back to its group
                conn.addOpaqueToGroupMapping(requestCommand.getOpaque(), group);
                // Register the shared callback for this request
                conn.addRequestCallBack(requestCommand.getOpaque(), groupRequestCallBack);
                if (remoteAddr == null) {
                    remoteAddr = conn.getRemoteSocketAddress();
                }
                groupRequestCallBack.addWriteFuture(conn, conn.asyncSend(requestCommand));
                headerMap.put(group, requestCommand.getRequestHeader());
            } catch (final Throwable t) {
                groupRequestCallBack.onResponse(group,
                        this.createCommErrorResponseCommand(requestCommand.getRequestHeader(), t.getMessage()),
                        conn);
            }
        } else {
            // No connection available for this group; answer immediately with an error response
            groupRequestCallBack.onResponse(group,
                    this.createNoConnectionResponseCommand(requestCommand.getRequestHeader()), null);
        }
    }
    // Arm the timeout timer
    this.insertTimer(timerRef);
}
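The responsed flag handed to MultiGroupRequestCallBack makes the completion and timeout paths mutually exclusive: whichever path wins an atomic compare-and-set notifies the listener, and the loser does nothing. A minimal sketch of that race (illustrative names; the real callback also collects per-group results):

final AtomicBoolean notified = new AtomicBoolean(false);
final Runnable fireListener = () -> { /* deliver results to the listener */ };

// completion path: called when the last group responds
void onAllGroupsResponded() {
    if (notified.compareAndSet(false, true)) {
        fireListener.run(); // fires exactly once
    }
}

// timeout path: called by the timer
void onTimeout() {
    if (notified.compareAndSet(false, true)) {
        fireListener.run(); // delivers whatever partial results arrived
    }
}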