Example usage for java.util.concurrent.atomic AtomicReference set

Introduction

This page collects usage examples for java.util.concurrent.atomic.AtomicReference.set.

Prototype

public final void set(V newValue) 

Document

Sets the value to newValue, with memory effects as specified by VarHandle#setVolatile.
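
Before the longer, real-world examples below, here is a minimal, self-contained sketch of set in action (the class, field, and value names are chosen purely for illustration): a worker thread publishes a value with set, and the main thread reads it with get after joining.

import java.util.concurrent.atomic.AtomicReference;

public class AtomicReferenceSetExample {
    public static void main(String[] args) throws InterruptedException {
        // Holds the latest status message; starts with an initial value.
        AtomicReference<String> status = new AtomicReference<>("starting");

        Thread worker = new Thread(() -> {
            // set(V) stores the new value with volatile memory effects,
            // so other threads observe it without extra synchronization.
            status.set("done");
        });
        worker.start();
        worker.join();

        System.out.println(status.get()); // prints "done"
    }
}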

Usage

From source file:com.wk.lodge.composite.web.tomcat.IntegrationCompositeTests.java

@Test
public void testSync() throws Exception {
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicReference<Throwable> failure = new AtomicReference<>();

    URI uri = new URI("ws://localhost:" + port + "/composite");
    WebSocketStompClient stompClient = new WebSocketStompClient(uri, this.headers, sockJsClient);
    stompClient.setMessageConverter(new MappingJackson2MessageConverter());

    stompClient.connect(new StompMessageHandler() {

        private StompSession stompSession;

        @Override
        public void afterConnected(StompSession stompSession, StompHeaderAccessor headers) {
            this.stompSession = stompSession;
            this.stompSession.subscribe("/user/queue/device", null);

            try {
                SyncMessage sync = new SyncMessage();
                sync.setType("sync");
                sync.setTime(new Date().getTime());
                stompSession.send("/app/sync", sync);
            } catch (Throwable t) {
                failure.set(t);
                latch.countDown();
            }
        }

        @Override
        public void handleMessage(Message<byte[]> message) {
            try {
                String json = parseMessageJson(message);
                new JsonPathExpectationsHelper("type").assertValue(json, "sync");
                new JsonPathExpectationsHelper("time").exists(json);
            } catch (Throwable t) {
                failure.set(t);
            } finally {
                this.stompSession.disconnect();
                latch.countDown();
            }
        }

        @Override
        public void handleError(Message<byte[]> message) {
            StompHeaderAccessor accessor = StompHeaderAccessor.wrap(message);
            String error = "[Producer] " + accessor.getShortLogMessage(message.getPayload());
            logger.error(error);
            failure.set(new Exception(error));
        }

        @Override
        public void handleReceipt(String receiptId) {
        }

        @Override
        public void afterDisconnected() {
        }

    });

    if (!latch.await(10, TimeUnit.SECONDS)) {
        fail("Sync response not received");
    } else if (failure.get() != null) {
        throw new AssertionError("", failure.get());
    }

}

From source file:com.microsoft.tfs.client.clc.Application.java

/**
 * Parse the tokens into a command, any options, and free arguments. If the
 * command is not present (or is not recognized), returns null.
 *
 * @param tokens
 *        the tokens to parse (command-line arguments, not including the
 *        program being executed). Not null.
 * @param foundOptions
 *        an allocated ArrayList in which the found options will be stored.
 * @param foundFreeArguments
 *        an allocated ArrayList in which the found free arguments will be stored.
 * @return the command that was parsed from the tokens, null if none was
 *         found or recognized.
 * @throws UnknownOptionException
 *         if an unknown option is encountered.
 * @throws UnknownCommandException
 *         if an unknown command is encountered.
 * @throws InvalidOptionValueException
 *         if an invalid value was passed to an option.
 */
private Command parseTokens(final String[] tokens, final ArrayList<Option> foundOptions,
        final ArrayList<String> foundFreeArguments, final AtomicReference<Exception> outException) {
    Command c = null;

    for (int i = 0; i < tokens.length; i++) {
        final String token = tokens[i];

        if (token == null || token.length() == 0) {
            continue;
        }

        /*
         * If this token looks like an option...
         */
        boolean startsWithOptionCharacter = false;
        if (token.length() >= 2) {
            for (int j = 0; j < OptionsMap.getSupportedOptionPrefixes().length; j++) {
                if (token.charAt(0) == OptionsMap.getSupportedOptionPrefixes()[j]) {
                    startsWithOptionCharacter = true;
                    break;
                }
            }
        }

        if (startsWithOptionCharacter) {
            final Option o;

            try {
                o = optionsMap.findOption(token);
            } catch (final InvalidOptionValueException e) {
                outException.set(e);
                break;
            }

            if (o == null) {
                outException.set(new UnknownOptionException(token));
                break;
            }

            foundOptions.add(o);
            continue;
        }

        /*
         * We didn't parse it as an option, or command file trigger, so it
         * is the command (unless we've already found the command).
         */
        if (c == null) {
            final Command possibleCommand = commandsMap.findCommand(token);

            if (possibleCommand == null) {
                outException.set(new UnknownCommandException(token));
                break;
            }

            c = possibleCommand;
            continue;
        }

        /*
         * The remaining items must be free arguments.
         */
        foundFreeArguments.add(token);
    }

    return c;
}

From source file:org.apache.hadoop.hbase.procedure2.TestProcedureNonce.java

private void testConcurrentNonceRegistration(final boolean submitProcedure, final long nonceGroup,
        final long nonce) throws IOException {
    // register the nonce
    final NonceKey nonceKey = procExecutor.createNonceKey(nonceGroup, nonce);

    final AtomicReference<Throwable> t1Exception = new AtomicReference<>();
    final AtomicReference<Throwable> t2Exception = new AtomicReference<>();

    final CountDownLatch t1NonceRegisteredLatch = new CountDownLatch(1);
    final CountDownLatch t2BeforeNonceRegisteredLatch = new CountDownLatch(1);
    final Thread[] threads = new Thread[2];
    threads[0] = new Thread() {
        @Override
        public void run() {
            try {
                // register the nonce and wake t2
                assertFalse("unexpected already registered nonce", procExecutor.registerNonce(nonceKey) >= 0);
                t1NonceRegisteredLatch.countDown();

                // hold the submission until t2 is registering the nonce
                t2BeforeNonceRegisteredLatch.await();
                Threads.sleep(1000);

                if (submitProcedure) {
                    CountDownLatch latch = new CountDownLatch(1);
                    TestSingleStepProcedure proc = new TestSingleStepProcedure();
                    procEnv.setWaitLatch(latch);

                    procExecutor.submitProcedure(proc, nonceKey);
                    Threads.sleep(100);

                    // complete the procedure
                    latch.countDown();
                } else {
                    procExecutor.unregisterNonceIfProcedureWasNotSubmitted(nonceKey);
                }
            } catch (Throwable e) {
                t1Exception.set(e);
            } finally {
                t1NonceRegisteredLatch.countDown();
                t2BeforeNonceRegisteredLatch.countDown();
            }
        }
    };

    threads[1] = new Thread() {
        @Override
        public void run() {
            try {
                // wait until t1 has registered the nonce
                t1NonceRegisteredLatch.await();

                // register the nonce
                t2BeforeNonceRegisteredLatch.countDown();
                assertFalse("unexpected non registered nonce", procExecutor.registerNonce(nonceKey) < 0);
            } catch (Throwable e) {
                t2Exception.set(e);
            } finally {
                t1NonceRegisteredLatch.countDown();
                t2BeforeNonceRegisteredLatch.countDown();
            }
        }
    };

    for (int i = 0; i < threads.length; ++i)
        threads[i].start();
    for (int i = 0; i < threads.length; ++i)
        Threads.shutdown(threads[i]);
    ProcedureTestingUtility.waitNoProcedureRunning(procExecutor);
    assertEquals(null, t1Exception.get());
    assertEquals(null, t2Exception.get());
}

From source file:de.sainth.recipe.backend.db.repositories.CookbookRepository.java

public Cookbook save(Cookbook cookbook) {
    AtomicReference<Cookbook> bu = new AtomicReference<>();
    create.transaction(configuration -> {
        Long id = null;
        if (cookbook.getId() != null) {
            id = using(configuration).select(COOKBOOKS.ID).from(COOKBOOKS)
                    .where(COOKBOOKS.ID.eq(cookbook.getId())).forUpdate().fetchOneInto(Long.class);
        }
        CookbooksRecord cookbooksRecord;
        if (cookbook.getId() == null || id == null) {
            cookbooksRecord = using(configuration)
                    .insertInto(COOKBOOKS, COOKBOOKS.NAME, COOKBOOKS.DESCRIPTION, COOKBOOKS.AUTHOR)
                    .values(cookbook.getName(), cookbook.getDescription(), cookbook.getAuthor().getId())
                    .returning().fetchOne();
        } else {
            cookbooksRecord = using(configuration).update(COOKBOOKS).set(COOKBOOKS.NAME, cookbook.getName())
                    .set(COOKBOOKS.DESCRIPTION, cookbook.getDescription()).returning().fetchOne();
        }

        List<CookbookRecipe> cookbookRecipes = selectCookbookRecipes(
                cookbook.getRecipes().stream().map(CookbookRecipe::getId).collect(Collectors.toList()),
                cookbook.getAuthor().getId());

        BatchBindStep batchInsert = using(configuration).batch(
                create.insertInto(COOKBOOKS_RECIPES, COOKBOOKS_RECIPES.COOKBOOK, COOKBOOKS_RECIPES.RECIPE)
                        .values((Long) null, null));
        for (CookbookRecipe r : cookbookRecipes) {
            batchInsert.bind(cookbooksRecord.getId(), r.getId());
        }
        batchInsert.execute();

        bu.set(new Cookbook(cookbooksRecord.getId(), cookbooksRecord.getName(),
                cookbooksRecord.getDescription(), cookbook.getAuthor(), cookbookRecipes));
    });

    return bu.get();
}

From source file:org.apache.hadoop.hbase.client.TestHCM.java

private void testConnectionClose(boolean allowsInterrupt) throws Exception {
    String tableName = "HCM-testConnectionClose" + allowsInterrupt;
    TEST_UTIL.createTable(tableName.getBytes(), FAM_NAM).close();

    boolean previousBalance = TEST_UTIL.getHBaseAdmin().setBalancerRunning(false, true);

    Configuration c2 = new Configuration(TEST_UTIL.getConfiguration());
    // We want to work on a separate connection.
    c2.set(HConstants.HBASE_CLIENT_INSTANCE_ID, String.valueOf(-1));
    c2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 100); // retry a lot
    c2.setInt(HConstants.HBASE_CLIENT_PAUSE, 0); // don't wait between retries.
    c2.setInt(RpcClient.FAILED_SERVER_EXPIRY_KEY, 0); // Servers do not really expire
    c2.setBoolean(RpcClient.ALLOWS_INTERRUPTS, allowsInterrupt);

    final HTable table = new HTable(c2, tableName.getBytes());

    Put put = new Put(ROW);
    put.add(FAM_NAM, ROW, ROW);
    table.put(put);

    // 4 steps: ready=0; doGets=1; mustStop=2; stopped=3
    final AtomicInteger step = new AtomicInteger(0);

    final AtomicReference<Throwable> failed = new AtomicReference<Throwable>(null);
    Thread t = new Thread("testConnectionCloseThread") {
        public void run() {
            int done = 0;
            try {
                step.set(1);
                while (step.get() == 1) {
                    Get get = new Get(ROW);
                    table.get(get);
                    done++;
                    if (done % 100 == 0)
                        LOG.info("done=" + done);
                }
            } catch (Throwable t) {
                failed.set(t);
                LOG.error(t);
            }
            step.set(3);
        }
    };
    t.start();
    TEST_UTIL.waitFor(20000, new Waiter.Predicate<Exception>() {
        @Override
        public boolean evaluate() throws Exception {
            return step.get() == 1;
        }
    });

    ServerName sn = table.getRegionLocation(ROW).getServerName();
    ConnectionManager.HConnectionImplementation conn = (ConnectionManager.HConnectionImplementation) table
            .getConnection();
    RpcClient rpcClient = conn.getRpcClient();

    LOG.info("Going to cancel connections. connection=" + conn.toString() + ", sn=" + sn);
    for (int i = 0; i < 5000; i++) {
        rpcClient.cancelConnections(sn.getHostname(), sn.getPort());
        Thread.sleep(5);
    }

    step.compareAndSet(1, 2);
    // The test may fail here if the thread doing the gets is stuck. The way to find
    //  out what's happening is to look for the thread named 'testConnectionCloseThread'
    TEST_UTIL.waitFor(40000, new Waiter.Predicate<Exception>() {
        @Override
        public boolean evaluate() throws Exception {
            return step.get() == 3;
        }
    });

    table.close();
    Assert.assertTrue("Unexpected exception is " + failed.get(), failed.get() == null);
    TEST_UTIL.getHBaseAdmin().setBalancerRunning(previousBalance, true);
}

From source file:org.muhia.app.psi.config.http.CustomHttpClientUtilities.java

public String doGetWithResponseHandler(String url, Map<String, String> allRequestParams) {
    AtomicReference<String> result = new AtomicReference<>("");
    // CloseableHttpResponse response = null;
    CloseableHttpClient client = null;
    try {
        HttpGet httpGet = new HttpGet(url);
        URIBuilder uriBuilder = new URIBuilder(httpGet.getURI());

        allRequestParams.entrySet().forEach((entry) -> {
            String key = entry.getKey();
            String value = entry.getValue();
            if (value != null) {
                uriBuilder.setParameter(key, value);
            }
        });

        httpGet.setURI(uriBuilder.build());

        RequestConfig config = RequestConfig.custom().setConnectTimeout(hcp.getConnectionTimeout())
                .setConnectionRequestTimeout(hcp.getConnectionRequestTimeout())
                .setSocketTimeout(hcp.getSockectTimeout()).build();

        client = HttpClientBuilder.create().setDefaultRequestConfig(config).build();

        ResponseHandler<String> responseHandler = (final HttpResponse response) -> {
            int status = response.getStatusLine().getStatusCode();
            if (status >= hcp.getLowerStatusLimit() && status <= hcp.getUpperStatusLimit()) {
                HttpEntity entity = response.getEntity();
                return entity != null ? EntityUtils.toString(entity) : null;
            } else {
                throw new ClientProtocolException(hcp.getUnexpectedStatus() + status);
            }
        };

        result.set(client.execute(httpGet, responseHandler));
        client.close();

    } catch (IOException | URISyntaxException ex) {
        // LoggerFactory.getLogger(CustomHttpClientUtil.class).error(ex.getMessage(),
        // ex);
        Logger.getLogger(CustomHttpClientUtilities.class.getName()).log(Level.SEVERE, null, ex);
    } finally {
        try {
            if (client != null) {
                client.close();
            }

        } catch (IOException ex) {
            // LoggerFactory.getLogger(CustomHttpClientUtil.class).error(ex.getMessage(),
            // ex);
            result.set(hcp.getIoExceptionMessage());
            Logger.getLogger(CustomHttpClientUtilities.class.getName()).log(Level.SEVERE, null, ex);
        }
    }
    return result.get();
}

From source file:org.apache.jackrabbit.oak.plugins.segment.CompactionAndCleanupIT.java

/**
 * Regression test for OAK-2192 testing for mixed segments. This test does not
 * cover OAK-3348. I.e. it does not assert the segment graph is free of cross
 * gc generation references.
 */
@Test
public void testMixedSegments() throws Exception {
    FileStore store = FileStore.builder(getFileStoreFolder()).withMaxFileSize(2).withMemoryMapping(true)
            .build();
    final SegmentNodeStore nodeStore = SegmentNodeStore.builder(store).build();
    final AtomicBoolean compactionSuccess = new AtomicBoolean(true);
    CompactionStrategy strategy = new CompactionStrategy(true, false, CLEAN_NONE, 0, (byte) 5) {
        @Override
        public boolean compacted(Callable<Boolean> setHead) throws Exception {
            compactionSuccess.set(nodeStore.locked(setHead, 1, MINUTES));
            return compactionSuccess.get();
        }
    };
    strategy.setForceAfterFail(true);
    store.setCompactionStrategy(strategy);

    NodeBuilder root = nodeStore.getRoot().builder();
    createNodes(root.setChildNode("test"), 10, 3);
    nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY);

    final Set<UUID> beforeSegments = new HashSet<UUID>();
    collectSegments(store.getHead(), beforeSegments);

    final AtomicReference<Boolean> run = new AtomicReference<Boolean>(true);
    final List<String> failedCommits = newArrayList();
    Thread[] threads = new Thread[10];
    for (int k = 0; k < threads.length; k++) {
        final int threadId = k;
        threads[k] = new Thread(new Runnable() {
            @Override
            public void run() {
                for (int j = 0; run.get(); j++) {
                    String nodeName = "b-" + threadId + "," + j;
                    try {
                        NodeBuilder root = nodeStore.getRoot().builder();
                        root.setChildNode(nodeName);
                        nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY);
                        Thread.sleep(5);
                    } catch (CommitFailedException e) {
                        failedCommits.add(nodeName);
                    } catch (InterruptedException e) {
                        Thread.interrupted();
                        break;
                    }
                }
            }
        });
        threads[k].start();
    }
    store.compact();
    run.set(false);
    for (Thread t : threads) {
        t.join();
    }
    store.flush();

    assumeTrue("Failed to acquire compaction lock", compactionSuccess.get());
    assertTrue("Failed commits: " + failedCommits, failedCommits.isEmpty());

    Set<UUID> afterSegments = new HashSet<UUID>();
    collectSegments(store.getHead(), afterSegments);
    try {
        for (UUID u : beforeSegments) {
            assertFalse("Mixed segments found: " + u, afterSegments.contains(u));
        }
    } finally {
        store.close();
    }
}

From source file:com.netflix.curator.framework.recipes.queue.TestDistributedQueue.java

@Test
public void testErrorMode() throws Exception {
    Timing timing = new Timing();
    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(),
            timing.connection(), new RetryOneTime(1));
    client.start();
    try {
        final AtomicReference<CountDownLatch> latch = new AtomicReference<CountDownLatch>(
                new CountDownLatch(1));
        final AtomicInteger count = new AtomicInteger(0);
        QueueConsumer<TestQueueItem> consumer = new QueueConsumer<TestQueueItem>() {
            @Override
            public void consumeMessage(TestQueueItem message) throws Exception {
                if (count.incrementAndGet() < 2) {
                    throw new Exception();
                }
                latch.get().countDown();
            }

            @Override
            public void stateChanged(CuratorFramework client, ConnectionState newState) {
            }
        };
        DistributedQueue<TestQueueItem> queue = QueueBuilder.builder(client, consumer, serializer, QUEUE_PATH)
                .lockPath("/locks").buildQueue();
        try {
            queue.start();

            TestQueueItem item = new TestQueueItem("1");
            queue.put(item);

            Assert.assertTrue(timing.awaitLatch(latch.get()));
            Assert.assertEquals(count.get(), 2);

            queue.setErrorMode(ErrorMode.DELETE);

            count.set(0);
            latch.set(new CountDownLatch(1));

            item = new TestQueueItem("1");
            queue.put(item);

            Assert.assertFalse(latch.get().await(5, TimeUnit.SECONDS)); // consumer should get called only once
            Assert.assertEquals(count.get(), 1);
        } finally {
            queue.close();
        }
    } finally {
        client.close();
    }
}

From source file:org.apache.solr.handler.dataimport.XPathEntityProcessor.java

private Iterator<Map<String, Object>> getRowIterator(final Reader data, final String s) {
    //nothing atomic about it. I just needed a strong reference
    final AtomicReference<Exception> exp = new AtomicReference<>();
    final BlockingQueue<Map<String, Object>> blockingQueue = new ArrayBlockingQueue<>(blockingQueueSize);
    final AtomicBoolean isEnd = new AtomicBoolean(false);
    final AtomicBoolean throwExp = new AtomicBoolean(true);
    publisherThread = new Thread() {
        @Override
        public void run() {
            try {
                xpathReader.streamRecords(data, (record, xpath) -> {
                    if (isEnd.get()) {
                        throwExp.set(false);
                        //To end the streaming; otherwise the parsing will go on forever
                        //even though the consumer has gone away
                        throw new RuntimeException("BREAK");
                    }
                    Map<String, Object> row;
                    try {
                        row = readRow(record, xpath);
                    } catch (Exception e) {
                        isEnd.set(true);
                        return;
                    }
                    offer(row);
                });
            } catch (Exception e) {
                if (throwExp.get())
                    exp.set(e);
            } finally {
                closeIt(data);
                if (!isEnd.get()) {
                    offer(END_MARKER);
                }
            }
        }

        private void offer(Map<String, Object> row) {
            try {
                while (!blockingQueue.offer(row, blockingQueueTimeOut, blockingQueueTimeOutUnits)) {
                    if (isEnd.get())
                        return;
                    LOG.debug("Timeout elapsed writing records.  Perhaps buffer size should be increased.");
                }
            } catch (InterruptedException e) {
                return;
            } finally {
                synchronized (this) {
                    notifyAll();
                }
            }
        }
    };

    publisherThread.start();

    return new Iterator<Map<String, Object>>() {
        private Map<String, Object> lastRow;
        int count = 0;

        @Override
        public boolean hasNext() {
            return !isEnd.get();
        }

        @Override
        public Map<String, Object> next() {
            Map<String, Object> row;

            do {
                try {
                    row = blockingQueue.poll(blockingQueueTimeOut, blockingQueueTimeOutUnits);
                    if (row == null) {
                        LOG.debug("Timeout elapsed reading records.");
                    }
                } catch (InterruptedException e) {
                    LOG.debug("Caught InterruptedException while waiting for row.  Aborting.");
                    isEnd.set(true);
                    return null;
                }
            } while (row == null);

            if (row == END_MARKER) {
                isEnd.set(true);
                if (exp.get() != null) {
                    String msg = "Parsing failed for xml, url:" + s + " rows processed in this xml:" + count;
                    if (lastRow != null)
                        msg += " last row in this xml:" + lastRow;
                    if (ABORT.equals(onError)) {
                        wrapAndThrow(SEVERE, exp.get(), msg);
                    } else if (SKIP.equals(onError)) {
                        wrapAndThrow(DataImportHandlerException.SKIP, exp.get());
                    } else {
                        LOG.warn(msg, exp.get());
                    }
                }
                return null;
            }
            count++;
            return lastRow = row;
        }

        @Override
        public void remove() {
            /*no op*/
        }
    };

}

From source file:org.apache.geode.management.internal.cli.commands.DestroyRegionCommand.java

@CliCommand(value = { CliStrings.DESTROY_REGION }, help = CliStrings.DESTROY_REGION__HELP)
@CliMetaData(relatedTopic = CliStrings.TOPIC_GEODE_REGION)
@ResourceOperation(resource = ResourcePermission.Resource.DATA, operation = ResourcePermission.Operation.MANAGE)
public Result destroyRegion(
        @CliOption(key = CliStrings.DESTROY_REGION__REGION, optionContext = ConverterHint.REGION_PATH, mandatory = true, help = CliStrings.DESTROY_REGION__REGION__HELP) String regionPath) {

    if (regionPath == null) {
        return ResultBuilder.createInfoResult(CliStrings.DESTROY_REGION__MSG__SPECIFY_REGIONPATH_TO_DESTROY);
    }

    if (StringUtils.isBlank(regionPath) || regionPath.equals(Region.SEPARATOR)) {
        return ResultBuilder.createInfoResult(CliStrings
                .format(CliStrings.DESTROY_REGION__MSG__REGIONPATH_0_NOT_VALID, new Object[] { regionPath }));
    }

    Result result;
    AtomicReference<XmlEntity> xmlEntity = new AtomicReference<>();
    try {
        InternalCache cache = getCache();
        ManagementService managementService = ManagementService.getExistingManagementService(cache);
        String regionPathToUse = regionPath;

        if (!regionPathToUse.startsWith(Region.SEPARATOR)) {
            regionPathToUse = Region.SEPARATOR + regionPathToUse;
        }

        Set<DistributedMember> regionMembersList = findMembersForRegion(cache, managementService,
                regionPathToUse);

        if (regionMembersList.size() == 0) {
            return ResultBuilder.createUserErrorResult(
                    CliStrings.format(CliStrings.DESTROY_REGION__MSG__COULD_NOT_FIND_REGIONPATH_0_IN_GEODE,
                            regionPath, "jmx-manager-update-rate milliseconds"));
        }

        CliFunctionResult destroyRegionResult;

        ResultCollector<?, ?> resultCollector = CliUtil.executeFunction(RegionDestroyFunction.INSTANCE,
                regionPath, regionMembersList);
        List<CliFunctionResult> resultsList = (List<CliFunctionResult>) resultCollector.getResult();
        String message = CliStrings.format(CliStrings.DESTROY_REGION__MSG__REGION_0_1_DESTROYED, regionPath,
                "");

        // Only if there is an error is this set to false
        boolean isRegionDestroyed = true;
        for (CliFunctionResult aResultsList : resultsList) {
            destroyRegionResult = aResultsList;
            if (destroyRegionResult.isSuccessful()) {
                xmlEntity.set(destroyRegionResult.getXmlEntity());
            } else if (destroyRegionResult.getThrowable() != null) {
                Throwable t = destroyRegionResult.getThrowable();
                LogWrapper.getInstance().info(t.getMessage(), t);
                message = CliStrings.format(
                        CliStrings.DESTROY_REGION__MSG__ERROR_OCCURRED_WHILE_DESTROYING_0_REASON_1, regionPath,
                        t.getMessage());
                isRegionDestroyed = false;
            } else {
                message = CliStrings.format(
                        CliStrings.DESTROY_REGION__MSG__UNKNOWN_RESULT_WHILE_DESTROYING_REGION_0_REASON_1,
                        regionPath, destroyRegionResult.getMessage());
                isRegionDestroyed = false;
            }
        }
        if (isRegionDestroyed) {
            result = ResultBuilder.createInfoResult(message);
        } else {
            result = ResultBuilder.createUserErrorResult(message);
        }
    } catch (IllegalStateException e) {
        result = ResultBuilder.createUserErrorResult(
                CliStrings.format(CliStrings.DESTROY_REGION__MSG__ERROR_WHILE_DESTROYING_REGION_0_REASON_1,
                        regionPath, e.getMessage()));
    } catch (Exception e) {
        result = ResultBuilder.createGemFireErrorResult(
                CliStrings.format(CliStrings.DESTROY_REGION__MSG__ERROR_WHILE_DESTROYING_REGION_0_REASON_1,
                        regionPath, e.getMessage()));
    }

    if (xmlEntity.get() != null) {
        persistClusterConfiguration(result,
                () -> getSharedConfiguration().deleteXmlEntity(xmlEntity.get(), null));
    }

    return result;
}