Example usage for java.util.concurrent ConcurrentMap remove

List of usage examples for java.util.concurrent ConcurrentMap remove

Introduction

On this page you can find example usages of java.util.concurrent ConcurrentMap remove.

Prototype

V remove(Object key);

Document

Removes the mapping for a key from this map if it is present (optional operation).
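
Before the project examples below, a minimal standalone sketch of the call itself (the map contents and class name are illustrative): remove(Object key) returns the value previously associated with the key, or null if there was no mapping; the related two-argument remove(key, value) declared by ConcurrentMap only removes the entry when the current value matches.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class ConcurrentMapRemoveDemo {
    public static void main(String[] args) {
        ConcurrentMap<String, Integer> counts = new ConcurrentHashMap<>();
        counts.put("alpha", 1);
        counts.put("beta", 2);

        // remove(Object key) returns the previous value, or null if the key was absent
        Integer removed = counts.remove("alpha");
        System.out.println(removed);                 // 1
        System.out.println(counts.remove("gamma"));  // null

        // The two-argument overload removes only if the current value matches
        boolean conditional = counts.remove("beta", 99);
        System.out.println(conditional + " " + counts); // false {beta=2}
    }
}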

Usage

From source file:org.apereo.portal.io.xml.JaxbPortalDataHandlerService.java

@Override
public void importDataDirectory(File directory, String pattern, final BatchImportOptions options) {
    if (!directory.exists()) {
        throw new IllegalArgumentException("The specified directory '" + directory + "' does not exist");
    }

    //Create the file filter to use when searching for files to import
    final FileFilter fileFilter;
    if (pattern != null) {
        fileFilter = new AntPatternFileFilter(true, false, pattern, this.dataFileExcludes);
    } else {
        fileFilter = new AntPatternFileFilter(true, false, this.dataFileIncludes, this.dataFileExcludes);
    }

    //Determine the parent directory to log to
    final File logDirectory = determineLogDirectory(options, "import");

    //Setup reporting file
    final File importReport = new File(logDirectory, "data-import.txt");
    final PrintWriter reportWriter;
    try {
        reportWriter = new PrintWriter(new PeriodicFlushingBufferedWriter(500, new FileWriter(importReport)));
    } catch (IOException e) {
        throw new RuntimeException("Failed to create FileWriter for: " + importReport, e);
    }

    //Convert directory to URI String to provide better logging output
    final URI directoryUri = directory.toURI();
    final String directoryUriStr = directoryUri.toString();
    IMPORT_BASE_DIR.set(directoryUriStr);
    try {
        //Scan the specified directory for files to import
        logger.info("Scanning for files to Import from: {}", directory);
        final PortalDataKeyFileProcessor fileProcessor = new PortalDataKeyFileProcessor(this.dataKeyTypes,
                options);
        this.directoryScanner.scanDirectoryNoResults(directory, fileFilter, fileProcessor);
        final long resourceCount = fileProcessor.getResourceCount();
        logger.info("Found {} files to Import from: {}", resourceCount, directory);

        //See if the import should fail on error
        final boolean failOnError = options != null ? options.isFailOnError() : true;

        //Map of files to import, grouped by type
        final ConcurrentMap<PortalDataKey, Queue<Resource>> dataToImport = fileProcessor.getDataToImport();

        //Import the data files
        for (final PortalDataKey portalDataKey : this.dataKeyImportOrder) {
            final Queue<Resource> files = dataToImport.remove(portalDataKey);
            if (files == null) {
                continue;
            }

            final Queue<ImportFuture<?>> importFutures = new LinkedList<ImportFuture<?>>();
            final List<FutureHolder<?>> failedFutures = new LinkedList<FutureHolder<?>>();

            final int fileCount = files.size();
            logger.info("Importing {} files of type {}", fileCount, portalDataKey);
            reportWriter.println(portalDataKey + "," + fileCount);

            while (!files.isEmpty()) {
                final Resource file = files.poll();

                //Check for completed futures on every iteration, needed to fail as fast as possible on an import exception
                final List<FutureHolder<?>> newFailed = waitForFutures(importFutures, reportWriter,
                        logDirectory, false);
                failedFutures.addAll(newFailed);

                final AtomicLong importTime = new AtomicLong(-1);

                //Create import task
                final Callable<Object> task = new CallableWithoutResult() {
                    @Override
                    protected void callWithoutResult() {
                        IMPORT_BASE_DIR.set(directoryUriStr);
                        importTime.set(System.nanoTime());
                        try {
                            importData(file, portalDataKey);
                        } finally {
                            importTime.set(System.nanoTime() - importTime.get());
                            IMPORT_BASE_DIR.remove();
                        }
                    }
                };

                //Submit the import task
                final Future<?> importFuture = this.importExportThreadPool.submit(task);

                //Add the future for tracking
                importFutures.offer(new ImportFuture(importFuture, file, portalDataKey, importTime));
            }

            //Wait for all of the imports of this type to complete
            final List<FutureHolder<?>> newFailed = waitForFutures(importFutures, reportWriter, logDirectory,
                    true);
            failedFutures.addAll(newFailed);

            if (failOnError && !failedFutures.isEmpty()) {
                throw new RuntimeException(
                        failedFutures.size() + " " + portalDataKey + " entities failed to import.\n\n"
                                + "\tPer entity exception logs and a full report can be found in "
                                + logDirectory + "\n");
            }

            reportWriter.flush();
        }

        if (!dataToImport.isEmpty()) {
            throw new IllegalStateException(
                    "The following PortalDataKeys are not listed in the dataTypeImportOrder List: "
                            + dataToImport.keySet());
        }

        logger.info("For a detailed report on the data import see " + importReport);
    } catch (InterruptedException e) {
        throw new RuntimeException("Interrupted while waiting for entities to import", e);
    } finally {
        IOUtils.closeQuietly(reportWriter);
        IMPORT_BASE_DIR.remove();
    }
}
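
The import loop above drains the ConcurrentMap one key at a time in a declared order: remove(portalDataKey) hands the queue for that key to the calling thread, and whatever is still left in the map afterwards is reported as an ordering error. A reduced sketch of that drain pattern, with hypothetical String keys and file names standing in for PortalDataKey and Resource:

import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentMap;

public class DrainByOrderSketch {
    public static void main(String[] args) {
        ConcurrentMap<String, Queue<String>> work = new ConcurrentHashMap<>();
        work.computeIfAbsent("groups", k -> new ConcurrentLinkedQueue<>()).add("groups-1.xml");
        work.computeIfAbsent("users", k -> new ConcurrentLinkedQueue<>()).add("users-1.xml");

        // Process keys in a declared order; remove(key) gives this thread sole
        // ownership of the queue, so no later pass can see it again.
        for (String key : List.of("groups", "users")) {
            Queue<String> files = work.remove(key);
            if (files == null) {
                continue; // nothing of this type was found
            }
            files.forEach(f -> System.out.println("importing " + key + ": " + f));
        }

        // Anything still in the map has a key missing from the ordered list.
        if (!work.isEmpty()) {
            throw new IllegalStateException("Keys not in the import order: " + work.keySet());
        }
    }
}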

From source file:org.opendaylight.controller.protocol_plugin.openflow.internal.TopologyServiceShim.java

/**
 * Update local cache and return true if it needs to notify upper layer
 * Topology listeners.
 *
 * @param container
 *            The network container
 * @param edge
 *            The edge
 * @param type
 *            The update type
 * @param props
 *            The edge properties
 * @return true if it needs to notify upper layer Topology listeners
 */
private boolean updateLocalEdgeMap(String container, Edge edge, UpdateType type, Set<Property> props) {
    ConcurrentMap<NodeConnector, Pair<Edge, Set<Property>>> edgePropsMap = edgeMap.get(container);
    NodeConnector src = edge.getTailNodeConnector();
    Pair<Edge, Set<Property>> edgeProps = new ImmutablePair<Edge, Set<Property>>(edge, props);
    boolean rv = false;

    switch (type) {
    case ADDED:
    case CHANGED:
        if (edgePropsMap == null) {
            edgePropsMap = new ConcurrentHashMap<NodeConnector, Pair<Edge, Set<Property>>>();
            rv = true;
        } else {
            if (edgePropsMap.containsKey(src) && edgePropsMap.get(src).equals(edgeProps)) {
                // Entry already exists. No update.
                rv = false;
            } else {
                rv = true;
            }
        }
        if (rv) {
            edgePropsMap.put(src, edgeProps);
            edgeMap.put(container, edgePropsMap);
        }
        break;
    case REMOVED:
        if ((edgePropsMap != null) && edgePropsMap.containsKey(src)) {
            edgePropsMap.remove(src);
            if (edgePropsMap.isEmpty()) {
                edgeMap.remove(container);
            } else {
                edgeMap.put(container, edgePropsMap);
            }
            rv = true;
        }
        break;
    default:
        logger.debug("notifyLocalEdgeMap: invalid {} for Edge {} in container {}",
                new Object[] { type.getName(), edge, container });
    }

    if (rv) {
        logger.debug("notifyLocalEdgeMap: {} for Edge {} in container {}",
                new Object[] { type.getName(), edge, container });
    }

    return rv;
}
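
In the REMOVED branch above, containsKey followed by remove is a check-then-act sequence; it stays correct only because a single thread updates a given container's edge map at a time. A small sketch of an alternative that relies on the return value of remove(key) instead, so the presence check and the removal happen as one atomic step (the map and key names are illustrative):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class RemoveReturnValueSketch {
    private final ConcurrentMap<String, String> edgeProps = new ConcurrentHashMap<>();

    /** Returns true only if an entry was actually removed, without a separate containsKey check. */
    public boolean removeEdge(String src) {
        // remove(key) returns the previous value, or null if there was no mapping
        return edgeProps.remove(src) != null;
    }

    public static void main(String[] args) {
        RemoveReturnValueSketch sketch = new RemoveReturnValueSketch();
        sketch.edgeProps.put("of:1:1", "props");
        System.out.println(sketch.removeEdge("of:1:1")); // true
        System.out.println(sketch.removeEdge("of:1:1")); // false, already gone
    }
}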

From source file:com.edgenius.wiki.service.impl.PageServiceImpl.java

@SuppressWarnings("unchecked")
public void stopEditing(String pageUuid, User user) {
    Element ele = pageEditingCache.get(pageUuid);
    if (ele != null) {
        ConcurrentMap<String, Long> map = (ConcurrentMap<String, Long>) ele.getValue();
        if (map != null)
            map.remove(user.getUsername());
        if (map == null || map.size() == 0)
            pageEditingCache.remove(pageUuid);
    }
}
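
stopEditing above removes the user from the inner map and then drops the whole cache entry once the inner map is empty, which are two separate steps. A sketch of the same cleanup, assuming a plain ConcurrentMap of maps instead of the Ehcache Element wrapper used here, where ConcurrentHashMap runs computeIfPresent atomically per key:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class StopEditingSketch {
    // pageUuid -> (username -> last-edit timestamp)
    private final ConcurrentMap<String, ConcurrentMap<String, Long>> editing = new ConcurrentHashMap<>();

    public void startEditing(String pageUuid, String user) {
        editing.computeIfAbsent(pageUuid, k -> new ConcurrentHashMap<>()).put(user, System.currentTimeMillis());
    }

    public void stopEditing(String pageUuid, String user) {
        // Remove the user, and drop the whole page entry by returning null
        // once the inner map is empty; both happen in one atomic update.
        editing.computeIfPresent(pageUuid, (k, users) -> {
            users.remove(user);
            return users.isEmpty() ? null : users;
        });
    }

    public static void main(String[] args) {
        StopEditingSketch sketch = new StopEditingSketch();
        sketch.startEditing("page-1", "alice");
        sketch.stopEditing("page-1", "alice");
        System.out.println(sketch.editing.containsKey("page-1")); // false
    }
}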

From source file:org.apache.giraph.ooc.DiskBackedPartitionStore.java

/**
 * Spill message buffers of a particular type of message (current or incoming
 * buffer) for a partition to disk.
 *
 * @param partitionId Id of the partition to spill the messages for
 * @param pendingMessages The map to get the message buffers from
 * @param superstep Superstep of which we want to offload messages. This is
 *                  equal to current superstep number if we want to offload
 *                  buffers for currentMessageStore, and is equal to next
 *                  superstep number if we want to offload buffer for
 *                  incomingMessageStore
 * @throws IOException
 */
private void spillMessages(Integer partitionId,
        ConcurrentMap<Integer, Pair<Integer, List<VertexIdMessages<I, Writable>>>> pendingMessages,
        long superstep) throws IOException {
    Pair<Integer, List<VertexIdMessages<I, Writable>>> entry;
    messageBufferRWLock.writeLock().lock();
    entry = pendingMessages.remove(partitionId);
    if (entry != null && entry.getLeft() < minBuffSize) {
        pendingMessages.put(partitionId, entry);
        entry = null;
    }
    messageBufferRWLock.writeLock().unlock();

    if (entry == null) {
        return;
    }

    // Sanity check
    checkState(!entry.getRight().isEmpty(),
            "spillMessages: the message buffer that is supposed to be flushed to " + "disk does not exist.");

    File file = new File(getPendingMessagesBufferPath(partitionId, superstep));

    FileOutputStream fos = new FileOutputStream(file, true);
    BufferedOutputStream bos = new BufferedOutputStream(fos);
    DataOutputStream dos = new DataOutputStream(bos);
    for (VertexIdMessages<I, Writable> messages : entry.getRight()) {
        SerializedMessageClass messageClass;
        if (messages instanceof ByteArrayVertexIdMessages) {
            messageClass = SerializedMessageClass.BYTE_ARRAY_VERTEX_ID_MESSAGES;
        } else if (messages instanceof ByteArrayOneMessageToManyIds) {
            messageClass = SerializedMessageClass.BYTE_ARRAY_ONE_MESSAGE_TO_MANY_IDS;
        } else {
            throw new IllegalStateException("spillMessages: serialized message " + "type is not supported");
        }
        dos.writeInt(messageClass.ordinal());
        messages.write(dos);
    }
    dos.close();
}
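
spillMessages removes the buffer and, if it is still below the minimum size, puts it straight back; the surrounding write lock keeps other threads from slipping a new buffer in between. A sketch of the same take-or-put-back decision expressed as a single ConcurrentHashMap.compute call instead of an external lock (the types and threshold are illustrative):

import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicReference;

public class SpillDecisionSketch {
    private static final int MIN_BUFF_SIZE = 3;

    private final ConcurrentMap<Integer, List<String>> pending = new ConcurrentHashMap<>();

    /** Removes and returns the buffer for a partition only if it is large enough to spill. */
    public List<String> takeIfLargeEnough(int partitionId) {
        AtomicReference<List<String>> taken = new AtomicReference<>();
        // ConcurrentHashMap runs compute atomically for the key: returning null
        // removes the entry, returning the existing list keeps it (the put-back case).
        pending.compute(partitionId, (id, buffer) -> {
            if (buffer == null || buffer.size() < MIN_BUFF_SIZE) {
                return buffer; // absent or too small: leave as-is
            }
            taken.set(buffer);
            return null; // large enough: remove it from the map
        });
        return taken.get();
    }

    public static void main(String[] args) {
        SpillDecisionSketch sketch = new SpillDecisionSketch();
        sketch.pending.put(7, List.of("m1", "m2", "m3"));
        System.out.println(sketch.takeIfLargeEnough(7)); // [m1, m2, m3]
        System.out.println(sketch.takeIfLargeEnough(7)); // null, already taken
    }
}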

From source file:org.opendaylight.controller.clustering.services_implementation.internal.ClusteringServicesIT.java

@Test
public void clusterContainerAndGlobalTest()
        throws CacheExistException, CacheConfigException, CacheListenerAddException, InterruptedException {
    String cache1 = "Cache1";
    String cache2 = "Cache2";
    // Let's test the case of caches with the same name in different
    // containers (actually the global and container case)
    String cache3 = "Cache2";

    HashSet<cacheMode> cacheModeSet = new HashSet<cacheMode>();
    cacheModeSet.add(cacheMode.NON_TRANSACTIONAL);
    ConcurrentMap cm11 = this.clusterDefaultServices.createCache(cache1, cacheModeSet);
    assertNotNull(cm11);

    assertTrue(this.clusterDefaultServices.existCache(cache1));
    assertEquals(cm11, this.clusterDefaultServices.getCache(cache1));

    ConcurrentMap cm12 = this.clusterDefaultServices.createCache(cache2, cacheModeSet);
    ConcurrentMap cm23 = this.clusterGlobalServices.createCache(cache3, cacheModeSet);

    // Now, given cache2 and cache3 have the same name, let's make
    // sure they don't return the same reference
    assertNotNull(this.clusterGlobalServices.getCache(cache2));
    // cm12 reference must be different than cm23
    assertTrue(cm12 != cm23);

    HashSet<String> cacheList = (HashSet<String>) this.clusterDefaultServices.getCacheList();
    assertEquals(2, cacheList.size());
    assertTrue(cacheList.contains(cache1));
    assertTrue(cacheList.contains(cache2));

    assertNotNull(this.clusterDefaultServices.getCacheProperties(cache1));

    {
        /***********************************/
        /* Testing cacheAware in Container */
        /***********************************/
        Dictionary<String, Object> props = new Hashtable<String, Object>();
        Set<String> propSet = new HashSet<String>();
        propSet.add(cache1);
        propSet.add(cache2);
        props.put("cachenames", propSet);
        CacheAware listener = new CacheAware();
        CacheAware listenerRepeated = new CacheAware();
        ServiceRegistration updateServiceReg = ServiceHelper.registerServiceWReg(ICacheUpdateAware.class,
                "default", listener, props);
        assertNotNull(updateServiceReg);

        // Register another service for the same caches; this one
        // should not get any updates because we don't allow overriding
        // an existing registration until it has been unregistered
        ServiceRegistration updateServiceRegRepeated = ServiceHelper
                .registerServiceWReg(ICacheUpdateAware.class, "default", listenerRepeated, props);
        assertNotNull(updateServiceRegRepeated);
        CountDownLatch res = null;
        List<Update> ups = null;
        Update up = null;
        Integer k1 = new Integer(10);
        Long k2 = new Long(100L);

        /***********************/
        /* CREATE NEW KEY CASE */
        /***********************/
        // Start monitoring the updates
        res = listener.restart(2);
        // modify the cache
        cm11.put(k1, "foo");
        // Wait
        res.await(100L, TimeUnit.SECONDS);
        // Analyze the updates
        ups = listener.getUpdates();
        assertTrue(ups.size() == 2);
        // Validate that first we get an update (yes, even when a
        // new value is added)
        up = ups.get(0);
        assertTrue(up.t.equals(UpdateType.CHANGED));
        assertTrue(up.key.equals(k1));
        assertTrue(up.value.equals("foo"));
        assertTrue(up.cacheName.equals(cache1));
        // Validate that we then get a create
        up = ups.get(1);
        assertTrue(up.t.equals(UpdateType.ADDED));
        assertTrue(up.key.equals(k1));
        assertNull(up.value);
        assertTrue(up.cacheName.equals(cache1));

        /*******************************/
        /* UPDATE AN EXISTING KEY CASE */
        /*******************************/
        // Start monitoring the updates
        res = listener.restart(1);
        // modify the cache
        cm11.put(k1, "baz");
        // Wait
        res.await(100L, TimeUnit.SECONDS);
        // Analyze the updates
        ups = listener.getUpdates();
        assertTrue(ups.size() == 1);
        // Validate we get an update with expected fields
        up = ups.get(0);
        assertTrue(up.t.equals(UpdateType.CHANGED));
        assertTrue(up.key.equals(k1));
        assertTrue(up.value.equals("baz"));
        assertTrue(up.cacheName.equals(cache1));

        /**********************************/
        /* RE-UPDATE AN EXISTING KEY CASE */
        /**********************************/
        // Start monitoring the updates
        res = listener.restart(1);
        // modify the cache
        cm11.put(k1, "baz");
        // Wait
        res.await(100L, TimeUnit.SECONDS);
        // Analyze the updates
        ups = listener.getUpdates();
        assertTrue(ups.size() == 1);
        // Validate we get an update with expected fields
        up = ups.get(0);
        assertTrue(up.t.equals(UpdateType.CHANGED));
        assertTrue(up.key.equals(k1));
        assertTrue(up.value.equals("baz"));
        assertTrue(up.cacheName.equals(cache1));

        /********************************/
        /* REMOVAL OF EXISTING KEY CASE */
        /********************************/
        // Start monitoring the updates
        res = listener.restart(1);
        // modify the cache
        cm11.remove(k1);
        // Wait
        res.await(100L, TimeUnit.SECONDS);
        // Analyze the updates
        ups = listener.getUpdates();
        assertTrue(ups.size() == 1);
        // Validate we get a delete with expected fields
        up = ups.get(0);
        assertTrue(up.t.equals(UpdateType.REMOVED));
        assertTrue(up.key.equals(k1));
        assertNull(up.value);
        assertTrue(up.cacheName.equals(cache1));

        /***********************/
        /* CREATE NEW KEY CASE */
        /***********************/
        // Start monitoring the updates
        res = listener.restart(2);
        // modify the cache
        cm12.put(k2, new Short((short) 15));
        // Wait
        res.await(100L, TimeUnit.SECONDS);
        // Analyze the updates
        ups = listener.getUpdates();
        assertTrue(ups.size() == 2);
        // Validate that first we get an update (yes, even when a
        // new value is added)
        up = ups.get(0);
        assertTrue(up.t.equals(UpdateType.CHANGED));
        assertTrue(up.key.equals(k2));
        assertTrue(up.value.equals(new Short((short) 15)));
        assertTrue(up.cacheName.equals(cache2));
        // Validate that we then get a create
        up = ups.get(1);
        assertTrue(up.t.equals(UpdateType.ADDED));
        assertTrue(up.key.equals(k2));
        assertNull(up.value);
        assertTrue(up.cacheName.equals(cache2));

        /*******************************/
        /* UPDATE AN EXISTING KEY CASE */
        /*******************************/
        // Start monitoring the updates
        res = listener.restart(1);
        // modify the cache
        cm12.put(k2, "BAZ");
        // Wait
        res.await(100L, TimeUnit.SECONDS);
        // Analyze the updates
        ups = listener.getUpdates();
        assertTrue(ups.size() == 1);
        // Validate we get an update with expected fields
        up = ups.get(0);
        assertTrue(up.t.equals(UpdateType.CHANGED));
        assertTrue(up.key.equals(k2));
        assertTrue(up.value.equals("BAZ"));
        assertTrue(up.cacheName.equals(cache2));

        /********************************/
        /* REMOVAL OF EXISTING KEY CASE */
        /********************************/
        // Start monitoring the updates
        res = listener.restart(1);
        // modify the cache
        cm12.remove(k2);
        // Wait
        res.await(100L, TimeUnit.SECONDS);
        // Analyze the updates
        ups = listener.getUpdates();
        assertTrue(ups.size() == 1);
        // Validate we get a delete with expected fields
        up = ups.get(0);
        assertTrue(up.t.equals(UpdateType.REMOVED));
        assertTrue(up.key.equals(k2));
        assertNull(up.value);
        assertTrue(up.cacheName.equals(cache2));

        /******************************************************************/
        /* NOW LET'S REMOVE THE REGISTRATION AND MAKE SURE NO UPDATES COME */
        /******************************************************************/
        updateServiceReg.unregister();
        // Start monitoring the updates; no updates should come in
        res = listener.restart(1);

        /***********************/
        /* CREATE NEW KEY CASE */
        /***********************/
        // modify the cache
        cm11.put(k1, "foo");

        /*******************************/
        /* UPDATE AN EXISTING KEY CASE */
        /*******************************/
        // modify the cache
        cm11.put(k1, "baz");

        /********************************/
        /* REMOVAL OF EXISTING KEY CASE */
        /********************************/
        // modify the cache
        cm11.remove(k1);

        /***********************/
        /* CREATE NEW KEY CASE */
        /***********************/
        // modify the cache
        cm12.put(k2, new Short((short) 15));

        /*******************************/
        /* UPDATE AN EXISTING KEY CASE */
        /*******************************/
        // modify the cache
        cm12.put(k2, "BAZ");

        /********************************/
        /* REMOVAL OF EXISTING KEY CASE */
        /********************************/
        // modify the cache
        cm12.remove(k2);

        // Wait to make sure no updates came in; admittedly this is
        // error-prone logic, but there is no better way than this
        // to make sure updates didn't get in
        res.await(1L, TimeUnit.SECONDS);
        // Analyze the updates
        ups = listener.getUpdates();
        assertTrue(ups.size() == 0);
    }

    {
        /***********************************/
        /* Testing cacheAware in Global */
        /***********************************/
        Dictionary<String, Object> props = new Hashtable<String, Object>();
        Set<String> propSet = new HashSet<String>();
        propSet.add(cache3);
        props.put("cachenames", propSet);
        CacheAware listener = new CacheAware();
        ServiceRegistration updateServiceReg = ServiceHelper.registerGlobalServiceWReg(ICacheUpdateAware.class,
                listener, props);
        assertNotNull(updateServiceReg);

        CountDownLatch res = null;
        List<Update> ups = null;
        Update up = null;
        Integer k1 = new Integer(10);

        /***********************/
        /* CREATE NEW KEY CASE */
        /***********************/
        // Start monitoring the updates
        res = listener.restart(2);
        // modify the cache
        cm23.put(k1, "foo");
        // Wait
        res.await(100L, TimeUnit.SECONDS);
        // Analyze the updates
        ups = listener.getUpdates();
        assertTrue(ups.size() == 2);
        // Validate that first we get an update (yes, even when a
        // new value is added)
        up = ups.get(0);
        assertTrue(up.t.equals(UpdateType.CHANGED));
        assertTrue(up.key.equals(k1));
        assertTrue(up.value.equals("foo"));
        assertTrue(up.cacheName.equals(cache3));
        // Validate that we then get a create
        up = ups.get(1);
        assertTrue(up.t.equals(UpdateType.ADDED));
        assertTrue(up.key.equals(k1));
        assertNull(up.value);
        assertTrue(up.cacheName.equals(cache3));

        /*******************************/
        /* UPDATE AN EXISTING KEY CASE */
        /*******************************/
        // Start monitoring the updates
        res = listener.restart(1);
        // modify the cache
        cm23.put(k1, "baz");
        // Wait
        res.await(100L, TimeUnit.SECONDS);
        // Analyze the updates
        ups = listener.getUpdates();
        assertTrue(ups.size() == 1);
        // Validate we get an update with expected fields
        up = ups.get(0);
        assertTrue(up.t.equals(UpdateType.CHANGED));
        assertTrue(up.key.equals(k1));
        assertTrue(up.value.equals("baz"));
        assertTrue(up.cacheName.equals(cache3));

        /********************************/
        /* REMOVAL OF EXISTING KEY CASE */
        /********************************/
        // Start monitoring the updates
        res = listener.restart(1);
        // modify the cache
        cm23.remove(k1);
        // Wait
        res.await(100L, TimeUnit.SECONDS);
        // Analyze the updates
        ups = listener.getUpdates();
        assertTrue(ups.size() == 1);
        // Validate we get a delete with expected fields
        up = ups.get(0);
        assertTrue(up.t.equals(UpdateType.REMOVED));
        assertTrue(up.key.equals(k1));
        assertNull(up.value);
        assertTrue(up.cacheName.equals(cache3));

        /******************************************************************/
        /* NOW LET'S REMOVE THE REGISTRATION AND MAKE SURE NO UPDATES COME */
        /******************************************************************/
        updateServiceReg.unregister();
        // Start monitoring the updates; no updates should come in
        res = listener.restart(1);

        /***********************/
        /* CREATE NEW KEY CASE */
        /***********************/
        // modify the cache
        cm23.put(k1, "foo");

        /*******************************/
        /* UPDATE AN EXISTING KEY CASE */
        /*******************************/
        // modify the cache
        cm23.put(k1, "baz");

        /********************************/
        /* REMOVAL OF EXISTING KEY CASE */
        /********************************/
        // modify the cache
        cm23.remove(k1);

        // Wait to make sure no updates came in; admittedly this is
        // error-prone logic, but there is no better way than this
        // to make sure updates didn't get in
        res.await(1L, TimeUnit.SECONDS);
        // Analyze the updates
        ups = listener.getUpdates();
        assertTrue(ups.size() == 0);
    }

    InetAddress addr = this.clusterDefaultServices.getMyAddress();
    assertNotNull(addr);

    List<InetAddress> addrList = this.clusterDefaultServices.getClusteredControllers();

    this.clusterDefaultServices.destroyCache(cache1);
    assertFalse(this.clusterDefaultServices.existCache(cache1));
}