Example usage for java.util.concurrent.locks Lock unlock

Introduction

This page collects usage examples for the java.util.concurrent.locks.Lock.unlock() method, drawn from open-source projects.

Prototype

void unlock();

Document

Releases the lock. A Lock implementation will usually restrict which thread may release a lock (typically only the holder of the lock can release it) and may throw an unchecked exception, such as IllegalMonitorStateException, if that restriction is violated.
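
Before the project examples below, here is a minimal, self-contained sketch of the standard idiom (the Counter class and its fields are illustrative, not taken from any of the projects quoted here): acquire the lock before the try block, do the work inside it, and call unlock() in a finally block so the lock is released even when the guarded code throws.

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class Counter {
    private final Lock lock = new ReentrantLock();
    private int count;

    public void increment() {
        lock.lock(); // acquire outside try: if lock() itself fails, there is nothing to release
        try {
            count++; // critical section
        } finally {
            lock.unlock(); // always runs, so the lock cannot leak on an exception
        }
    }
}

Several of the examples below call lock() inside the try block instead; placing it before the try, as the Lock Javadoc recommends, avoids calling unlock() on a lock that was never acquired.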

Usage

From source file:org.geotools.gce.imagemosaic.catalog.GTDataStoreGranuleCatalog.java

@Override
public int getGranulesCount(Query q) throws IOException {
    Utilities.ensureNonNull("query", q);
    q = mergeHints(q);
    String typeName = q.getTypeName();
    final Lock lock = rwLock.readLock();
    try {
        lock.lock();
        checkStore();

        //
        // Load tile information, especially the bounds, which will be
        // reused
        //
        final SimpleFeatureSource featureSource = tileIndexStore.getFeatureSource(typeName);
        if (featureSource == null) {
            throw new NullPointerException(
                    "The provided SimpleFeatureSource is null, it's impossible to create an index!");
        }
        int count = featureSource.getCount(q);
        if (count == -1) {
            return featureSource.getFeatures(q).size();
        }
        return count;

    } catch (Throwable e) {
        final IOException ioe = new IOException();
        ioe.initCause(e);
        throw ioe;
    } finally {
        lock.unlock();
    }
}

From source file:org.jactr.core.module.declarative.basic.DefaultDeclarativeModule.java

public DefaultDeclarativeModule() {
    super("declarative");
    _allChunks = new TreeMap<String, IChunk>();
    _allChunkTypes = new TreeMap<String, IChunkType>();
    _activationSorter = new ChunkActivationComparator();

    _chunksToDispose = FastList.newInstance();

    _searchSystem = new DefaultSearchSystem(this);

    _chunkLock = new ReentrantReadWriteLock();
    _chunkTypeLock = new ReentrantReadWriteLock();

    _chunksToEncode = FastList.newInstance();

    _encodeChunksOnRemove = new IActivationBufferListener() {

        public void chunkMatched(ActivationBufferEvent abe) {
            // noop
        }

        public void requestAccepted(ActivationBufferEvent abe) {
            // noop
        }

        public void sourceChunkAdded(ActivationBufferEvent abe) {
            // noop
        }

        public void sourceChunkRemoved(ActivationBufferEvent abe) {
            /*
             * Queue up the encoding. We don't encode it here so that any inline
             * listeners after this one will get the actual instance of the removed
             * chunk and not the merged version after encoding (if a merge occurs).
             */
            try {
                _chunkLock.writeLock().lock();

                if (!abe.getSource().handlesEncoding())
                    _chunksToEncode.addAll(abe.getSourceChunks());
            } finally {
                _chunkLock.writeLock().unlock();
            }
        }

        public void sourceChunksCleared(ActivationBufferEvent abe) {
            sourceChunkRemoved(abe);
        }

        public void statusSlotChanged(ActivationBufferEvent abe) {
            // noop
        }

        @SuppressWarnings("unchecked")
        public void parameterChanged(IParameterEvent pe) {
            // noop
        }

    };

    _chunkEncoder = new ModelListenerAdaptor() {

        @Override
        public void cycleStopped(ModelEvent event) {
            /**
             * encode those that need encoding and dispose of those that need
             * disposing
             */
            FastList<IChunk> chunkList = FastList.newInstance();

            // assignment, check thread safety
            try {
                _chunkLock.writeLock().lock();
                chunkList.addAll(_chunksToEncode);
                _chunksToEncode.clear();
            } finally {
                _chunkLock.writeLock().unlock();
            }

            FastList<IActivationBuffer> containingBuffers = FastList.newInstance();

            // fast, destructive iterator where processing order does not matter
            for (IChunk chunk = null; !chunkList.isEmpty() && (chunk = chunkList.removeLast()) != null;) {
                /*
                 * Because this chunk might get merged, effectively changing the lock
                 * instance, we grab a reference to the current lock temporarily.
                 */
                Lock lock = chunk.getWriteLock();
                try {
                    lock.lock();
                    if (chunk.hasBeenDisposed())
                        continue;

                    if (chunk.isEncoded())
                        chunk.getSubsymbolicChunk().accessed(event.getSimulationTime());
                    else {
                        BufferUtilities.getContainingBuffers(chunk, true, containingBuffers);
                        if (containingBuffers.size() == 0)
                            addChunk(chunk);
                    }
                } finally {
                    lock.unlock();
                    containingBuffers.clear();
                }
            }

            /**
             * now for the disposal
             */
            // assignment, check thread safety
            try {
                _chunkLock.writeLock().lock();
                chunkList.addAll(_chunksToDispose);
                _chunksToDispose.clear();
            } finally {
                _chunkLock.writeLock().unlock();
            }

            // fast, destructive iterator where processing order does not matter
            for (IChunk chunk = null; !chunkList.isEmpty() && (chunk = chunkList.removeLast()) != null;)
                try {
                    chunk.getWriteLock().lock();

                    if (chunk.isEncoded())
                        continue;
                    if (chunk.hasBeenDisposed())
                        continue;
                    // requeue
                    if (isDisposalSuspended(chunk))
                        dispose(chunk);
                    else {
                        BufferUtilities.getContainingBuffers(chunk, true, containingBuffers);

                        if (containingBuffers.size() == 0)
                            disposeInternal(chunk);
                    }
                } finally {
                    containingBuffers.clear();
                    chunk.getWriteLock().unlock();
                }

            FastList.recycle(containingBuffers);
            FastList.recycle(chunkList);
        }
    };
}

From source file:hudson.plugins.locksandlatches.LockWrapper.java

@Override
public Environment setUp(AbstractBuild abstractBuild, Launcher launcher, BuildListener buildListener)
        throws IOException, InterruptedException {
    final List<NamedReentrantLock> backups = new ArrayList<NamedReentrantLock>();
    List<LockWaitConfig> locks = new ArrayList<LockWaitConfig>(this.locks);

    // sort this list of locks so that we _always_ ask for the locks in order
    Collections.sort(locks, new Comparator<LockWaitConfig>() {
        public int compare(LockWaitConfig o1, LockWaitConfig o2) {
            return o1.getName().compareTo(o2.getName());
        }
    });

    // build the list of "real" locks
    final Map<String, Boolean> sharedLocks = new HashMap<String, Boolean>();
    for (LockWaitConfig lock : locks) {
        NamedReentrantLock backupLock;
        do {
            backupLock = DESCRIPTOR.backupLocks.get(lock.getName());
            if (backupLock == null) {
                DESCRIPTOR.backupLocks.putIfAbsent(lock.getName(), new NamedReentrantLock(lock.getName()));
            }
        } while (backupLock == null);
        backups.add(backupLock);
        sharedLocks.put(lock.getName(), lock.isShared());
    }

    final StringBuilder locksToGet = new StringBuilder();
    CollectionUtils.forAllDo(backups, new Closure() {
        public void execute(Object input) {
            locksToGet.append(((NamedReentrantLock) input).getName()).append(", ");
        }
    });

    buildListener.getLogger()
            .println("[locks-and-latches] Locks to get: " + locksToGet.substring(0, locksToGet.length() - 2));

    boolean haveAll = false;
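    // all-or-nothing acquisition: tryLock() each lock in order and, if any
    // attempt fails, release every lock acquired so far and retry after a pause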
    while (!haveAll) {
        haveAll = true;
        List<NamedReentrantLock> locked = new ArrayList<NamedReentrantLock>();

        DESCRIPTOR.lockingLock.lock();
        try {
            for (NamedReentrantLock lock : backups) {
                boolean shared = sharedLocks.get(lock.getName());
                buildListener.getLogger().print("[locks-and-latches] Trying to get " + lock.getName() + " in "
                        + (shared ? "shared" : "exclusive") + " mode... ");
                Lock actualLock;
                if (shared) {
                    actualLock = lock.readLock();
                } else {
                    actualLock = lock.writeLock();
                }
                if (actualLock.tryLock()) {
                    buildListener.getLogger().println(" Success");
                    locked.add(lock);
                } else {
                    buildListener.getLogger().println(" Failed, releasing all locks");
                    haveAll = false;
                    break;
                }
            }
            if (!haveAll) {
                // release them all
                for (NamedReentrantLock lock : locked) {
                    boolean shared = sharedLocks.get(lock.getName());
                    Lock actualLock;
                    if (shared) {
                        actualLock = lock.readLock();
                    } else {
                        actualLock = lock.writeLock();
                    }
                    actualLock.unlock();
                }
            }
        } finally {
            DESCRIPTOR.lockingLock.unlock();
        }

        if (!haveAll) {
            buildListener.getLogger()
                    .println("[locks-and-latches] Could not get all the locks, sleeping for 1 minute...");
            TimeUnit.SECONDS.sleep(60);
        }
    }

    buildListener.getLogger().println("[locks-and-latches] Have all the locks, build can start");

    return new Environment() {
        @Override
        public boolean tearDown(AbstractBuild abstractBuild, BuildListener buildListener)
                throws IOException, InterruptedException {
            buildListener.getLogger().println("[locks-and-latches] Releasing all the locks");
            for (NamedReentrantLock lock : backups) {
                boolean shared = sharedLocks.get(lock.getName());
                Lock actualLock;
                if (shared) {
                    actualLock = lock.readLock();
                } else {
                    actualLock = lock.writeLock();
                }
                actualLock.unlock();
            }
            buildListener.getLogger().println("[locks-and-latches] All the locks released");
            return super.tearDown(abstractBuild, buildListener);
        }
    };
}

From source file:org.opendedup.collections.ProgressiveFileBasedCSMap.java

private long getPos(byte[] hash) throws IOException {
    long pos = -1;
    Lock l = gcLock.readLock();
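    // a read lock suffices for lookups: concurrent reads proceed in parallel
    // and block only while GC holds the write lock to restructure the maps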
    l.lock();
    try {
        if (!runningGC && !lbf.mightContain(hash)) {
            return pos;
        }
        for (AbstractShard m : this.maps.getAL()) {
            try {
                pos = m.get(hash);
            } catch (MapClosedException e) {
                SDFSLogger.getLog().warn("", e);
            }
            if (pos != -1) {
                if (runningGC)
                    this.lbf.put(hash);
                // m.cache();
                return pos;
            }
        }
        return pos;
    } finally {
        l.unlock();
    }
}

From source file:com.gooddata.http.client.GoodDataHttpClient.java

@Override
public HttpResponse execute(HttpHost target, HttpRequest request, HttpContext context) throws IOException {
    notNull(request, "Request can't be null");

    final boolean logoutRequest = isLogoutRequest(target, request);
    final Lock lock = logoutRequest ? rwLock.writeLock() : rwLock.readLock();
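    // logout mutates the shared auth state (sst/tt) and takes the exclusive
    // write lock; ordinary requests only read the tokens and share the read lock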

    lock.lock();

    final HttpResponse resp;
    try {
        if (tt != null) {
            // this adds TT header to EVERY request to ALL hosts made by this HTTP client
            // however the server performs additional checks to ensure client is not using forged TT
            request.setHeader(TT_HEADER, tt);

            if (logoutRequest) {
                try {
                    sstStrategy.logout(httpClient, target, request.getRequestLine().getUri(), sst, tt);
                    tt = null;
                    sst = null;

                    return new BasicHttpResponse(new BasicStatusLine(request.getProtocolVersion(),
                            HttpStatus.SC_NO_CONTENT, "Logout successful"));
                } catch (GoodDataLogoutException e) {
                    return new BasicHttpResponse(new BasicStatusLine(request.getProtocolVersion(),
                            e.getStatusCode(), e.getStatusText()));
                }
            }
        }
        resp = this.httpClient.execute(target, request, context);
    } finally {
        lock.unlock();
    }
    return handleResponse(target, request, resp, context);
}

From source file:org.opendedup.collections.ShardedProgressiveFileBasedCSMap.java

@Override
public synchronized long claimRecords(SDFSEvent evt) throws IOException {
    if (this.isClosed())
        throw new IOException("Hashtable " + this.fileName + " is close");
    executor = new ThreadPoolExecutor(Main.writeThreads + 1, Main.writeThreads + 1, 10, TimeUnit.SECONDS,
            worksQueue, new ProcessPriorityThreadFactory(Thread.MIN_PRIORITY), executionHandler);
    csz = new AtomicLong(0);

    try {
        Lock l = this.gcLock.writeLock();
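        // swapping in a fresh bloom filter must not race with concurrent
        // lookups, so the swap is done under the exclusive write lock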
        l.lock();
        this.runningGC = true;
        try {
            File _fs = new File(fileName);
            lbf = null;
            lbf = new LargeBloomFilter(_fs.getParentFile(), maxSz, .001, true, true, false);
        } finally {
            l.unlock();
        }

        SDFSLogger.getLog().info("Claiming Records [" + this.getSize() + "] from [" + this.fileName + "]");
        SDFSEvent tEvt = SDFSEvent
                .claimInfoEvent("Claiming Records [" + this.getSize() + "] from [" + this.fileName + "]", evt);
        tEvt.maxCt = this.maps.size();
        Iterator<AbstractShard> iter = maps.iterator();
        ArrayList<ClaimShard> excs = new ArrayList<ClaimShard>();
        while (iter.hasNext()) {
            tEvt.curCt++;
            AbstractShard m = null;
            try {
                m = iter.next();
                ClaimShard cms = new ClaimShard(m, csz, lbf);
                excs.add(cms);
                executor.execute(cms);
            } catch (Exception e) {
                tEvt.endEvent("Unable to claim records for " + m + " because : [" + e.toString() + "]",
                        SDFSEvent.ERROR);
                SDFSLogger.getLog().error("Unable to claim records for " + m, e);
                throw new IOException(e);
            }
        }
        executor.shutdown();
        try {
            while (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
                SDFSLogger.getLog().debug("Awaiting fdisk completion of threads.");
            }
        } catch (InterruptedException e) {
            throw new IOException(e);
        }
        for (ClaimShard cms : excs) {
            if (cms.ex != null)
                throw new IOException(cms.ex);
        }
        this.kSz.getAndAdd(-1 * csz.get());
        tEvt.endEvent("removed [" + csz.get() + "] records");
        SDFSLogger.getLog().info("removed [" + csz.get() + "] records");
        iter = maps.iterator();
        while (iter.hasNext()) {
            AbstractShard m = null;
            try {
                m = iter.next();
                if (!m.isFull() && !m.isActive()) {

                    // SDFSLogger.getLog().info("deleting " +
                    // m.toString());
                    m.iterInit();
                    KVPair p = m.nextKeyValue();
                    while (p != null) {
                        AbstractShard _m = this.getWriteMap();
                        try {
                            _m.put(p.key, p.value, p.loc);
                            this.keyLookup.invalidate(new ByteArrayWrapper(p.key));
                            this.lbf.put(p.key);
                            p = m.nextKeyValue();
                        } catch (HashtableFullException e) {

                        }

                    }
                    int mapsz = maps.size();
                    l = this.gcLock.writeLock();
                    l.lock();
                    try {
                        maps.remove(m);
                    } finally {
                        l.unlock();
                    }
                    mapsz = mapsz - maps.size();
                    SDFSLogger.getLog()
                            .info("removing map " + m.toString() + " sz=" + maps.size() + " rm=" + mapsz);
                    m.vanish();

                    m = null;
                } else if (m.isMaxed()) {
                    SDFSLogger.getLog().info("deleting maxed " + m.toString());
                    m.iterInit();
                    KVPair p = m.nextKeyValue();
                    while (p != null) {
                        ShardedFileByteArrayLongMap _m = this.getWriteMap();
                        try {
                            _m.put(p.key, p.value, p.loc);
                            p = m.nextKeyValue();
                        } catch (HashtableFullException e) {

                        }

                    }
                    int mapsz = maps.size();
                    l = this.gcLock.writeLock();
                    l.lock();
                    try {
                        maps.remove(m);
                    } finally {
                        l.unlock();
                    }
                    mapsz = mapsz - maps.size();
                    SDFSLogger.getLog()
                            .info("removing map " + m.toString() + " sz=" + maps.size() + " rm=" + mapsz);
                    m.vanish();

                    m = null;
                }
            } catch (Exception e) {
                tEvt.endEvent("Unable to compact " + m + " because : [" + e.toString() + "]", SDFSEvent.ERROR);
                SDFSLogger.getLog().error("to compact " + m, e);
                throw new IOException(e);
            }
        }
        l.lock();
        try {
            this.runningGC = false;
        } finally {
            l.unlock();
        }
        return csz.get();
    } finally {
        executor = null;
    }
}

From source file:org.opendedup.collections.ShardedProgressiveFileBasedCSMap.java

@Override
public synchronized long claimRecords(SDFSEvent evt, LargeBloomFilter bf) throws IOException {
    if (this.isClosed())
        throw new IOException("Hashtable " + this.fileName + " is close");
    executor = new ThreadPoolExecutor(Main.writeThreads + 1, Main.writeThreads + 1, 10, TimeUnit.SECONDS,
            worksQueue, new ProcessPriorityThreadFactory(Thread.MIN_PRIORITY), executionHandler);
    csz = new AtomicLong(0);

    try {
        Lock l = this.gcLock.writeLock();
        l.lock();
        this.runningGC = true;
        try {
            File _fs = new File(fileName);
            lbf = null;
            lbf = new LargeBloomFilter(_fs.getParentFile(), maxSz, .001, true, true, false);
        } finally {
            l.unlock();
        }

        SDFSLogger.getLog().info("Claiming Records [" + this.getSize() + "] from [" + this.fileName + "]");
        SDFSEvent tEvt = SDFSEvent
                .claimInfoEvent("Claiming Records [" + this.getSize() + "] from [" + this.fileName + "]", evt);
        tEvt.maxCt = this.maps.size();
        Iterator<AbstractShard> iter = maps.iterator();
        ArrayList<ClaimShard> excs = new ArrayList<ClaimShard>();
        while (iter.hasNext()) {
            tEvt.curCt++;
            AbstractShard m = null;
            try {
                m = iter.next();
                ClaimShard cms = new ClaimShard(m, bf, lbf, csz);
                excs.add(cms);
                executor.execute(cms);
            } catch (Exception e) {
                tEvt.endEvent("Unable to claim records for " + m + " because : [" + e.toString() + "]",
                        SDFSEvent.ERROR);
                SDFSLogger.getLog().error("Unable to claim records for " + m, e);
                throw new IOException(e);
            }
        }
        executor.shutdown();
        try {
            while (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
                SDFSLogger.getLog().debug("Awaiting fdisk completion of threads.");
            }
        } catch (InterruptedException e) {
            throw new IOException(e);
        }
        for (ClaimShard cms : excs) {
            if (cms.ex != null)
                throw new IOException(cms.ex);
        }
        this.kSz.getAndAdd(-1 * csz.get());
        tEvt.endEvent("removed [" + csz.get() + "] records");
        SDFSLogger.getLog().info("removed [" + csz.get() + "] records");
        iter = maps.iterator();
        while (iter.hasNext()) {
            AbstractShard m = null;
            try {
                m = iter.next();
                if (!m.isFull() && !m.isActive()) {

                    // SDFSLogger.getLog().info("deleting " +
                    // m.toString());
                    m.iterInit();
                    KVPair p = m.nextKeyValue();
                    while (p != null) {
                        AbstractShard _m = this.getWriteMap();
                        try {
                            _m.put(p.key, p.value, p.loc);
                            this.keyLookup.invalidate(new ByteArrayWrapper(p.key));
                            this.lbf.put(p.key);
                            p = m.nextKeyValue();
                        } catch (HashtableFullException e) {

                        }

                    }
                    int mapsz = maps.size();
                    l = this.gcLock.writeLock();
                    l.lock();
                    try {
                        maps.remove(m);
                    } finally {
                        l.unlock();
                    }
                    mapsz = mapsz - maps.size();
                    SDFSLogger.getLog()
                            .info("removing map " + m.toString() + " sz=" + maps.size() + " rm=" + mapsz);
                    m.vanish();

                    m = null;
                } else if (m.isMaxed()) {
                    SDFSLogger.getLog().info("deleting maxed " + m.toString());
                    m.iterInit();
                    KVPair p = m.nextKeyValue();
                    while (p != null) {
                        ShardedFileByteArrayLongMap _m = this.getWriteMap();
                        try {
                            _m.put(p.key, p.value);
                            p = m.nextKeyValue();
                        } catch (HashtableFullException e) {

                        }

                    }
                    int mapsz = maps.size();
                    l = this.gcLock.writeLock();
                    l.lock();
                    try {
                        maps.remove(m);
                    } finally {
                        l.unlock();
                    }
                    mapsz = mapsz - maps.size();
                    SDFSLogger.getLog()
                            .info("removing map " + m.toString() + " sz=" + maps.size() + " rm=" + mapsz);
                    m.vanish();

                    m = null;
                }
            } catch (Exception e) {
                tEvt.endEvent("Unable to compact " + m + " because : [" + e.toString() + "]", SDFSEvent.ERROR);
                SDFSLogger.getLog().error("to compact " + m, e);
                throw new IOException(e);
            }
        }
        l.lock();
        try {
            this.runningGC = false;
        } finally {
            l.unlock();
        }
        return csz.get();
    } finally {
        executor = null;
    }
}

From source file:jp.aegif.nemaki.cmis.service.impl.NavigationServiceImpl.java

@Override
public ObjectInFolderList getChildren(CallContext callContext, String repositoryId, String folderId,
        String filter, String orderBy, Boolean includeAllowableActions,
        IncludeRelationships includeRelationships, String renditionFilter, Boolean includePathSegments,
        BigInteger maxItems, BigInteger skipCount, ExtensionsData extension,
        Holder<ObjectData> parentObjectData) {

    exceptionService.invalidArgumentRequiredString("folderId", folderId);

    Lock parentLock = threadLockService.getReadLock(repositoryId, folderId);

    try {
        parentLock.lock();

        // //////////////////
        // General Exception
        // //////////////////
        Folder folder = contentService.getFolder(repositoryId, folderId);
        exceptionService.invalidArgumentFolderId(folder, folderId);
        exceptionService.permissionDenied(callContext, repositoryId, PermissionMapping.CAN_GET_CHILDREN_FOLDER,
                folder);

        // //////////////////
        // Body of the method
        // //////////////////
        // Set ObjectData of parent folder for ObjectInfo
        ObjectData _parent = compileService.compileObjectData(callContext, repositoryId, folder, filter,
                includeAllowableActions, includeRelationships, renditionFilter, false);
        parentObjectData.setValue(_parent);

        return getChildrenInternal(callContext, repositoryId, folderId, filter, orderBy,
                includeAllowableActions, includeRelationships, renditionFilter, includePathSegments, maxItems,
                skipCount, false);
    } finally {
        parentLock.unlock();
    }
}

From source file:com.boylesoftware.web.impl.AbstractRouterConfiguration.java

@Override
public RouterRequest findRoute(final HttpServletRequest request, final HttpServletResponse response)
        throws MethodNotAllowedException, ServletException {

    // check if we have mappings
    if (this.mappings.length == 0)
        return null;

    // try to find the matching route mapping
    final Lock readLock = this.mappingsLock.readLock();
    readLock.lock();
    try {

        // test request URI against the mappings
        RouteImpl mapping = this.mappings[0];
        final String requestURI = request.getRequestURI();
        // TODO: reusable matcher?
        final Matcher m = mapping.getURIPattern().matcher(requestURI);
        int mappingInd = 0;
        do {

            // try to match the mapping
            if (m.matches()) {

                // log the match
                if (this.log.isDebugEnabled())
                    this.log.debug("found mapping for URI " + requestURI + " on attempt " + (mappingInd + 1));

                // move the mapping higher if matched more frequently
                final long numMatched = mapping.incrementNumMatched();
                if (mappingInd > 0) {
                    final RouteImpl prevMapping = this.mappings[mappingInd - 1];
                    if (numMatched > prevMapping.getNumMatched()) {
                        final Lock writeLock = this.mappingsLock.writeLock();
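                        // a read lock cannot be upgraded in place, so release it
                        // before taking the write lock; the finally block downgrades
                        // by reacquiring the read lock before the write lock is freed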
                        readLock.unlock();
                        writeLock.lock();
                        try {
                            this.mappings[mappingInd] = prevMapping;
                            this.mappings[mappingInd - 1] = mapping;
                        } finally {
                            readLock.lock();
                            writeLock.unlock();
                        }
                    }
                }

                // wrap the request
                final RouterRequestImpl routerRequest = this.routerRequestPool.getSync();
                boolean success = false;
                try {

                    // initialize the router request
                    routerRequest.wrap(request, response, mapping, this.isAuthenticationRequired(requestURI));

                    // add parameters made from the URI components
                    final int numURIParams = m.groupCount();
                    for (int i = 0; i < numURIParams; i++) {
                        final String uriParamName = mapping.getURIParamName(i);
                        if (uriParamName != null)
                            routerRequest.addParameter(uriParamName, m.group(i + 1));
                    }

                    // convert flash attributes cookie to request attributes
                    routerRequest.flashCookieToAttributes();

                    // return the router request
                    success = true;
                    return routerRequest;

                } finally {
                    if (!success)
                        routerRequest.recycle();
                }
            }

            // next mapping for next iteration
            if (++mappingInd >= this.mappings.length)
                break;
            mapping = this.mappings[mappingInd];

            // reuse the matcher
            m.reset();
            m.usePattern(mapping.getURIPattern());

        } while (true);

    } finally {
        readLock.unlock();
    }

    // no mapping matched
    return null;
}

From source file:org.geotools.gce.imagemosaic.catalog.GTDataStoreGranuleCatalog.java

@Override
public void addGranules(final String typeName, final Collection<SimpleFeature> granules,
        final Transaction transaction) throws IOException {
    Utilities.ensureNonNull("granuleMetadata", granules);
    final Lock lock = rwLock.writeLock();
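    // mutating the tile index takes the exclusive write lock, the counterpart
    // of the read lock used by getGranulesCount(Query) in the first example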
    try {
        lock.lock();
        // check if the index has been cleared
        checkStore();

        SimpleFeatureStore store = (SimpleFeatureStore) tileIndexStore.getFeatureSource(typeName);
        store.setTransaction(transaction);

        ListFeatureCollection featureCollection = new ListFeatureCollection(tileIndexStore.getSchema(typeName));

        // add them all
        Set<FeatureId> fids = new HashSet<FeatureId>();
        for (SimpleFeature f : granules) {
            // Add the feature to the feature collection
            featureCollection.add(f);
            fids.add(ff.featureId(f.getID()));
        }
        store.addFeatures(featureCollection);

        // update bounds
        if (bounds.containsKey(typeName)) {
            bounds.remove(typeName);
        }

    } finally {
        lock.unlock();
    }
}