Example usage for java.util.concurrent.locks Lock unlock

Introduction

This page lists example usages of java.util.concurrent.locks.Lock.unlock() taken from open-source projects.

Prototype

void unlock();

Document

Releases the lock.
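
Every example below follows the same basic shape: acquire the lock, do the work inside a try block, and call unlock() in a finally block so the lock is released even when the work throws. As a minimal, self-contained sketch of the two patterns that recur on this page (the class and field names are illustrative, not taken from any of the projects below):

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class UnlockIdiom {

    private final Lock lock = new ReentrantLock();
    private int counter;

    // Pattern 1: unconditional lock(). Acquire before entering the try block
    // so a failed acquisition can never reach the finally and unlock a lock
    // we do not hold.
    public void increment() {
        lock.lock();
        try {
            counter++;
        } finally {
            lock.unlock(); // always released, even if the body throws
        }
    }

    // Pattern 2: tryLock(). Call unlock() only when the lock was actually
    // acquired; unlocking an unheld lock throws IllegalMonitorStateException.
    public boolean incrementIfFree() throws InterruptedException {
        if (!lock.tryLock(1, TimeUnit.SECONDS)) {
            return false; // never acquired, so no unlock()
        }
        try {
            counter++;
            return true;
        } finally {
            lock.unlock();
        }
    }
}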

Usage

From source file:org.soaplab.services.storage.FileStorage.java

/**************************************************************************
 *
 **************************************************************************/
protected Properties loadJobProperties(File jobDir) {
    File propsFile = new File(jobDir, FILE_JOB_PROPS);
    Properties jobProps = new Properties();
    Lock readlock = getLock(jobDir.getName(), true);
    readlock.lock();
    try (FileInputStream fis = new FileInputStream(propsFile)) {
        jobProps.load(fis);
    } catch (IOException e) {
        // ignore: the file may not even exist yet
    } catch (IllegalArgumentException e) {
        log.error("Malformed Unicode escape appears in file " + propsFile.getAbsolutePath()
                + ". File contents ignored.");
    } finally {
        readlock.unlock();
    }
    return jobProps;
}

From source file:org.opendedup.collections.ProgressiveFileBasedCSMap.java

@Override
public synchronized long claimRecords(SDFSEvent evt, LargeBloomFilter bf) throws IOException {
    if (this.isClosed())
        throw new IOException("Hashtable " + this.fileName + " is close");
    executor = new ThreadPoolExecutor(Main.writeThreads + 1, Main.writeThreads + 1, 10, TimeUnit.SECONDS,
            worksQueue, new ProcessPriorityThreadFactory(Thread.MIN_PRIORITY), executionHandler);
    csz = new AtomicLong(0);

    try {
        Lock l = this.gcLock.writeLock();
        l.lock();
        this.runningGC = true;
        try {
            File _fs = new File(fileName);
            lbf = new LargeBloomFilter(_fs.getParentFile(), maxSz, .01, true, true, false);
        } finally {
            l.unlock();
        }
        SDFSLogger.getLog().info("Claiming Records [" + this.getSize() + "] from [" + this.fileName + "]");
        SDFSEvent tEvt = SDFSEvent
                .claimInfoEvent("Claiming Records [" + this.getSize() + "] from [" + this.fileName + "]", evt);
        tEvt.maxCt = this.maps.size();
        Iterator<AbstractShard> iter = maps.iterator();
        ArrayList<ClaimShard> excs = new ArrayList<ClaimShard>();
        while (iter.hasNext()) {
            tEvt.curCt++;
            AbstractShard m = null;
            try {
                m = iter.next();
                ClaimShard cms = new ClaimShard(m, bf, lbf, csz);
                excs.add(cms);
                executor.execute(cms);
            } catch (Exception e) {
                tEvt.endEvent("Unable to claim records for " + m + " because : [" + e.toString() + "]",
                        SDFSEvent.ERROR);
                SDFSLogger.getLog().error("Unable to claim records for " + m, e);
                throw new IOException(e);
            }
        }
        executor.shutdown();
        try {
            while (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
                SDFSLogger.getLog().debug("Awaiting fdisk completion of threads.");
            }
        } catch (InterruptedException e) {
            throw new IOException(e);
        }
        for (ClaimShard cms : excs) {
            if (cms.ex != null)
                throw new IOException(cms.ex);
        }
        this.kSz.getAndAdd(-1 * csz.get());
        tEvt.endEvent("removed [" + csz.get() + "] records");
        SDFSLogger.getLog().info("removed [" + csz.get() + "] records");
        iter = maps.iterator();
        while (iter.hasNext()) {
            AbstractShard m = null;
            try {
                m = iter.next();
                if (!m.isFull() && !m.isActive()) {

                    // SDFSLogger.getLog().info("deleting " +
                    // m.toString());
                    m.iterInit();
                    KVPair p = m.nextKeyValue();
                    while (p != null) {
                        ProgressiveFileByteArrayLongMap _m = this.getWriteMap();
                        try {
                            _m.put(p.key, p.value);
                            this.lbf.put(p.key);
                            p = m.nextKeyValue();
                        } catch (HashtableFullException e) {
                            // the current write map is full; the loop retries
                            // this pair with a fresh map from getWriteMap()
                        }

                    }
                    int mapsz = maps.size();
                    l = this.gcLock.writeLock();
                    l.lock();
                    try {
                        maps.remove(m);
                    } finally {
                        l.unlock();
                    }
                    mapsz = mapsz - maps.size();
                    SDFSLogger.getLog()
                            .info("removing map " + m.toString() + " sz=" + maps.size() + " rm=" + mapsz);
                    m.vanish();

                    m = null;
                } else if (m.isMaxed()) {
                    SDFSLogger.getLog().info("deleting maxed " + m.toString());
                    m.iterInit();
                    KVPair p = m.nextKeyValue();
                    while (p != null) {
                        ProgressiveFileByteArrayLongMap _m = this.getWriteMap();
                        try {
                            _m.put(p.key, p.value);
                            p = m.nextKeyValue();
                        } catch (HashtableFullException e) {
                            // the current write map is full; the loop retries
                            // this pair with a fresh map from getWriteMap()
                        }

                    }
                    int mapsz = maps.size();
                    l = this.gcLock.writeLock();
                    l.lock();
                    try {
                        maps.remove(m);
                    } finally {
                        l.unlock();
                    }
                    mapsz = mapsz - maps.size();
                    SDFSLogger.getLog()
                            .info("removing map " + m.toString() + " sz=" + maps.size() + " rm=" + mapsz);
                    m.vanish();

                    m = null;
                }
            } catch (Exception e) {
                tEvt.endEvent("Unable to compact " + m + " because : [" + e.toString() + "]", SDFSEvent.ERROR);
                SDFSLogger.getLog().error("to compact " + m, e);
                throw new IOException(e);
            }
        }
        l.lock();
        try {
            this.runningGC = false;
        } finally {
            l.unlock();
        }
        return csz.get();
    } finally {
        executor = null;
    }
}

From source file:org.geotools.gce.imagemosaic.catalog.GTDataStoreGranuleCatalog.java

public void dispose() {
    final Lock l = rwLock.writeLock();
    l.lock();
    try {
        try {
            if (tileIndexStore != null) {
                tileIndexStore.dispose();
            }
            if (multiScaleROIProvider != null) {
                multiScaleROIProvider.dispose();
            }
        } catch (Throwable e) {
            if (LOGGER.isLoggable(Level.FINE)) {
                LOGGER.log(Level.FINE, e.getLocalizedMessage(), e);
            }
        } finally {
            tileIndexStore = null;
            multiScaleROIProvider = null;
        }

    } finally {
        l.unlock();
    }
}
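
Several of the examples on this page lock only one side of a ReentrantReadWriteLock (gcLock.writeLock() above, the read lock from threadLockService below). A minimal sketch of how the two sides pair with unlock(), with illustrative names, assuming many readers and occasional exclusive writers:

import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class CachedValue {

    private final ReadWriteLock rwLock = new ReentrantReadWriteLock();
    private String value = "initial";

    public String read() {
        rwLock.readLock().lock(); // shared: other readers may hold it too
        try {
            return value;
        } finally {
            rwLock.readLock().unlock();
        }
    }

    public void write(String newValue) {
        rwLock.writeLock().lock(); // exclusive: blocks readers and writers
        try {
            value = newValue;
        } finally {
            rwLock.writeLock().unlock();
        }
    }
}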

From source file:org.eclipse.hawkbit.repository.jpa.JpaRolloutManagement.java

@Override
// No transaction, will be created per handled rollout
@Transactional(propagation = Propagation.NEVER)
public void handleRollouts() {
    final List<Long> rollouts = rolloutRepository.findByStatusIn(ACTIVE_ROLLOUTS);

    if (rollouts.isEmpty()) {
        return;
    }

    final String tenant = tenantAware.getCurrentTenant();

    final String handlerId = tenant + "-rollout";
    final Lock lock = lockRegistry.obtain(handlerId);
    if (!lock.tryLock()) {
        return;
    }

    try {
        rollouts.forEach(rolloutId -> runInNewTransaction(handlerId + "-" + rolloutId,
                status -> executeFittingHandler(rolloutId)));
    } finally {
        lock.unlock();
    }
}

From source file:com.thoughtworks.studios.journey.JourneyService.java

/**
 * API for destroying all data under a namespace
 *
 * @param ns namespace name
 * @return 200 response
 */
@POST
@Produces(MediaType.TEXT_PLAIN)
@Path("/{ns}/destroy")
public Response destroy(@PathParam("ns") String ns) {
    Lock writingLock = getWritingLock(ns);
    writingLock.lock();
    try {
        Application app = new Application(graphDB, ns);
        try (Transaction tx = graphDB.beginTx()) {
            app.tearDownSchema();
            tx.success();
        }

        try (Transaction tx = graphDB.beginTx()) {
            app.journeys().tearDownLegacyIndex();
            tx.success();
        }

        app.destroyData();
    } finally {
        writingLock.unlock();
    }

    return Response.status(Response.Status.OK).build();
}

From source file:jp.aegif.nemaki.cmis.service.impl.AclServiceImpl.java

@Override
public Acl getAcl(CallContext callContext, String repositoryId, String objectId, Boolean onlyBasicPermissions) {

    exceptionService.invalidArgumentRequired("objectId", objectId);

    Lock lock = threadLockService.getReadLock(repositoryId, objectId);

    lock.lock();
    try {

        // //////////////////
        // General Exception
        // //////////////////

        Content content = contentService.getContent(repositoryId, objectId);
        exceptionService.objectNotFound(DomainType.OBJECT, content, objectId);
        exceptionService.permissionDenied(callContext, repositoryId, PermissionMapping.CAN_GET_ACL_OBJECT,
                content);

        // //////////////////
        // Body of the method
        // //////////////////
        jp.aegif.nemaki.model.Acl acl = contentService.calculateAcl(repositoryId, content);
        //return compileService.compileAcl(acl, content.isAclInherited(), onlyBasicPermissions);
        return compileService.compileAcl(acl, contentService.getAclInheritedWithDefault(repositoryId, content),
                onlyBasicPermissions);
    } finally {
        lock.unlock();
    }
}

From source file:org.marketcetera.marketdata.core.manager.impl.MarketDataManagerImpl.java

@Override
public long requestMarketData(MarketDataRequest inRequest, ISubscriber inSubscriber) {
    SLF4JLoggerProxy.debug(this, "Received: {}", inRequest);
    // route the request to available providers or to a particular provider
    Collection<MarketDataProvider> successfulProviders = new ArrayList<MarketDataProvider>();
    MarketDataRequestToken token = new Token(inSubscriber, inRequest);
    if (inRequest.getProvider() != null) {
        // a specific provider was requested - use that provider only
        MarketDataProvider provider = getMarketDataProviderForName(inRequest.getProvider());
        if (provider == null) {
            throw new MarketDataProviderNotAvailable();
        }
        SLF4JLoggerProxy.debug(this, "Submitting {} to {}", token, provider);
        try {
            provider.requestMarketData(token);
        } catch (RuntimeException e) {
            throw new MarketDataException(e);
        }
        successfulProviders.add(provider);
    } else {
        boolean liveRequestSubmitted = false;
        for (MarketDataProvider provider : getActiveMarketDataProviders()) {
            if (liveRequestSubmitted && provider.getFeedType() != FeedType.LIVE) {
                SLF4JLoggerProxy.debug(this,
                        "Request has been submitted to all live feeds, no more requests will be issued");
                break;
            }
            try {
                SLF4JLoggerProxy.debug(this, "Submitting {} to {} [{}]", token, provider,
                        provider.getFeedType());
                provider.requestMarketData(token);
                successfulProviders.add(provider);
                if (provider.getFeedType() == FeedType.LIVE) {
                    liveRequestSubmitted = true;
                }
            } catch (RuntimeException e) {
                Messages.UNABLE_TO_REQUEST_MARKETDATA.warn(this, e, inRequest, provider.getProviderName());
                // continue to try from the next provider
            }
        }
        if (successfulProviders.isEmpty()) {
            throw new NoMarketDataProvidersAvailable();
        }
    }
    Lock requestLock = requestLockObject.writeLock();
    try {
        // acquire before entering the try/finally below so that an interrupted
        // acquisition can never reach unlock() on a lock we do not hold
        requestLock.lockInterruptibly();
    } catch (InterruptedException e) {
        Messages.MARKETDATA_REQUEST_INTERRUPTED.warn(this, inRequest);
        throw new MarketDataRequestTimedOut(e);
    }
    try {
        for (MarketDataProvider provider : successfulProviders) {
            providersByToken.put(token, provider);
        }
        tokensByTokenId.put(token.getId(), token);
    } finally {
        requestLock.unlock();
    }
    return token.getId();
}

From source file:com.thoughtworks.studios.journey.JourneyService.java

/**
 * API for adding multiple events.
 * Post events via request body in json format. e.g.
 *   [{
 *      "action_label":"do x",
 *      "start_at":1451956588844,
 *      "digest":"907e7d49",
 *      "anonymous_id":"9f0d0311",
 *      "session_id":"9f0d0311",
 *      "user":"johndoe@example.com",
 *      "properties":{
 *          "prop-a":"foo",
 *          "prop-b":"bar"
 *      }
 *    }]
 * @param ns namespace under operation
 * @param eventsJSON request body: a JSON array of events
 * @return 201 response
 * @throws IOException
 */
@POST
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
@Path("/{ns}/add_events")
public Response addEvents(@PathParam("ns") String ns, String eventsJSON) throws IOException {

    Lock writingLock = getWritingLock(ns);
    writingLock.lock();
    try {
        Application app = new Application(graphDB, ns);

        List<Map> eventsAttrs = jsonToListMap(eventsJSON);
        try (Transaction tx = graphDB.beginTx()) {
            for (Map eventAttrs : eventsAttrs) {
                //noinspection unchecked
                app.events().add(eventAttrs);
            }
            tx.success();
        }
    } finally {
        writingLock.unlock();
    }
    return Response.status(Response.Status.CREATED).build();
}

From source file:com.funambol.pushlistener.service.taskexecutor.ScheduledTaskExecutor.java

/**
 * Verifies whether the given task is in the queue.
 * @param newTask the task to search for
 * @return true if the task is in the queue
 */
public boolean isTaskWrapperInQueue(TaskWrapper newTask) {

    if (newTask == null) {
        throw new IllegalArgumentException("Task must be not null");
    }

    //
    // Lock the per-task lock so no other thread can handle this task, avoiding
    // conflicts (what happens if one thread is removing it while another thread
    // is updating it?)
    //
    Lock handlingTaskLock = getHandlingTaskLock(newTask);
    handlingTaskLock.lock();

    try {
        synchronized (taskFutures) {
            Object o = taskFutures.get(newTask);
            if (o != null) {
                return true;
            }
        }
    } finally {
        handlingTaskLock.unlock();
    }

    return false;
}
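
Many of the examples above obtain a lock keyed by a name: getLock(jobDir.getName(), true), getWritingLock(ns), getHandlingTaskLock(newTask), lockRegistry.obtain(handlerId). None of those helpers is shown on this page; a minimal sketch of such a per-key lock registry, built on ConcurrentHashMap.computeIfAbsent (the class and method names here are hypothetical), might look like this:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

// Hypothetical helper: one Lock per key, created lazily on first use.
public class NamedLockRegistry {

    private final Map<String, Lock> locks = new ConcurrentHashMap<>();

    // computeIfAbsent guarantees a single Lock instance per key, even when
    // two threads ask for the same key at the same time
    public Lock getLock(String key) {
        return locks.computeIfAbsent(key, k -> new ReentrantLock());
    }
}

Callers hold the returned Lock in a local variable and apply the usual lock()/try/finally/unlock() pattern, exactly as the examples on this page do. Note that a registry like this never evicts entries; production registries, such as the lockRegistry used in the JpaRolloutManagement example, typically bound or recycle the underlying locks.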

From source file:com.esofthead.mycollab.module.ecm.esb.impl.DeleteResourcesCommandImpl.java

@Override
public void removeResource(String[] paths, String userDelete, Integer sAccountId) {

    if (sAccountId == null) {
        return;
    }
    Lock lock = DistributionLockUtil.getLock("ecm-" + sAccountId);
    boolean locked = false;
    try {
        locked = lock.tryLock(1, TimeUnit.HOURS);
        if (locked) {
            long totalSize = 0;
            DriveInfo driveInfo = driveInfoService.getDriveInfo(sAccountId);

            for (String path : paths) {
                if (StringUtils.isBlank(path)) {
                    continue;
                }
                totalSize += rawContentService.getSize(path);
                rawContentService.removePath(path);
            }

            if (driveInfo.getUsedvolume() == null || (driveInfo.getUsedvolume() < totalSize)) {
                LOG.error(
                        "Inconsistent storage volumne site of account {}, used storage is less than removed storage ",
                        sAccountId);
                driveInfo.setUsedvolume(0L);
            } else {
                driveInfo.setUsedvolume(driveInfo.getUsedvolume() - totalSize);
            }

            driveInfoService.saveOrUpdateDriveInfo(driveInfo);
        }
    } catch (Exception e) {
        LOG.error("Error while deleting content " + Arrays.toString(paths), e);
    } finally {
        // only release the lock if tryLock actually acquired it
        if (locked) {
            lock.unlock();
        }
    }
}