Example usage for java.util.concurrent.locks.Lock.lock()

Introduction

This page collects usage examples for the lock() method of java.util.concurrent.locks.Lock, drawn from several open-source projects.

Prototype

void lock();

Acquires the lock, blocking the current thread until the lock becomes available.

Usage
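
Every example on this page follows the same basic discipline: call lock(), do the guarded work inside a try block, and call unlock() in the matching finally block so the lock is released even if an exception is thrown. A minimal sketch of that idiom, using a hypothetical counter class for illustration:

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class SafeCounter {

    private final Lock lock = new ReentrantLock();
    private long count;

    public void increment() {
        lock.lock();            // blocks until the lock is acquired
        try {
            count++;            // guarded mutation of shared state
        } finally {
            lock.unlock();      // always released, even on exception
        }
    }

    public long current() {
        lock.lock();
        try {
            return count;
        } finally {
            lock.unlock();
        }
    }
}

The conventional form acquires the lock just before the try block, as above; a few of the examples below instead call lock() as the first statement inside the try, which behaves the same as long as lock() itself does not throw.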

From source file:org.soaplab.services.storage.FileStorage.java

/**************************************************************************
 * Saves last event
 **************************************************************************/
protected void setLastEvent(File jobDir, AnalysisEvent newEvent) {
    File lastEventFile = new File(jobDir, FILE_LAST_EVENT);
    Lock writelock = getLock(jobDir.getName(), false);
    writelock.lock();
    try {
        FileUtils.writeStringToFile(lastEventFile, newEvent.toString(), System.getProperty("file.encoding"));
    } catch (IOException e) {
        log.error("Cannot write to " + lastEventFile.getAbsolutePath() + ": " + e.toString());
    } finally {
        writelock.unlock();
    }
}
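
FileStorage obtains its lock from getLock(jobDir.getName(), boolean), i.e. one lock per job directory rather than a single global lock, with the boolean apparently selecting a read or write lock. The actual implementation of getLock is not shown on this page; the following is only a sketch of how such a per-key registry could be built, assuming one ReentrantReadWriteLock per key:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class JobLockRegistry {

    // One ReadWriteLock per key; this sketch never evicts entries.
    private final ConcurrentMap<String, ReadWriteLock> locks = new ConcurrentHashMap<>();

    public Lock getLock(String key, boolean forReading) {
        ReadWriteLock rwLock = locks.computeIfAbsent(key, k -> new ReentrantReadWriteLock());
        return forReading ? rwLock.readLock() : rwLock.writeLock();
    }
}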

From source file:org.pentaho.platform.plugin.action.olap.impl.OlapServiceImpl.java

public void flushAll(IPentahoSession session) {
    final Lock writeLock = cacheLock.writeLock();
    try {
        writeLock.lock();

        // Start by flushing the local cache.
        resetCache(session);

        flushHostedAndRemote(session);
    } catch (Exception e) {
        throw new IOlapServiceException(e);
    } finally {
        writeLock.unlock();
    }
}
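
The OlapServiceImpl examples take their locks from a shared cacheLock (presumably a ReentrantReadWriteLock): writeLock() for operations that mutate the catalog cache and readLock() for operations that only read it. A minimal sketch of that read/write split, using a hypothetical map-backed cache:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class CatalogCache {

    private final ReadWriteLock cacheLock = new ReentrantReadWriteLock();
    private final Map<String, String> cache = new HashMap<>();

    public String lookup(String name) {
        final Lock readLock = cacheLock.readLock();
        readLock.lock();               // shared: many readers may hold this at once
        try {
            return cache.get(name);
        } finally {
            readLock.unlock();
        }
    }

    public void register(String name, String definition) {
        final Lock writeLock = cacheLock.writeLock();
        writeLock.lock();              // exclusive: blocks both readers and writers
        try {
            cache.put(name, definition);
        } finally {
            writeLock.unlock();
        }
    }
}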

From source file:org.soaplab.services.storage.FileStorage.java

/**************************************************************************
 *
 **************************************************************************/
protected void saveJobProperties(File jobDir, Properties jobProps) {
    File propsFile = new File(jobDir, FILE_JOB_PROPS);
    Lock writelock = getLock(jobDir.getName(), false);
    writelock.lock();
    jobDir.mkdirs();
    try {
        FileOutputStream fos = new FileOutputStream(propsFile);
        jobProps.store(fos, null);
        fos.close();
    } catch (IOException e) {
        log.error("Cannot store job properties into " + propsFile.getAbsolutePath() + ": " + e.toString());
    } finally {
        writelock.unlock();
    }

}

From source file:org.soaplab.services.storage.FileStorage.java

/**************************************************************************
 *
 **************************************************************************/
protected Properties loadJobProperties(File jobDir) {
    File propsFile = new File(jobDir, FILE_JOB_PROPS);
    Properties jobProps = new Properties();
    Lock readlock = getLock(jobDir.getName(), true);
    readlock.lock();
    try {
        FileInputStream fis = new FileInputStream(propsFile);
        jobProps.load(fis);
        fis.close();
    } catch (IOException e) {
        // ignore: the file may not even exist yet
    } catch (IllegalArgumentException e) {
        log.error("Malformed Unicode escape appears in file " + propsFile.getAbsolutePath()
                + ". File contents ignored.");
    } finally {
        readlock.unlock();
    }
    return jobProps;
}

From source file:org.soaplab.services.storage.FileStorage.java

/**************************************************************************
 * Returns currently saved last event (from the given 'jobDir').
 * May return null.
 **************************************************************************/
protected AnalysisEvent getLastEvent(File jobDir) {
    if (deserializer != null) {
        File lastEventFile = new File(jobDir, FILE_LAST_EVENT);
        Lock readlock = getLock(jobDir.getName(), true);
        readlock.lock();
        try {
            if (lastEventFile.exists())
                return deserializer.deserialize(this, lastEventFile);
        } catch (Exception e) {
            log.error("Error by reading '" + lastEventFile + "': " + e.toString());
        } finally {
            readlock.unlock();
        }
    }
    return null;
}

From source file:org.pentaho.platform.plugin.action.olap.impl.OlapServiceImpl.java

public List<IOlapService.Catalog> getCatalogs(IPentahoSession session) throws IOlapServiceException {

    // Make sure the cache is initialized.
    initCache(session);
    final List<Catalog> cache = getCache(session);

    final Lock readLock = cacheLock.readLock();
    try {
        readLock.lock();

        final List<IOlapService.Catalog> catalogs = new ArrayList<IOlapService.Catalog>();
        for (Catalog catalog : cache) {
            if (hasAccess(catalog.name, EnumSet.of(RepositoryFilePermission.READ), session)) {
                catalogs.add(catalog);
            }
        }

        // Do not leak the cache list.
        // Do not allow modifications on the list.
        return Collections.unmodifiableList(new ArrayList<IOlapService.Catalog>(cache));

    } finally {
        readLock.unlock();
    }
}

From source file:com.threecrickets.prudence.cache.SqlCache.java

public void store(String key, CacheEntry entry) {
    logger.fine("Store: " + key);

    Lock lock = lockSource.getWriteLock(key);
    lock.lock();
    try {
        Connection connection = connect();
        if (connection == null)
            return;

        try {
            boolean tryInsert = true;

            // Try updating this key

            String sql = "UPDATE " + cacheTableName
                    + " SET data=?, media_type=?, language=?, character_set=?, encoding=?, modification_date=?, tag=?, headers=?, expiration_date=?, document_modification_date=? WHERE key=?";
            PreparedStatement statement = connection.prepareStatement(sql);
            try {
                statement.setBytes(1,
                        entry.getString() != null ? entry.getString().getBytes() : entry.getBytes());
                statement.setString(2, entry.getMediaType() != null ? entry.getMediaType().getName() : null);
                statement.setString(3, entry.getLanguage() != null ? entry.getLanguage().getName() : null);
                statement.setString(4,
                        entry.getCharacterSet() != null ? entry.getCharacterSet().getName() : null);
                statement.setString(5, entry.getEncoding() != null ? entry.getEncoding().getName() : null);
                statement.setTimestamp(6,
                        entry.getModificationDate() != null
                                ? new Timestamp(entry.getModificationDate().getTime())
                                : null);
                statement.setString(7, entry.getTag() != null ? entry.getTag().format() : null);
                statement.setString(8, entry.getHeaders() == null ? "" : serializeHeaders(entry.getHeaders()));
                statement.setTimestamp(9,
                        entry.getExpirationDate() != null ? new Timestamp(entry.getExpirationDate().getTime())
                                : null);
                statement.setTimestamp(10,
                        entry.getDocumentModificationDate() != null
                                ? new Timestamp(entry.getDocumentModificationDate().getTime())
                                : null);
                statement.setString(11, key);
                if (!statement.execute() && statement.getUpdateCount() > 0) {
                    logger.fine("Updated " + key);

                    // Update worked, so no need to try insertion

                    tryInsert = false;
                }
            } finally {
                statement.close();
            }

            if (tryInsert) {
                // Try inserting this key

                // But first make sure we have room...

                int size = countEntries(connection);
                if (size >= maxSize) {
                    prune();

                    size = countEntries(connection);
                    if (size >= maxSize) {
                        logger.fine("No room in cache (" + size + ", " + maxSize + ")");
                        return;
                    }
                }

                // delete( connection, key );

                sql = "INSERT INTO " + cacheTableName
                        + " (key, data, media_type, language, character_set, encoding, modification_date, tag, headers, expiration_date, document_modification_date) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
                statement = connection.prepareStatement(sql);
                try {
                    statement.setString(1, key);
                    statement.setBytes(2,
                            entry.getString() != null ? entry.getString().getBytes() : entry.getBytes());
                    statement.setString(3, getName(entry.getMediaType()));
                    statement.setString(4, getName(entry.getLanguage()));
                    statement.setString(5, getName(entry.getCharacterSet()));
                    statement.setString(6, getName(entry.getEncoding()));
                    statement.setTimestamp(7,
                            entry.getModificationDate() != null
                                    ? new Timestamp(entry.getModificationDate().getTime())
                                    : null);
                    statement.setString(8, entry.getTag() != null ? entry.getTag().format() : null);
                    statement.setString(9,
                            entry.getHeaders() == null ? "" : serializeHeaders(entry.getHeaders()));
                    statement.setTimestamp(10,
                            entry.getExpirationDate() != null
                                    ? new Timestamp(entry.getExpirationDate().getTime())
                                    : null);
                    statement.setTimestamp(11,
                            entry.getDocumentModificationDate() != null
                                    ? new Timestamp(entry.getDocumentModificationDate().getTime())
                                    : null);
                    statement.execute();
                } finally {
                    statement.close();
                }
            }

            // Clean out existing tags for this key

            sql = "DELETE FROM " + cacheTagsTableName + " WHERE key=?";
            statement = connection.prepareStatement(sql);
            try {
                statement.setString(1, key);
                statement.execute();
            } finally {
                statement.close();
            }

            // Add tags for this key

            String[] tags = entry.getTags();
            if ((tags != null) && (tags.length > 0)) {
                sql = "INSERT INTO " + cacheTagsTableName + " (key, tag) VALUES (?, ?)";
                statement = connection.prepareStatement(sql);
                statement.setString(1, key);
                try {
                    for (String tag : tags) {
                        statement.setString(2, tag);
                        statement.execute();
                    }
                } finally {
                    statement.close();
                }
            }
        } finally {
            connection.close();
        }
    } catch (SQLException x) {
        logger.log(Level.WARNING, "Could not store cache entry", x);
    } finally {
        lock.unlock();
    }
}

From source file:org.pentaho.platform.plugin.action.olap.impl.OlapServiceImpl.java

/**
 * Returns a list of catalogs for the current session.
 *
 * <p>The cache is stored in the platform's caches in the region
 * {@link #CATALOG_CACHE_REGION}. It is also segmented by
 * locale, but we only return the correct sub-region according to the
 * session passed as a parameter.
 */
@SuppressWarnings("unchecked")
protected synchronized List<IOlapService.Catalog> getCache(IPentahoSession session) {
    // Create the cache region if necessary.
    final ICacheManager cacheMgr = PentahoSystem.getCacheManager(session);
    final Object cacheKey = makeCacheSubRegionKey(getLocale());

    final Lock writeLock = cacheLock.writeLock();
    try {

        writeLock.lock();

        if (!cacheMgr.cacheEnabled(CATALOG_CACHE_REGION)) {
            // Create the region. This requires write access.
            cacheMgr.addCacheRegion(CATALOG_CACHE_REGION);
        }

        if (cacheMgr.getFromRegionCache(CATALOG_CACHE_REGION, cacheKey) == null) {
            // Create the sub-region. This requires write access.
            cacheMgr.putInRegionCache(CATALOG_CACHE_REGION, cacheKey, new ArrayList<IOlapService.Catalog>());
        }

        return (List<IOlapService.Catalog>) cacheMgr.getFromRegionCache(CATALOG_CACHE_REGION, cacheKey);

    } finally {
        writeLock.unlock();
    }
}

From source file:org.elasticsoftware.elasticactors.cluster.LocalActorSystemInstance.java

/**
 * Distribute the shards over the list of physical nodes
 *
 * @param nodes
 */
public void distributeShards(List<PhysicalNode> nodes, ShardDistributionStrategy strategy) throws Exception {
    final boolean initializing = initialized.compareAndSet(false, true);
    // see if this was the first time, if so we need to initialize the ActorSystem
    if (initializing) {
        logger.info(format("Initializing ActorSystem [%s]", getName()));
    }

    NodeSelector nodeSelector = nodeSelectorFactory.create(nodes);
    // fetch all writelocks
    final Lock[] writeLocks = new Lock[shardLocks.length];
    for (int j = 0; j < shardLocks.length; j++) {
        writeLocks[j] = shardLocks[j].writeLock();
    }
    // store the id's of the new local shard in order to generate the events later
    final List<Integer> newLocalShards = new ArrayList<>(shards.length);
    // this is for reporting the number of shards per node
    final List<String> nodeCount = new ArrayList<>(shards.length);

    // assume we are stable until the resharding process tells us otherwise
    boolean stable = true;

    try {
        for (Lock writeLock : writeLocks) {
            writeLock.lock();
        }

        for (int i = 0; i < configuration.getNumberOfShards(); i++) {
            ShardKey shardKey = new ShardKey(configuration.getName(), i);
            PhysicalNode node = nodeSelector.getPrimary(shardKey.toString());
            nodeCount.add(node.getId());
            if (node.isLocal()) {
                // this instance should start owning the shard now
                final ActorShard currentShard = shards[i];
                if (currentShard == null || !currentShard.getOwningNode().isLocal()) {
                    String owningNodeId = currentShard != null ? currentShard.getOwningNode().getId()
                            : "<No Node>";
                    logger.info(format("I will own %s", shardKey.toString()));
                    // destroy the current remote shard instance
                    if (currentShard != null) {
                        currentShard.destroy();
                    }
                    // create a new local shard and swap it
                    LocalActorShard newShard = new LocalActorShard(node, this, i, shardAdapters[i].myRef,
                            localMessageQueueFactory, shardActorCacheManager);

                    shards[i] = newShard;
                    try {
                        // register with the strategy to wait for shard to be released
                        strategy.registerWaitForRelease(newShard, node);
                    } catch (Exception e) {
                        logger.error(format(
                                "IMPORTANT: waiting on release of shard %s from node %s failed,  ElasticActors cluster is unstable. Please check all nodes",
                                shardKey, owningNodeId), e);
                        stable = false;
                    } finally {
                        // add it to the new local shards
                        newLocalShards.add(i);
                        // initialize
                        // newShard.init();
                        // start owning the scheduler shard (this will start sending messages, but everything is blocked so it should be no problem)
                        scheduler.registerShard(newShard.getKey());
                    }
                } else {
                    // we own the shard already, no change needed
                    logger.info(format("I already own %s", shardKey.toString()));
                }
            } else {
                // the shard will be managed by another node
                final ActorShard currentShard = shards[i];
                if (currentShard == null || currentShard.getOwningNode().isLocal()) {
                    logger.info(format("%s will own %s", node, shardKey));
                    try {
                        // destroy the current local shard instance
                        if (currentShard != null) {
                            // stop owning the scheduler shard
                            scheduler.unregisterShard(currentShard.getKey());
                            currentShard.destroy();
                            strategy.signalRelease(currentShard, node);
                        }
                    } catch (Exception e) {
                        logger.error(format(
                                "IMPORTANT: signalling release of shard %s to node %s failed, ElasticActors cluster is unstable. Please check all nodes",
                                shardKey, node), e);
                        stable = false;
                    } finally {
                        // create a new remote shard and swap it
                        RemoteActorShard newShard = new RemoteActorShard(node, this, i, shardAdapters[i].myRef,
                                remoteMessageQueueFactory);
                        shards[i] = newShard;
                        // initialize
                        newShard.init();
                    }
                } else {
                    // shard was already remote
                    logger.info(format("%s will own %s", node, shardKey));
                }
            }
        }
        // now we have released all local shards, wait for the new local shards to become available
        if (!strategy.waitForReleasedShards(10, TimeUnit.SECONDS)) {
            // timeout while waiting for the shards
            stable = false;
        }
    } finally {
        // unlock all
        for (Lock writeLock : writeLocks) {
            writeLock.unlock();
        }

        this.stable.set(stable);
    }
    // This needs to happen after we initialize the shards as services expect the system to be initialized and
    // should be allowed to send messages to shards
    if (initializing) {
        // initialize the services
        Set<String> serviceActors = configuration.getServices();
        if (serviceActors != null && !serviceActors.isEmpty()) {
            // initialize the service actors in the context
            for (String elasticActorEntry : serviceActors) {
                localNodeAdapter.sendMessage(null, localNodeAdapter.myRef,
                        new ActivateActorMessage(getName(), elasticActorEntry, ActorType.SERVICE));
            }
        }
    }
    // print out the shard distribution here
    Map<String, Long> collect = nodeCount.stream().collect(groupingBy(Function.identity(), counting()));
    SortedMap<String, Long> sortedNodes = new TreeMap<>(collect);
    logger.info("Cluster shard mapping summary:");
    for (Map.Entry<String, Long> entry : sortedNodes.entrySet()) {
        logger.info(format("\t%s has %d shards assigned", entry.getKey(), entry.getValue()));
    }
    // now we need to generate the events for the new local shards (if any)
    logger.info(format("Generating ACTOR_SHARD_INITIALIZED events for %d new shards", newLocalShards.size()));
    for (Integer newLocalShard : newLocalShards) {
        this.actorSystemEventListenerService.generateEvents(shardAdapters[newLocalShard],
                ACTOR_SHARD_INITIALIZED);
    }
}
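
distributeShards takes the write lock of every shard before re-mapping ownership and releases them all in the finally block, so no messages can be routed while the shard table is inconsistent. A simplified sketch of that acquire-all/release-all pattern (here the locks are acquired just before the try block, whereas the original acquires them as the first step inside it):

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class ShardTable {

    private final ReentrantReadWriteLock[] shardLocks;

    public ShardTable(int numberOfShards) {
        shardLocks = new ReentrantReadWriteLock[numberOfShards];
        for (int i = 0; i < numberOfShards; i++) {
            shardLocks[i] = new ReentrantReadWriteLock();
        }
    }

    public void remapAll(Runnable remapping) {
        final Lock[] writeLocks = new Lock[shardLocks.length];
        for (int i = 0; i < shardLocks.length; i++) {
            writeLocks[i] = shardLocks[i].writeLock();
        }
        // Acquire every shard's write lock before touching the mapping.
        for (Lock writeLock : writeLocks) {
            writeLock.lock();
        }
        try {
            remapping.run();
        } finally {
            // Release all locks, even if the remapping failed.
            for (Lock writeLock : writeLocks) {
                writeLock.unlock();
            }
        }
    }
}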

From source file:com.lonepulse.zombielink.processor.AsyncEndpointTest.java

/**
 * <p>Tests an erroneous asynchronous request where the implementation of the 
 * {@link AsyncHandler#onError(Exception)} callback throws an exception.</p>
 *
 * @since 1.3.0
 */
@Test
public final void testAsyncErrorCallbackError() throws InterruptedException {

    String subpath = "/errorcallbackerror", body = "non-JSON-content";

    stubFor(get(urlEqualTo(subpath)).willReturn(aResponse().withStatus(200).withBody(body)));

    final Lock lock = new ReentrantLock();
    final Condition condition = lock.newCondition();

    asyncEndpoint.asyncErrorCallbackError(new AsyncHandler<User>() {

        @Override
        public void onSuccess(HttpResponse httpResponse, User user) {
        }

        @Override
        public void onError(InvocationException error) {

            try {

                throw new IllegalStateException();
            } finally {

                lock.lock();
                condition.signal();
                lock.unlock();
            }
        }
    });

    lock.lock();
    condition.await();
    lock.unlock();

    verify(getRequestedFor(urlEqualTo(subpath)));

    successScenario(); //verify that the asynchronous request executor has survived the exception
}
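
The last example pairs lock() with a Condition so the test thread can block until the asynchronous callback has fired: the callback takes the lock, signals, and unlocks, while the test thread locks and awaits. Note that Condition.await() may return spuriously, so production code normally re-checks a flag in a loop; a minimal sketch of that guarded version, using hypothetical names:

import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class OneShotSignal {

    private final Lock lock = new ReentrantLock();
    private final Condition done = lock.newCondition();
    private boolean signalled;

    // Called by the callback thread when the work is finished.
    public void signal() {
        lock.lock();
        try {
            signalled = true;
            done.signal();
        } finally {
            lock.unlock();
        }
    }

    // Called by the waiting thread; the loop guards against spurious wakeups.
    public void await() throws InterruptedException {
        lock.lock();
        try {
            while (!signalled) {
                done.await();
            }
        } finally {
            lock.unlock();
        }
    }
}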