Example usage for the org.springframework.dao.ConcurrencyFailureException(String, Throwable) constructor

Introduction

On this page you can find example usages of the org.springframework.dao.ConcurrencyFailureException(String msg, Throwable cause) constructor.

Prototype

public ConcurrencyFailureException(String msg, @Nullable Throwable cause) 

Document

Constructor for ConcurrencyFailureException.
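
Before the real-world examples, here is a minimal, self-contained sketch (not taken from any of the projects listed below) of how this constructor is typically called: an InterruptedException caught while waiting on concurrent data-access work is rethrown as a ConcurrencyFailureException, preserving the original exception as the cause. The class and method names are illustrative assumptions.

import java.util.concurrent.CountDownLatch;

import org.springframework.dao.ConcurrencyFailureException;

public class AwaitExample {

    /**
     * Blocks until all concurrently submitted data-access tasks have counted
     * the latch down; an interruption is converted into Spring's generic
     * ConcurrencyFailureException(String msg, Throwable cause).
     */
    public static void awaitTasks(CountDownLatch latch) {
        try {
            latch.await();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt flag
            throw new ConcurrencyFailureException(
                    "interrupted while waiting for concurrent data access tasks", e);
        }
    }
}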

Usage

From source file: com.alibaba.cobar.client.support.execution.DefaultConcurrentRequestProcessor.java

public List<Object> process(List<ConcurrentRequest> requests) {
    List<Object> resultList = new ArrayList<Object>();

    if (CollectionUtils.isEmpty(requests))
        return resultList;

    List<RequestDepository> requestsDepo = fetchConnectionsAndDepositForLaterUse(requests);
    final CountDownLatch latch = new CountDownLatch(requestsDepo.size());
    List<Future<Object>> futures = new ArrayList<Future<Object>>();
    try {

        for (RequestDepository rdepo : requestsDepo) {
            ConcurrentRequest request = rdepo.getOriginalRequest();
            final SqlMapClientCallback action = request.getAction();
            final Connection connection = rdepo.getConnectionToUse();

            futures.add(request.getExecutor().submit(new Callable<Object>() {
                public Object call() throws Exception {
                    try {
                        return executeWith(connection, action);
                    } finally {
                        latch.countDown();
                    }
                }
            }));
        }

        try {
            latch.await();
        } catch (InterruptedException e) {
            throw new ConcurrencyFailureException(
                    "interrupted when processing data access request in concurrency", e);
        }

    } finally {
        for (RequestDepository depo : requestsDepo) {
            Connection springCon = depo.getConnectionToUse();
            DataSource dataSource = depo.getOriginalRequest().getDataSource();
            try {
                if (springCon != null) {
                    if (depo.isTransactionAware()) {
                        springCon.close();
                    } else {
                        DataSourceUtils.doReleaseConnection(springCon, dataSource);
                    }
                }
            } catch (Throwable ex) {
                logger.info("Could not close JDBC Connection", ex);
            }
        }
    }

    fillResultListWithFutureResults(futures, resultList);

    return resultList;
}

From source file: com.alibaba.cobar.client.support.execution.DefaultConcurrentRequestProcessor.java

private void fillResultListWithFutureResults(List<Future<Object>> futures, List<Object> resultList) {
    for (Future<Object> future : futures) {
        try {
            resultList.add(future.get());
        } catch (InterruptedException e) {
            throw new ConcurrencyFailureException(
                    "interrupted when processing data access request in concurrency", e);
        } catch (ExecutionException e) {
            throw new ConcurrencyFailureException("something goes wrong in processing", e);
        }
    }
}

From source file: com.raycloud.cobarclient.mybatis.spring.MySqlSessionTemplate.java

/**
 * TODO: Multiple Thread Transaction
 *
 * @param statement
 * @param collection
 * @param <T>
 * @return
 */
private final <T extends Object> int batchAsync(final String statement, Collection<T> collection) {
    Map<Shard, List<T>> classifiedEntities = classify(statement, collection);
    final CountDownLatch latch = new CountDownLatch(classifiedEntities.size());
    List<Future<Integer>> futures = new ArrayList<Future<Integer>>();
    final MultipleCauseException throwables = new MultipleCauseException();
    ExecutorService _executor = MtContextExecutors.getMtcExecutorService(executor);
    SqlSessionHolder holder = SqlSessionUtils
            .currentSqlSessionHolder(MySqlSessionTemplate.this.sqlSessionFactory);
    for (final Map.Entry<Shard, List<T>> entry : classifiedEntities.entrySet()) {
        futures.add(_executor.submit(new BatchAsyncCallable(entry, statement, latch, throwables, holder)));
    }
    try {
        latch.await();
    } catch (InterruptedException e) {
        throw new ConcurrencyFailureException("interrupted when processing data access request in concurrency",
                e);
    }
    if (!throwables.getCauses().isEmpty()) {
        throw new TransientDataAccessResourceException(
                "one or more errors when performing data access operations" + " against multiple shards",
                throwables);
    }
    return counter(getFutureResults(futures));
}

From source file: com.raycloud.cobarclient.mybatis.spring.MySqlSessionTemplate.java

private <T> List<T> getFutureResults(List<Future<T>> futures) {
    List<T> result = new ArrayList<T>();
    for (Future<T> future : futures) {
        try {
            result.add(future.get());
        } catch (InterruptedException e) {
            throw new ConcurrencyFailureException(
                    "interrupted when processing data access request in concurrency", e);
        } catch (ExecutionException e) {
            throw new ConcurrencyFailureException("something goes wrong in processing", e);
        }
    }
    return result;
}

From source file: com.alibaba.cobar.client.CobarSqlMapClientTemplate.java

/**
 * we reorder the collection of entities in concurrency and commit them in
 * sequence, because we have to conform to the infrastructure of spring's
 * transaction management layer.
 * 
 * @param statementName
 * @param parameterObject
 * @return
 */
private Object batchInsertAfterReordering(final String statementName, final Object parameterObject) {
    Set<String> keys = new HashSet<String>();
    keys.add(getDefaultDataSourceName());
    keys.addAll(getCobarDataSourceService().getDataSources().keySet());

    final CobarMRBase mrbase = new CobarMRBase(keys);

    ExecutorService executor = createCustomExecutorService(Runtime.getRuntime().availableProcessors(),
            "batchInsertAfterReordering");
    try {
        final StringBuffer exceptionStaktrace = new StringBuffer();

        Collection<?> paramCollection = ((BatchInsertTask) parameterObject).getEntities();

        final CountDownLatch latch = new CountDownLatch(paramCollection.size());

        Iterator<?> iter = paramCollection.iterator();
        while (iter.hasNext()) {
            final Object entity = iter.next();
            Runnable task = new Runnable() {
                public void run() {
                    try {
                        SortedMap<String, DataSource> dsMap = lookupDataSourcesByRouter(statementName, entity);
                        if (MapUtils.isEmpty(dsMap)) {
                            logger.info(
                                    "can't find routing rule for {} with parameter {}, so use default data source for it.",
                                    statementName, entity);
                            mrbase.emit(getDefaultDataSourceName(), entity);
                        } else {
                            if (dsMap.size() > 1) {
                                throw new IllegalArgumentException(
                                        "unexpected routing result, found more than 1 target data source for current entity:"
                                                + entity);
                            }
                            mrbase.emit(dsMap.firstKey(), entity);
                        }
                    } catch (Throwable t) {
                        exceptionStaktrace.append(ExceptionUtils.getFullStackTrace(t));
                    } finally {
                        latch.countDown();
                    }
                }
            };
            executor.execute(task);
        }
        try {
            latch.await();
        } catch (InterruptedException e) {
            throw new ConcurrencyFailureException(
                    "unexpected interruption when re-arranging parameter collection into sub-collections ", e);
        }

        if (exceptionStaktrace.length() > 0) {
            throw new ConcurrencyFailureException(
                    "unpected exception when re-arranging parameter collection, check previous log for details.\n"
                            + exceptionStaktrace);
        }
    } finally {
        executor.shutdown();
    }

    List<ConcurrentRequest> requests = new ArrayList<ConcurrentRequest>();
    for (Map.Entry<String, List<Object>> entity : mrbase.getResources().entrySet()) {
        final List<Object> paramList = entity.getValue();
        if (CollectionUtils.isEmpty(paramList)) {
            continue;
        }

        String identity = entity.getKey();

        final DataSource dataSourceToUse = findDataSourceToUse(entity.getKey());

        final SqlMapClientCallback callback = new SqlMapClientCallback() {
            public Object doInSqlMapClient(SqlMapExecutor executor) throws SQLException {
                return executor.insert(statementName, paramList);
            }
        };

        ConcurrentRequest request = new ConcurrentRequest();
        request.setDataSource(dataSourceToUse);
        request.setAction(callback);
        request.setExecutor(getDataSourceSpecificExecutors().get(identity));
        requests.add(request);
    }
    return getConcurrentRequestProcessor().process(requests);
}

From source file: org.alfresco.repo.domain.node.AbstractNodeDAOImpl.java

/**
 * Helper method that updates the node, bringing it into the current transaction with
 * the appropriate <b>cm:auditable</b> and transaction behaviour.
 * <p>
 * If the <tt>NodeRef</tt> of the node is changing (usually a store move) then deleted
 * nodes are cleaned out where they might exist.
 * 
 * @param oldNode               the existing node, fully populated
 * @param nodeUpdate            the node update with all update elements populated
 * @param nodeAspects           the node's aspects or <tt>null</tt> to look them up
 * @return                      <tt>true</tt> if any updates were made
 */
private boolean updateNodeImpl(Node oldNode, NodeUpdateEntity nodeUpdate, Set<QName> nodeAspects) {
    Long nodeId = oldNode.getId();

    // Make sure that the ID has been populated
    if (!EqualsHelper.nullSafeEquals(nodeId, nodeUpdate.getId())) {
        throw new IllegalArgumentException("NodeUpdateEntity node ID is not correct: " + nodeUpdate);
    }

    // Copy of the reference data
    nodeUpdate.setStore(oldNode.getStore());
    nodeUpdate.setUuid(oldNode.getUuid());

    // Ensure that other values are set for completeness when caching
    if (!nodeUpdate.isUpdateTypeQNameId()) {
        nodeUpdate.setTypeQNameId(oldNode.getTypeQNameId());
    }
    if (!nodeUpdate.isUpdateLocaleId()) {
        nodeUpdate.setLocaleId(oldNode.getLocaleId());
    }
    if (!nodeUpdate.isUpdateAclId()) {
        nodeUpdate.setAclId(oldNode.getAclId());
    }

    nodeUpdate.setVersion(oldNode.getVersion());
    // Update the transaction
    TransactionEntity txn = getCurrentTransaction();
    nodeUpdate.setTransaction(txn);
    if (!txn.getId().equals(oldNode.getTransaction().getId())) {
        // Only update if the txn has changed
        nodeUpdate.setUpdateTransaction(true);
    }
    // Update auditable
    if (nodeAspects == null) {
        nodeAspects = getNodeAspects(nodeId);
    }
    if (nodeAspects.contains(ContentModel.ASPECT_AUDITABLE)) {
        NodeRef oldNodeRef = oldNode.getNodeRef();
        if (policyBehaviourFilter.isEnabled(oldNodeRef, ContentModel.ASPECT_AUDITABLE)) {
            // Make sure that auditable properties are present
            AuditablePropertiesEntity auditableProps = oldNode.getAuditableProperties();
            if (auditableProps == null) {
                auditableProps = new AuditablePropertiesEntity();
            } else {
                auditableProps = new AuditablePropertiesEntity(auditableProps);
            }
            long modifiedDateToleranceMs = 1000L;

            if (nodeUpdate.isUpdateTransaction()) {
                // allow update cm:modified property for new transaction
                modifiedDateToleranceMs = 0L;
            }

            boolean updateAuditableProperties = auditableProps.setAuditValues(null, null, false,
                    modifiedDateToleranceMs);
            nodeUpdate.setAuditableProperties(auditableProps);
            nodeUpdate.setUpdateAuditableProperties(updateAuditableProperties);
        } else if (nodeUpdate.getAuditableProperties() == null) {
            // cache the explicit setting of auditable properties when creating node (note: auditable aspect is not yet present)
            AuditablePropertiesEntity auditableProps = oldNode.getAuditableProperties();
            if (auditableProps != null) {
                nodeUpdate.setAuditableProperties(auditableProps); // Can reuse the locked instance
                nodeUpdate.setUpdateAuditableProperties(true);
            }
        } else {
            // ALF-4117: NodeDAO: Allow cm:auditable to be set
            // The nodeUpdate had auditable properties set, so we just use that directly
            nodeUpdate.setUpdateAuditableProperties(true);
        }
    } else {
        // Make sure that any auditable properties are removed
        AuditablePropertiesEntity auditableProps = oldNode.getAuditableProperties();
        if (auditableProps != null) {
            nodeUpdate.setAuditableProperties(null);
            nodeUpdate.setUpdateAuditableProperties(true);
        }
    }

    // Just bug out if nothing has changed
    if (!nodeUpdate.isUpdateAnything()) {
        return false;
    }

    // The node is remaining in the current store
    int count = 0;
    Throwable concurrencyException = null;
    try {
        count = updateNode(nodeUpdate);
    } catch (Throwable e) {
        concurrencyException = e;
    }
    // Do concurrency check
    if (count != 1) {
        // Drop the value from the cache in case the cache is stale
        nodesCache.removeByKey(nodeId);
        nodesCache.removeByValue(nodeUpdate);

        throw new ConcurrencyFailureException("Failed to update node " + nodeId, concurrencyException);
    } else {
        // Check for wrap-around in the version number
        if (nodeUpdate.getVersion().equals(LONG_ZERO)) {
            // The version was wrapped back to zero
            // The caches that are keyed by version are now unreliable
            propertiesCache.clear();
            aspectsCache.clear();
            parentAssocsCache.clear();
        }
        // Update the caches
        nodeUpdate.lock();
        nodesCache.setValue(nodeId, nodeUpdate);
        // The node's version has moved on so no need to invalidate caches
    }

    // Done
    if (isDebugEnabled) {
        logger.debug("Updated Node: \n" + "   OLD: " + oldNode + "\n" + "   NEW: " + nodeUpdate);
    }
    return true;
}

From source file: org.alfresco.repo.domain.node.AbstractNodeDAOImpl.java

protected int updatePrimaryParentAssocsImpl(ChildAssocEntity primaryParentAssoc, Node newParentNode,
        Node childNode, Long newChildNodeId, String childNodeName, Long oldParentNodeId, QName assocTypeQName,
        QName assocQName) {//from ww w . j  a  v  a  2s . c om
    Long newParentNodeId = newParentNode.getId();
    Long childNodeId = childNode.getId();

    Savepoint savepoint = controlDAO.createSavepoint("DuplicateChildNodeNameException");
    // We use the child node's UUID if there is no cm:name
    String childNodeNameToUse = childNodeName == null ? childNode.getUuid() : childNodeName;

    try {
        int updated = updatePrimaryParentAssocs(newChildNodeId, newParentNodeId, assocTypeQName, assocQName,
                childNodeNameToUse);
        controlDAO.releaseSavepoint(savepoint);
        // Ensure we invalidate the name cache (the child version key might not have been 'bumped' by the last
        // 'touch')
        if (updated > 0 && primaryParentAssoc != null) {
            Pair<Long, QName> oldTypeQnamePair = qnameDAO.getQName(primaryParentAssoc.getTypeQNameId());
            if (oldTypeQnamePair != null) {
                childByNameCache.remove(new ChildByNameKey(oldParentNodeId, oldTypeQnamePair.getSecond(),
                        primaryParentAssoc.getChildNodeName()));
            }
        }
        return updated;
    } catch (Throwable e) {
        controlDAO.rollbackToSavepoint(savepoint);
        // DuplicateChildNodeNameException implements DoNotRetryException.
        // There are some cases - FK violations, specifically - where we DO actually want to retry.
        // Detecting this is done by looking for the related FK names, 'fk_alf_cass_*' in the error message
        String lowerMsg = e.getMessage().toLowerCase();
        if (lowerMsg.contains("fk_alf_cass_")) {
            throw new ConcurrencyFailureException(
                    "FK violation updating primary parent association for " + childNodeId, e);
        }
        // We assume that this is from the child cm:name constraint violation
        throw new DuplicateChildNodeNameException(newParentNode.getNodeRef(), assocTypeQName, childNodeName, e);
    }
}

From source file: org.alfresco.repo.domain.node.AbstractNodeDAOImpl.java

protected Long newChildAssocInsertImpl(final ChildAssocEntity assoc, final QName assocTypeQName,
        final String childNodeName) {
    Savepoint savepoint = controlDAO.createSavepoint("DuplicateChildNodeNameException");
    try {
        Long id = insertChildAssoc(assoc);
        controlDAO.releaseSavepoint(savepoint);
        return id;
    } catch (Throwable e) {
        controlDAO.rollbackToSavepoint(savepoint);
        // DuplicateChildNodeNameException implements DoNotRetryException.

        // Allow real DB concurrency issues (e.g. DeadlockLoserDataAccessException) straight through for a retry
        if (e instanceof ConcurrencyFailureException) {
            throw e;
        }

        // There are some cases - FK violations, specifically - where we DO actually want to retry.
        // Detecting this is done by looking for the related FK names, 'fk_alf_cass_*' in the error message
        String lowerMsg = e.getMessage().toLowerCase();
        if (lowerMsg.contains("fk_alf_cass_")) {
            throw new ConcurrencyFailureException("FK violation updating primary parent association:" + assoc,
                    e);
        }

        // We assume that this is from the child cm:name constraint violation
        throw new DuplicateChildNodeNameException(assoc.getParentNode().getNodeRef(), assocTypeQName,
                childNodeName, e);
    }
}

From source file: org.alfresco.repo.forum.DiscussableAspect.java

public void onCopyComplete(QName classRef, NodeRef sourceNodeRef, NodeRef targetNodeRef, boolean copyToNewNode,
        Map<NodeRef, NodeRef> copyMap) {
    Set<NodeRef> workingCopyNodeRefs = TransactionalResourceHelper.getSet(KEY_WORKING_COPIES);
    if (!workingCopyNodeRefs.contains(sourceNodeRef)) {
        // This is not one of the nodes that needs to have discussions copied over
        return;
    }

    // First check that the source node has forums
    NodeRef sourceForumNodeRef = getForum(sourceNodeRef);
    if (sourceForumNodeRef == null) {
        // Missing!  Clean the source node up!
        nodeService.removeAspect(sourceNodeRef, ForumModel.ASPECT_DISCUSSABLE);
        return;
    }

    // The aspect may or may not exist on the target node
    if (!nodeService.hasAspect(targetNodeRef, ForumModel.ASPECT_DISCUSSABLE)) {
        // Add the aspect
        nodeService.addAspect(targetNodeRef, ForumModel.ASPECT_DISCUSSABLE, null);
    }
    // Get the forum node
    NodeRef targetForumNodeRef = getForum(targetNodeRef);
    // Merge the forum topics
    List<ChildAssociationRef> topicAssocRefs = nodeService.getChildAssocs(sourceForumNodeRef,
            Collections.singleton(ForumModel.TYPE_TOPIC));
    int copied = 0;
    for (ChildAssociationRef topicAssocRef : topicAssocRefs) {
        NodeRef topicNodeRef = topicAssocRef.getChildRef();
        try {
            // work out the name for the copied topic
            String topicName;
            String topicNodeName = nodeService.getProperty(topicNodeRef, ContentModel.PROP_NAME).toString();
            Serializable labelProp = nodeService.getProperty(targetNodeRef, ContentModel.PROP_VERSION_LABEL);
            if (labelProp == null) {
                SimpleDateFormat dateFormat = new SimpleDateFormat("dd-MM-yyyy-HH-mm-ss");
                topicName = topicNodeName + " - " + dateFormat.format(new Date());
            } else {
                topicName = topicNodeName + " (" + labelProp.toString() + ")";
            }

            if (fileFolderService.searchSimple(targetForumNodeRef, topicName) != null) {
                // A topic with that name already exists
                continue;
            }
            fileFolderService.copy(topicNodeRef, targetForumNodeRef, topicName);
            copied++;
        } catch (FileExistsException e) {
            // We checked for this, so this is a concurrency condition
            throw new ConcurrencyFailureException("Target topic exists: " + e.getMessage(), e);
        } catch (FileNotFoundException e) {
            // The node was there, but now it's gone
            throw new ConcurrencyFailureException("Forum was deleted: " + e.getMessage(), e);
        }
    }
}

From source file: org.alfresco.repo.node.index.AbstractReindexComponent.java

/**
 * To allow for possible 'read committed' behaviour in some databases, where a node that previously existed during a
 * transaction can disappear from existence, we treat InvalidNodeRefExceptions as concurrency conditions.
 */
protected <T2> T2 doInRetryingTransaction(final RetryingTransactionCallback<T2> callback,
        boolean isReadThrough) {
    return transactionService.getRetryingTransactionHelper()
            .doInTransaction(new RetryingTransactionCallback<T2>() {
                @Override
                public T2 execute() throws Throwable {
                    try {
                        return callback.execute();
                    } catch (InvalidNodeRefException e) {
                        // Turn InvalidNodeRefExceptions into retryable exceptions.
                        throw new ConcurrencyFailureException(
                                "Possible cache integrity issue during reindexing", e);
                    }

                }
            }, true, isReadThrough);
}