List of usage examples for the org.springframework.transaction.support.TransactionSynchronizationAdapter constructor (anonymous subclasses)
TransactionSynchronizationAdapter
From source file:org.openvpms.component.business.dao.hibernate.im.lookup.LookupReplacer.java
/** * Executes an update query.//www . j a v a 2s. c o m * <p/> * If any updates are made, the second level caches associated with the persisent classes are also cleared. * <p/> * <strong>NOTE</strong>: There is a small window where the second level cache will not reflect the state of the * database. * * @param query the update query * @param session the hibernate session * @param persistentClasses the persistent classes affected by the update */ private void executeUpdate(Query query, final Session session, final Class... persistentClasses) { int updates = query.executeUpdate(); if (updates != 0) { final SessionFactory factory = session.getSessionFactory(); if (TransactionSynchronizationManager.isActualTransactionActive()) { // clear the cache when the transaction commits TransactionSynchronizationManager.registerSynchronization(new TransactionSynchronizationAdapter() { @Override public void afterCompletion(int status) { if (status == STATUS_COMMITTED) { clearCaches(persistentClasses, factory); } } }); } else { clearCaches(persistentClasses, factory); } } }
From source file:org.zenoss.zep.dao.impl.EventSummaryDaoImpl.java
/**
 * Creates a new event summary row, or de-duplicates the given occurrences into an
 * existing one, keyed by the event fingerprint hash. Any existing row is locked
 * (SELECT ... FOR UPDATE) so concurrent saves for the same fingerprint serialize.
 * The whole operation is timed under a metrics timer.
 *
 * @param fingerprintHash hash identifying the event fingerprint (the row key)
 * @param events          occurrences to merge into the summary
 * @param context         pre-create context passed through to field mapping
 * @param createClearHash whether a clear-fingerprint hash should be generated
 * @return the UUID of the inserted or updated event summary row
 * @throws ZepException on failure (non-ZepExceptions are wrapped)
 */
private String saveEventByFingerprint(final byte[] fingerprintHash, final Collection<Event> events,
        final EventPreCreateContext context, final boolean createClearHash) throws ZepException {
    try {
        return metricRegistry.timer(getClass().getName() + ".saveEventByFingerprint")
                .time(new Callable<String>() {
                    @Override
                    public String call() throws Exception {
                        // Lock any existing summary row for this fingerprint for the
                        // duration of the transaction.
                        final List<EventSummary.Builder> oldSummaryList = template.getJdbcOperations().query(
                                "SELECT event_count,first_seen,last_seen,details_json,status_id,status_change,uuid"
                                        + " FROM event_summary WHERE fingerprint_hash=? FOR UPDATE",
                                new RowMapperResultSetExtractor<EventSummary.Builder>(eventDedupMapper, 1),
                                fingerprintHash);
                        final EventSummary.Builder summary;
                        if (!oldSummaryList.isEmpty()) {
                            summary = oldSummaryList.get(0);
                        } else {
                            // No prior row: start a fresh summary with one occurrence slot.
                            summary = EventSummary.newBuilder();
                            summary.setCount(0);
                            summary.addOccurrenceBuilder(0);
                        }
                        // Merge every occurrence; isNewer records whether any event
                        // advanced the summary.
                        boolean isNewer = false;
                        for (Event event : events) {
                            isNewer = merge(summary, event) || isNewer;
                        }
                        if (!events.isEmpty()) {
                            summary.setUpdateTime(System.currentTimeMillis());
                            final long dedupCount;
                            if (!oldSummaryList.isEmpty()) {
                                // Existing row: every event in this batch is a duplicate.
                                dedupCount = events.size();
                                final Map<String, Object> fields = getUpdateFields(summary, isNewer, context,
                                        createClearHash);
                                // Build the UPDATE dynamically from the changed fields.
                                final StringBuilder updateSql = new StringBuilder("UPDATE event_summary SET ");
                                int i = 0;
                                for (String fieldName : fields.keySet()) {
                                    if (++i > 1) updateSql.append(',');
                                    updateSql.append(fieldName).append("=:").append(fieldName);
                                }
                                updateSql.append(" WHERE fingerprint_hash=:fingerprint_hash");
                                fields.put("fingerprint_hash", fingerprintHash);
                                template.update(updateSql.toString(), fields);
                                // Queue the updated row for re-indexing.
                                final String indexSql = "INSERT INTO event_summary_index_queue (uuid, update_time) SELECT uuid, "
                                        + String.valueOf(System.currentTimeMillis())
                                        + " FROM event_summary WHERE fingerprint_hash=:fingerprint_hash";
                                template.update(indexSql, fields);
                            } else {
                                // New row: the first event is the original occurrence, the
                                // rest are duplicates.
                                dedupCount = events.size() - 1;
                                summary.setUuid(uuidGenerator.generate().toString());
                                final Map<String, Object> fields = getInsertFields(summary, context,
                                        createClearHash);
                                fields.put(COLUMN_FINGERPRINT_HASH, fingerprintHash);
                                insert.execute(fields);
                                indexSignal(summary.getUuid(), System.currentTimeMillis());
                            }
                            if (dedupCount > 0) {
                                // Only bump the dedup counter once the transaction commits.
                                TransactionSynchronizationManager
                                        .registerSynchronization(new TransactionSynchronizationAdapter() {
                                            @Override
                                            public void afterCommit() {
                                                counters.addToDedupedEventCount(dedupCount);
                                            }
                                        });
                            }
                        }
                        return summary.getUuid();
                    }
                });
    } catch (ZepException e) {
        throw e;
    } catch (Exception e) {
        throw new ZepException(e);
    }
}
From source file:org.openvpms.component.business.service.archetype.ArchetypeService.java
/** * Updates the descriptor cache. If a transaction is in progress, the * cache will only be updated on transaction commit. This means that the * descriptor will only be available via the <em>get*Descriptor</em> methods * on successful commit./*from w ww . j ava 2s .co m*/ * * @param object the object to add to the cache */ private void updateCache(final IMObject object) { if (TransactionSynchronizationManager.isActualTransactionActive()) { // update the cache when the transaction commits TransactionSynchronizationManager.registerSynchronization(new TransactionSynchronizationAdapter() { @Override public void afterCompletion(int status) { if (status == STATUS_COMMITTED) { addToCache(object); } } }); } else { addToCache(object); } }
From source file:com.thoughtworks.go.server.service.ScheduleService.java
/**
 * Transitions a job to the given state, persists the change, and propagates any
 * completion to the enclosing stage. The stage cache entry is invalidated only
 * after the state-change transaction commits; follow-on stage scheduling runs in
 * a separate second transaction.
 *
 * @param jobIdentifier identifies the job instance to update
 * @param jobState      the state to transition the job to
 * @throws Exception if either transaction fails
 */
public void updateJobStatus(final JobIdentifier jobIdentifier, final JobState jobState) throws Exception {
    // have to synchronize at stage-level because cancellation happens at stage-level
    final String stageMutex = mutexForStageInstance(jobIdentifier);
    synchronized (stageMutex) {
        synchronized (mutexForJob(jobIdentifier)) {
            final JobInstance job = jobInstanceService.buildByIdWithTransitions(jobIdentifier.getBuildId());
            transactionTemplate.executeWithExceptionHandling(
                    new com.thoughtworks.go.server.transaction.TransactionCallbackWithoutResult() {
                        public void doInTransactionWithoutResult(TransactionStatus status) {
                            // skip jobs that no longer need (or can take) a state change
                            if (job.isNull() || job.getState() == JobState.Rescheduled
                                    || job.getResult() == JobResult.Cancelled) {
                                return;
                            }
                            job.changeState(jobState);
                            //TODO: #2318 JobInstance should contain identifier after it's loaded from database
                            job.setIdentifier(jobIdentifier);
                            jobInstanceService.updateStateAndResult(job);
                            // invalidate the cached stage listing only once the state
                            // change has actually been committed
                            synchronizationManager
                                    .registerSynchronization(new TransactionSynchronizationAdapter() {
                                        @Override
                                        public void afterCommit() {
                                            stageDao.clearCachedAllStages(jobIdentifier.getPipelineName(),
                                                    jobIdentifier.getPipelineCounter(),
                                                    jobIdentifier.getStageName());
                                        }
                                    });
                            if (job.isCompleted()) {
                                Stage stage = stageService.stageById(job.getStageId());
                                stageService.updateResult(stage);
                            }
                        }
                    });
            // this has to be in a separate transaction because the above should not fail
            // due to errors when scheduling the next stage (e.g. CannotScheduleException
            // thrown when there are no agents for run-on-all-agents jobs)
            transactionTemplate.executeWithExceptionHandling(
                    new com.thoughtworks.go.server.transaction.TransactionCallbackWithoutResult() {
                        @Override
                        public void doInTransactionWithoutResult(TransactionStatus status) throws Exception {
                            if (job.isCompleted()) {
                                Stage stage = stageService.stageById(job.getStageId());
                                automaticallyTriggerRelevantStagesFollowingCompletionOf(stage);
                            }
                        }
                    });
        }
    }
}
From source file:com.thoughtworks.go.server.persistence.MaterialRepository.java
private void save(final PipelineMaterialRevision pipelineMaterialRevision, final String pipelineName) { getHibernateTemplate().save(pipelineMaterialRevision); transactionSynchronizationManager.registerSynchronization(new TransactionSynchronizationAdapter() { @Override// w ww .j av a 2s. c o m public void afterCommit() { String key = cacheKeyForLatestPmrForPipelineKey(pipelineMaterialRevision.getMaterialId(), pipelineName.toLowerCase()); synchronized (key) { goCache.remove(key); } } }); }
From source file:org.zenoss.zep.dao.impl.EventSummaryDaoImpl.java
/**
 * Finds the open events that the given clear event closes. Queues the matching
 * rows for re-indexing, then selects and row-locks them (FOR UPDATE) so the
 * caller's transaction can close them. On commit, the cleared-event counter is
 * incremented by the number of matches.
 *
 * @param event   the clear event whose clear hashes select the rows to close
 * @param context pre-create context used when computing the clear hashes
 * @return UUIDs of the event summary rows this clear event would close
 *         (empty if the event produced no clear hashes)
 * @throws ZepException on persistence failure
 */
private List<String> clearEvents(Event event, EventPreCreateContext context) throws ZepException {
    TypeConverter<Long> timestampConverter = databaseCompatibility.getTimestampConverter();
    final List<byte[]> clearHashes = EventDaoUtils.createClearHashes(event, context);
    if (clearHashes.isEmpty()) {
        logger.debug("Clear event didn't contain any clear hashes: {}, {}", event, context);
        return Collections.emptyList();
    }
    final long lastSeen = event.getCreatedTime();
    Map<String, Object> fields = new HashMap<String, Object>(2);
    fields.put("_clear_created_time", timestampConverter.toDatabaseType(lastSeen));
    fields.put("_clear_hashes", clearHashes);
    long updateTime = System.currentTimeMillis();
    // Queue the soon-to-be-cleared rows for re-indexing before touching them.
    String indexSql = "INSERT INTO event_summary_index_queue (uuid, update_time) " + "SELECT uuid, "
            + String.valueOf(updateTime) + " FROM event_summary " + "WHERE last_seen <= :_clear_created_time "
            + "AND clear_fingerprint_hash IN (:_clear_hashes) " + "AND closed_status = FALSE ";
    this.template.update(indexSql, fields);
    /* Find events that this clear event would clear. */
    final String sql = "SELECT uuid FROM event_summary " + "WHERE last_seen <= :_clear_created_time "
            + "AND clear_fingerprint_hash IN (:_clear_hashes) " + "AND closed_status = FALSE " + "FOR UPDATE";
    final List<String> results = this.template.query(sql, new RowMapper<String>() {
        @Override
        public String mapRow(ResultSet rs, int rowNum) throws SQLException {
            return uuidConverter.fromDatabaseType(rs, COLUMN_UUID);
        }
    }, fields);
    // Only count the cleared events once the transaction commits.
    TransactionSynchronizationManager.registerSynchronization(new TransactionSynchronizationAdapter() {
        @Override
        public void afterCommit() {
            counters.addToClearedEventCount(results.size());
        }
    });
    return results;
}
From source file:com.thoughtworks.go.server.persistence.MaterialRepository.java
/**
 * Evicts the cached latest-modifications entry for the material after the
 * surrounding transaction commits.
 */
private void removeLatestCachedModification(final MaterialInstance materialInstance, Modification latest) {
    transactionSynchronizationManager.registerSynchronization(new TransactionSynchronizationAdapter() {
        @Override
        public void afterCommit() {
            final String key = latestMaterialModificationsKey(materialInstance);
            // NOTE(review): locking on a String presumes the key is a canonical/interned
            // instance shared with other users of this cache entry — verify.
            synchronized (key) {
                goCache.remove(key);
            }
        }
    });
}
From source file:com.thoughtworks.go.server.persistence.MaterialRepository.java
private void removeCachedModificationCountFor(final MaterialInstance materialInstance) { transactionSynchronizationManager.registerSynchronization(new TransactionSynchronizationAdapter() { @Override//w ww. j ava2s . c om public void afterCommit() { String key = materialModificationCountKey(materialInstance); synchronized (key) { goCache.remove(key); } } }); }
From source file:com.thoughtworks.go.server.persistence.MaterialRepository.java
/**
 * Evicts the cached paginated-modifications entry for the material after the
 * surrounding transaction commits.
 */
private void removeCachedModificationsFor(final MaterialInstance materialInstance) {
    transactionSynchronizationManager.registerSynchronization(new TransactionSynchronizationAdapter() {
        @Override
        public void afterCommit() {
            final String paginationKey = materialModificationsWithPaginationKey(materialInstance);
            // NOTE(review): locking on a String presumes the key is a canonical/interned
            // instance shared with other users of this cache entry — verify.
            synchronized (paginationKey) {
                goCache.remove(paginationKey);
            }
        }
    });
}
From source file:org.zenoss.zep.dao.impl.EventSummaryDaoImpl.java
/**
 * Ages out open events: marks up to {@code limit} rows as AGED (closed) when
 * their last_seen time is older than the aging interval and their severity is
 * in the selected range. Affected rows are queued for re-indexing first, and
 * the aged-event counter is bumped only after the transaction commits.
 *
 * @param agingInterval     how old (in {@code unit}) an event must be to age out
 * @param unit              time unit of {@code agingInterval}
 * @param maxSeverity       upper severity bound for aging
 * @param limit             maximum number of rows to age (must be positive)
 * @param inclusiveSeverity whether {@code maxSeverity} itself is included
 * @return the number of event summary rows aged
 * @throws ZepException if the interval or limit is invalid, or on DB failure
 */
@Override
@TransactionalRollbackAllExceptions
@Timed
public int ageEvents(long agingInterval, TimeUnit unit, EventSeverity maxSeverity, int limit,
        boolean inclusiveSeverity) throws ZepException {
    TypeConverter<Long> timestampConverter = databaseCompatibility.getTimestampConverter();
    long agingIntervalMs = unit.toMillis(agingInterval);
    if (agingIntervalMs < 0 || agingIntervalMs == Long.MAX_VALUE) {
        throw new ZepException("Invalid aging interval: " + agingIntervalMs);
    }
    // NOTE(review): also rejects limit == 0, though the message only mentions negatives.
    if (limit <= 0) {
        throw new ZepException("Limit can't be negative: " + limit);
    }
    List<Integer> severityIds = getSeverityIds(maxSeverity, inclusiveSeverity);
    if (severityIds.isEmpty()) {
        logger.debug("Not aging events - min severity specified");
        return 0;
    }
    long now = System.currentTimeMillis();
    long ageTs = now - agingIntervalMs;
    Map<String, Object> fields = new HashMap<String, Object>();
    fields.put(COLUMN_STATUS_ID, EventStatus.STATUS_AGED.getNumber());
    fields.put(COLUMN_CLOSED_STATUS, ZepConstants.CLOSED_STATUSES.contains(EventStatus.STATUS_AGED));
    fields.put(COLUMN_STATUS_CHANGE, timestampConverter.toDatabaseType(now));
    fields.put(COLUMN_UPDATE_TIME, timestampConverter.toDatabaseType(now));
    fields.put(COLUMN_LAST_SEEN, timestampConverter.toDatabaseType(ageTs));
    fields.put("_severity_ids", severityIds);
    fields.put("_limit", limit);
    final String updateSql;
    if (databaseCompatibility.getDatabaseType() == DatabaseType.MYSQL) {
        // Queue the affected rows for re-indexing before updating them.
        String indexSql = "INSERT INTO event_summary_index_queue (uuid, update_time) " + "SELECT uuid, "
                + String.valueOf(now) + " " + "FROM event_summary " + " WHERE last_seen < :last_seen AND"
                + " severity_id IN (:_severity_ids) AND" + " closed_status = FALSE LIMIT :_limit";
        this.template.update(indexSql, fields);
        // Use UPDATE ... LIMIT
        updateSql = "UPDATE event_summary SET"
                + " status_id=:status_id,status_change=:status_change,update_time=:update_time"
                + ",closed_status=:closed_status"
                + " WHERE last_seen < :last_seen AND severity_id IN (:_severity_ids)"
                + " AND closed_status = FALSE LIMIT :_limit";
    } else if (databaseCompatibility.getDatabaseType() == DatabaseType.POSTGRESQL) {
        // Queue the affected rows for re-indexing before updating them.
        String indexSql = "INSERT INTO event_summary_index_queue (uuid, update_time) " + "SELECT uuid, "
                + String.valueOf(now) + " " + "FROM event_summary "
                + " WHERE uuid IN (SELECT uuid FROM event_summary WHERE"
                + " last_seen < :last_seen AND severity_id IN (:_severity_ids)"
                + " AND closed_status = FALSE LIMIT :_limit)";
        this.template.update(indexSql, fields);
        // Use UPDATE ... WHERE pk IN (SELECT ... LIMIT)
        updateSql = "UPDATE event_summary SET"
                + " status_id=:status_id,status_change=:status_change,update_time=:update_time"
                + ",closed_status=:closed_status"
                + " WHERE uuid IN (SELECT uuid FROM event_summary WHERE"
                + " last_seen < :last_seen AND severity_id IN (:_severity_ids)"
                + " AND closed_status = FALSE LIMIT :_limit)";
    } else {
        throw new IllegalStateException(
                "Unsupported database type: " + databaseCompatibility.getDatabaseType());
    }
    final int numRows = this.template.update(updateSql, fields);
    if (numRows > 0) {
        // Only bump the aged-event counter once the transaction commits.
        TransactionSynchronizationManager.registerSynchronization(new TransactionSynchronizationAdapter() {
            @Override
            public void afterCommit() {
                counters.addToAgedEventCount(numRows);
            }
        });
    }
    return numRows;
}