List of usage examples for org.springframework.transaction.support.TransactionCallbackWithoutResult
From source file:org.foxbpm.rest.service.api.model.ModelsResouce.java
/**
 * REST endpoint that accepts a multipart upload containing a zip of process model
 * resources, unpacks it to a temp directory, and deploys/updates/deletes process
 * definitions inside a single programmatic transaction.
 *
 * NOTE(review): several string literals in this snippet are mojibake ("??") from a
 * lossy extraction of the original (likely Chinese) messages — left byte-identical;
 * restore from the upstream repository if possible.
 *
 * @param entity the multipart request body; the first file item is expected to be the zip
 * @return always null; HTTP status SUCCESS_CREATED signals success
 */
@Post
public String deploy(Representation entity) {
    FileOutputStream fileOutputStream = null;
    // Keeps every opened resource stream so the finally block can close them all.
    final Map<String, InputStream> resourceMap = new HashMap<String, InputStream>();
    // NOTE(review): 'is' is never assigned in the visible code; the close in finally is dead.
    InputStream is = null;
    try {
        // Spool the uploaded zip to a uniquely named temp file.
        File file = File.createTempFile(System.currentTimeMillis() + "flowres", ".zip");
        fileOutputStream = new FileOutputStream(file);
        DiskFileItemFactory factory = new DiskFileItemFactory();
        RestletFileUpload upload = new RestletFileUpload(factory);
        List<FileItem> items = null;
        try {
            items = upload.parseRepresentation(entity);
        } catch (FileUploadException e) {
            // Original (garbled) message preserved; cause 'e' is dropped here in the original.
            throw new FoxBPMException("??");
        }
        // Only the first multipart item is used — presumably the zip archive. TODO confirm.
        FileItem fileItem = items.get(0);
        fileItem.write(file);
        String sysTemp = System.getProperty("java.io.tmpdir");
        final File targetDir = new File(sysTemp + File.separator + "ModelsTempFile");
        targetDir.mkdirs();
        FileUtil.unZip(file.getPath(), targetDir.getPath());
        // Programmatic transaction so all deployments in the zip succeed or fail together.
        PlatformTransactionManager transactionManager = new DataSourceTransactionManager(
                DBUtils.getDataSource());
        TransactionTemplate transactionTemplate = new TransactionTemplate(transactionManager);
        transactionTemplate.execute(new TransactionCallbackWithoutResult() {
            protected void doInTransactionWithoutResult(TransactionStatus status) {
                try {
                    ModelService modelService = FoxBpmUtil.getProcessEngine().getModelService();
                    // Each sub-directory of the unzipped archive describes one operation.
                    for (File tmpFile : targetDir.listFiles()) {
                        if (tmpFile.isDirectory()) {
                            DeploymentBuilder deploymentBuilder = modelService.createDeployment();
                            String fileName = tmpFile.getName();
                            if (fileName.indexOf(SEP) == -1) {
                                throw new FoxBPMException("??");
                            }
                            // Directory name format appears to be <operation><SEP><processKey><SEP><version>,
                            // e.g. "insert-processExpens-1" — TODO confirm against the packer.
                            String operation = fileName.substring(0, fileName.indexOf(SEP));
                            String processKey = fileName.substring(fileName.indexOf(SEP) + 1,
                                    fileName.lastIndexOf(SEP));
                            int version = Integer
                                    .parseInt(fileName.substring(fileName.lastIndexOf(SEP) + 1));
                            File[] files = tmpFile.listFiles();
                            for (File t : files) {
                                InputStream input = new FileInputStream(t);
                                // Registered in resourceMap so the outer finally can close it.
                                resourceMap.put(t.getName(), input);
                                deploymentBuilder.addInputStream(t.getName(), input, version);
                            }
                            if (PREFIX_ADD.equals(operation)) {
                                deploymentBuilder.deploy();
                            } else if (PREFIX_UPDATE.equals(operation)) {
                                ProcessDefinition processDefinition = null;
                                processDefinition = modelService.getProcessDefinition(processKey,
                                        version);
                                if (processDefinition != null) {
                                    // Re-deploy under the existing deployment id to update in place.
                                    String deploymentId = processDefinition.getDeploymentId();
                                    deploymentBuilder.updateDeploymentId(deploymentId);
                                }
                                deploymentBuilder.deploy();
                            } else if (PREFIX_DELETE.equals(operation)) {
                                ProcessDefinition processDefinitionNew = modelService
                                        .getProcessDefinition(processKey, version);
                                if (processDefinitionNew != null) {
                                    String deploymentId = processDefinitionNew.getDeploymentId();
                                    modelService.deleteDeployment(deploymentId);
                                } else {
                                    // Definition already absent — warn and continue.
                                    log.warn("??key:" + processKey + "version:" + version + "??");
                                }
                            } else if ("NotModify".equals(operation)) {
                                log.debug(processKey + "????");
                            } else {
                                // Unknown operation token — abort the whole transaction.
                                throw new FoxBPMException("??" + operation);
                            }
                        }
                    }
                } catch (Exception ex) {
                    // Rethrow FoxBPMException as-is; wrap anything else so the
                    // RuntimeException triggers a transaction rollback.
                    if (ex instanceof FoxBPMException) {
                        throw (FoxBPMException) ex;
                    } else {
                        throw new FoxBPMException("?", ex);
                    }
                }
            }
        });
        setStatus(Status.SUCCESS_CREATED);
    } catch (Exception e) {
        if (e instanceof FoxBPMException) {
            throw (FoxBPMException) e;
        }
        throw new FoxBPMException(e.getMessage(), e);
    } finally {
        // Best-effort cleanup: close the temp-file stream and every resource stream.
        if (fileOutputStream != null) {
            try {
                fileOutputStream.close();
            } catch (IOException e) {
                log.error("?", e);
            }
        }
        for (String name : resourceMap.keySet()) {
            InputStream isTmp = resourceMap.get(name);
            if (isTmp != null) {
                try {
                    isTmp.close();
                } catch (IOException e) {
                    log.error("?", e);
                }
            }
        }
        if (is != null) {
            try {
                is.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
    return null;
}
From source file:org.geowebcache.diskquota.jdbc.JDBCQuotaStore.java
private void createLayerInternal(final String layerName) { tt.execute(new TransactionCallbackWithoutResult() { @Override// w w w . java 2 s . c om protected void doInTransactionWithoutResult(TransactionStatus status) { Set<TileSet> layerTileSets; if (!GLOBAL_QUOTA_NAME.equals(layerName)) { layerTileSets = calculator.getTileSetsFor(layerName); } else { layerTileSets = Collections.singleton(new TileSet(GLOBAL_QUOTA_NAME)); } for (TileSet tset : layerTileSets) { // other nodes in the cluster might be trying to create the same layer, // so use getOrCreate getOrCreateTileSet(tset); } } }); }
From source file:org.geowebcache.diskquota.jdbc.JDBCQuotaStore.java
public void deleteGridSubset(final String layerName, final String gridSetId) { tt.execute(new TransactionCallbackWithoutResult() { @Override// w ww. j ava 2 s . co m protected void doInTransactionWithoutResult(TransactionStatus status) { // first gather the disk quota used by the gridset, and update the global quota Quota quota = getUsedQuotaByGridsetid(gridSetId); quota.setBytes(quota.getBytes().negate()); String updateQuota = dialect.getUpdateQuotaStatement(schema, "tileSetId", "bytes"); Map<String, Object> params = new HashMap<String, Object>(); params.put("tileSetId", GLOBAL_QUOTA_NAME); params.put("bytes", new BigDecimal(quota.getBytes())); jt.update(updateQuota, params); // then delete all the gridsets with the specified id String statement = dialect.getLayerGridDeletionStatement(schema, "layerName", "gridSetId"); params = new HashMap<String, Object>(); params.put("layerName", layerName); params.put("gridSetId", gridSetId); jt.update(statement, params); } }); }
From source file:org.geowebcache.diskquota.jdbc.JDBCQuotaStore.java
public void deleteLayerInternal(final String layerName) { getUsedQuotaByLayerName(layerName);// www . j a v a2s.c om tt.execute(new TransactionCallbackWithoutResult() { @Override protected void doInTransactionWithoutResult(TransactionStatus arg0) { // update the global quota Quota quota = getUsedQuotaByLayerName(layerName); quota.setBytes(quota.getBytes().negate()); String updateQuota = dialect.getUpdateQuotaStatement(schema, "tileSetId", "bytes"); Map<String, Object> params = new HashMap<String, Object>(); params.put("tileSetId", GLOBAL_QUOTA_NAME); params.put("bytes", new BigDecimal(quota.getBytes())); jt.update(updateQuota, params); // delete the layer log.info("Deleting disk quota information for layer '" + layerName + "'"); String statement = dialect.getLayerDeletionStatement(schema, "layerName"); jt.update(statement, Collections.singletonMap("layerName", layerName)); } }); }
From source file:org.jasig.portal.rdbm.DatabaseMetaDataImpl.java
/** * Test the database to see if it really supports outer joins. * @param conn The connection to use./*from w ww. j a v a 2s . co m*/ */ private void testOuterJoins(final SimpleJdbcTemplate jdbcTemplate) { if (this.dbmdSupportsOuterJoins) { for (final JoinQueryString joinQueryString : joinTests) { final String joinTestQuery = "SELECT COUNT(UP_USER.USER_ID) " + "FROM " + joinQueryString.getTestJoin() + " UP_USER.USER_ID=0"; try { transactionTemplate.execute(new TransactionCallbackWithoutResult() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { jdbcTemplate.getJdbcOperations().execute(joinTestQuery); } }); this.joinTest = joinQueryString; if (LOG.isDebugEnabled()) { LOG.debug("Using join test: " + this.joinTest.getClass().getName()); } break; } catch (Exception e) { final String logMessage = "Join test failed: " + joinQueryString.getClass().getName() + " on statement: '" + joinTestQuery + "':"; if (LOG.isDebugEnabled()) { LOG.debug(logMessage, e); } } } } }
From source file:org.jasig.portal.RDBMUserIdentityStore.java
@Override public void removePortalUID(final String userName) { this.transactionOperations.execute(new TransactionCallbackWithoutResult() { @Override/*from w w w . j av a 2 s . c o m*/ protected void doInTransactionWithoutResult(TransactionStatus arg0) { if (PersonFactory.GUEST_USERNAME.equals(userName)) { throw new IllegalArgumentException("CANNOT RESET LAYOUT FOR A GUEST USER"); } final int userId = jdbcOperations.queryForInt("SELECT USER_ID FROM UP_USER WHERE USER_NAME=?", userName); final int type = jdbcOperations.queryForInt( "SELECT ENTITY_TYPE_ID FROM UP_ENTITY_TYPE WHERE ENTITY_TYPE_NAME = ?", IPerson.class.getName()); jdbcOperations.update("DELETE FROM UP_PERMISSION WHERE PRINCIPAL_KEY=? AND PRINCIPAL_TYPE=?", userName, type); final List<Integer> groupIds = jdbcOperations.queryForList( "SELECT M.GROUP_ID " + "FROM UP_GROUP_MEMBERSHIP M, UP_GROUP G, UP_ENTITY_TYPE E " + "WHERE M.GROUP_ID = G.GROUP_ID " + " AND G.ENTITY_TYPE_ID = E.ENTITY_TYPE_ID " + " AND E.ENTITY_TYPE_NAME = 'org.jasig.portal.security.IPerson'" + " AND M.MEMBER_KEY =? AND M.MEMBER_IS_GROUP = 'F'", Integer.class, userName); // Remove from local group // Delete from DeleteUser.java and place here // must be made before delete user in UP_USER for (final Integer groupId : groupIds) { jdbcOperations.update("DELETE FROM UP_GROUP_MEMBERSHIP WHERE MEMBER_KEY=? 
AND GROUP_ID=?", userName, groupId); } jdbcOperations.update("DELETE FROM UP_USER WHERE USER_ID = ?", userId); jdbcOperations.update("DELETE FROM UP_USER_LAYOUT WHERE USER_ID = ?", userId); jdbcOperations.update("DELETE FROM UP_USER_PROFILE WHERE USER_ID = ?", userId); jdbcOperations.update("DELETE FROM UP_LAYOUT_PARAM WHERE USER_ID = ?", userId); jdbcOperations.update("DELETE FROM UP_LAYOUT_STRUCT WHERE USER_ID = ?", userId); jdbcOperations.update("DELETE FROM UP_USER_LOCALE WHERE USER_ID = ?", userId); //Purge all portlet entity data final Set<IPortletEntity> portletEntities = portletEntityDao.getPortletEntitiesForUser(userId); for (final IPortletEntity portletEntity : portletEntities) { portletEntityDao.deletePortletEntity(portletEntity); } //Purge all stylesheet preference data final List<? extends IStylesheetUserPreferences> stylesheetUserPreferences = stylesheetUserPreferencesDao .getStylesheetUserPreferencesForUser(userId); for (final IStylesheetUserPreferences stylesheetUserPreference : stylesheetUserPreferences) { stylesheetUserPreferencesDao.deleteStylesheetUserPreferences(stylesheetUserPreference); } final ILocalAccountPerson person = localAccountDao.getPerson(userName); if (person != null) { localAccountDao.deleteAccount(person); } } }); }
From source file:org.jasig.ssp.service.impl.EvaluatedSuccessIndicatorServiceImpl.java
@Override public List<EvaluatedSuccessIndicatorTO> getForPerson(final UUID personId, final ObjectStatus status) throws ObjectNotFoundException { // Elaborate transaction management workaround b/c we can't avoid opening a transaction, but any exception // that crosses a transactional boundary in the code will mark the transaction as rollback only, which is // fine except that if we just tag this method with @Transactional(readOnly=true), the transaction manager // will still attempt a commit if the exception doesn't exit all the way out of this method (which is // what we usually want in this specific case - we want to try to return as many indicators as we can). And // if you attempt to commit a transaction marked as rollback only, you get a // org.springframework.transaction.UnexpectedRollbackException TransactionTemplate transactionTemplate = new TransactionTemplate(platformTransactionManager); transactionTemplate.setReadOnly(true); final AtomicReference<List<EvaluatedSuccessIndicatorTO>> rsltHolder = new AtomicReference<>(); final AtomicReference<ObjectNotFoundException> onfeHolder = new AtomicReference<>(); try {//from ww w .ja v a2 s.c om transactionTemplate.execute(new TransactionCallbackWithoutResult() { @Override protected void doInTransactionWithoutResult(TransactionStatus txnStatus) { try { getForPersonInTransaction(personId, status, rsltHolder); } catch (ObjectNotFoundException e) { onfeHolder.set(e); throw new RuntimeException("Rolling back transaction", e); } } }); } catch (UnexpectedRollbackException e) { // nothing to be done, totally normal. see comments above. } catch (RuntimeException e) { if (onfeHolder.get() == null) { throw e; } // otherwise it's just us, rolling back the transaction, nothing to be done, totally normal } if (onfeHolder.get() != null) { throw onfeHolder.get(); } return rsltHolder.get(); }
From source file:org.jasig.ssp.service.jobqueue.AbstractJobExecutor.java
/** * Finds and executes the given {@link Job} transactionally in {@link #executeInTransaction(java.util.UUID)}. * Expects that method to raise exceptions if the transaction should be rolled back. In the case of a * {@link org.jasig.ssp.service.jobqueue.JobExecutionException}, the {@link JobExecutionResult} is unpacked. If that object's and * {@link JobExecutionStatus} indicates the job should be retried, {@link executionState} is written in * a separate transaction {@link #prepareRetryInTransaction(java.util.UUID, org.jasig.ssp.service.jobqueue.JobExecutionResult)}. * {@link #toNonExceptionalWorkflowResult(org.jasig.ssp.service.jobqueue.JobExecutionResult, java.util.UUID)} * and {@link #toExceptionalWorkflowResult(org.jasig.ssp.service.jobqueue.JobExecutionResult, org.jasig.ssp.service.jobqueue.JobExecutionException, java.util.UUID)} * can be overriden to control how execution results are translated to workflow results. * * * @param jobId//from w ww . j a v a 2 s.com * @return */ @Override public JobExecutionResult<JobWorkflowStatusDescription> execute(final UUID jobId) { final TransactionTemplate txnTemplate = getTransactionTemplate(); if (txnTemplate == null) { return newWorkflowResultWithErrorMessage(JobExecutionStatus.ERROR, INVALID_JOB_EXEC_CONFIG_MSG, MISSING_TRANSACTION_TEMPLATE_MSG); } // TODO would be nice to extract workflow state msg ahead of time to support aggregation of // failures incrementally. Currently the job state would need to maintain a list of such // failures interally, then write them to the workflow status upon completion, which // limits visibility into job progress in any sort of generic way. (Same basic problem for // non-failure progress tracking as well.) 
try { final AtomicReference<JobExecutionResult<T>> rsltHolder = new AtomicReference<JobExecutionResult<T>>(); txnTemplate.execute(new TransactionCallbackWithoutResult() { @Override protected void doInTransactionWithoutResult(TransactionStatus status) { rsltHolder.set(executeInTransaction(jobId)); } }); return toNonExceptionalWorkflowResult(rsltHolder.get(), jobId); } catch (JobExecutionException e) { final JobExecutionResult<T> exResult = e.getJobExecutionResult(); if (exResult == null) { // really isn't supposed to happen, hence relatively high log level getCurrentLogger().error("Job {} execution exited unexpectedly", jobId, e); return newWorkflowResultWithErrorMessage(JobExecutionStatus.ERROR, e.getMessage()); } if (exResult.getStatus() == JobExecutionStatus.ERROR || exResult.getStatus() == JobExecutionStatus.FAILED || exResult.getStatus() == JobExecutionStatus.INTERRUPTED) { getCurrentLogger().warn("Job {} execution did not complete successfully", jobId, e); try { txnTemplate.execute(new TransactionCallbackWithoutResult() { @Override protected void doInTransactionWithoutResult(TransactionStatus status) { saveJobState(jobId, exResult); } }); } catch (Exception ee) { getCurrentLogger().error("Job {} could not store final execution state.", jobId, e); } return toExceptionalWorkflowResult(exResult, e, jobId); } else if (exResult.getStatus() == JobExecutionStatus.FAILED_PARTIAL) { getCurrentLogger() .warn("Job {} execution encountered a failure, but requested a retry after storing" + " execution state in a separate transaction", jobId, e); // 'main' execution transaction had to be rolled back, but still want to // update job state try { txnTemplate.execute(new TransactionCallbackWithoutResult() { @Override protected void doInTransactionWithoutResult(TransactionStatus status) { prepareRetryInTransaction(jobId, exResult); } }); return toExceptionalWorkflowResult(exResult, e, jobId); } catch (Exception ee) { getCurrentLogger().error( "Job {} could not store retry state 
so retry will be skipped and Job will error out", jobId, ee); return newWorkflowResultWithErrorMessage(JobExecutionStatus.ERROR, ee.getMessage()); } } else { // programmer error return newWorkflowResultWithErrorMessage(JobExecutionStatus.ERROR, UNEXPECTED_EXECUTION_EXCEPTION_STATUS_MSG, exResult.getStatus(), jobId); } } catch (Exception e) { // really isn't supposed to happen, hence higher log level getCurrentLogger().error("Job {} execution exited unexpectedly", jobId, e); return newWorkflowResultWithErrorMessage(JobExecutionStatus.ERROR, UNHANDLED_EXECUTION_EXCEPTION_STATUS_MSG, jobId); } }
From source file:org.obiba.opal.core.runtime.DefaultOpalRuntime.java
@Override @PreDestroy// www . j av a 2 s .c o m public void stop() { for (Service service : services) { try { if (service.isRunning()) service.stop(); } catch (RuntimeException e) { //noinspection StringConcatenationArgumentToLogCall log.warn("Error stopping service " + service.getClass(), e); } } transactionTemplate.execute(new TransactionCallbackWithoutResult() { @Override protected void doInTransactionWithoutResult(TransactionStatus status) { // Remove all datasources before writing the configuration. // This is done so that Disposable instances are disposed of before being written to the config file for (Datasource ds : MagmaEngine.get().getDatasources()) { try { MagmaEngine.get().removeDatasource(ds); } catch (RuntimeException e) { log.warn("Ignoring exception during shutdown sequence.", e); } } } }); }
From source file:org.obiba.opal.core.service.ProjectsServiceImpl.java
@Override @Transactional(propagation = Propagation.NEVER) public void save(@NotNull final Project project) throws ConstraintViolationException { try {// www . j a v a 2s . co m Project original = getProject(project.getName()); String originalDb = nullToEmpty(original.getDatabase()); String newDb = nullToEmpty(project.getDatabase()); if (!newDb.equals(originalDb)) { transactionTemplate.execute(new TransactionCallbackWithoutResult() { @Override protected void doInTransactionWithoutResult(TransactionStatus status) { Datasource datasource = MagmaEngine.get().getDatasource(project.getName()); MagmaEngine.get().removeDatasource(datasource); viewManager.unregisterDatasource(datasource.getName()); if (datasource.canDrop()) { datasource.drop(); } } }); databaseRegistry.unregister(originalDb, project.getName()); registerDatasource(project); } } catch (NoSuchProjectException e) { registerDatasource(project); } orientDbService.save(project, project); }