Example usage for org.springframework.transaction.support DefaultTransactionDefinition DefaultTransactionDefinition

Introduction

On this page you can find example usages of the org.springframework.transaction.support DefaultTransactionDefinition(int propagationBehavior) constructor.

Prototype

public DefaultTransactionDefinition(int propagationBehavior) 

Document

Create a new DefaultTransactionDefinition with the given propagation behavior.
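
Before the full examples below, here is a minimal, self-contained sketch of the constructor in use with programmatic transaction management. It is illustrative only: the TransactionRunner class and runInTransaction method are hypothetical, and the PlatformTransactionManager is assumed to be any configured implementation (for example a DataSourceTransactionManager).

import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.TransactionDefinition;
import org.springframework.transaction.TransactionStatus;
import org.springframework.transaction.support.DefaultTransactionDefinition;

public class TransactionRunner {

    private final PlatformTransactionManager transactionManager;

    public TransactionRunner(PlatformTransactionManager transactionManager) {
        this.transactionManager = transactionManager;
    }

    public void runInTransaction(Runnable work) {
        // Build a definition with the given propagation behavior.
        TransactionDefinition def = new DefaultTransactionDefinition(
                TransactionDefinition.PROPAGATION_REQUIRED);

        // Begin (or join) a transaction, then commit on success or roll back on failure.
        TransactionStatus status = transactionManager.getTransaction(def);
        try {
            work.run();
            transactionManager.commit(status);
        } catch (RuntimeException e) {
            transactionManager.rollback(status);
            throw e;
        }
    }
}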

Usage

From source file:edu.ur.file.db.service.DefaultFileServerServiceTest.java

/**
 * Test Empty File persistence.
 * 
 * - Make sure an empty file can be created.
 * - Makes sure an empty file can be found.
 * @throws LocationAlreadyExistsException 
 * @throws IllegalFileSystemNameException 
 * 
 */
@Test
public void createEmptyFileTest() throws LocationAlreadyExistsException, IllegalFileSystemNameException {

    // Start the transaction; this is needed for lazy loading
    TransactionDefinition td = new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRED);
    TransactionStatus ts = tm.getTransaction(td);

    DefaultFileServer fs = dfss.createFileServer("service_file_server");

    DefaultFileDatabase fileDatabase = fs.createFileDatabase("nates file server", "file_server_1",
            properties.getProperty("defaultFileServerService.server_path"), "description");

    TreeFolderInfo treeFolderInfo = fileDatabase.createRootFolder("Nates Folder", "folder_1");
    assert fileDatabase
            .setCurrentFileStore(treeFolderInfo.getName()) : "Should be able to set current file store";

    dfss.saveFileServer(fs);

    //commit the transaction; this will assign an id to the
    //file database and folder information
    tm.commit(ts);

    //begin a new transaction
    td = new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRED);
    ts = tm.getTransaction(td);

    DefaultFileInfo info = dfss.createEmptyFile(fileDatabase, "empty_file_service_file", "txt");
    // commit the transaction
    tm.commit(ts);

    //begin a new transaction
    td = new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRED);
    ts = tm.getTransaction(td);

    assert dfss.getFileById(info.getId(), false).equals(info) : "The file information should be the same";

    assert dfss.findFile(info.getName()).equals(info) : "The file information should be the same";

    File myFile = new File(info.getFullPath());
    assert myFile.exists() : "File " + myFile.getAbsolutePath() + " should exist ";
    assert dfss.deleteFile(dfss.getFileById(info.getId(), false)) : "File " + info + " should be deleted";

    assert !myFile.exists() : "File " + myFile.getAbsolutePath() + " Should no longer exist";

    tm.commit(ts);

    dfss.deleteFileServer(fs.getName());
    assert dfss.getFileServer(fs.getName()) == null : "Should not find the file server";
}

From source file:org.fcrepo.camel.FcrepoTransactionManagerTest.java

@Test(expected = TransactionSystemException.class)
public void testTransactionCommitError() throws FcrepoOperationFailedException {
    final String baseUrl = "http://localhost:8080/rest";
    final String tx = "tx:1234567890";
    final URI commitUri = URI.create(baseUrl + "/" + tx + FcrepoConstants.COMMIT);
    final URI beginUri = URI.create(baseUrl + FcrepoConstants.TRANSACTION);
    final FcrepoTransactionManager txMgr = new FcrepoTransactionManager();
    txMgr.setBaseUrl(baseUrl);
    TestUtils.setField(txMgr, "fcrepoClient", mockClient);

    final TransactionTemplate transactionTemplate = new TransactionTemplate(txMgr);
    final DefaultTransactionDefinition txDef = new DefaultTransactionDefinition(
            TransactionDefinition.PROPAGATION_REQUIRED);

    transactionTemplate.setPropagationBehavior(TransactionDefinition.PROPAGATION_REQUIRED);
    transactionTemplate.afterPropertiesSet();

    when(mockClient.post(eq(beginUri))).thenReturn(mockPostBuilder);
    when(mockClient.post(eq(commitUri))).thenReturn(mockPostBuilder2);
    when(mockPostBuilder.perform()).thenReturn(new FcrepoResponse(beginUri, 201,
            singletonMap("Location", singletonList(baseUrl + "/" + tx)), null));
    when(mockPostBuilder2.perform())
            .thenThrow(new FcrepoOperationFailedException(commitUri, 400, "Bad Request"));

    DefaultTransactionStatus status = (DefaultTransactionStatus) txMgr.getTransaction(txDef);

    final FcrepoTransactionObject txObj = (FcrepoTransactionObject) status.getTransaction();
    assertEquals(tx, txObj.getSessionId());
    assertFalse(status.isCompleted());

    status = (DefaultTransactionStatus) txMgr.getTransaction(txDef);
    txMgr.commit(status);
}
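
For comparison, the TransactionTemplate built in the test above is normally driven through its execute callback, which begins the transaction with the configured definition and commits on success or rolls back when the callback throws. A minimal sketch, assuming txMgr is the configured transaction manager:

TransactionTemplate template = new TransactionTemplate(txMgr);
template.setPropagationBehavior(TransactionDefinition.PROPAGATION_REQUIRED);
template.execute(status -> {
    // work that must run inside the transaction
    return null;
});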

From source file:net.longfalcon.newsj.Releases.java

public void processReleases() {
    String startDateString = DateUtil.displayDateFormatter.print(System.currentTimeMillis());
    _log.info(String.format("Starting release update process (%s)", startDateString));

    // get site config TODO: use config service
    Site site = siteDAO.getDefaultSite();

    int retcount = 0;

    Directory nzbBaseDir = fileSystemService.getDirectory("/nzbs");

    checkRegexesUptoDate(site.getLatestRegexUrl(), site.getLatestRegexRevision());

    // Stage 0

    // this is a hack - tx is not working ATM
    TransactionStatus transaction = transactionManager
            .getTransaction(new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRED));

    //
    // Get all regexes for all groups which are to be applied to new binaries
    // in order of how they should be applied
    //
    List<ReleaseRegex> releaseRegexList = releaseRegexDAO.getRegexes(true, "-1", false);
    for (ReleaseRegex releaseRegex : releaseRegexList) {

        String releaseRegexGroupName = releaseRegex.getGroupName();
        _log.info(String.format("Applying regex %d for group %s", releaseRegex.getId(),
                ValidatorUtil.isNull(releaseRegexGroupName) ? "all" : releaseRegexGroupName));

        // compile the regex early, to test it
        String regex = releaseRegex.getRegex();
        Pattern pattern = Pattern.compile(fixRegex(regex), Pattern.CASE_INSENSITIVE); // remove '/' and '/i'

        HashSet<Long> groupMatch = new LinkedHashSet<>();

        //
        // Groups ending in * need to be LIKE-matched when getting out binaries for groups and their children
        //
        Matcher matcher = _wildcardPattern.matcher(releaseRegexGroupName);
        if (matcher.matches()) {
            releaseRegexGroupName = releaseRegexGroupName.substring(0, releaseRegexGroupName.length() - 1);
            List<Group> groups = groupDAO.findGroupsByName(releaseRegexGroupName);
            for (Group group : groups) {
                groupMatch.add(group.getId());
            }
        } else if (!ValidatorUtil.isNull(releaseRegexGroupName)) {
            Group group = groupDAO.getGroupByName(releaseRegexGroupName);
            if (group != null) {
                groupMatch.add(group.getId());
            }
        }

        List<Binary> binaries = new ArrayList<>();
        if (groupMatch.size() > 0) {
            // Get out all binaries of STAGE0 for current group
            binaries = binaryDAO.findByGroupIdsAndProcStat(groupMatch, Defaults.PROCSTAT_NEW);
        }

        Map<String, String> arrNoPartBinaries = new LinkedHashMap<>();
        DateTime fiveHoursAgo = DateTime.now().minusHours(5);

        // this for loop should probably be a single transaction
        for (Binary binary : binaries) {
            String testMessage = "Test run - Binary Name " + binary.getName();

            Matcher groupRegexMatcher = pattern.matcher(binary.getName());
            if (groupRegexMatcher.find()) {
                String reqIdGroup = null;
                try {
                    reqIdGroup = groupRegexMatcher.group("reqid");
                } catch (IllegalArgumentException e) {
                    _log.debug(e.toString());
                }
                String partsGroup = null;
                try {
                    partsGroup = groupRegexMatcher.group("parts");
                } catch (IllegalArgumentException e) {
                    _log.debug(e.toString());
                }
                String nameGroup = null;
                try {
                    nameGroup = groupRegexMatcher.group("name");
                } catch (Exception e) {
                    _log.debug(e.toString());
                }
                _log.debug(testMessage + " matches with: \n reqId = " + reqIdGroup + " parts = " + partsGroup
                        + " and name = " + nameGroup);

                if ((ValidatorUtil.isNotNull(reqIdGroup) && ValidatorUtil.isNumeric(reqIdGroup))
                        && ValidatorUtil.isNull(nameGroup)) {
                    nameGroup = reqIdGroup;
                }

                if (ValidatorUtil.isNull(nameGroup)) {
                    _log.warn(String.format(
                            "regex applied which didn't return the right number of capture groups - %s", regex));
                    _log.warn(String.format("regex matched: reqId = %s parts = %s and name = %s", reqIdGroup,
                            partsGroup, nameGroup));
                    continue;
                }

                // If there's no number-of-files data in the subject, put it into a release if it was posted to usenet more than five hours ago.
                if ((ValidatorUtil.isNull(partsGroup) && fiveHoursAgo.isAfter(binary.getDate().getTime()))) {
                    //
                    // Take a copy of the name of this no-part release. This can be used
                    // next time round the loop to find parts of this set which have not yet reached five hours.
                    //
                    arrNoPartBinaries.put(nameGroup, "1");
                    partsGroup = "01/01";
                }

                if (ValidatorUtil.isNotNull(nameGroup) && ValidatorUtil.isNotNull(partsGroup)) {

                    if (partsGroup.indexOf('/') == -1) {
                        partsGroup = partsGroup.replaceFirst("(-)|(~)|(\\sof\\s)", "/"); // replace weird parts delimiters
                    }

                    Integer regexCategoryId = releaseRegex.getCategoryId();
                    Integer reqId = null;
                    if (ValidatorUtil.isNotNull(reqIdGroup) && ValidatorUtil.isNumeric(reqIdGroup)) {
                        reqId = Integer.parseInt(reqIdGroup);
                    }

                    //check if post is repost
                    Pattern repostPattern = Pattern.compile("(repost\\d?|re\\-?up)", Pattern.CASE_INSENSITIVE);
                    Matcher binaryNameRepostMatcher = repostPattern.matcher(binary.getName());

                    if (binaryNameRepostMatcher.find()
                            && !nameGroup.toLowerCase().matches("^[\\s\\S]+(repost\\d?|re\\-?up)")) {
                        nameGroup = nameGroup + (" " + binaryNameRepostMatcher.group(1));
                    }

                    String[] partsStrings = partsGroup.split("/");
                    int relpart = Integer.parseInt(partsStrings[0]);
                    int relTotalPart = Integer.parseInt(partsStrings[1]);

                    binary.setRelName(nameGroup.replace("_", " "));
                    binary.setRelPart(relpart);
                    binary.setRelTotalPart(relTotalPart);
                    binary.setProcStat(Defaults.PROCSTAT_TITLEMATCHED);
                    binary.setCategoryId(regexCategoryId);
                    binary.setRegexId(releaseRegex.getId());
                    binary.setReqId(reqId);
                    binaryDAO.updateBinary(binary);

                }
            }
        }

    }

    transactionManager.commit(transaction);

    // this is a hack - tx is not working ATM
    transaction = transactionManager
            .getTransaction(new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRED));
    //
    // Move all binaries from releases which have the correct number of files on to the next stage.
    //
    _log.info("Stage 2");
    List<MatchedReleaseQuery> matchedReleaseQueries = binaryDAO
            .findBinariesByProcStatAndTotalParts(Defaults.PROCSTAT_TITLEMATCHED);
    matchedReleaseQueries = combineMatchedQueries(matchedReleaseQueries);

    int siteMinFilestoFormRelease = site.getMinFilesToFormRelease();

    for (MatchedReleaseQuery matchedReleaseQuery : matchedReleaseQueries) {
        retcount++;

        //
        // Fewer than the site-permitted number of files in a release. Don't discard it, as it may
        // be part of a set still being uploaded.
        //
        int minFiles = siteMinFilestoFormRelease;
        String releaseName = matchedReleaseQuery.getReleaseName();
        long matchedReleaseQueryGroup = matchedReleaseQuery.getGroup();
        Long matchedReleaseQueryNumberOfBinaries = matchedReleaseQuery.getNumberOfBinaries();
        int matchedReleaseTotalParts = matchedReleaseQuery.getReleaseTotalParts();
        String fromName = matchedReleaseQuery.getFromName();
        Integer reqId = matchedReleaseQuery.getReqId();

        Group group = groupDAO.findGroupByGroupId(matchedReleaseQueryGroup);
        if (group != null && group.getMinFilesToFormRelease() != null) {
            minFiles = group.getMinFilesToFormRelease();
        }

        if (matchedReleaseQueryNumberOfBinaries < minFiles) {

            _log.warn(String.format("Number of files in release %s less than site/group setting (%s/%s)",
                    releaseName, matchedReleaseQueryNumberOfBinaries, minFiles));

            binaryDAO.updateBinaryIncrementProcAttempts(releaseName, Defaults.PROCSTAT_TITLEMATCHED,
                    matchedReleaseQueryGroup, fromName);
        } else if (matchedReleaseQueryNumberOfBinaries >= matchedReleaseTotalParts) {
            // Check that the binary is complete
            List<Binary> releaseBinaryList = binaryDAO.findBinariesByReleaseNameProcStatGroupIdFromName(
                    releaseName, Defaults.PROCSTAT_TITLEMATCHED, matchedReleaseQueryGroup, fromName);

            boolean incomplete = false;
            for (Binary binary : releaseBinaryList) {
                long partsCount = partDAO.countPartsByBinaryId(binary.getId());
                if (partsCount < binary.getTotalParts()) {
                    float percentComplete = ((float) partsCount / (float) binary.getTotalParts()) * 100;
                    _log.warn(String.format("binary %s from %s has missing parts = %s/%s (%s%% complete)",
                            binary.getId(), releaseName, partsCount, binary.getTotalParts(), percentComplete));

                    // Allow the binary to release if it was posted to usenet more than four hours ago and we still don't have all the parts
                    DateTime fourHoursAgo = DateTime.now().minusHours(4);
                    if (fourHoursAgo.isAfter(new DateTime(binary.getDate()))) {
                        _log.info("allowing incomplete binary " + binary.getId());
                    } else {
                        incomplete = true;
                    }
                }
            }

            if (incomplete) {
                _log.warn(String.format("Incorrect number of parts %s-%s-%s", releaseName,
                        matchedReleaseQueryNumberOfBinaries, matchedReleaseTotalParts));
                binaryDAO.updateBinaryIncrementProcAttempts(releaseName, Defaults.PROCSTAT_TITLEMATCHED,
                        matchedReleaseQueryGroup, fromName);
            }

            //
            // Right number of files, but see if the binary is an allfilled/reqid post, in which case it needs its name looked up
            // TODO: Does this even work anymore?
            else if (ValidatorUtil.isNotNull(site.getReqIdUrl()) && ValidatorUtil.isNotNull(reqId)) {

                //
                // Try and get the name using the group
                //
                _log.info("Looking up " + reqId + " in " + group.getName() + "...");
                String newTitle = getReleaseNameForReqId(site.getReqIdUrl(), group, reqId, true);

                //
                // if the feed/group wasn't supported by the scraper, then just use the release name as the title.
                //
                if (ValidatorUtil.isNull(newTitle) || newTitle.equals("no feed")) {
                    newTitle = releaseName;
                    _log.warn("Group not supported");
                }

                //
                // Valid release with right number of files and title now, so move it on
                //
                if (ValidatorUtil.isNotNull(newTitle)) {
                    binaryDAO.updateBinaryNameAndStatus(newTitle, Defaults.PROCSTAT_READYTORELEASE, releaseName,
                            Defaults.PROCSTAT_TITLEMATCHED, matchedReleaseQueryGroup, fromName);
                } else {
                    //
                    // Item not found; if the binary was added to the index ages ago, then give up.
                    //
                    Timestamp timestamp = binaryDAO.findMaxDateAddedBinaryByReleaseNameProcStatGroupIdFromName(
                            releaseName, Defaults.PROCSTAT_TITLEMATCHED, matchedReleaseQueryGroup, fromName);
                    DateTime maxAddedDate = new DateTime(timestamp);
                    DateTime twoDaysAgo = DateTime.now().minusDays(2);

                    if (maxAddedDate.isBefore(twoDaysAgo)) {
                        binaryDAO.updateBinaryNameAndStatus(releaseName,
                                Defaults.PROCSTAT_NOREQIDNAMELOOKUPFOUND, releaseName,
                                Defaults.PROCSTAT_TITLEMATCHED, matchedReleaseQueryGroup, fromName);
                        _log.warn("Not found in 48 hours");
                    }
                }
            } else {
                binaryDAO.updateBinaryNameAndStatus(releaseName, Defaults.PROCSTAT_READYTORELEASE, releaseName,
                        Defaults.PROCSTAT_TITLEMATCHED, matchedReleaseQueryGroup, fromName);

            }
        } else {
            //
            // There are fewer than the expected number of files, so update the attempts and move on.
            //

            _log.info(String.format("Incorrect number of files for %s (%d/%d)", releaseName,
                    matchedReleaseQueryNumberOfBinaries, matchedReleaseTotalParts));
            binaryDAO.updateBinaryIncrementProcAttempts(releaseName, Defaults.PROCSTAT_TITLEMATCHED,
                    matchedReleaseQueryGroup, fromName);
        }

        if (retcount % 10 == 0) {
            _log.info(String.format("-processed %d binaries stage two", retcount));
        }

    }
    transactionManager.commit(transaction);

    retcount = 0;
    int nfoCount = 0;

    // this is a hack - tx is not working ATM
    transaction = transactionManager
            .getTransaction(new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRED));
    //
    // Get out all distinct relname, group from binaries of STAGE2
    //
    _log.info("Stage 3");
    List<MatchedReleaseQuery> readyReleaseQueries = binaryDAO
            .findBinariesByProcStatAndTotalParts(Defaults.PROCSTAT_READYTORELEASE);
    readyReleaseQueries = combineMatchedQueries(readyReleaseQueries);
    for (MatchedReleaseQuery readyReleaseQuery : readyReleaseQueries) {
        retcount++;

        String releaseName = readyReleaseQuery.getReleaseName();
        int numParts = readyReleaseQuery.getReleaseTotalParts();
        long binaryCount = readyReleaseQuery.getNumberOfBinaries();
        long groupId = readyReleaseQuery.getGroup();
        //
        // Get the last post date and the poster name from the binary
        //
        String fromName = readyReleaseQuery.getFromName();
        Timestamp timestamp = binaryDAO.findMaxDateAddedBinaryByReleaseNameProcStatGroupIdFromName(releaseName,
                Defaults.PROCSTAT_READYTORELEASE, groupId, fromName);
        DateTime addedDate = new DateTime(timestamp);

        //
        // Get all releases with the same name with a usenet posted date within a +/- one day range.
        //
        Date oneDayBefore = addedDate.minusDays(1).toDate();
        Date oneDayAfter = addedDate.plusDays(1).toDate();
        List<Release> relDupes = releaseDAO.findReleasesByNameAndDateRange(releaseName, oneDayBefore,
                oneDayAfter);

        if (!relDupes.isEmpty()) {
            binaryDAO.updateBinaryNameAndStatus(releaseName, Defaults.PROCSTAT_DUPLICATE, releaseName,
                    Defaults.PROCSTAT_READYTORELEASE, groupId, fromName);
            continue;
        }

        //
        // Get total size of this release
        // Done in a big OR statement, not an IN, as the mysql binaryID index on the parts table
        // was not being used.
        //

        // SM: TODO this should be revisited, using hb mappings

        long totalSize = 0;
        int regexAppliedCategoryId = 0;
        long regexIdUsed = 0;
        int reqIdUsed = 0;
        int relTotalParts = 0;
        float relCompletion;
        List<Binary> binariesForSize = binaryDAO.findBinariesByReleaseNameProcStatGroupIdFromName(releaseName,
                Defaults.PROCSTAT_READYTORELEASE, groupId, fromName);

        long relParts = 0;
        for (Binary binary : binariesForSize) {
            if (ValidatorUtil.isNotNull(binary.getCategoryId()) && regexAppliedCategoryId == 0) {
                regexAppliedCategoryId = binary.getCategoryId();
            }

            if (ValidatorUtil.isNotNull(binary.getRegexId()) && regexIdUsed == 0) {
                regexIdUsed = binary.getRegexId();
            }

            if (ValidatorUtil.isNotNull(binary.getReqId()) && reqIdUsed == 0) {
                reqIdUsed = binary.getReqId();
            }

            relTotalParts += binary.getTotalParts();
            relParts += partDAO.countPartsByBinaryId(binary.getId());
            totalSize += partDAO.sumPartsSizeByBinaryId(binary.getId());
        }
        relCompletion = ((float) relParts / (float) relTotalParts) * 100f;

        //
        // Insert the release
        //

        String releaseGuid = UUID.randomUUID().toString();
        int categoryId;
        Category category = null;
        Long regexId;
        Integer reqId;
        if (regexAppliedCategoryId == 0) {
            categoryId = categoryService.determineCategory(groupId, releaseName);
        } else {
            categoryId = regexAppliedCategoryId;
        }
        if (categoryId > 0) {
            category = categoryService.getCategory(categoryId);
        }

        if (regexIdUsed == 0) {
            regexId = null;
        } else {
            regexId = regexIdUsed;
        }

        if (reqIdUsed == 0) {
            reqId = null;
        } else {
            reqId = reqIdUsed;
        }

        // Clean the release name: strip everything except letters, digits, '-', '_', spaces and '.'
        String cleanReleaseName = releaseName.replaceAll("[^A-Za-z0-9-_\\ \\.]+", "");
        Release release = new Release();
        release.setName(cleanReleaseName);
        release.setSearchName(cleanReleaseName);
        release.setTotalpart(numParts);
        release.setGroupId(groupId);
        release.setAddDate(new Date());
        release.setGuid(releaseGuid);
        release.setCategory(category);
        release.setRegexId(regexId);
        release.setRageId((long) -1);
        release.setPostDate(addedDate.toDate());
        release.setFromName(fromName);
        release.setSize(totalSize);
        release.setReqId(reqId);
        release.setPasswordStatus(site.getCheckPasswordedRar() == 1 ? -1 : 0); // magic constants
        release.setCompletion(relCompletion);
        releaseDAO.updateRelease(release);
        long releaseId = release.getId();
        _log.info("Added release " + cleanReleaseName);

        //
        // Tag every binary for this release with its parent release id
        // remove the release name from the binary as it's no longer required
        //
        binaryDAO.updateBinaryNameStatusReleaseID("", Defaults.PROCSTAT_RELEASED, releaseId, releaseName,
                Defaults.PROCSTAT_READYTORELEASE, groupId, fromName);

        //
        // Find an .nfo in the release
        //
        ReleaseNfo releaseNfo = nfo.determineReleaseNfo(release);
        if (releaseNfo != null) {
            nfo.addReleaseNfo(releaseNfo);
            nfoCount++;
        }

        //
        // Write the nzb to disk
        //
        nzb.writeNZBforReleaseId(release, nzbBaseDir, true);

        if (retcount % 5 == 0) {
            _log.info("-processed " + retcount + " releases stage three");
        }

    }

    _log.info("Found " + nfoCount + " nfos in " + retcount + " releases");

    //
    // Process nfo files
    //
    if (site.getLookupNfo() != 1) {
        _log.info("Site config (site.lookupnfo) prevented retrieving nfos");
    } else {
        nfo.processNfoFiles(site.getLookupImdb(), site.getLookupTvRage());
    }

    //
    // Lookup imdb if enabled
    //
    if (site.getLookupImdb() == 1) {
        movieService.processMovieReleases();
    }

    //
    // Lookup music if enabled
    //
    if (site.getLookupMusic() == 1) {
        musicService.processMusicReleases();
    }

    //
    // Lookup games if enabled
    //
    if (site.getLookupGames() == 1) {
        gameService.processConsoleReleases();
    }

    //
    // Check for passworded releases
    //
    if (site.getCheckPasswordedRar() != 1) {
        _log.info("Site config (site.checkpasswordedrar) prevented checking releases are passworded");
    } else {
        processPasswordedReleases(true);
    }

    //
    // Process all TV related releases which will assign their series/episode/rage data
    //
    tvRageService.processTvReleases(site.getLookupTvRage() == 1);

    //
    // Get the current datetime again, as using now() in the housekeeping queries prevents the index being used.
    //
    DateTime now = new DateTime();

    //
    // Tidy away any binaries which have been attempted to be grouped into
    // a release more than x times (SM: or is it days?)
    //
    int attemptGroupBinDays = site.getAttemtpGroupBinDays();
    _log.info(String.format("Tidying away binaries which can't be grouped after %s days", attemptGroupBinDays));

    DateTime maxGroupBinDays = now.minusDays(attemptGroupBinDays);
    binaryDAO.updateProcStatByProcStatAndDate(Defaults.PROCSTAT_WRONGPARTS, Defaults.PROCSTAT_NEW,
            maxGroupBinDays.toDate());

    //
    // Delete any parts and binaries which are older than the site's retention days
    //
    int maxRetentionDays = site.getRawRetentionDays();
    DateTime maxRetentionDate = now.minusDays(maxRetentionDays);
    _log.info(String.format("Deleting parts which are older than %d days", maxRetentionDays));
    partDAO.deletePartByDate(maxRetentionDate.toDate());

    _log.info(String.format("Deleting binaries which are older than %d days", maxRetentionDays));
    binaryDAO.deleteBinaryByDate(maxRetentionDate.toDate());

    //
    // Delete any releases which are older than site's release retention days
    //
    int releaseRetentionDays = site.getReleaseRetentionDays();
    if (releaseRetentionDays != 0) {
        _log.info("Determining any releases past retention to be deleted.");

        DateTime maxReleaseRetentionDate = DateTime.now().minusDays(releaseRetentionDays);
        List<Release> releasesToDelete = releaseDAO.findReleasesBeforeDate(maxReleaseRetentionDate.toDate());
        for (Release release : releasesToDelete) {
            releaseDAO.deleteRelease(release);
        }
    }
    transaction.flush(); // may be unneeded
    transactionManager.commit(transaction);

    _log.info(String.format("Processed %d releases", retcount));
    if (!transaction.isCompleted()) {
        throw new IllegalStateException("Transaction is not completed or rolled back.");
    }
    //return retcount;
}

From source file:hoot.services.controllers.osm.ChangesetResource.java

/**
 * Service method endpoint for uploading OSM changeset diff data
 *
 * @param changeset OSM changeset diff data
 * @param changesetId ID of the changeset being uploaded; changeset with the ID must already exist
 * @return response acknowledging the result of the update operation with updated entity ID 
 * information
 * @throws Exception
 * @see http://wiki.openstreetmap.org/wiki/API_0.6 and 
 * http://wiki.openstreetmap.org/wiki/OsmChange
 * @todo why can't I pass in changesetDiff as an XML doc instead of a string?
 */
@POST
@Path("/{changesetId}/upload")
@Consumes(MediaType.TEXT_XML)
@Produces(MediaType.TEXT_XML)
public Response upload(final String changeset, @PathParam("changesetId") final long changesetId,
        @QueryParam("mapId") final String mapId) throws Exception {
    Connection conn = DbUtils.createConnection();
    Document changesetUploadResponse = null;
    try {
        log.debug("Intializing database connection...");

        log.debug("Intializing changeset upload transaction...");
        TransactionStatus transactionStatus = transactionManager
                .getTransaction(new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRED));
        conn.setAutoCommit(false);

        try {
            if (mapId == null) {
                throw new Exception("Invalid map id.");
            }
            long mapid = Long.parseLong(mapId);
            changesetUploadResponse = (new ChangesetDbWriter(conn)).write(mapid, changesetId, changeset);
        } catch (Exception e) {
            log.error("Rolling back transaction for changeset upload...");
            transactionManager.rollback(transactionStatus);
            conn.rollback();
            handleError(e, changesetId, StringUtils.abbreviate(changeset, 100));
        }

        log.debug("Committing changeset upload transaction...");
        transactionManager.commit(transactionStatus);
        conn.commit();
    } finally {
        conn.setAutoCommit(true);
        DbUtils.closeConnection(conn);
    }

    log.debug("Returning changeset upload response: "
            + StringUtils.abbreviate(XmlDocumentBuilder.toString(changesetUploadResponse), 100) + " ...");
    return Response.ok(new DOMSource(changesetUploadResponse), MediaType.TEXT_XML)
            .header("Content-type", MediaType.TEXT_XML).build();
}

From source file:edu.ur.file.db.service.DefaultFileServerServiceTest.java

/**
 * Test creating a folder.
 * 
 * - Make sure a folder location can be created.
 * @throws LocationAlreadyExistsException 
 * 
 */
@Test
public void createFolderTest() throws LocationAlreadyExistsException {
    // Start the transaction; this is needed for lazy loading
    TransactionDefinition td = new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRED);
    TransactionStatus ts = tm.getTransaction(td);

    DefaultFileServer fs = dfss.createFileServer("service_file_server");

    DefaultFileDatabase fileDatabase = fs.createFileDatabase("nates file server", "file_server_1",
            properties.getProperty("defaultFileServerService.server_path"), "description");

    TreeFolderInfo treeFolderInfo = fileDatabase.createRootFolder("Nates Folder", "folder_1");
    assert fileDatabase
            .setCurrentFileStore(treeFolderInfo.getName()) : "Should be able to set current file store";

    dfss.saveFileServer(fs);
    //commit the transaction; this will assign an id to the
    //file database and folder information
    tm.commit(ts);

    //begin a new transaction
    td = new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRED);
    ts = tm.getTransaction(td);

    FolderInfo info = dfss.createFolder(fileDatabase, "a_folder_name");
    Long id = info.getId();

    URI uri = info.getUri();

    File f = new File(uri.getPath());

    assert f.exists() : "File " + f.getAbsolutePath() + " does not exist!";
    tm.commit(ts);

    //make sure the folder exists.
    //begin a new transaction
    td = new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRED);
    ts = tm.getTransaction(td);

    info = dfss.getFolder(id, false);

    assert info != null : "folder info for id " + id + " could not be found";

    dfss.deleteFolder(info);
    tm.commit(ts);

    //make sure the folder has been deleted.
    //begin a new transaction
    td = new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRED);
    ts = tm.getTransaction(td);
    info = dfss.getFolder(id, false);
    assert info == null : "folder info for id " + id + " was found and SHOULD NOT be";
    tm.commit(ts);

    dfss.deleteFileServer(fs.getName());

}

From source file:org.fcrepo.camel.FcrepoTransactionManagerTest.java

@Test(expected = TransactionSystemException.class)
public void testTransactionRollbackError() throws FcrepoOperationFailedException {
    final String baseUrl = "http://localhost:8080/rest";
    final String tx = "tx:1234567890";
    final URI rollbackUri = URI.create(baseUrl + "/" + tx + FcrepoConstants.ROLLBACK);
    final URI beginUri = URI.create(baseUrl + FcrepoConstants.TRANSACTION);
    final FcrepoTransactionManager txMgr = new FcrepoTransactionManager();
    txMgr.setBaseUrl(baseUrl);
    TestUtils.setField(txMgr, "fcrepoClient", mockClient);

    final TransactionTemplate transactionTemplate = new TransactionTemplate(txMgr);
    final DefaultTransactionDefinition txDef = new DefaultTransactionDefinition(
            TransactionDefinition.PROPAGATION_REQUIRED);

    transactionTemplate.setPropagationBehavior(TransactionDefinition.PROPAGATION_REQUIRED);
    transactionTemplate.afterPropertiesSet();

    when(mockClient.post(eq(beginUri))).thenReturn(mockPostBuilder);
    when(mockClient.post(eq(rollbackUri))).thenReturn(mockPostBuilder2);
    when(mockPostBuilder.perform()).thenReturn(new FcrepoResponse(beginUri, 201,
            singletonMap("Location", singletonList(baseUrl + "/" + tx)), null));
    when(mockPostBuilder2.perform())
            .thenThrow(new FcrepoOperationFailedException(rollbackUri, 400, "Bad Request"));

    DefaultTransactionStatus status = (DefaultTransactionStatus) txMgr.getTransaction(txDef);

    final FcrepoTransactionObject txObj = (FcrepoTransactionObject) status.getTransaction();
    assertEquals(tx, txObj.getSessionId());
    assertFalse(status.isCompleted());

    status = (DefaultTransactionStatus) txMgr.getTransaction(txDef);
    txMgr.rollback(status);
}

From source file:edu.ur.file.db.service.DefaultFileServerServiceTest.java

/**
 * Test the MaxFilesFileStorage strategy.
 * 
 * - Make sure a new root folder is created when the max number of files
 * is reached.
 * @throws LocationAlreadyExistsException 
 * @throws IllegalFileSystemNameException 
 * 
 */
@Test
public void maxFilesStoreStrategy() throws LocationAlreadyExistsException, IllegalFileSystemNameException {
    // Start the transaction; this is needed for lazy loading
    TransactionDefinition td = new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRED);
    TransactionStatus ts = tm.getTransaction(td);

    DefaultFileServer fs = dfss.createFileServer("service_file_server");

    // set the max number of files per folder to 1 and set the strategy in the service
    maxFileStoreStrategy.setMaxNumberOfFilesPerFolder(1L);
    dfss.setDefaultDatabaseFileStoreStrategy(maxFileStoreStrategy);

    DefaultFileDatabase fileDatabase = fs.createFileDatabase("nates file server", "file_server_1",
            properties.getProperty("defaultFileServerService.server_path"), "description");

    TreeFolderInfo treeFolderInfo = fileDatabase.createRootFolder("Nates Folder", "folder_1");
    assert fileDatabase
            .setCurrentFileStore(treeFolderInfo.getName()) : "Should be able to set current file store";

    dfss.saveFileServer(fs);

    //commit the transaction; this will assign an id to the
    //file database and folder information
    tm.commit(ts);

    //begin a new transaction
    td = new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRED);
    ts = tm.getTransaction(td);
    // create the files to store in the temporary folder
    String tempDirectory = properties.getProperty("file_db_temp_directory");
    File directory = new File(tempDirectory);

    // helper to create the file
    FileUtil testUtil = new FileUtil();
    testUtil.createDirectory(directory);

    File f1 = testUtil.creatFile(directory, "basicFile1", "Hello  -  This file is for equals 1");
    File f2 = testUtil.creatFile(directory, "basicFile2", "Hello  -  This file is for equals 2");

    fileDatabase = dfss.getDatabaseById(fileDatabase.getId(), false);
    assert fileDatabase.getFullPath() != null : "Path for file database is null " + fileDatabase.getFullPath();
    TreeFolderInfo currentDefaultFolder1 = fileDatabase.getCurrentFileStore();
    assert currentDefaultFolder1.getFileDatabase() != null : "file database is null for "
            + currentDefaultFolder1.getFileDatabase();
    assert currentDefaultFolder1.getFullPath() != null : "should be able to get path but couldn't";

    DefaultFileInfo info1 = dfss.addFile(fileDatabase, f1, "file_service_file", "txt");
    TreeFolderInfo currentDefaultFolder2 = dfss.getDatabaseById(fileDatabase.getId(), false)
            .getCurrentFileStore();
    DefaultFileInfo info2 = dfss.addFile(fileDatabase, f2, "file_service_file", "txt");
    assert !currentDefaultFolder1.equals(currentDefaultFolder2) : "Folders should NOT be the same but are "
            + " current folder 1 = " + currentDefaultFolder1 + " current folder 2 = " + currentDefaultFolder2;

    // commit the transaction
    tm.commit(ts);

    //begin a new transaction
    td = new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRED);
    ts = tm.getTransaction(td);

    DefaultFileInfo myInfo = (DefaultFileInfo) dfss.getFileById(info1.getId(), false);
    DefaultFileInfo myInfo2 = (DefaultFileInfo) dfss.getFileById(info2.getId(), false);

    assert myInfo != null : "Was not able to find " + info1;
    assert myInfo2 != null : "Was not able to find " + info2;

    tm.commit(ts);

    dfss.deleteFileServer(fs.getName());
    assert dfss.getFileServer(fs.getName()) == null : "Should not find the file server";
}

From source file:hoot.services.controllers.job.ReviewResource.java

/**
 * <NAME>Conflated Data Review Service Mark Items as Reviewed</NAME>
 * <DESCRIPTION>
 *    After editing reviewable items, this method is called to mark the items as having been reviewed.
 * The inputs to the service method are either a JSON structure which describes the status of the review for
 * each reviewed item, or, when a boolean is set to true to mark all items as reviewed, the structure is not required.
 * Also, optionally for convenience, an OSM XML changeset may be included in the request to upload a
 * changeset in the same call that marks data as reviewed. If a changeset is uploaded, the service
 * automatically creates and closes the changeset that stores the uploaded data. The response from the server contains
 * the outcome of the changeset upload and the changeset ID created (if a changeset was uploaded) as well as the number
 * of submitted items that were actually marked as reviewed. Clients can compare this number to the number of
 *  items sent for review, and if they are not equal, further troubleshooting should occur.
 *  For each item in the reviewed items JSON, the service will:
 *    - mark the reviewable item as reviewed, so that it will not be returned for review again
 *    - append the UUID of the item the reviewed item was reviewed against to its "uuid" OSM tag
 *    - remove the UUID of the item reviewed against from the "hoot:review:uuid" OSM tag of the reviewed item
 *    - remove all OSM review tags from the reviewed item, if it no longer contains items to be reviewed against
 *  The caller is responsible for doing the following, as the service will not automatically do it:
 *    - adding the "changeset" XML attribute to the elements in the XML changeset being uploaded, as is required by
 *      the standard OSM changeset upload service. The changeset ID attribute value may either be blank or populated
 *      with a number; however, the changeset ID will be overwritten with the ID of the changeset created by the
 *      service method execution.
 * </DESCRIPTION>
 * <PARAMETERS>
 *    <mapId>
 *    string; required; ID string or unique name of the map associated with the reviewable conflated data
 *    </mapId>
 *  <markAll>
 *  boolean; optional; defaults to false; indicates whether all items belonging to the map should be marked as reviewed
 *  </markAll>
 *  <markItemsReviewedRequest>
 *   JSON object; sent in request body payload; required; object is made up of:
 *   reviewedItems - JSON object; required if markAll is set to false; optional otherwise; lists the items which should be marked as reviewed
 *   reviewedItemsChangeset - XML string; optional; OSM changeset XML
 *  </markItemsReviewedRequest>
 * </PARAMETERS>
 * <OUTPUT>
 *    A number string to show how many items were marked as reviewed as a result of the service call.
 * </OUTPUT>
 * <EXAMPLE>
 *    <URL>http://localhost:8080/hoot-services/job/review?mapId=1&markAll=false</URL>
 *    <REQUEST_TYPE>PUT</REQUEST_TYPE>
 *    <INPUT>
 * {
 *   "reviewedItems":
 *   [
 *     {
 *       "id": 2402,
 *       "type": "way",
 *       "reviewedAgainstId": 2403,
 *       "reviewedAgainstType": "way"
 *     },
 *     {
 *       "id": 2404,
 *       "type": "way",
 *       "reviewedAgainstId": 2405,
 *       "reviewedAgainstType": "way"
 *     }
 *   ]
 * }
 *   </INPUT>
 * <OUTPUT>
 * 2
 * </OUTPUT>
 * </EXAMPLE>
*
* Marks a set of reviewable items as reviewed and updates the tags of their corresponding OSM
* elements
*
* @param reviewItemsChangeset an OSM changeset to be uploaded into the services database
* @param mapId ID of the map for which items are being marked as reviewed
* @return the number of items marked as reviewed
* @throws Exception
* @see https://insightcloud.digitalglobe.com/redmine/projects/hootenany/wiki/User_-_Conflated_Data_Review_Service_2#Mark-Items-as-Reviewed
*/
@PUT
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public MarkItemsReviewedResponse markItemsReviewed(MarkItemsReviewedRequest markItemsReviewedRequest,
        @QueryParam("mapId") String mapId, @DefaultValue("false") @QueryParam("markAll") boolean markAll)
        throws Exception {
    Connection conn = DbUtils.createConnection();
    final String errorMessageStart = "marking items as reviewed";
    MarkItemsReviewedResponse markItemsReviewedResponse = null;
    try {
        log.debug("markItemsReviewedRequest: " + markItemsReviewedRequest.toString());

        Map<String, Object> inputParams = new HashMap<String, Object>();
        inputParams.put("mapId", mapId);
        inputParams.put("markAll", markAll);
        ReviewInputParamsValidator inputParamsValidator = new ReviewInputParamsValidator(inputParams);
        mapId = (String) inputParamsValidator.validateAndParseInputParam("mapId", "", null, null, false, null);
        markAll = (Boolean) inputParamsValidator.validateAndParseInputParam("markAll", false, null, null, true,
                false);
        if (!markAll && (markItemsReviewedRequest.getReviewedItems() == null
                || markItemsReviewedRequest.getReviewedItems().getReviewedItems() == null
                || markItemsReviewedRequest.getReviewedItems().getReviewedItems().length == 0)) {
            throw new Exception("Invalid input parameter: markAll set to false and "
                    + "markItemsReviewedRequest.reviewedItems empty.");
        }

        log.debug("Initializing database connection...");

        log.debug("Intializing transaction...");
        TransactionStatus transactionStatus = transactionManager
                .getTransaction(new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRED));
        conn.setAutoCommit(false);

        try {
            markItemsReviewedResponse = (new ReviewItemsMarker(conn, mapId))
                    .markItemsReviewed(markItemsReviewedRequest, markAll);
        } catch (Exception e) {
            log.debug("Rolling back database transaction for ReviewResource::markItemsAsReviewed...");
            transactionManager.rollback(transactionStatus);
            conn.rollback();
            throw e;
        }

        log.debug("Committing ReviewResource::markItemsAsReviewed. database transaction...");
        transactionManager.commit(transactionStatus);
        conn.commit();
    } catch (Exception e) {
        ReviewUtils.handleError(e, errorMessageStart, false);
    } finally {
        try {
            conn.setAutoCommit(true);
            DbUtils.closeConnection(conn);
        } catch (Exception e) {
            ReviewUtils.handleError(e, errorMessageStart, false);
        }
    }

    //TODO: MarkItemsReviewedResponse toString() not working
    //    if (markItemsReviewedResponse != null)
    //    {
    //      log.debug("Returning mark items reviewed response: " +
    //        StringUtils.abbreviate(markItemsReviewedResponse.toString(), 100) + " ...");
    //    }

    return markItemsReviewedResponse;
}

From source file:info.jtrac.repository.HibernateJtracDao.java

/**
 * note that this is automatically configured to run on startup
 * as a spring bean "init-method"
 */
@PostConstruct
@Transactional(propagation = Propagation.REQUIRED)
public void createSchema() {
    try {
        entityManager.createQuery("from " + Item.class.getName() + " item where item.id = 1", Item.class)
                .getResultList();
        logger.info("database schema exists, normal startup");
    } catch (Exception e) {
        logger.warn("expected database schema does not exist, will create. Error is: " + e.getMessage());
        schemaHelper.createSchema();
        logger.info("inserting default admin user into database");
        TransactionStatus transactionStatus = transactionManager
                .getTransaction(new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRED));
        storeUser(createAdminUser());
        transactionManager.commit(transactionStatus);
        logger.info("schema creation complete");
    }
    TransactionStatus transactionStatus = transactionManager
            .getTransaction(new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRED));
    List<SpaceSequence> ssList = entityManager
            .createQuery("FROM " + SpaceSequence.class.getName(), SpaceSequence.class).getResultList();
    Map<Long, SpaceSequence> ssMap = new HashMap<Long, SpaceSequence>(ssList.size());
    for (SpaceSequence ss : ssList) {
        ssMap.put(ss.getId(), ss);
    }
    //      entityManager.flush();
    //      entityManager.createQuery("FROM User").getResultList()
    @SuppressWarnings("unchecked")
    List<Object[]> list = entityManager
            .createQuery("select item.space.id, max(item.sequenceNum) from Item item group by item.space.id")
            .getResultList();
    for (Object[] oa : list) {
        Long spaceId = (Long) oa[0];
        Long maxSeqNum = (Long) oa[1];
        SpaceSequence ss = ssMap.get(spaceId);
        logger.info("checking space sequence id: " + spaceId + ", max: " + maxSeqNum + ", next: "
                + ss.getNextSeqNum());
        if (ss.getNextSeqNum() <= maxSeqNum) {
            logger.warn("fixing sequence number for space id: " + spaceId + ", was: " + ss.getNextSeqNum()
                    + ", should be: " + (maxSeqNum + 1));
            ss.setNextSeqNum(maxSeqNum + 1);
            entityManager.merge(ss);
        }
    }
    transactionManager.commit(transactionStatus);
}
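
A note on this example: Spring's @Transactional generally has no effect on a @PostConstruct callback, because the container invokes it during bean initialization rather than through the transactional proxy, which is presumably why createSchema() drives the PlatformTransactionManager directly despite carrying the annotation.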

From source file:nl.nn.adapterframework.receivers.PullingListenerContainer.java

public void configure() {
    if (receiver.getNumThreadsPolling() > 0 && receiver.getNumThreadsPolling() < receiver.getNumThreads()) {
        pollToken = new Semaphore(receiver.getNumThreadsPolling());
    }
    processToken = new Semaphore(receiver.getNumThreads());
    maxThreadCount = receiver.getNumThreads();
    if (receiver.isTransacted()) {
        DefaultTransactionDefinition txDef = new DefaultTransactionDefinition(
                TransactionDefinition.PROPAGATION_REQUIRES_NEW);
        if (receiver.getTransactionTimeout() > 0) {
            txDef.setTimeout(receiver.getTransactionTimeout());
        }
        txNew = txDef;
    }
}