Example usage for javax.persistence FlushModeType COMMIT

List of usage examples for javax.persistence FlushModeType COMMIT

Introduction

On this page you can find example usages of javax.persistence FlushModeType COMMIT.

Prototype

FlushModeType COMMIT

Document

Flushing to occur at transaction commit.
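
Before the usage listings, here is a minimal, self-contained sketch showing the two styles of applying this flush mode that appear in the examples below: per query, and for the whole EntityManager. The Widget entity, the JPQL string, and the "examplePU" persistence-unit name are placeholders for illustration, not taken from any of the projects listed.

import java.util.List;

import javax.persistence.EntityManager;
import javax.persistence.EntityManagerFactory;
import javax.persistence.FlushModeType;
import javax.persistence.Persistence;
import javax.persistence.Query;

public class FlushModeCommitSketch {

    public static void main(String[] args) {
        // "examplePU" is a placeholder persistence-unit name
        EntityManagerFactory emf = Persistence.createEntityManagerFactory("examplePU");
        EntityManager em = emf.createEntityManager();
        try {
            // Per-query: COMMIT means pending persistence-context changes
            // are not flushed just to run this query
            Query q = em.createQuery("select w from Widget w where w.name = :name")
                    .setParameter("name", "example").setFlushMode(FlushModeType.COMMIT);
            List<?> results = q.getResultList();
            System.out.println("Found " + results.size() + " result(s)");

            // Per-EntityManager: from here on, flushing happens only at
            // transaction commit (or an explicit flush())
            em.setFlushMode(FlushModeType.COMMIT);
        } finally {
            em.close();
            emf.close();
        }
    }
}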

Usage

From source file:org.eurekastreams.server.persistence.DomainGroupMapper.java

/**
 * Look up a group by its short name.
 * 
 * @param groupShortName
 *            the short name of a group to look for
 * @return the DomainGroup corresponding to the provided short name
 */
@SuppressWarnings("unchecked")
public DomainGroup findByShortName(final String groupShortName) {
    Query q = getEntityManager().createQuery("from DomainGroup where shortname = :inName")
            .setParameter("inName", groupShortName.toLowerCase()).setFlushMode(FlushModeType.COMMIT);

    List results = q.getResultList();

    return (results.size() == 0) ? null : (DomainGroup) results.get(0);
}
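
Setting FlushModeType.COMMIT on this read-only lookup tells the provider not to flush pending persistence-context changes before executing the query, so the lookup itself cannot trigger premature writes.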

From source file:org.jasig.portal.events.aggr.PortalRawEventsAggregatorImpl.java

@Override
@AggrEventsTransactional
public EventProcessingResult doCloseAggregations() {
    if (!this.clusterLockService.isLockOwner(AGGREGATION_LOCK_NAME)) {
        throw new IllegalStateException("The cluster lock " + AGGREGATION_LOCK_NAME
                + " must be owned by the current thread and server");
    }

    final IEventAggregatorStatus cleanUnclosedStatus = eventAggregationManagementDao
            .getEventAggregatorStatus(ProcessingType.CLEAN_UNCLOSED, true);

    //Update status with current server name
    final String serverName = this.portalInfoProvider.getUniqueServerName();
    cleanUnclosedStatus.setServerName(serverName);
    cleanUnclosedStatus.setLastStart(new DateTime());

    //Determine date of most recently aggregated data
    final IEventAggregatorStatus eventAggregatorStatus = eventAggregationManagementDao
            .getEventAggregatorStatus(ProcessingType.AGGREGATION, false);
    if (eventAggregatorStatus == null || eventAggregatorStatus.getLastEventDate() == null) {
        //Nothing has been aggregated, skip unclosed cleanup

        cleanUnclosedStatus.setLastEnd(new DateTime());
        eventAggregationManagementDao.updateEventAggregatorStatus(cleanUnclosedStatus);

        return new EventProcessingResult(0, null, null, true);
    }

    final DateTime lastAggregatedDate = eventAggregatorStatus.getLastEventDate();

    //If lastCleanUnclosedDate is null use the oldest date dimension as there can be 
    //no aggregations that exist before it
    final DateTime lastCleanUnclosedDate;
    if (cleanUnclosedStatus.getLastEventDate() == null) {
        final DateDimension oldestDateDimension = this.dateDimensionDao.getOldestDateDimension();
        lastCleanUnclosedDate = oldestDateDimension.getDate().toDateTime();
    } else {
        lastCleanUnclosedDate = cleanUnclosedStatus.getLastEventDate();
    }

    if (!(lastCleanUnclosedDate.isBefore(lastAggregatedDate))) {
        logger.debug("No events aggregated since last unclosed aggregation cleaning, skipping clean: {}",
                lastAggregatedDate);
        return new EventProcessingResult(0, lastCleanUnclosedDate, lastAggregatedDate, true);
    }

    //Switch to flush on commit to avoid flushes during queries
    final EntityManager entityManager = this.getEntityManager();
    entityManager.flush();
    entityManager.setFlushMode(FlushModeType.COMMIT);

    //Track the number of closed aggregations and the last date of a cleaned interval
    int closedAggregations = 0;
    int cleanedIntervals = 0;
    DateTime cleanUnclosedEnd;

    final Thread currentThread = Thread.currentThread();
    final String currentName = currentThread.getName();
    try {
        currentThread.setName(currentName + "-" + lastCleanUnclosedDate + "-" + lastAggregatedDate);

        //Local caches used to reduce db io
        final IntervalsForAggregatorHelper intervalsForAggregatorHelper = new IntervalsForAggregatorHelper();
        final Map<AggregationInterval, AggregationIntervalInfo> previousIntervals = new HashMap<AggregationInterval, AggregationIntervalInfo>();

        //A DateTime within the next interval to close aggregations in
        DateTime nextIntervalDate = lastCleanUnclosedDate;
        do {
            //Reset our goal of catching up to the last aggregated event on every iteration
            cleanUnclosedEnd = lastAggregatedDate;

            //For each interval the aggregator supports, cleanup the unclosed aggregations
            for (final AggregationInterval interval : intervalsForAggregatorHelper.getHandledIntervals()) {
                final AggregationIntervalInfo previousInterval = previousIntervals.get(interval);
                if (previousInterval != null && nextIntervalDate.isBefore(previousInterval.getEnd())) {
                    logger.debug(
                            "{} interval before {} has already been cleaned during this execution, ignoring",
                            interval, previousInterval.getEnd());
                    continue;
                }

                //The END date of the last clean session will find us the next interval to clean
                final AggregationIntervalInfo nextIntervalToClean = intervalHelper.getIntervalInfo(interval,
                        nextIntervalDate);
                previousIntervals.put(interval, nextIntervalToClean);
                if (nextIntervalToClean == null) {
                    continue;
                }

                final DateTime start = nextIntervalToClean.getStart();
                final DateTime end = nextIntervalToClean.getEnd();
                if (!end.isBefore(lastAggregatedDate)) {
                    logger.debug("{} interval between {} and {} is still active, ignoring",
                            new Object[] { interval, start, end });
                    continue;
                }

                //Track the oldest interval end, this ensures that nothing is missed
                if (end.isBefore(cleanUnclosedEnd)) {
                    cleanUnclosedEnd = end;
                }

                logger.debug("Cleaning unclosed {} aggregations between {} and {}",
                        new Object[] { interval, start, end });

                for (final IPortalEventAggregator<PortalEvent> portalEventAggregator : portalEventAggregators) {
                    checkShutdown();

                    final Class<? extends IPortalEventAggregator<?>> aggregatorType = getClass(
                            portalEventAggregator);

                    //Get aggregator specific interval info config
                    final AggregatedIntervalConfig aggregatorIntervalConfig = intervalsForAggregatorHelper
                            .getAggregatorIntervalConfig(aggregatorType);

                    //If the aggregator is being used for the specified interval call cleanUnclosedAggregations
                    if (aggregatorIntervalConfig.isIncluded(interval)) {
                        closedAggregations += portalEventAggregator.cleanUnclosedAggregations(start, end,
                                interval);
                    }
                }

                cleanedIntervals++;
            }

            //Set the next interval to the end date from the last aggregation run
            nextIntervalDate = cleanUnclosedEnd;

            logger.debug("Closed {} aggregations across {} interval before {} with goal of {}", new Object[] {
                    closedAggregations, cleanedIntervals, cleanUnclosedEnd, lastAggregatedDate });
            //Loop until either the batchSize of cleaned aggregations has been reached or no aggregation work is done
        } while (closedAggregations <= cleanUnclosedAggregationsBatchSize
                && cleanedIntervals <= cleanUnclosedIntervalsBatchSize
                && cleanUnclosedEnd.isBefore(lastAggregatedDate));
    } finally {
        currentThread.setName(currentName);
    }

    //Update the status object and store it
    cleanUnclosedStatus.setLastEventDate(cleanUnclosedEnd);
    cleanUnclosedStatus.setLastEnd(new DateTime());
    eventAggregationManagementDao.updateEventAggregatorStatus(cleanUnclosedStatus);

    return new EventProcessingResult(closedAggregations, lastCleanUnclosedDate, lastAggregatedDate,
            !cleanUnclosedEnd.isBefore(lastAggregatedDate));
}
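
Note the pattern here: the code flushes once explicitly, then switches the whole EntityManager to FlushModeType.COMMIT (the inline comment states the intent: to avoid flushes during queries), so the many queries issued inside the cleanup loop do not each force a flush of accumulated changes.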

From source file:org.jasig.portal.events.aggr.PortalRawEventsAggregatorImpl.java

private EventProcessingResult doAggregateRawEventsInternal() {
    if (!this.clusterLockService.isLockOwner(AGGREGATION_LOCK_NAME)) {
        throw new IllegalStateException("The cluster lock " + AGGREGATION_LOCK_NAME
                + " must be owned by the current thread and server");
    }

    if (!this.portalEventDimensionPopulator.isCheckedDimensions()) {
        //First time aggregation has happened, run populateDimensions to ensure enough dimension data exists
        final boolean populatedDimensions = this.portalEventAggregationManager.populateDimensions();
        if (!populatedDimensions) {
            this.logger.warn(
                    "Aborting raw event aggregation, populateDimensions returned false so the state of date/time dimensions is unknown");
            return null;
        }
    }

    //Flush any dimension creation before aggregation
    final EntityManager entityManager = this.getEntityManager();
    entityManager.flush();
    entityManager.setFlushMode(FlushModeType.COMMIT);

    final IEventAggregatorStatus eventAggregatorStatus = eventAggregationManagementDao
            .getEventAggregatorStatus(ProcessingType.AGGREGATION, true);

    //Update status with current server name
    final String serverName = this.portalInfoProvider.getUniqueServerName();
    final String previousServerName = eventAggregatorStatus.getServerName();
    if (previousServerName != null && !serverName.equals(previousServerName)) {
        this.logger.debug("Last aggregation run on {} clearing all aggregation caches", previousServerName);
        final Session session = getEntityManager().unwrap(Session.class);
        final Cache cache = session.getSessionFactory().getCache();
        cache.evictEntityRegions();
    }

    eventAggregatorStatus.setServerName(serverName);

    //Calculate date range for aggregation
    DateTime lastAggregated = eventAggregatorStatus.getLastEventDate();
    if (lastAggregated == null) {
        lastAggregated = portalEventDao.getOldestPortalEventTimestamp();

        //No portal events to aggregate, skip aggregation
        if (lastAggregated == null) {
            return new EventProcessingResult(0, null, null, true);
        }

        //First time aggregation has run, initialize the CLEAN_UNCLOSED status to save catch-up time 
        final IEventAggregatorStatus cleanUnclosedStatus = eventAggregationManagementDao
                .getEventAggregatorStatus(ProcessingType.CLEAN_UNCLOSED, true);
        AggregationIntervalInfo oldestMinuteInterval = this.intervalHelper
                .getIntervalInfo(AggregationInterval.MINUTE, lastAggregated);
        cleanUnclosedStatus.setLastEventDate(oldestMinuteInterval.getStart().minusMinutes(1));
        eventAggregationManagementDao.updateEventAggregatorStatus(cleanUnclosedStatus);
    }

    final DateTime newestEventTime = DateTime.now().minus(this.aggregationDelay).secondOfMinute()
            .roundFloorCopy();

    final Thread currentThread = Thread.currentThread();
    final String currentName = currentThread.getName();
    final MutableInt events = new MutableInt();
    final MutableObject lastEventDate = new MutableObject(newestEventTime);

    boolean complete;
    try {
        currentThread.setName(currentName + "-" + lastAggregated + "_" + newestEventTime);

        logger.debug("Starting aggregation of events between {} (inc) and {} (exc)", lastAggregated,
                newestEventTime);

        //Do aggregation, capturing the start and end dates
        eventAggregatorStatus.setLastStart(DateTime.now());

        complete = portalEventDao.aggregatePortalEvents(lastAggregated, newestEventTime,
                this.eventAggregationBatchSize,
                new AggregateEventsHandler(events, lastEventDate, eventAggregatorStatus));

        eventAggregatorStatus.setLastEventDate((DateTime) lastEventDate.getValue());
        eventAggregatorStatus.setLastEnd(DateTime.now());
    } finally {
        currentThread.setName(currentName);
    }

    //Store the results of the aggregation
    eventAggregationManagementDao.updateEventAggregatorStatus(eventAggregatorStatus);

    complete = complete
            && (this.eventAggregationBatchSize <= 0 || events.intValue() < this.eventAggregationBatchSize);
    return new EventProcessingResult(events.intValue(), lastAggregated,
            eventAggregatorStatus.getLastEventDate(), complete);
}

From source file:org.rhq.enterprise.server.content.ContentManagerBean.java

@SuppressWarnings("unchecked")
public void mergeDiscoveredPackages(ContentDiscoveryReport report) {
    int resourceId = report.getResourceId();

    // For performance tracking
    long start = System.currentTimeMillis();

    log.debug("Merging [" + report.getDeployedPackages().size() + "] packages for Resource with id ["
            + resourceId + "]...");

    // Load the resource and its installed packages
    Resource resource = entityManager.find(Resource.class, resourceId);
    if (resource == null) {
        log.error("Invalid resource ID specified for merge. Resource ID: " + resourceId);
        return;
    }

    // Timestamp to use for all audit trail entries from this report
    long timestamp = System.currentTimeMillis();

    // Before we process the report, get a list of all installed packages on the resource.
    // InstalledPackage objects in this list that are not referenced in the report are to be removed.
    Query currentInstalledPackageQuery = entityManager
            .createNamedQuery(InstalledPackage.QUERY_FIND_BY_RESOURCE_ID);
    currentInstalledPackageQuery.setParameter("resourceId", resource.getId());

    Set<InstalledPackage> doomedPackages = new HashSet<InstalledPackage>(
            currentInstalledPackageQuery.getResultList());

    // The report contains an entire snapshot of packages, so each of these has to be represented
    // as an InstalledPackage
    for (ResourcePackageDetails discoveredPackage : report.getDeployedPackages()) {

        Package generalPackage = null;
        PackageVersion packageVersion = null;

        // Load the overall package (used in a few places later in this loop)
        Query packageQuery = entityManager.createNamedQuery(Package.QUERY_FIND_BY_NAME_PKG_TYPE_RESOURCE_TYPE);
        packageQuery.setFlushMode(FlushModeType.COMMIT);
        packageQuery.setParameter("name", discoveredPackage.getName());
        packageQuery.setParameter("packageTypeName", discoveredPackage.getPackageTypeName());
        packageQuery.setParameter("resourceTypeId", resource.getResourceType().getId());
        List<Package> resultPackages = packageQuery.getResultList();
        if (resultPackages.size() > 0) {
            generalPackage = resultPackages.get(0);
        }

        // If the package exists see if package version already exists
        if (null != generalPackage) {
            Query packageVersionQuery = entityManager
                    .createNamedQuery(PackageVersion.QUERY_FIND_BY_PACKAGE_VERSION);
            packageVersionQuery.setFlushMode(FlushModeType.COMMIT);
            packageVersionQuery.setParameter("packageName", discoveredPackage.getName());
            packageVersionQuery.setParameter("packageTypeName", discoveredPackage.getPackageTypeName());
            packageVersionQuery.setParameter("resourceTypeId", resource.getResourceType().getId());
            packageVersionQuery.setParameter("version", discoveredPackage.getVersion());
            List<PackageVersion> resultPackageVersions = packageVersionQuery.getResultList();
            if (resultPackageVersions.size() > 0) {
                packageVersion = resultPackageVersions.get(0);
            }
        }

        // If we didn't find a package version for this deployed package, we will need to create it
        if (null == packageVersion) {
            if (null == generalPackage) {
                Query packageTypeQuery = entityManager
                        .createNamedQuery(PackageType.QUERY_FIND_BY_RESOURCE_TYPE_ID_AND_NAME);
                packageTypeQuery.setFlushMode(FlushModeType.COMMIT);
                packageTypeQuery.setParameter("typeId", resource.getResourceType().getId());
                packageTypeQuery.setParameter("name", discoveredPackage.getPackageTypeName());

                PackageType packageType = (PackageType) packageTypeQuery.getSingleResult();

                generalPackage = new Package(discoveredPackage.getName(), packageType);
                generalPackage = persistOrMergePackageSafely(generalPackage);
            }

            // Create a new package version and attach to the general package
            Query architectureQuery = entityManager.createNamedQuery(Architecture.QUERY_FIND_BY_NAME);
            architectureQuery.setFlushMode(FlushModeType.COMMIT);
            architectureQuery.setParameter("name", discoveredPackage.getArchitectureName());

            Architecture packageArchitecture;

            // We don't have an architecture enum, so it's very possible the plugin will pass in a crap string here.
            // Catch and log a better error message but continue processing the rest of the report
            // TODO: if arch is "none" we should consider manually switching it to be our standard "noarch"
            try {
                packageArchitecture = (Architecture) architectureQuery.getSingleResult();
            } catch (Exception e) {
                log.warn("Could not load architecture for architecture name ["
                        + discoveredPackage.getArchitectureName() + "] for package ["
                        + discoveredPackage.getName() + "]. Cause: " + ThrowableUtil.getAllMessages(e));
                continue;
            }

            packageVersion = new PackageVersion(generalPackage, discoveredPackage.getVersion(),
                    packageArchitecture);
            packageVersion.setDisplayName(discoveredPackage.getDisplayName());
            packageVersion.setDisplayVersion(discoveredPackage.getDisplayVersion());
            packageVersion.setFileCreatedDate(discoveredPackage.getFileCreatedDate());
            packageVersion.setFileName(discoveredPackage.getFileName());
            packageVersion.setFileSize(discoveredPackage.getFileSize());
            packageVersion.setLicenseName(discoveredPackage.getLicenseName());
            packageVersion.setLicenseVersion(discoveredPackage.getLicenseVersion());
            packageVersion.setLongDescription(discoveredPackage.getLongDescription());
            packageVersion.setMD5(discoveredPackage.getMD5());
            packageVersion.setMetadata(discoveredPackage.getMetadata());
            packageVersion.setSHA256(discoveredPackage.getSHA256());
            packageVersion.setShortDescription(discoveredPackage.getShortDescription());
            packageVersion.setExtraProperties(discoveredPackage.getExtraProperties());

            packageVersion = persistOrMergePackageVersionSafely(packageVersion);
        } // end package version null check
        else {
            // If the package version was already in the system, see if there is an installed package for
            // this package version. If so, we're done processing this package
            Query installedPackageQuery = entityManager
                    .createNamedQuery(InstalledPackage.QUERY_FIND_BY_RESOURCE_AND_PACKAGE_VER);
            installedPackageQuery.setFlushMode(FlushModeType.COMMIT);
            installedPackageQuery.setParameter("resourceId", resource.getId());
            installedPackageQuery.setParameter("packageVersionId", packageVersion.getId());

            List<InstalledPackage> installedPackageList = installedPackageQuery.getResultList();

            if (installedPackageList.size() > 0) {
                if (log.isDebugEnabled()) {
                    log.debug("Discovered package is already known to the inventory "
                            + installedPackageList.iterator().next());
                }

                // This represents a package that was previously installed and still is. We need to remove
                // the reference to this from the doomed packages list so it's not marked as deleted at the end.
                for (InstalledPackage ip : installedPackageList) {
                    doomedPackages.remove(ip);
                }

                continue;
            }
        }

        // At this point, we have the package and package version in the system (now added if they weren't already)
        // We've also punched out early if we already knew about the installed package, so we won't add another
        // reference from the resource to the package nor another audit trail entry saying it was discovered.

        // Create a new installed package entry in the audit
        InstalledPackage newlyInstalledPackage = new InstalledPackage();
        newlyInstalledPackage.setPackageVersion(packageVersion);
        newlyInstalledPackage.setResource(resource);
        newlyInstalledPackage.setInstallationDate(discoveredPackage.getInstallationTimestamp());

        entityManager.persist(newlyInstalledPackage);

        // Create an audit trail entry to show how this package was added to the system
        InstalledPackageHistory history = new InstalledPackageHistory();
        history.setDeploymentConfigurationValues(discoveredPackage.getDeploymentTimeConfiguration());
        history.setPackageVersion(packageVersion);
        history.setResource(resource);
        history.setStatus(InstalledPackageHistoryStatus.DISCOVERED);
        history.setTimestamp(timestamp);

        entityManager.persist(history);

        entityManager.flush();
    } // end resource package loop

    // For any previously active installed packages that were not found again (and thus removed from the doomed
    // list), delete them.
    int deletedPackages = 0;
    for (InstalledPackage doomedPackage : doomedPackages) {
        doomedPackage = entityManager.find(InstalledPackage.class, doomedPackage.getId());

        // Add an audit trail entry to indicate the package was not rediscovered
        InstalledPackageHistory history = new InstalledPackageHistory();
        history.setPackageVersion(doomedPackage.getPackageVersion());
        history.setResource(resource);
        history.setStatus(InstalledPackageHistoryStatus.MISSING);
        history.setTimestamp(timestamp);
        entityManager.persist(history);

        entityManager.remove(doomedPackage);

        // no idea if this helps, but if we are deleting large numbers of packages, it probably does
        if ((++deletedPackages) % 100 == 0) {
            entityManager.flush();
        }
    }

    log.debug("Finished merging [" + report.getDeployedPackages().size() + "] packages in "
            + (System.currentTimeMillis() - start) + "ms");
}
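
Here every named query in the merge loop gets FlushModeType.COMMIT individually, so lookups made while new entities are being persisted do not force a flush on each query; instead the code flushes explicitly at controlled points (after persisting each discovered package, and every 100 deletions).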

From source file:org.rhq.enterprise.server.content.ContentManagerBean.java

/** Does much of same functionality as createPackageVersion, but uses same named query
 *  as the agent side discovery mechanism, and passes in additional parameters available
 *  when file has been uploaded via the UI.
 */
@SuppressWarnings("unchecked")
public PackageVersion getUploadedPackageVersion(Subject subject, String packageName, int packageTypeId,
        String version, int architectureId, InputStream packageBitStream,
        Map<String, String> packageUploadDetails, Integer repoId) {

    PackageVersion packageVersion = null;

    //default version to 1.0 if is null, not provided for any reason.
    if ((version == null) || (version.trim().length() == 0)) {
        version = "1.0";
    }

    Architecture architecture = entityManager.find(Architecture.class, architectureId);
    PackageType packageType = entityManager.find(PackageType.class, packageTypeId);

    // See if package version already exists for the resource package        
    Query packageVersionQuery = null;

    if (packageType.getResourceType() != null) {
        packageVersionQuery = entityManager
                .createNamedQuery(PackageVersion.QUERY_FIND_BY_PACKAGE_DETAILS_KEY_WITH_NON_NULL_RESOURCE_TYPE);
        packageVersionQuery.setParameter("resourceTypeId", packageType.getResourceType().getId());

    } else {
        packageVersionQuery = entityManager.createNamedQuery(PackageVersion.QUERY_FIND_BY_PACKAGE_DETAILS_KEY);
        packageVersionQuery.setParameter("resourceType", null);
    }

    packageVersionQuery.setFlushMode(FlushModeType.COMMIT);
    packageVersionQuery.setParameter("packageName", packageName);

    packageVersionQuery.setParameter("packageTypeName", packageType.getName());

    packageVersionQuery.setParameter("architectureName", architecture.getName());
    packageVersionQuery.setParameter("version", version);

    // Result of the query should be either 0 or 1
    List<PackageVersion> existingPackageVersionList = packageVersionQuery.getResultList();

    if (existingPackageVersionList.size() > 0) {
        packageVersion = existingPackageVersionList.get(0);
    }

    try {
        PackageTypeBehavior behavior = ContentManagerHelper.getPackageTypeBehavior(packageTypeId);

        if (behavior != null) {
            String packageTypeName = packageType.getName();
            String archName = architecture.getName();
            ValidatablePackageDetailsKey key = new ValidatablePackageDetailsKey(packageName, version,
                    packageTypeName, archName);
            behavior.validateDetails(key, subject);

            //update the details from the validation results
            packageName = key.getName();
            version = key.getVersion();

            if (!architecture.getName().equals(key.getArchitectureName())) {
                Query q = entityManager.createNamedQuery(Architecture.QUERY_FIND_BY_NAME);
                q.setParameter("name", key.getArchitectureName());
                architecture = (Architecture) q.getSingleResult();
            }
        }
    } catch (PackageDetailsValidationException e) {
        throw e;
    } catch (Exception e) {
        log.error("Failed to get the package type plugin container. This is a bug.", e);
        throw new IllegalStateException("Failed to get the package type plugin container.", e);
    }

    Package existingPackage = null;

    Query packageQuery = entityManager.createNamedQuery(Package.QUERY_FIND_BY_NAME_PKG_TYPE_ID);
    packageQuery.setParameter("name", packageName);
    packageQuery.setParameter("packageTypeId", packageTypeId);
    List<Package> existingPackageList = packageQuery.getResultList();

    if (existingPackageList.size() == 0) {
        // If the package doesn't exist, create that here
        existingPackage = new Package(packageName, packageType);
        existingPackage = persistOrMergePackageSafely(existingPackage);
    } else {
        existingPackage = existingPackageList.get(0);
    }

    //initialize package version if not already
    if (packageVersion == null) {
        packageVersion = new PackageVersion(existingPackage, version, architecture);
        packageVersion.setDisplayName(existingPackage.getName());
        entityManager.persist(packageVersion);
    }

    //get the data
    Map<String, String> contentDetails = new HashMap<String, String>();
    PackageBits bits = loadPackageBits(packageBitStream, packageVersion.getId(), packageName, version, null,
            contentDetails);

    packageVersion.setPackageBits(bits);

    packageVersion.setFileSize(Long.valueOf(contentDetails.get(UPLOAD_FILE_SIZE)).longValue());
    packageVersion.setSHA256(contentDetails.get(UPLOAD_SHA256));

    //populate extra details, persist
    if (packageUploadDetails != null) {
        packageVersion.setFileCreatedDate(
                Long.valueOf(packageUploadDetails.get(ContentManagerLocal.UPLOAD_FILE_INSTALL_DATE)));
        packageVersion.setFileName(packageUploadDetails.get(ContentManagerLocal.UPLOAD_FILE_NAME));
        packageVersion.setMD5(packageUploadDetails.get(ContentManagerLocal.UPLOAD_MD5));
        packageVersion.setDisplayVersion(packageUploadDetails.get(ContentManagerLocal.UPLOAD_DISPLAY_VERSION));
    }

    entityManager.merge(packageVersion);

    if (repoId != null) {
        int[] packageVersionIds = new int[] { packageVersion.getId() };
        repoManager.addPackageVersionsToRepo(subject, repoId, packageVersionIds);
    }

    entityManager.flush();

    return packageVersion;

}

From source file:org.rhq.enterprise.server.measurement.AvailabilityManagerBean.java

@SuppressWarnings("unchecked")
public boolean mergeAvailabilityReport(AvailabilityReport report) {
    int reportSize = report.getResourceAvailability().size();
    String agentName = report.getAgentName();
    StopWatch watch = new StopWatch();

    if (reportSize == 0) {
        log.error("Agent [" + agentName
                + "] sent an empty availability report.  This is a bug, please report it");
        return true; // even though this report is bogus, do not ask for an immediate full report to avoid unusual infinite recursion due to this error condition
    }

    if (log.isDebugEnabled()) {
        if (reportSize > 1) {
            log.debug("Agent [" + agentName + "]: processing availability report of size: " + reportSize);
        }
    }

    // translate data into Availability objects for downstream processing
    List<Availability> availabilities = new ArrayList<Availability>(report.getResourceAvailability().size());
    for (AvailabilityReport.Datum datum : report.getResourceAvailability()) {
        availabilities.add(new Availability(new Resource(datum.getResourceId()), datum.getStartTime(),
                datum.getAvailabilityType()));
    }

    // We will alert only on the avails for enabled resources. Keep track of any that are disabled. 
    List<Availability> disabledAvailabilities = new ArrayList<Availability>();

    boolean askForFullReport = false;
    Integer agentToUpdate = agentManager.getAgentIdByName(agentName);

    // if this report is from an agent update the lastAvailreport time
    if (!report.isEnablementReport() && agentToUpdate != null) {
        // do this now, before we might clear() the entity manager
        availabilityManager.updateLastAvailabilityReport(agentToUpdate.intValue());
    }

    int numInserted = 0;

    // if this report is from an agent, and is a changes-only report, and the agent appears backfilled,
    // then we need to skip this report so as not to waste our time. Then, immediately request and process
    // a full report because, obviously, the agent is no longer down but the server thinks
    // it still is down - we need to know the availabilities for all the resources on that agent
    if (!report.isEnablementReport() && report.isChangesOnlyReport()
            && agentManager.isAgentBackfilled(agentToUpdate.intValue())) {
        askForFullReport = true;

    } else {
        Query q = entityManager.createNamedQuery(Availability.FIND_CURRENT_BY_RESOURCE);
        q.setFlushMode(FlushModeType.COMMIT);

        int count = 0;
        for (Availability reported : availabilities) {
            if ((++count % 100) == 0) {
                entityManager.flush();
                entityManager.clear();
            }

            // availability reports only tell us the current state at the start time; end time is ignored/must be null
            reported.setEndTime(null);

            try {
                q.setParameter("resourceId", reported.getResource().getId());
                Availability latest = (Availability) q.getSingleResult();
                AvailabilityType latestType = latest.getAvailabilityType();
                AvailabilityType reportedType = reported.getAvailabilityType();

                // If the current avail is DISABLED, and this report is not trying to re-enable the resource,
                // Then ignore the reported avail.
                if (AvailabilityType.DISABLED == latestType) {
                    if (!(report.isEnablementReport() && (AvailabilityType.UNKNOWN == reportedType))) {
                        disabledAvailabilities.add(reported);
                        continue;
                    }
                }

                if (reported.getStartTime() >= latest.getStartTime()) {
                    //log.info( "new avail (latest/reported)-->" + latest + "/" + reported );

                    // the new availability data is for a time after our last known state change
                    // we are runlength encoded, so only persist data if the availability changed                        
                    if (latest.getAvailabilityType() != reported.getAvailabilityType()) {
                        entityManager.persist(reported);
                        numInserted++;

                        latest.setEndTime(reported.getStartTime());
                        latest = entityManager.merge(latest);

                        updateResourceAvailability(reported);
                    }

                    // our last known state was unknown, ask for a full report to ensure we are in sync with agent
                    if (latest.getAvailabilityType() == AvailabilityType.UNKNOWN) {
                        askForFullReport = true;
                    }
                } else {
                    //log.info( "past avail (latest/reported)==>" + latest + "/" + reported );

                    // The new data is for a time in the past, probably an agent sending a report after
                    // a network outage has been corrected but after we have already backfilled.
                    // We need to insert it into our past timeline.
                    insertAvailability(reported);
                    numInserted++;

                    // this is an unusual report - ask the agent for a full report so as to ensure we are in sync with agent
                    askForFullReport = true;
                }
            } catch (NoResultException nre) {
                // This condition should never happen. An initial, unknown, Availability/ResourceAvailability
                // are created at resource persist time. But, just in case, handle it...
                log.warn("Resource [" + reported.getResource() + "] has no availability without an endtime ["
                        + nre.getMessage() + "] - will attempt to create one\n" + report.toString(false));

                entityManager.persist(reported);
                updateResourceAvailability(reported);
                numInserted++;

            } catch (NonUniqueResultException nure) {
                // This condition should never happen.  In my world of la-la land, I've done everything
                // correctly so this never happens.  But, due to the asynchronous nature of things,
                // I have to believe that this still might happen (albeit rarely).  If it does happen,
                // and we do nothing about it - bad things arise.  So, if we find that a resource
                // has 2 or more availabilities with endTime of null, we need to delete all but the
                // latest one (the one whose start time is the latest).  This should correct the
                // problem and allow us to continue processing availability reports for that resource
                log.warn("Resource [" + reported.getResource()
                        + "] has multiple availabilities without an endtime [" + nure.getMessage()
                        + "] - will attempt to remove the extra ones\n" + report.toString(false));

                q.setParameter("resourceId", reported.getResource().getId());
                List<Availability> latest = q.getResultList();

                // delete all but the last one (our query sorts in ASC start time order)
                int latestCount = latest.size();
                for (int i = 0; i < (latestCount - 1); i++) {
                    entityManager.remove(latest.get(i));
                }
                updateResourceAvailability(latest.get(latestCount - 1));

                // this is an unusual report - ask the agent for a full report so as to ensure we are in sync with agent
                askForFullReport = true;
            }
        }

        MeasurementMonitor.getMBean().incrementAvailabilityReports(report.isChangesOnlyReport());
        MeasurementMonitor.getMBean().incrementAvailabilitiesInserted(numInserted);
        MeasurementMonitor.getMBean().incrementAvailabilityInsertTime(watch.getElapsed());
        watch.reset();
    }

    // notify alert condition cache manager for all reported avails for enabled resources
    availabilities.removeAll(disabledAvailabilities);
    notifyAlertConditionCacheManager("mergeAvailabilityReport",
            availabilities.toArray(new Availability[availabilities.size()]));

    if (!report.isEnablementReport()) {
        // a single report comes from a single agent - update the agent's last availability report timestamp
        if (agentToUpdate != null) {
            // don't bother asking for a full report if the one we are currently processing is already full
            if (askForFullReport && report.isChangesOnlyReport()) {
                log.debug("The server is unsure that it has up-to-date availabilities for agent [" + agentName
                        + "]; asking for a full report to be sent");
                return false;
            }
        } else {
            log.error("Could not figure out which agent sent availability report. "
                    + "This error is harmless and should stop appearing after a short while if the platform of the agent ["
                    + agentName + "] was recently removed. In any other case this is a bug." + report);
        }
    }

    return true; // everything is OK and things look to be in sync
}
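
This example creates the query once with FlushModeType.COMMIT and reuses it with different parameters inside the loop; combined with the periodic flush()/clear() every 100 availabilities, the flush mode keeps the reused query from flushing the pending batch on every iteration.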

From source file:org.rhq.enterprise.server.measurement.MeasurementDataManagerBean.java

/**
 * Return a map of &lt;resource id, List&lt;MetricDisplaySummary&gt;&gt;, where the list contains the
 * {@link MetricDisplaySummary} for the (enabled) schedules of the resource
 *
 * @param subject        Subject of the caller
 * @param resourceTypeId ResourceTypeId of the child resources
 * @param parentId       ID of the common parent resource
 * @param resourceIds    List of primary keys of the resources we are interested in
 * @param begin          begin time
 * @param end            end time
 */
@SuppressWarnings("unchecked")
public Map<Integer, List<MetricDisplaySummary>> findNarrowedMetricDisplaySummariesForResourcesAndParent(
        Subject subject, int resourceTypeId, int parentId, List<Integer> resourceIds, long begin, long end) {
    Map<Integer, List<MetricDisplaySummary>> sumMap = new HashMap<Integer, List<MetricDisplaySummary>>();
    if ((parentId <= 0) || (resourceIds == null) || (resourceIds.isEmpty()) || (end < begin)) {
        return sumMap;
    }

    /*
     * Get the schedule(ids) for the passed resources and types and stuff them in a MapMap to easier access them
     * afterwards.
     */
    Query q = entityManager
            .createNamedQuery(MeasurementSchedule.FIND_ENABLED_BY_RESOURCE_IDS_AND_RESOURCE_TYPE_ID);
    q.setFlushMode(FlushModeType.COMMIT);
    q.setParameter("resourceTypeId", resourceTypeId);
    q.setParameter("resourceIds", resourceIds);

    // <schedId, resId, defId>
    List<Object[]> triples = q.getResultList();

    Map<Integer, Map<Integer, Integer>> resDefSchedMap = new HashMap<Integer, Map<Integer, Integer>>();

    List<Integer> scheduleIds = new ArrayList<Integer>(triples.size());
    for (Object[] triple : triples) {
        int sid = (Integer) triple[0];
        scheduleIds.add(sid);
        int res = (Integer) triple[1];
        int def = (Integer) triple[2];
        Map<Integer, Integer> defSchedMap;
        if (!resDefSchedMap.containsKey(res)) {
            defSchedMap = new HashMap<Integer, Integer>();
            resDefSchedMap.put(res, defSchedMap);
        } else {
            defSchedMap = resDefSchedMap.get(res);
        }

        defSchedMap.put(def, sid);
    }

    Map<Integer, Integer> alerts = alertManager.getAlertCountForSchedules(begin, end, scheduleIds);

    List<MeasurementDefinition> definitions = measurementDefinitionManager
            .findMeasurementDefinitionsByResourceType(subject, resourceTypeId, DataType.MEASUREMENT, null);
    Map<Integer, MeasurementDefinition> defMap = new HashMap<Integer, MeasurementDefinition>(
            definitions.size());
    for (MeasurementDefinition def : definitions) {
        defMap.put(def.getId(), def);
    }

    /*
     * Now that we have the data, loop over the data and fill in the MetricDisplaySummaries.
     */
    for (int resourceId : resourceIds) {
        List<MetricDisplaySummary> summaries = new ArrayList<MetricDisplaySummary>();
        if (resDefSchedMap.containsKey(resourceId)) {
            Map<Integer, Integer> defSchedMap = resDefSchedMap.get(resourceId);
            for (int defId : defSchedMap.keySet()) {
                if (defMap.get(defId) == null) {
                    // This is not a DataType.MEASUREMENT type measurement
                    continue;
                }

                int sid = defSchedMap.get(defId);
                MetricDisplaySummary mds = new MetricDisplaySummary();
                mds.setAlertCount(alerts.get(sid));
                mds.setBeginTimeFrame(begin);
                mds.setEndTimeFrame(end);
                mds.setDefinitionId(defId);
                mds.setMetricName(defMap.get(defId).getName());
                mds.setLabel(defMap.get(defId).getDisplayName());
                mds.setParentId(parentId);
                mds.setChildTypeId(resourceTypeId);
                summaries.add(mds);
            }
        }

        sumMap.put(resourceId, summaries);
    }

    return sumMap;
}

From source file:org.rhq.enterprise.server.measurement.MeasurementDataManagerBean.java

/**
 * Get the {@link MetricDisplaySummary}s for the resources passed in, that all need to be of the same
 * {@link ResourceType}. Summaries only contain a basic selection of fields for the purpose of filling the Child
 * resource popups.
 */
@SuppressWarnings("unchecked")
public Map<Integer, List<MetricDisplaySummary>> findNarrowedMetricDisplaySummariesForCompatibleResources(
        Subject subject, Collection<Resource> resources, long beginTime, long endTime) {
    Map<Integer, List<MetricDisplaySummary>> resMap = new HashMap<Integer, List<MetricDisplaySummary>>();

    if ((resources == null) || (resources.isEmpty())) {
        return resMap;
    }

    /*
     * Get the resource type and make sure all resources are of the same type
     */
    Iterator<Resource> it = resources.iterator();
    ResourceType type = it.next().getResourceType();
    boolean found = false;
    while (it.hasNext()) {
        ResourceType tmp = it.next().getResourceType();
        if (tmp != type) {
            found = true;
            break;
        }
    }

    if (found) {
        throw new IllegalArgumentException("Resources were of different type: " + resources);
    }

    Set<MeasurementDefinition> defs = type.getMetricDefinitions();

    // get all schedules that are collecting (=enabled)
    Query q = entityManager.createNamedQuery(MeasurementSchedule.FIND_ENABLED_BY_RESOURCES_AND_RESOURCE_TYPE);
    q.setFlushMode(FlushModeType.COMMIT);
    q.setParameter("resourceType", type);
    q.setParameter("resources", resources);
    q.setParameter("dataType", DataType.MEASUREMENT);

    // <schedId, resId, defId>
    List<Object[]> schedules = q.getResultList();

    Map<Integer, Map<Integer, Integer>> resDefSchedMap = new HashMap<Integer, Map<Integer, Integer>>();

    List<Integer> scheduleIds = new ArrayList<Integer>(schedules.size());
    for (Object[] sched : schedules) {
        int sid = (Integer) sched[0];
        scheduleIds.add(sid);
        int res = (Integer) sched[1];
        int def = (Integer) sched[2];
        Map<Integer, Integer> defSchedMap;
        if (!resDefSchedMap.containsKey(res)) {
            defSchedMap = new HashMap<Integer, Integer>();
            resDefSchedMap.put(res, defSchedMap);
        } else {
            defSchedMap = resDefSchedMap.get(res);
        }

        defSchedMap.put(def, sid);
    }

    Map<Integer, Integer> alerts = alertManager.getAlertCountForSchedules(beginTime, endTime, scheduleIds);

    /*
     * Loop over the resources and populate the map with the schedules for the definitions we have There won't be a
     * schedule for each combination, as the list above only contains schedules that are actually collecting. Also
     * if the schedule is not collecting, we don't need to add it to the result.
     */
    for (Resource res : resources) {
        List<MetricDisplaySummary> summaries = new ArrayList<MetricDisplaySummary>();
        for (MeasurementDefinition def : defs) {
            MetricDisplaySummary sum = new MetricDisplaySummary();
            sum.setDefinitionId(def.getId());
            sum.setMetricName(def.getName());
            sum.setLabel(def.getDisplayName());
            sum.setBeginTimeFrame(beginTime);
            sum.setEndTimeFrame(endTime);

            int resId = res.getId();
            if (resDefSchedMap.containsKey(resId)) {
                Map<Integer, Integer> defSched = resDefSchedMap.get(resId);
                if (defSched.containsKey(def.getId())) {
                    int sid = defSched.get(def.getId());
                    sum.setScheduleId(sid);
                    sum.setAlertCount(alerts.get(sid));
                    summaries.add(sum);
                }
            }
        }

        resMap.put(res.getId(), summaries);
    }

    return resMap;
}

From source file:uk.ac.ebi.intact.dataexchange.cvutils.CvUpdater.java

/**
 * Starts the creation and update of CVs by using the ontology provided
 *
 * @param allCvs List of all Cvs
 * @return An object containing some statistics about the update
 */
@Transactional
@IntactFlushMode(FlushModeType.COMMIT)
public CvUpdaterStatistics createOrUpdateCVs(List<CvDagObject> allCvs) {
    return createOrUpdateCVs(allCvs, new AnnotationInfoDataset());
}
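
Unlike the earlier examples, this one applies the flush mode declaratively: FlushModeType.COMMIT is passed as the value of the IntAct-specific @IntactFlushMode annotation, which presumably switches the persistence context to commit-time flushing for the duration of the method.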

From source file:uk.ac.ebi.intact.dataexchange.cvutils.CvUpdater.java

/**
 * Starts the creation and update of CVs by using the latest available CVs from the internet
 *
 * @return An object containing some statistics about the update
 */
@Transactional
@IntactFlushMode(FlushModeType.COMMIT)
public CvUpdaterStatistics executeUpdateWithLatestCVs() throws IOException {
    OBOSession oboSession = null;
    try {
        oboSession = OboUtils.createOBOSessionFromLatestMi();
    } catch (OBOParseException e) {
        throw new IOException("Problem creating OBO session from latest MI: " + e.getMessage());
    }

    AnnotationInfoDataset annotationInfoDataset = OboUtils.createAnnotationInfoDatasetFromLatestResource();
    return executeUpdate(oboSession, annotationInfoDataset);
}