Example usage for org.hibernate Query getQueryString

List of usage examples for org.hibernate Query getQueryString

Introduction

On this page you can find example usage for org.hibernate Query getQueryString.

Prototype

String getQueryString();

Document

Get the query string.
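
Before the full examples, here is a minimal sketch of the call itself. It assumes an org.hibernate.SessionFactory is available as sessionFactory and that an Employee entity is mapped; both are illustrative assumptions, not taken from the examples below. getQueryString() returns the HQL string the query was created with, not the generated SQL (several StudyDao examples below run the string through a QueryTranslator to obtain the SQL).

// Minimal sketch (assumes org.hibernate.Session, org.hibernate.Query and a mapped Employee entity).
Session session = sessionFactory.openSession();
try {
    Query query = session.createQuery("from Employee e where e.name = :name");
    query.setParameter("name", "Alice");
    // Returns the HQL string exactly as it was passed to createQuery().
    System.out.println("HQL: " + query.getQueryString());
} finally {
    session.close();
}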

Usage

From source file:au.org.theark.core.dao.StudyDao.java

License:Open Source License

public Collection<ConsentStatusField> getSelectedConsentStatusFieldsForSearch(Search search) {
    String queryString = "select csfs.consentStatusField" + " from ConsentStatusFieldSearch csfs "
            + " where csfs.search=:search " + " order by csfs.consentStatusField.entity ";
    Query query = getSession().createQuery(queryString);
    query.setParameter("search", search);
    log.info("QueryString: " + query.getQueryString());
    log.info("getselectedconsentstatusfieldforsearch results:");
    // run the query once and reuse the result instead of calling list() twice
    List<ConsentStatusField> results = (List<ConsentStatusField>) query.list();
    for (ConsentStatusField c : results) {
        log.info(c.getPublicFieldName());
    }
    return results;
}

From source file:au.org.theark.core.dao.StudyDao.java

License:Open Source License

private Collection<String> getBioCollectionUIDsNotMatchingTheseBioCollectionIdsOrSubjectIds(Study study,
        Collection<String> bioCollectionUIDs, List<Long> bioCollectionIds, List<Long> subjectIds,
        List<Long> biospecimenIds, List<QueryFilter> biospecimenFilters) {

    Query query = null;
    //if there is nothing to start with get out of here.
    if (bioCollectionUIDs.isEmpty()) {
        return new ArrayList<String>();
    }

    //if there is nothing to reduce the list by...return original list.
    if ((bioCollectionIds.isEmpty() && subjectIds.isEmpty())) {
        return bioCollectionUIDs;
    } else {
        List<Long> subjectIdsNew = new ArrayList<Long>();
        //add a dummy value=0 to get rid of ".QuerySyntaxException: unexpected end of subtree" due to empty list.
        subjectIds.add(new Long(0));
        String queryString = " select distinct bioCollection.biocollectionUid "
                + " from BioCollection bioCollection " + " where (" + " bioCollection.id not in (:idList) or "
                + " bioCollection.linkSubjectStudy.id not in (:subjectIdList) ) and "
                + " bioCollection.biocollectionUid in (:uidList) " + " and bioCollection.study =:study ";
        query = getSession().createQuery(queryString);
        if (!bioCollectionIds.isEmpty())
            query.setParameterList("idList", bioCollectionIds);
        if (!subjectIds.isEmpty())
            query.setParameterList("subjectIdList", subjectIds);
        else {
            query.setParameterList("subjectIdList", subjectIdsNew);
        }
        query.setParameter("study", study);
        query.setParameterList("uidList", bioCollectionUIDs);
        log.info("Query String: " + query.getQueryString());
        List<String> collectionsToDelete = query.list();

        if (biospecimenIds.isEmpty()) {
            //if all biospecimens WERE filtered, then all biocollections deleted
            if (!biospecimenFilters.isEmpty()) {
                return bioCollectionUIDs;
            } else {
                //there were no biospec filters...continue as usual
                return collectionsToDelete;
            }
        } else {
            if (!bioCollectionUIDs.isEmpty() && !subjectIds.isEmpty()) {

                if (!biospecimenFilters.isEmpty()) {
                    List<String> biocollectionsCorrespondingOurFilteredBiospecimens = getBiocollectionUIDsForTheseBiospecimens(
                            biospecimenIds, collectionsToDelete, study);
                    for (String biocollectionUid : bioCollectionUIDs) {
                        if (!biocollectionsCorrespondingOurFilteredBiospecimens.contains(biocollectionUid)) {
                            collectionsToDelete.add(biocollectionUid);
                        }
                    }
                }
            }
        }
        return collectionsToDelete;
    }

}

From source file:au.org.theark.core.dao.StudyDao.java

License:Open Source License

/**
 * This will get all the pheno data for the given subjects FOR THIS ONE CustomFieldGroup, a.k.a. questionnaire (a.k.a. data set).
 * @param allTheData
 * @param search
 * @param idsToInclude
 * @return the updated list of uids that are still left after the filtering. 
 */
private List<Long> applyPhenoDataSetFilters(DataExtractionVO allTheData, Search search,
        List<Long> idsToInclude) {

    Set<QueryFilter> filters = search.getQueryFilters();

    Collection<PhenoDataSetGroup> pdsgWithFilters = getPhenoDataSetGroupsForPhenoFilters(search, filters);
    List<Long> phenoCollectionIdsSoFar = new ArrayList<Long>();

    for (PhenoDataSetGroup phenoGroup : pdsgWithFilters) {
        log.info("Pheno group: " + phenoGroup.getName());
        if (idsToInclude != null && !idsToInclude.isEmpty()) {
            String queryToGetPhenoIdsForGivenSearchAndCFGFilters = getQueryForPhenoIdsForSearchAndCFGFilters(
                    search, phenoGroup);

            if (!queryToGetPhenoIdsForGivenSearchAndCFGFilters.isEmpty()) {
                Query query = getSession().createQuery(queryToGetPhenoIdsForGivenSearchAndCFGFilters);
                query.setParameterList("idList", idsToInclude);//TODO ASAP...this should be pheno list and not subjuid list now

                QueryTranslatorFactory translatorFactory = new ASTQueryTranslatorFactory();
                SessionFactoryImplementor factory = (SessionFactoryImplementor) getSession()
                        .getSessionFactory();
                QueryTranslator translator = translatorFactory.createQueryTranslator(query.getQueryString(),
                        query.getQueryString(), Collections.EMPTY_MAP, factory);
                translator.compile(Collections.EMPTY_MAP, false);
                log.info(translator.getSQLString());

                List<Long> phenosForThisCFG = query.list();
                phenoCollectionIdsSoFar.addAll(phenosForThisCFG);
                log.info("rows returned = " + phenoCollectionIdsSoFar.size());
            } else {
                log.info("there were no pheno custom data filters, therefore don't run filter query");
            }
        } else {
            log.info("there are no id's to filter.  therefore won't run filtering query");
        }
    }
    //now that we have all the phenoCollection IDs...get the updated list of subjects
    if (phenoCollectionIdsSoFar.isEmpty()) {
        if (!pdsgWithFilters.isEmpty()) {
            //there were no phenocollectionid's returned because they were validly filtered.  leave idsToIncludeAsItWas
            idsToInclude = new ArrayList<Long>();
        } else {
            //there were no filters so just leave the list of subjects as it was
        }
    } else {
        idsToInclude = getSubjectIdsForPhenoDataIds(phenoCollectionIdsSoFar);
    }

    //now that we have the pheno collection id, we just find the data for the selected customfields

    if (!idsToInclude.isEmpty()) {
        Collection<PhenoDataSetFieldDisplay> customFieldToGet = getSelectedPhenoDataSetFieldDisplaysForSearch(
                search);//getSelectedPhenoCustomFieldDisplaysForSearch(search);
        // We have the list of phenos, and therefore the list of pheno custom data - now bring back all the custom data rows IF they have any data they need 
        if ((!phenoCollectionIdsSoFar.isEmpty()
                || (phenoCollectionIdsSoFar.isEmpty() && pdsgWithFilters.isEmpty()))
                && !customFieldToGet.isEmpty()) {
            String queryString = "select data from PhenoDataSetData data  "
                    + " left join fetch data.phenoDataSetCollection phenoDataSetCollection"
                    + " left join fetch data.phenoDataSetFieldDisplay phenoDataSetFieldDisplay "
                    + " left join fetch phenoDataSetFieldDisplay.phenoDataSetField phenoField "
                    + (((phenoCollectionIdsSoFar.isEmpty() && pdsgWithFilters.isEmpty())
                            ? (" where data.phenoDataSetCollection.linkSubjectStudy.id in (:idsToInclude) ")
                            : (" where data.phenoDataSetCollection.id in (:phenoIdsToInclude)")))
                    + " and data.phenoDataSetFieldDisplay in (:customFieldsList)"
                    + " order by data.phenoDataSetCollection.id";
            Query query2 = getSession().createQuery(queryString);
            if (phenoCollectionIdsSoFar.isEmpty() && pdsgWithFilters.isEmpty()) {
                query2.setParameterList("idsToInclude", idsToInclude);
            } else {
                query2.setParameterList("phenoIdsToInclude", phenoCollectionIdsSoFar);
            }
            query2.setParameterList("customFieldsList", customFieldToGet);

            QueryTranslatorFactory translatorFactory = new ASTQueryTranslatorFactory();
            SessionFactoryImplementor factory = (SessionFactoryImplementor) getSession().getSessionFactory();
            QueryTranslator translator = translatorFactory.createQueryTranslator(query2.getQueryString(),
                    query2.getQueryString(), Collections.EMPTY_MAP, factory);
            translator.compile(Collections.EMPTY_MAP, false);
            log.info(translator.getSQLString());
            List<PhenoDataSetData> phenoData = query2.list();

            HashMap<String, ExtractionVO> hashOfPhenosWithTheirPhenoCustomData = allTheData
                    .getPhenoCustomData();

            ExtractionVO valuesForThisPheno = new ExtractionVO();
            HashMap<String, String> map = null;
            Long previousPhenoId = null;
            //will try to order our results and can therefore just compare to last LSS and either add to or create new Extraction VO
            for (PhenoDataSetData data : phenoData) {

                if (previousPhenoId == null) {
                    map = new HashMap<String, String>();
                    previousPhenoId = data.getPhenoDataSetCollection().getId();
                    valuesForThisPheno.setSubjectUid(
                            data.getPhenoDataSetCollection().getLinkSubjectStudy().getSubjectUID());
                    valuesForThisPheno.setRecordDate(data.getPhenoDataSetCollection().getRecordDate());
                    valuesForThisPheno
                            .setCollectionName(data.getPhenoDataSetCollection().getQuestionnaire().getName());
                } else if (data.getPhenoDataSetCollection().getId().equals(previousPhenoId)) {
                    //then just put the data in
                } else { //if it's a new LSS, finalize the previous map, etc.
                    valuesForThisPheno.setKeyValues(map);
                    hashOfPhenosWithTheirPhenoCustomData.put(("" + previousPhenoId), valuesForThisPheno);
                    previousPhenoId = data.getPhenoDataSetCollection().getId();
                    map = new HashMap<String, String>();//reset
                    valuesForThisPheno = new ExtractionVO();
                    valuesForThisPheno.setSubjectUid(
                            data.getPhenoDataSetCollection().getLinkSubjectStudy().getSubjectUID());
                    valuesForThisPheno.setRecordDate(data.getPhenoDataSetCollection().getRecordDate());
                    valuesForThisPheno
                            .setCollectionName(data.getPhenoDataSetCollection().getQuestionnaire().getName());
                }

                //if any error value, then just use that - though, yet again I really question the acceptance of error data
                if (data.getErrorDataValue() != null && !data.getErrorDataValue().isEmpty()) {
                    map.put(data.getPhenoDataSetFieldDisplay().getPhenoDataSetField().getName(),
                            data.getErrorDataValue());
                } else {
                    // Determine field type and assign key value accordingly
                    if (data.getPhenoDataSetFieldDisplay().getPhenoDataSetField().getFieldType().getName()
                            .equalsIgnoreCase(Constants.FIELD_TYPE_DATE)) {
                        map.put(data.getPhenoDataSetFieldDisplay().getPhenoDataSetField().getName(),
                                data.getDateDataValue().toString());
                    }
                    if (data.getPhenoDataSetFieldDisplay().getPhenoDataSetField().getFieldType().getName()
                            .equalsIgnoreCase(Constants.FIELD_TYPE_NUMBER)) {
                        map.put(data.getPhenoDataSetFieldDisplay().getPhenoDataSetField().getName(),
                                data.getNumberDataValue().toString());
                    }
                    if (data.getPhenoDataSetFieldDisplay().getPhenoDataSetField().getFieldType().getName()
                            .equalsIgnoreCase(Constants.FIELD_TYPE_CHARACTER)) {
                        map.put(data.getPhenoDataSetFieldDisplay().getPhenoDataSetField().getName(),
                                data.getTextDataValue());
                    }
                }
            }

            //finalize the last entered key value sets/extraction VOs
            if (map != null && previousPhenoId != null) {
                valuesForThisPheno.setKeyValues(map);
                hashOfPhenosWithTheirPhenoCustomData.put("" + previousPhenoId, valuesForThisPheno);
            }

            //can probably now go ahead and add these to the dataVO...even though inevitable further filters may further axe this list or parts of it.
            allTheData.setPhenoCustomData(hashOfPhenosWithTheirPhenoCustomData);
        }

    }
    return idsToInclude;

}

From source file:au.org.theark.core.dao.StudyDao.java

License:Open Source License

/**
 * 
 * @param allTheData
 * @param personFields
 * @param lssFields
 * @param addressFields
 * @param phoneFields
 * @param otherIDFields
 * @param subjectCFDs
 * @param search
 * @param idsAfterFiltering
 */
private void addDataFromMegaDemographicQuery(DataExtractionVO allTheData,
        Collection<DemographicField> personFields, Collection<DemographicField> lssFields,
        Collection<DemographicField> addressFields, Collection<DemographicField> phoneFields,
        Collection<DemographicField> otherIDFields, Collection<DemographicField> linkSubjectTwinsFields,
        Collection<CustomFieldDisplay> subjectCFDs, Search search, List<Long> idsAfterFiltering) {
    log.info("in addDataFromMegaDemographicQuery"); //if no id's, no need to run this
    if ((!lssFields.isEmpty() || !personFields.isEmpty() || !addressFields.isEmpty() || !phoneFields.isEmpty()
            || !linkSubjectTwinsFields.isEmpty() || !subjectCFDs.isEmpty()) && !idsAfterFiltering.isEmpty()) { // hasEmailFields(dfs)
        //note.  filtering is happening previously...we then do the fetch when we have narrowed down the list of subjects to save a lot of processing
        String queryString = "select distinct lss " // , address, lss, email " +
                + " from LinkSubjectStudy lss "
                + ((!personFields.isEmpty()) ? " left join fetch lss.person person " : "")
                + ((!addressFields.isEmpty()) ? " left join lss.person.addresses a " : "")
                + ((!phoneFields.isEmpty()) ? " left join lss.person.phones p " : "")
                + ((!linkSubjectTwinsFields.isEmpty())
                        ? " left join lss.linkSubjectTwinsAsFirstSubject lstAsFirst  "
                        : "")
                + ((!linkSubjectTwinsFields.isEmpty())
                        ? " left join lss.linkSubjectTwinsAsSecondSubject lstAsSecond  "
                        : "")
                + " where lss.study.id = " + search.getStudy().getId() + " and lss.id in (:idsToInclude) "
                + " order by lss.subjectUID";

        Query query = getSession().createQuery(queryString);
        query.setParameterList("idsToInclude", idsAfterFiltering);
        List<LinkSubjectStudy> subjects = query.list();

        QueryTranslatorFactory translatorFactory = new ASTQueryTranslatorFactory();
        SessionFactoryImplementor factory = (SessionFactoryImplementor) getSession().getSessionFactory();
        QueryTranslator translator = translatorFactory.createQueryTranslator(query.getQueryString(),
                query.getQueryString(), Collections.EMPTY_MAP, factory);
        translator.compile(Collections.EMPTY_MAP, false);
        log.info(translator.getSQLString());

        // DataExtractionVO devo; = new DataExtractionVO();
        HashMap<String, ExtractionVO> hashOfSubjectsWithTheirDemographicData = allTheData.getDemographicData();

        /* this is putting the data we extracted into a generic kind of VO doc that will be converted to an appropriate format later (such as csv/xls/pdf/xml/etc) */
        for (LinkSubjectStudy lss : subjects) {
            ExtractionVO sev = new ExtractionVO();
            sev.setKeyValues(constructKeyValueHashmap(lss, personFields, lssFields, addressFields, phoneFields,
                    otherIDFields, linkSubjectTwinsFields));
            hashOfSubjectsWithTheirDemographicData.put(lss.getSubjectUID(), sev);
        }

    }
}

From source file:Bean.LandingPageBean.java

License:Apache License

private void updateMetadataFields(Query query, BaseTable baseTable) {

    if (query != null) {
        PersistentIdentifierAPI pidApi = new PersistentIdentifierAPI();

        this.logger.info("Setting metadata fields");
        this.metaPid = query.getPID();
        this.metaSubsetURL = pidApi.getPIDObjectFromPIDString(this.metaPid).getURI();
        this.metaParentPid = baseTable.getBaseTablePID();

        this.metaParentURL = pidApi.getPIDObjectFromPIDString(this.metaParentPid).getURI();
        this.metaExecutionDate = query.getExecution_timestamp().toString();
        this.metaResultSetHash = query.getResultSetHash();
        this.metaQueryHash = query.getQueryHash();
        this.metaDescription = query.getQueryDescription();
        this.metaSQLString = query.getQueryString();
        this.metaParentAuthor = baseTable.getAuthor();
        this.metaAuthor = query.getUserName();
        this.metaTitle = query.getSubSetTitle();
        this.metaParentTitle = baseTable.getDataSetTitle();
        this.metaSuggestedCitationString = this.metaAuthor + " ("
                + this.getYearFromDate(query.getExecution_timestamp()) + ") \"" + this.metaTitle
                + "\" created at " + this.metaExecutionDate.toString() + ", PID [ark:" + this.metaPid
                + "]. Subset of " + this.metaParentAuthor + ": \"" + this.getMetaParentTitle() + "\", PID [ark:"
                + this.metaParentPid + "]";

    } else {
        this.logger.severe("basetable or subset does not exist");
        if (query == null) {
            this.logger.info("This was not a query pid. Checking base tables");

            if (baseTable == null) {
                this.logger.severe("Not a valid Pid!");
            } else {
                this.logger.info("Base table found!");
                this.updateBaseTableFields(baseTable);

            }
        }

    }
}

From source file:cgi.lemans.portail.domaine.gamaweb.impl.OrdreDeTravailDao.java

@Override
public List<OrdreDeTravail> findAllDemande(String idRessource) {
    String hql = "from OrdreDeTravail a "

            + "where a.typeActivite!='HTM' "

            + "and a.ressource.idRessource = :idRessource";
    Query query = getSession().createQuery(hql);
    query.setParameter("idRessource", idRessource);
    System.out.println(query.getQueryString());

    List<OrdreDeTravail> results = query.list();
    return results;

}

From source file:cgi.lemans.portail.domaine.gamaweb.impl.RessourceTmaDao.java

@Override
public List<RessourceTma> findQuiEquipe(String tag) {
    String hql = "from RessourceTma a " + "where a.tags " + "like :tag";
    Query query = getSession().createQuery(hql);
    query.setParameter("tag", tag);
    System.out.println(query.getQueryString());

    List<RessourceTma> results = query.list();
    return results;

}

From source file:cognition.pipeline.data.DNCWorkUnitDao.java

License:Apache License

private Object getObjectFromCoordinate(DNCWorkCoordinate coordinate) {
    SessionWrapper sessionWrapper = createSourceSession();
    try {
        Query coordinateQuery = sessionWrapper.getNamedQuery("getObjectFromCoordinate");
        String queryString = coordinateQuery.getQueryString();
        queryString = queryString.replace(":sourceTable", coordinate.getSourceTable())
                .replace(":sourceColumn", coordinate.getSourceColumn())
                .replace(":pkColumnName", coordinate.getPkColumnName())
                .replace(":id", Long.toString(coordinate.getIdInSourceTable()));

        List result = getSQLResultFromSource(queryString);

        if (CollectionUtils.isEmpty(result)) {
            throw new WorkCoordinateNotFound("Coordinate is invalid. No data found at " + coordinate);
        }

        return result.get(0);
    } finally {
        sessionWrapper.closeSession();
    }
}

From source file:cognition.pipeline.data.DNCWorkUnitDao.java

License:Apache License

/**
 * This method saves the conversion result using the saveTextToCoordinate
 * named query.
 * @param coordinate The original coordinate of the text
 */
public void saveConvertedText(DNCWorkCoordinate coordinate) {
    SessionWrapper sessionWrapper = createTargetSession();
    try {
        Query query = sessionWrapper.getNamedQuery("saveTextToCoordinate");
        String queryString = query.getQueryString();
        SQLQuery sqlQuery = sessionWrapper.createSQLQuery(queryString);
        sqlQuery.setParameter(0, coordinate.getPatientId());
        sqlQuery.setParameter(1, coordinate.getSourceTable());
        sqlQuery.setParameter(2, coordinate.getSourceColumn());
        sqlQuery.setParameter(3, coordinate.getIdInSourceTable());
        sqlQuery.setParameter(4, coordinate.getConversionResult());
        sqlQuery.executeUpdate();
    } finally {
        sessionWrapper.closeSession();
    }
}

From source file:com.abiquo.abiserver.scheduler.SchedulerRestrictions.java

License:Mozilla Public License

/**
 * Get all the available PhysicalMachines with enough resources for the VirtualImage. It accesses
 * the DB through Hibernate.
 * 
 * @param required the resource capacity required by the target VirtualImage (ram, hd,
 *            dataCenter).
 * @return all PhysicalMachines available to instantiate a VirtualMachine for the target
 *         VirtualImage.
 * @throws SchedulerException, if there is not any PhysicalMachine with enough resources.
 */
@SuppressWarnings("unchecked")
// PhysicalmachineHB
private List<PhysicalMachine> getFilteredMachines(ImageRequired required) throws SchedulerException {
    List<PhysicalMachine> machines;
    List<PhysicalmachineHB> machinesHibernate;

    Query physicalFiler;
    Session session;

    session = HibernateUtil.getSession();

    physicalFiler = session.createQuery("select pm "
            + "from com.abiquo.abiserver.business.hibernate.pojohb.infrastructure.PhysicalmachineHB pm "
            + "join pm.rack rk " + "join rk.datacenter dc " + "where " + "dc.name in  ('"
            + required.getDataCenter() + "') and " + "(pm.ram - pm.ramUsed) >= " + required.getRam() + " "
            + "and (pm.hd  - pm.hdUsed)  >= " + required.getHd());

    log.debug("HQL : " + physicalFiler.getQueryString());

    machinesHibernate = physicalFiler.list();

    if (machinesHibernate.size() == 0) {

        throw new SchedulerException(
                "Not enough physical machine capacity to instantiate the required virtual appliance", required);
    }

    machines = new ArrayList<PhysicalMachine>();

    for (PhysicalmachineHB machineHib : machinesHibernate) {
        log.debug("PhysicalMachine candidate : " + machineHib.getName());
        machines.add((PhysicalMachine) machineHib.toPojo());
    }

    return machines;
}