Example usage for javax.servlet.http HttpServletRequest getParameterValues

List of usage examples for javax.servlet.http HttpServletRequest getParameterValues

Introduction

On this page you can find example usage for javax.servlet.http HttpServletRequest getParameterValues.

Prototype

public String[] getParameterValues(String name);

Document

Returns an array of String objects containing all of the values the given request parameter has, or null if the parameter does not exist.
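
Before the full examples below, a minimal sketch of the call pattern (the parameter name "color" and the servlet class are hypothetical, not taken from any example on this page). Because getParameterValues returns null when the parameter is absent, guard before iterating:

import java.io.IOException;
import java.io.PrintWriter;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

public class ColorServlet extends HttpServlet {
    @Override
    protected void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException {
        // ?color=red&color=blue yields {"red", "blue"}; a request without the parameter yields null, never an empty array
        String[] colors = request.getParameterValues("color");
        response.setContentType("text/plain");
        PrintWriter out = response.getWriter();
        if (colors == null) {
            out.println("no color parameter sent");
        } else {
            for (String color : colors) {
                out.println("color: " + color);
            }
        }
    }
}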

Usage

From source file:edu.stanford.muse.webapp.JSPHelper.java

/**
 * This used to be a VIP method for Muse; it is now superseded by Searcher.java in ePADD.
 * Handles queries for term, sentiment, person, attachment, docNum, timeCluster, etc.
 * note: date range selection is always ANDed
 * if only_apply_to_filtered_docs, looks at emailDocs, i.e. the ones selected by
 * the current filter (if there is one)
 * if !only_apply_to_filtered_docs, looks at all docs in the archive
 * note: only_apply_to_filtered_docs == true is not honored by the lucene lookup
 * by term (callers need to filter the results themselves)
 * note2: performance can be improved. e.g., in AND mode, searches that
 * iterate through documents, such as selectDocByTag, getBlobsForAttachments,
 * etc., could take the intermediate resultDocs rather than allDocs.
 * set intersection/union could be done in place on the intermediate
 * resultDocs rather than creating a new collection.
 * getDocsForAttachments could be called on the combined result of the
 * attachments and attachmentTypes searches, rather than on each individually.
 * note3: do we want options that let the user choose whether to search
 * only in emails, only in attachments, or both?
 * also, how should we allow variants in combining multiple conditions?
 * there will be work in the UI too.
 * note4: the returned resultBlobs may not be tight, i.e., they may include
 * blobs from docs that are not in the returned resultDocs.
 * but for docs that are in resultDocs, they should not include blobs that do
 * not match the query.
 * these extra blobs will not be seen, since this info is only used for
 * highlighting blobs in resultDocs.
 */
public static Pair<Collection<Document>, Collection<Blob>> selectDocsWithHighlightAttachments(
        HttpServletRequest request, HttpSession session, boolean only_apply_to_filtered_docs,
        boolean or_not_and) throws UnsupportedEncodingException {
    // below are all the controls for selecting docs 
    String term = request.getParameter("term"); // search term
    String[] contact_ids = request.getParameterValues("contact");
    String[] persons = request.getParameterValues("person");
    String[] attachments = request.getParameterValues("attachment"); // actual attachment name

    String[] attachment_extensions = request.getParameterValues("attachment_extension");

    {
        // if attachment_types specified, parse them and add the values in them to attachment_extensions also
        // types are higher level (video, audio, etc.) and map to more than 1 extension
        String[] attachment_types = request.getParameterValues("attachment_type"); // will be something like ["pdf,doc", "ppt,pptx,key"]
        if (!Util.nullOrEmpty(attachment_types)) {
            // assemble all extensions in a list first
            List<String> list = new ArrayList<>();
            if (!Util.nullOrEmpty(attachment_extensions))
                list.addAll(Arrays.asList(attachment_extensions));

            for (String s : attachment_types)
                list.addAll(Util.tokenize(s, ","));
            // trim all spaces, then convert back to array
            list = list.stream().map(s -> s.trim()).collect(Collectors.toList());
            attachment_extensions = list.toArray(new String[list.size()]);
        }
    }

    String datasetId = request.getParameter("datasetId");
    String[] docIds = request.getParameterValues("docId");
    String[] folders = request.getParameterValues("folder");
    String sortByStr = request.getParameter("sort_by");
    Indexer.SortBy sortBy = Indexer.SortBy.RELEVANCE;
    if (!Util.nullOrEmpty(sortByStr)) {
        if ("relevance".equals(sortByStr.toLowerCase()))
            sortBy = Indexer.SortBy.RELEVANCE;
        else if ("recent".equals(sortByStr.toLowerCase()))
            sortBy = Indexer.SortBy.RECENT_FIRST;
        else if ("chronological".equals(sortByStr.toLowerCase()))
            sortBy = Indexer.SortBy.CHRONOLOGICAL_ORDER;
        else {
            log.warn("Unknown sort by option: " + sortBy);
        }
    }

    // compute date requirements. start/end_date are in yyyy/mm/dd format
    int yy = -1, end_yy = -1, mm = -1, end_mm = -1, dd = -1, end_dd = -1;

    String start_date = request.getParameter("start_date");
    if (!Util.nullOrEmpty(start_date)) {
        String[] ss = start_date.split("/");
        if (ss.length > 0) {
            yy = Util.getIntParam(ss[0], -1);
        }
        if (ss.length > 1) {
            mm = Util.getIntParam(ss[1], -1);
        }
        if (ss.length > 2) {
            dd = Util.getIntParam(ss[2], -1);
        }
    }

    String end_date = request.getParameter("end_date");
    if (!Util.nullOrEmpty(end_date)) {
        String[] ss = end_date.split("/");
        if (ss.length > 0) {
            end_yy = Util.getIntParam(ss[0], -1);
        }
        if (ss.length > 1) {
            end_mm = Util.getIntParam(ss[1], -1);
        }
        if (ss.length > 2) {
            end_dd = Util.getIntParam(ss[2], -1);
        }
    }

    // key to a large array of docIds stored in the session
    // it is possible to pass this array as a GET request parameter, but that does not scale due to Tomcat's POST/GET size limits
    String dIdLKey = request.getParameter("dIdLKey");
    if (dIdLKey != null) {
        try {
            Set<String> docIdsLot = (Set<String>) session.getAttribute(dIdLKey);
            Set<String> dIds = new HashSet<String>();
            if (docIds != null)
                for (String docId : docIds)
                    dIds.add(docId);

            if (docIdsLot != null)
                for (String dId : docIdsLot)
                    dIds.add(dId);
            docIds = dIds.toArray(new String[dIds.size()]);
            //System.err.println("Found docIds in the session... read "+docIds.length+" docIds");
        } catch (ClassCastException e) {
            e.printStackTrace();
        }
    }
    String tag = request.getParameter("annotation"); // only one tag supported right now, will revisit if needed

    String[] directions = request.getParameterValues("direction");
    Set<String> directionsSet = new LinkedHashSet<String>();
    if (directions != null)
        for (String d : directions)
            directionsSet.add(d);
    boolean direction_in = directionsSet.contains("in");
    boolean direction_out = directionsSet.contains("out");

    String[] sentiments = request.getParameterValues("sentiment");
    int cluster = HTMLUtils.getIntParam(request, "timeCluster", -1);
    /** usually, there is 1 time cluster per month */

    Set<String> foldersSet = new LinkedHashSet<String>();
    if (folders != null)
        for (String f : folders)
            foldersSet.add(f);

    // a little bit of an asymmetry here, only one groupIdx is considered, can't be multiple
    int groupIdx = HTMLUtils.getIntParam(request, "groupIdx", Integer.MAX_VALUE);
    Archive archive = JSPHelper.getArchive(session);
    AddressBook addressBook = archive.addressBook;
    GroupAssigner groupAssigner = archive.groupAssigner;
    BlobStore attachmentsStore = archive.blobStore;

    Collection<Document> allDocs = getAllDocsAsSet(session, only_apply_to_filtered_docs);
    if (Util.nullOrEmpty(allDocs))
        return new Pair<Collection<Document>, Collection<Blob>>(new ArrayList<Document>(),
                new ArrayList<Blob>());

    //why are there two vars for sentiment and content indexer repns?
    //      Indexer sentiIndexer, indexer;
    //      indexer = sentiIndexer = archive.indexer;

    // the raw request param value is in ISO-8859-1 encoding; reinterpret the bytes as UTF-8 instead

    /**
     * there is a little overlap between datasetId and docForDocIds.
     * probably datasetIds can be got rid of?
     */
    List<Document> docsForGroup = null, docsForDateRange = null, docsForNumbers = null, docsForFolder = null,
            docsForDirection = null, docsForCluster = null, docsForDocIds = null;
    Collection<Document> docsForTerm = null, docsForPersons = null, docsForSentiments = null, docsForTag = null,
            docsForAttachments = null, docsForAttachmentTypes = null, docsForDoNotTransfer = null,
            docsForTransferWithRestrictions = null, docsForReviewed = null, docsForRegex = null;
    Collection<Blob> blobsForAttachments = null, blobsForAttachmentTypes = null, blobsForTerm = null;

    if (!Util.nullOrEmpty(term)) {
        term = JSPHelper.convertRequestParamToUTF8(term);
        if (isRegexSearch(request)) {
            docsForTerm = new LinkedHashSet<Document>(IndexUtils.selectDocsByRegex(archive, allDocs, term));
            // TODO: regex search in attachments is not implemented yet
        } else {
            Indexer.QueryType qt = null;
            String searchType = request.getParameter("searchType");
            if ("correspondents".equals(searchType))
                qt = Indexer.QueryType.CORRESPONDENTS;
            else if ("subject".equals(searchType))
                qt = Indexer.QueryType.SUBJECT;
            else if ("original".equals(searchType))
                qt = Indexer.QueryType.ORIGINAL;
            else if ("regex".equals(searchType))
                qt = Indexer.QueryType.REGEX;
            else
                qt = Indexer.QueryType.FULL;

            Indexer.QueryOptions options = new Indexer.QueryOptions();
            options.setQueryType(qt);
            options.setSortBy(sortBy);

            docsForTerm = archive.docsForQuery(term, options);
            // also search blobs and merge result, but not for subject/corr. search
            if (!"correspondents".equals(searchType) && !"subject".equals(searchType)) {
                blobsForTerm = archive.blobsForQuery(term);
                Set<Document> blobDocsForTerm = (Set<Document>) EmailUtils
                        .getDocsForAttachments((Collection) allDocs, blobsForTerm);
                log.info("Blob docs for term: " + term + ", " + blobDocsForTerm.size() + ", blobs: "
                        + blobsForTerm.size());
                docsForTerm = Util.setUnion(docsForTerm, blobDocsForTerm);
            }
        }
    }

    if ("true".equals(request.getParameter("sensitive"))) {
        Indexer.QueryType qt = null;
        qt = Indexer.QueryType.PRESET_REGEX;
        docsForRegex = archive.docsForQuery(cluster, qt);
    }

    if (foldersSet.size() > 0) {
        docsForFolder = new ArrayList<Document>();
        for (Document d : allDocs) {
            EmailDocument ed = (EmailDocument) d;
            if (foldersSet.contains(ed.folderName))
                docsForFolder.add(ed);
        }
    }

    if ((direction_in || direction_out) && addressBook != null) {
        docsForDirection = new ArrayList<Document>();
        for (Document d : allDocs) {
            EmailDocument ed = (EmailDocument) d;
            int sent_or_received = ed.sentOrReceived(addressBook);
            if (direction_in)
                if (((sent_or_received & EmailDocument.RECEIVED_MASK) != 0) || sent_or_received == 0) // if sent_or_received == 0 => we neither directly recd. nor sent it (e.g. it could be received on a mailing list). so count it as received.
                    docsForDirection.add(ed);
            if (direction_out && (sent_or_received & EmailDocument.SENT_MASK) != 0)
                docsForDirection.add(ed);
        }
    }

    String doNotTransfer = request.getParameter("doNotTransfer");
    if (!Util.nullOrEmpty(doNotTransfer)) {
        boolean val = "true".equals(doNotTransfer);
        docsForDoNotTransfer = new LinkedHashSet<Document>();
        for (Document d : allDocs) {
            EmailDocument ed = (EmailDocument) d;
            if (ed.doNotTransfer == val)
                docsForDoNotTransfer.add(ed);
        }
    }

    String transferWithRestrictions = request.getParameter("transferWithRestrictions");
    if (!Util.nullOrEmpty(transferWithRestrictions)) {
        boolean val = "true".equals(transferWithRestrictions);
        docsForTransferWithRestrictions = new LinkedHashSet<Document>();
        for (Document d : allDocs) {
            EmailDocument ed = (EmailDocument) d;
            if (ed.transferWithRestrictions == val)
                docsForTransferWithRestrictions.add(ed);
        }
    }

    String reviewed = request.getParameter("reviewed");
    if (!Util.nullOrEmpty(reviewed)) {
        boolean val = "true".equals(reviewed);
        docsForReviewed = new LinkedHashSet<Document>();
        for (Document d : allDocs) {
            EmailDocument ed = (EmailDocument) d;
            if (ed.reviewed == val)
                docsForReviewed.add(ed);
        }
    }

    if (sentiments != null && sentiments.length > 0) {
        Lexicon lex = (Lexicon) getSessionAttribute(session, "lexicon");
        docsForSentiments = lex.getDocsWithSentiments(sentiments, archive.indexer, allDocs, cluster,
                request.getParameter("originalContentOnly") != null, sentiments);
    }

    // if (!Util.nullOrEmpty(tag))
    if (tag != null) // note: explicitly allowing tag=<empty> as a way to specify no tag.
    {
        docsForTag = Document.selectDocByTag(allDocs, tag, true);
    }
    if (cluster >= 0) {
        docsForCluster = new ArrayList<>(archive.docsForQuery(null, cluster, Indexer.QueryType.FULL)); // null for term returns all docs in cluster
    }

    if (persons != null || contact_ids != null) {
        persons = JSPHelper.convertRequestParamsToUTF8(persons);
        docsForPersons = IndexUtils.selectDocsByAllPersons(addressBook, (Collection) allDocs, persons,
                Util.toIntArray(contact_ids));
    }

    //Some docs with faulty date are assigned 1960/01/01
    if (end_yy >= 0 && yy >= 0) // date range
    {
        docsForDateRange = (List) IndexUtils.selectDocsByDateRange((Collection) allDocs, yy, mm, dd, end_yy,
                end_mm, end_dd);
        log.info("Found " + docsForDateRange.size() + " docs in range: [" + yy + "/" + mm + "/" + dd + " - ["
                + end_yy + "/" + end_mm + "/" + end_dd + "]");
    } else if (yy >= 0) // single month or year
    {
        docsForDateRange = IndexUtils.selectDocsByDateRange((Collection) allDocs, yy, mm, dd);
        log.info("Found " + docsForDateRange.size() + " docs beyond " + yy + "/" + mm + "/" + dd);
    }

    if (groupIdx != Integer.MAX_VALUE) {
        if (groupIdx >= groupAssigner.getSelectedGroups().size())
            groupIdx = -1; // must be the "None" group
        docsForGroup = (List) IndexUtils.getDocsForGroupIdx((Collection) allDocs, addressBook, groupAssigner,
                groupIdx);
    }

    if (!Util.nullOrEmpty(attachments)) {
        attachments = JSPHelper.convertRequestParamsToUTF8(attachments);
        blobsForAttachments = IndexUtils.getBlobsForAttachments(allDocs, attachments, attachmentsStore);
        docsForAttachments = (Set<Document>) EmailUtils.getDocsForAttachments((Collection) allDocs,
                blobsForAttachments);
    }

    if (!Util.nullOrEmpty(attachment_extensions)) {
        attachment_extensions = JSPHelper.convertRequestParamsToUTF8(attachment_extensions);
        blobsForAttachmentTypes = IndexUtils.getBlobsForAttachmentTypes(allDocs, attachment_extensions);
        docsForAttachmentTypes = (Set<Document>) EmailUtils.getDocsForAttachments((Collection) allDocs,
                blobsForAttachmentTypes);
    }

    if (!Util.nullOrEmpty(docIds)) {
        docsForDocIds = new ArrayList<>();
        for (String id : docIds) {
            Document d = archive.docForId(id);
            if (d != null)
                docsForDocIds.add(d);
        }
    }

    if (datasetId != null) {
        // note: these docNums have nothing to do with docIds of the docs.
        // they are just indexes into a dataset, which is a collection of docs from the result of some search.
        DataSet dataset = (DataSet) getSessionAttribute(session, datasetId);
        if (dataset != null) {
            String[] docNumbers = request.getParameterValues("docNum");
            if (docNumbers == null)
                docsForNumbers = dataset.getDocs();
            else
                docsForNumbers = (List) IndexUtils.getDocNumbers(dataset.getDocs(), docNumbers);
        }
    }

    // apply the OR or AND of the filters
    boolean initialized = false;
    List<Document> resultDocs;
    List<Blob> resultBlobs;

    // if its an AND selection, and we are applying only to filtered docs, start with it and intersect with the docs for each facet.
    // otherwise, start with nothing as an optimization, since there's no need to intersect with it.
    // the docs for each facet will always be a subset of archive's docs.
    if (only_apply_to_filtered_docs && !or_not_and && allDocs != null) {
        initialized = true;
        resultDocs = new ArrayList<>(allDocs);
    } else
        resultDocs = new ArrayList<>();

    if (docsForTerm != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForTerm);
        } else
            resultDocs.retainAll(docsForTerm);
    }

    if (docsForRegex != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForRegex);
        } else
            resultDocs.retainAll(docsForRegex);
    }

    if (docsForSentiments != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForSentiments);
        } else
            resultDocs = Util.listIntersection(resultDocs, docsForSentiments);
    }
    if (docsForTag != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForTag);
        } else
            resultDocs = Util.listIntersection(resultDocs, docsForTag);
    }

    if (docsForCluster != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForCluster);
        } else
            resultDocs.retainAll(docsForCluster);
    }

    if (docsForDocIds != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForDocIds);
        } else
            resultDocs.retainAll(docsForDocIds);
    }

    if (docsForPersons != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForPersons);
        } else
            resultDocs = Util.listIntersection(resultDocs, docsForPersons);
    }

    if (docsForDateRange != null) {
        // if (!initialized || or_not_and)
        // note: date range selection is always ANDed, regardless of or_not_and
        if (!initialized) {
            initialized = true;
            resultDocs.addAll(docsForDateRange);
        } else
            resultDocs.retainAll(docsForDateRange);
    }
    if (docsForFolder != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForFolder);
        } else
            resultDocs.retainAll(docsForFolder);
    }

    if (docsForDirection != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForDirection);
        } else
            resultDocs.retainAll(docsForDirection);
    }

    if (docsForDoNotTransfer != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForDoNotTransfer);
        } else
            resultDocs.retainAll(docsForDoNotTransfer);
    }

    if (docsForTransferWithRestrictions != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForTransferWithRestrictions);
        } else
            resultDocs.retainAll(docsForTransferWithRestrictions);
    }

    if (docsForReviewed != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForReviewed);
        } else
            resultDocs.retainAll(docsForReviewed);
    }

    if (docsForGroup != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForGroup);
        } else
            resultDocs.retainAll(docsForGroup);
    }

    if (docsForAttachments != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForAttachments);
        } else
            resultDocs.retainAll(docsForAttachments);
    }

    if (docsForAttachmentTypes != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForAttachmentTypes);
        } else
            resultDocs.retainAll(docsForAttachmentTypes);
    }

    if (docsForNumbers != null) {
        if (!initialized || or_not_and) {
            initialized = true;
            resultDocs.addAll(docsForNumbers);
        } else
            resultDocs.retainAll(docsForNumbers);
    }

    if (!initialized) {
        if (cluster >= 0)
            resultDocs = new ArrayList<Document>(archive.docsForQuery(null, cluster, Indexer.QueryType.FULL)); // means all docs in cluster x
        else {
            resultDocs = new ArrayList<Document>();
            resultDocs.addAll(allDocs); // if no filter, all docs are selected
        }
    }

    // compute resultBlobs
    if (or_not_and) {
        resultBlobs = Util.listUnion(blobsForAttachments, blobsForAttachmentTypes);
        resultBlobs = Util.listUnion(resultBlobs, blobsForTerm);
    } else {
        resultBlobs = Util.listIntersection(blobsForAttachments, blobsForAttachmentTypes);
        resultBlobs = Util.listIntersection(resultBlobs, blobsForTerm);
    }

    // we need to sort again if needed. by default, we're here assuming relevance based sort.
    // can't rely on indexer sort.
    // for 2 reasons:
    // 1. blobs vs. docs may not be sorted by date as they are retrieved separately from the index.
    // 2. there may be no search term -- the user can use this as a way to list all docs, but may still want sort by time
    if (sortBy == Indexer.SortBy.CHRONOLOGICAL_ORDER)
        Collections.sort(resultDocs);
    else if (sortBy == Indexer.SortBy.RECENT_FIRST) {
        Collections.sort(resultDocs);
        Collections.reverse(resultDocs);
    }

    return new Pair<Collection<Document>, Collection<Blob>>(resultDocs, resultBlobs);
}
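
For orientation, a hedged sketch of how this method might be invoked from a JSP or servlet. The flag parameter names below ("onlyFiltered", "matchMode") are hypothetical illustrations, not parameters that ePADD/Muse actually defines:

    // assumes the usual request/session objects available in a servlet or JSP;
    // the method declares throws UnsupportedEncodingException, which the caller must handle or propagate
    boolean onlyFiltered = "true".equals(request.getParameter("onlyFiltered")); // hypothetical parameter
    boolean orNotAnd = "or".equals(request.getParameter("matchMode"));          // hypothetical parameter
    Pair<Collection<Document>, Collection<Blob>> result =
            JSPHelper.selectDocsWithHighlightAttachments(request, session, onlyFiltered, orNotAnd);
    // the first element holds the selected documents; the second, the attachment blobs to highlight in them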

From source file:gov.nih.nci.ncicb.cadsr.common.util.CommonListBean.java

/**
 * Constructor
 *
 *  @param request
 *  @param dBBroker
 *  @param mySearch
 *  @param myJspLinkParm
 *  @param myDispParm
 *  @param mySqlParm
 *  @param buildSearchClause
 *  @param lovPassbackCols
 */
public CommonListBean(HttpServletRequest request,
        //DBBroker dBBroker,
        DBUtil dBBroker, String[] mySearch, String[] myJspLinkParm, String[] myDispParm, String[] mySqlParm,
        boolean buildSearchClause, int[] lovPassbackCols)

        throws SQLException {

    myRequest = request;
    this.dBBroker = dBBroker;

    searchParm = mySearch;
    jspLinkParm = myJspLinkParm;
    displayParm = myDispParm;
    sqlStmtParm = mySqlParm;
    myLovCols = lovPassbackCols;
    int as = displayParm.length;
    if (searchParm != null) {
        int numSearchFields = this.searchParm.length / 2;
    }
    int ss = 0;
    //System.out.println("numSearchParm: "+ numSearchFields);

    mySearchStr = request.getParameterValues("SEARCH");
    if (buildSearchClause) {
        if (mySearchStr != null) {
            ss = mySearchStr.length;
            for (int s = 0; s < ss; s++) {
                String myStr = mySearchStr[s];
                //String newStr = StringReplace.strReplace(myStr,"*","%");
                searchClause += " and upper (nvl(" + this.searchParm[2 * s] + ",'%')) like upper ( '%" + myStr
                        + "%') ";
                /*searchClause+= " and upper (nvl(" + this.searchParm[2*s] + ",'%')) like upper ( nvl('" +
                newStr + "','%')) " ;*/
                //System.out.println("myStr: " + myStr + " searchClause: " + searchClause);
            }
        }
    }

    if (request.getParameter("NOT_FIRST_DISPLAY") != null) {
        this.firstTime = false;
        populate();
    } else {
        this.firstTime = true;
    }
}

From source file:fr.paris.lutece.plugins.document.web.publishing.DocumentPublishingJspBean.java

/**
 * Process the publishing of the document
 *
 * @param request
 *            the HTTP request
 * @return The Jsp URL of the process result
 */
public String doAssignedDocument(HttpServletRequest request) {
    // Retrieve the parameters to process
    int nDocumentId = IntegerUtils.convert(request.getParameter(PARAMETER_DOCUMENT_ID));
    String strPortletId = request.getParameter(PARAMETER_PORTLET_ID);

    // retrieve the selected portlets ids
    String[] arrayDocumentListPortletIds = request.getParameterValues(PARAMETER_DOCUMENT_LIST_PORTLET_IDS);
    String[] arrayDocumentPortletIds = request.getParameterValues(PARAMETER_DOCUMENT_PORTLET_IDS);
    List<String> listPortletIds = new ArrayList<String>();

    if (arrayDocumentListPortletIds != null) {
        for (String strId : arrayDocumentListPortletIds) {
            listPortletIds.add(strId);
        }
    }

    if (arrayDocumentPortletIds != null) {
        for (String strId : arrayDocumentPortletIds) {
            listPortletIds.add(strId);
        }
    }

    if ((listPortletIds.size() > 0) || (strPortletId != null)) {
        if (strPortletId == null) {
            for (String strId : listPortletIds) {
                int nPortletId = IntegerUtils.convert(strId);
                int nStatus = IntegerUtils.convert(request.getParameter(PARAMETER_DOCUMENT_PUBLISHED_STATUS));

                if (!PublishingService.getInstance().isAssigned(nDocumentId, nPortletId)) {
                    // Publishing of the document: if status ==
                    // DocumentListPortlet.STATUS_UNPUBLISHED (=1), the
                    // document is only assigned; otherwise it is assigned AND
                    // published
                    PublishingService.getInstance().assign(nDocumentId, nPortletId);

                    if (nStatus == DocumentPublication.STATUS_PUBLISHED) {
                        PublishingService.getInstance().publish(nDocumentId, nPortletId);
                    }
                }
            }
        } else {
            int nIdPortlet = IntegerUtils.convert(strPortletId);
            PublishingService.getInstance().publish(nDocumentId, nIdPortlet);
        }
    }

    // Display the page of publishing
    return getUrlAssignedPage(nDocumentId);
}

From source file:br.mdarte.exemplo.academico.accessControl.EntrarLoginValidar.java

public ActionForward execute(ActionMapping mapping, ActionForm form, HttpServletRequest request,
        HttpServletResponse response) throws Exception {

    final ActionForward forward = _verificarLogin(mapping, form, request, response);
    try {
        request.getSession().setAttribute("form", form);
    } catch (java.lang.Exception exception) {
        Object formObj = org.andromda.presentation.bpm4struts.PageSessionObjectUtil.getPageForm(request,
                "${action.useCase.name}");

        if (formObj != null) {
            try {
                formObj.getClass()
                        .getMethod("resetCheckBoxes",
                                new Class[] { org.apache.struts.action.ActionMapping.class,
                                        javax.servlet.http.HttpServletRequest.class })
                        .invoke(formObj, new Object[] { null, null });
            } catch (Exception e2) {
                // ignoring
            }

            final java.util.Map parameters = new java.util.HashMap();
            for (final java.util.Enumeration names = request.getParameterNames(); names.hasMoreElements();) {
                final String name = String.valueOf(names.nextElement());
                parameters.put(name, request.getParameterValues(name));
            }
            try {
                org.apache.commons.beanutils.BeanUtils.populate(formObj, parameters);
            } catch (java.lang.Exception populateException) {
                // ignore if we have an exception here (we just don't populate).
            }
        }
        throw exception;
    }

    request.getSession().setAttribute("form", form);
    return forward;
}

From source file:edu.jhu.bremon.servlet.Main.java

/**
 * Processes requests for both HTTP <code>GET</code> and <code>POST</code>
 * methods.
 *
 * @param request servlet request
 * @param response servlet response
 * @throws ServletException if a servlet-specific error occurs
 * @throws IOException if an I/O error occurs
 */
protected void processRequest(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {
    try {
        User sessionUser = (User) request.getSession().getAttribute("sessionUser");

        // Get user details for sign in, register, or update user
        User loginUser = new User();
        loginUser.setPassword(request.getParameter("password"));
        loginUser.setEmail(request.getParameter("email"));
        loginUser.setAuthenticated(false);
        loginUser.setFname(request.getParameter("fname"));
        loginUser.setLname(request.getParameter("lname"));

        // Get the optional audiobook ID
        Integer audiobookId = null;
        String audiobookIdStr = request.getParameter("audiobookId");
        if (audiobookIdStr != null && !audiobookIdStr.isEmpty()) {
            audiobookId = Integer.parseInt(audiobookIdStr);
        }

        // Load search terms from the params
        String[] queries = request.getParameterValues("query");
        String[] attributes = request.getParameterValues("queryAttribute");
        String[] queryTypes = request.getParameterValues("queryType");
        List<AudiobookSearchDetails> searchDetailsList = new ArrayList<AudiobookSearchDetails>();
        if (queries != null && attributes != null && queryTypes != null) {
            for (int i = 0; i < queries.length && i < attributes.length && i < queryTypes.length; i++) {
                AudiobookSearchDetails search = new AudiobookSearchDetails();
                search.setQuery(queries[i]);
                search.setAttributeType(AudiobookAttributeType.valueOf(attributes[i]));
                search.setQueryType(AudiobookQueryType.valueOf(queryTypes[i]));
                searchDetailsList.add(search);
            }
        }

        // Load all details into ServiceUserInput object to send into logic layer
        ServiceUserInput serviceUserInput = new ServiceUserInput();
        serviceUserInput.setAudiobookId(audiobookId);
        serviceUserInput.setSearchDetails(searchDetailsList);
        serviceUserInput.setUser(loginUser);

        // Determine the operation being applied to the input
        String operationTypeStr = request.getParameter("operation");
        ServiceOperation op = ServiceOperation.HOME;
        if (operationTypeStr != null && !operationTypeStr.isEmpty()) {
            op = ServiceOperation.valueOf(operationTypeStr);
        }

        // Get the operation from the operation factory
        ServiceAction action = ServiceActionFactory.getInstance().getServiceAction(op, userDao, audiobookDao,
                audiobookStatusDao);

        // Perform the operation
        ServiceResults results = action.performOperation(serviceUserInput, sessionUser);

        List<AudiobookListItem> cart = null;
        List<AudiobookListItem> purchased = null;
        if (results.getSessionUser() != null) {
            cart = UserUtil.getInstance().getCart(this.getAudiobookDao(), this.getAudiobookStatusDao(),
                    results.getSessionUser().getId());
            purchased = UserUtil.getInstance().getPurchased(this.getAudiobookDao(),
                    this.getAudiobookStatusDao(), results.getSessionUser().getId());
        }

        // Optionally invalidate the session or update the session user, cart, and purchases
        if (results.isShouldInvalidateSession()) {
            request.getSession().invalidate();
        } else if (results.getSessionUser() != null) {
            request.getSession().setAttribute("sessionUser", results.getSessionUser());
            request.getSession().setAttribute("cart", cart);
            request.getSession().setAttribute("purchased", purchased);
        }

        String forwardedLocation = getForwardedLocation(op, results.getResultStatus(), results.isSuccess());

        String outputType = request.getParameter("outputType");
        if (outputType != null && outputType.equals("debugJson")) {
            response.setContentType("application/json");
            PrintWriter out = response.getWriter();
            try {
                JsonObject object = new JsonObject();
                Gson gson = new GsonBuilder().setPrettyPrinting().create();
                object.add("results", gson.toJsonTree(results));
                object.add("startSessionUser", gson.toJsonTree(sessionUser));
                object.add("cart", gson.toJsonTree(cart, new TypeToken<List<AudiobookListItem>>() {
                }.getType()));
                object.add("purchased", gson.toJsonTree(purchased, new TypeToken<List<AudiobookListItem>>() {
                }.getType()));
                object.addProperty("forwardedLocation", forwardedLocation);
                out.println(gson.toJson(object));
            } finally {
                out.close();
            }
        } else {
            response.setContentType("text/html;charset=UTF-8");
            request.setAttribute("results", results);
            request.setAttribute("searchResults", results.getSearchResults());
            request.getRequestDispatcher(forwardedLocation).forward(request, response);
        }
    } catch (Exception e) {
        // Unknown system error, forward to index
        request.getRequestDispatcher("index.jsp").forward(request, response);
    }

}
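
The three parallel arrays (query, queryAttribute, queryType) only line up because repeated parameters of the same name come back in the order they appear in the request, which common containers preserve. A small standalone check of that pattern; it uses Spring's MockHttpServletRequest from spring-test purely as a convenient stand-in for a container request (an assumption about available test tooling, not something the example above depends on), and the search values are made up:

import org.springframework.mock.web.MockHttpServletRequest;

public class ParallelParamsDemo {
    public static void main(String[] args) {
        MockHttpServletRequest request = new MockHttpServletRequest();
        // equivalent to ?query=tolkien&queryAttribute=AUTHOR&queryType=CONTAINS&query=hobbit&queryAttribute=TITLE&queryType=EXACT
        request.addParameter("query", "tolkien");
        request.addParameter("queryAttribute", "AUTHOR");
        request.addParameter("queryType", "CONTAINS");
        request.addParameter("query", "hobbit");
        request.addParameter("queryAttribute", "TITLE");
        request.addParameter("queryType", "EXACT");

        String[] queries = request.getParameterValues("query");
        String[] attributes = request.getParameterValues("queryAttribute");
        String[] queryTypes = request.getParameterValues("queryType");
        // index i of each array describes one search criterion
        for (int i = 0; i < queries.length; i++) {
            System.out.println(queries[i] + " / " + attributes[i] + " / " + queryTypes[i]);
        }
    }
}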

From source file:dk.clarin.tools.rest.register.java

public String getarg(HttpServletRequest request, List<FileItem> items, String name) {
    /*
     * Parse the request
     */

    boolean is_multipart_formData = ServletFileUpload.isMultipartContent(request);

    logger.debug("is_multipart_formData:" + (is_multipart_formData ? "ja" : "nej"));

    if (is_multipart_formData) {
        try {
            Iterator<FileItem> itr = items.iterator();
            while (itr.hasNext()) {
                FileItem item = (FileItem) itr.next();
                if (item.isFormField()) {
                    if (name.equals(item.getFieldName()))
                        return item.getString("UTF-8").trim();
                }
            }
        } catch (Exception ex) {
            logger.error("uploadHandler.parseRequest Exception");
        }
    }

    @SuppressWarnings("unchecked")
    Enumeration<String> parmNames = (Enumeration<String>) request.getParameterNames();
    for (Enumeration<String> e = parmNames; e.hasMoreElements();) {
        String parmName = e.nextElement();
        String vals[] = request.getParameterValues(parmName);
        for (int j = 0; j < vals.length; ++j) {
            if (name.equals(parmName)) {
                logger.debug("parmName:" + parmName + " equals:" + name + " , return " + vals[j]);
                return vals[j];
            }
        }
    }
    return null;
}

From source file:com.esri.gpt.control.rest.search.DistributedSearchServlet.java

/**
 * Gets the RIDs specified within the request.
 *
 * @param request
 *          the HTTP request
 * @return the RIDs
 */
@SuppressWarnings("unchecked")
private StringSet getRids(HttpServletRequest request) {
    StringSet rids = new StringSet();
    Map<String, String[]> requestParameterMap = request.getParameterMap();
    for (Map.Entry<String, String[]> e : requestParameterMap.entrySet()) {
        if (e.getKey().equalsIgnoreCase("rids")) {
            String[] values = e.getValue();
            if (values != null) {
                for (String tokens : values) {
                    StringTokenizer st = new StringTokenizer(tokens, ",");
                    while (st.hasMoreElements()) {
                        String value = Val.chkStr((String) st.nextElement());
                        if (value.length() > 0) {
                            try {
                                rids.add(URLDecoder.decode(value, "UTF-8"));
                            } catch (UnsupportedEncodingException e1) {
                                // Should never happen
                                LOG.log(Level.WARNING, "Could not decode uuid", e1);
                            }
                        }
                    }
                }
            }
        }
    }

    String arrRids[] = request.getParameterValues("rid");
    for (int i = 0; arrRids != null && i < arrRids.length; i++) {
        rids.add(arrRids[i]);
    }
    return rids;
}
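
The two lookups above walk the same underlying parameter data in different ways: getParameterMap exposes every parameter at once (which allows the case-insensitive match on "rids"), while getParameterValues is an exact-name lookup. A simplified, standalone restatement of that logic, assuming nothing beyond the servlet API and leaving out the URL decoding and the project-specific StringSet/Val helpers:

import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;
import javax.servlet.http.HttpServletRequest;

public final class RidLookup {
    // Collects "rid" values (exact name) plus comma-separated "rids" values (any letter case).
    public static Set<String> collectRids(HttpServletRequest request) {
        Set<String> rids = new LinkedHashSet<String>();

        // getParameterMap: every parameter at once
        Map<String, String[]> params = request.getParameterMap();
        for (Map.Entry<String, String[]> entry : params.entrySet()) {
            if (entry.getKey().equalsIgnoreCase("rids")) {
                for (String value : entry.getValue()) {
                    for (String token : value.split(",")) { // each value may itself be a comma-separated list
                        if (token.trim().length() > 0) {
                            rids.add(token.trim());
                        }
                    }
                }
            }
        }

        // getParameterValues: exact-name lookup; returns null if "rid" was never sent
        String[] arrRids = request.getParameterValues("rid");
        if (arrRids != null) {
            for (String rid : arrRids) {
                rids.add(rid);
            }
        }
        return rids;
    }
}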

From source file:gallery.web.controller.cms.CacheControlCms.java

@Override
public ModelAndView handleRequest(HttpServletRequest request, HttpServletResponse response) throws Exception {
    //TODO: remake
    String do_param = request.getParameter(DO_PARAM);
    if (PARAM_RUBRICATOR_CLEAR.equals(do_param)) {
        rubrication_service.clearCache();
        common.CommonAttributes.addHelpMessage("operation_succeed", request);
    } else if (PARAM_RUBRICATOR_REFRESH.equals(do_param)) {
        rubrication_service.refreshCache();
        common.CommonAttributes.addHelpMessage("operation_succeed", request);
    } else if (PARAM_RUBRICATOR_IMAGE_CLEAR.equals(do_param)) {
        if (rubric_image_service.clearImages(null)) {
            common.CommonAttributes.addHelpMessage("operation_succeed", request);
        } else {
            common.CommonAttributes.addErrorMessage("operation_fail", request);
        }
    } else if (PARAM_RUBRICATOR_IMAGE_REFRESH.equals(do_param)) {
        if (rubric_image_service.refreshImages(null)) {
            common.CommonAttributes.addHelpMessage("operation_succeed", request);
        } else {
            common.CommonAttributes.addErrorMessage("operation_fail", request);
        }
    }
    if (PARAM_REGION_CLEAR.equals(do_param)) {
        String[] names = request.getParameterValues(PARAM_REGION_NAME);
        Map<String, Boolean> result = new HashMap<String, Boolean>();
        for (String name : (names != null ? names : new String[0])) { // guard: PARAM_REGION_NAME may be absent
            if (name != null) {
                Ehcache cache = cacheManager.getEhcache(name);
                if (cache == null) {
                    result.put(name, Boolean.FALSE);
                    common.CommonAttributes.addErrorMessage("operation_fail", request);
                } else {
                    result.put(name, Boolean.TRUE);
                    cache.removeAll();
                    common.CommonAttributes.addHelpMessage("operation_succeed", request);
                }
            }
        }
        request.setAttribute(config.getContentDataAttribute(), result);
    }
    request.setAttribute(config.getContentUrlAttribute(), content_url);
    request.setAttribute(config.getNavigationUrlAttribute(), navigation_url);

    request.setAttribute("title", " ");
    request.setAttribute("top_header", " ");

    return new ModelAndView(config.getTemplateUrl());
}

From source file:org.hdiv.filter.ValidatorHelperRequest.java

/**
 * It validates the parameters of an init page because our application can receive requests that require validation
 * but don't have any HDIV state. So, despite being init pages, editable data validation must be done.
 *
 * @param request
 *            HttpServletRequest to validate
 * @param target
 *            Part of the url that represents the target action
 * @return valid result if the values of the editable parameters pass the validations defined in hdiv-config.xml.
 *         False otherwise.
 * @since HDIV 1.1.2
 */
public ValidatorHelperResult validateStartPageParameters(HttpServletRequest request, String target) {

    if (this.hdivConfig.existValidations()) {

        Hashtable unauthorizedEditableParameters = new Hashtable();

        Enumeration parameters = request.getParameterNames();
        while (parameters.hasMoreElements()) {

            String parameter = (String) parameters.nextElement();
            String[] values = request.getParameterValues(parameter);

            this.validateEditableParameter(request, target, parameter, values, "text",
                    unauthorizedEditableParameters);

        }

        if (unauthorizedEditableParameters.size() > 0) {

            return this.processValidateParameterErrors(request, unauthorizedEditableParameters);
        }
    }
    return ValidatorHelperResult.VALID;
}

From source file:com.jada.content.template.TemplateEngine.java

protected ContentFilterBean[] getContentFilterBeans(HttpServletRequest request) throws Exception {
    Vector<ContentFilterBean> vector = new Vector<ContentFilterBean>();
    String filters[] = request.getParameterValues("filter");
    if (filters != null) {
        for (String filter : filters) {
            int pos = filter.indexOf(',');
            String customAttribId = filter.substring(0, pos);
            String customAttribOptionId = filter.substring(pos + 1);
            ContentFilterBean bean = new ContentFilterBean();
            bean.setCustomAttribId(Long.valueOf(customAttribId));
            bean.setCustomAttribOptionId(Long.valueOf(customAttribOptionId));
            CustomAttribute customAttribute = CustomAttributeDAO.load(siteDomain.getSite().getSiteId(),
                    Long.valueOf(customAttribId));
            bean.setSystemRecord(customAttribute.getSystemRecord());
            vector.add(bean);
        }
    }
    ContentFilterBean contentFilterBeans[] = new ContentFilterBean[vector.size()];
    vector.copyInto(contentFilterBeans);
    return contentFilterBeans;
}
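
Each "filter" value is expected in the form "<customAttribId>,<customAttribOptionId>", and the parameter is repeated once per active filter. A small sketch of the wire format a caller would produce; the IDs and the /content path are made up for illustration:

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;

public class FilterUrlDemo {
    public static void main(String[] args) throws UnsupportedEncodingException {
        // hypothetical attribute/option IDs, purely to illustrate the "id,optionId" format
        String url = "/content?filter=" + URLEncoder.encode("101,7", "UTF-8")
                + "&filter=" + URLEncoder.encode("102,9", "UTF-8");
        System.out.println(url);
        // on the receiving side, request.getParameterValues("filter") yields {"101,7", "102,9"}
    }
}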