List of usage examples for org.apache.commons.collections.map.ListOrderedMap#decorate
public static OrderedMap decorate(Map map)
From source file:com.cyclopsgroup.waterview.web.taglib.SelectTag.java
/** * Overwrite or implement method processTag() * * @see com.cyclopsgroup.waterview.utils.TagSupportBase#processTag(org.apache.commons.jelly.XMLOutput) *//*from w w w .j a v a 2 s .co m*/ protected void processTag(XMLOutput output) throws Exception { FieldTag fieldTag = (FieldTag) requireParent(FieldTag.class); options = ListOrderedMap.decorate(new HashMap()); invokeBody(output); if (getItems() != null) { Iterator i = Collections.EMPTY_LIST.iterator(); if (TypeUtils.isIteratable(getItems())) { i = TypeUtils.iterate(getItems()); } else if (getItems() instanceof Map) { i = ((Map) getItems()).entrySet().iterator(); } while (i.hasNext()) { Object item = i.next(); SelectOption option = null; if (item instanceof SelectOption) { option = (SelectOption) item; } else if (item instanceof Map.Entry) { Map.Entry e = (Map.Entry) item; String name = TypeUtils.toString(e.getKey()); option = new DefaultSelectOption(name, TypeUtils.toString(e.getValue())); } else { String name = TypeUtils.toString(item); option = new DefaultSelectOption(name, name); } addOption(option); } } JellyEngine je = (JellyEngine) getServiceManager().lookup(JellyEngine.ROLE); final Script script = je.getScript("/waterview/FormSelectInput.jelly"); Script s = new Script() { public Script compile() throws JellyException { return this; } public void run(JellyContext context, XMLOutput output) throws JellyTagException { context.setVariable("selectTag", SelectTag.this); script.run(context, output); } }; fieldTag.setBodyScript(s); }
From source file:com.cyclopsgroup.waterview.tool.PopulateToolsValve.java
/** * Override or implement method of parent class or interface * /* www . j a va 2s . c om*/ * @see org.apache.avalon.framework.configuration.Configurable#configure(org.apache.avalon.framework.configuration.Configuration) */ public void configure(Configuration conf) throws ConfigurationException { int repoSize = conf.getChild("application-tools").getAttributeAsInteger("size", -1); if (repoSize <= 0) { applicationTools = new Hashtable(); } else { applicationTools = new LRUMap(repoSize); } Configuration[] toolConfs = conf.getChild("tools").getChildren("tool"); toolDefinitions = ListOrderedMap.decorate(new Hashtable(toolConfs.length)); for (int i = 0; i < toolConfs.length; i++) { Configuration toolConf = toolConfs[i]; ToolDef def = new ToolDef(toolConf); toolDefinitions.put(def.name, def); } }
From source file:com.openedit.BaseWebPageRequest.java
public Map getParameterMap() { if (getRequest() != null) { Map combinedparams = null; String[] ordering = getRequest().getParameterValues("fieldorder"); Enumeration enumeration = getRequest().getParameterNames(); if (ordering == null) { combinedparams = new HashMap(); //unsorted } else {/*from ww w . j a va 2 s .co m*/ combinedparams = ListOrderedMap.decorate(new HashMap()); //replace the enumartion with a sorted one for (int i = 0; i < ordering.length; i++) { String[] allv = getRequest().getParameterValues(ordering[i]); if (allv != null && allv.length == 1) { combinedparams.put(ordering[i], allv[0]); } else { combinedparams.put(ordering[i], allv); } } } while (enumeration.hasMoreElements()) { String key = (String) enumeration.nextElement(); if (ordering != null && combinedparams.containsKey(key)) { continue; //Skip if already in there } String[] allv = getRequest().getParameterValues(key); if (allv != null && allv.length == 1) { combinedparams.put(key, allv[0]); } else { combinedparams.put(key, allv); } } if (ordering != null) { combinedparams.remove("fieldorder"); } Map locals = getAllLocalParameters(); combinedparams.putAll(locals); //get json stuff Map jsonRequest = (Map) getPageValue("_jsonRequest"); if (jsonRequest != null) { combinedparams.putAll(jsonRequest); } return combinedparams; } else { Map locals = getAllLocalParameters(); return locals; } }
From source file:org.iplantc.persondir.support.ldap.AttributesMapperImpl.java
/** * Create a Map instance to be used as attribute map. * <br/>/*w ww.j ava 2s. com*/ * By default, a linked case-insensitive Map will be created * * @param attributeCount the attribute count, to be used as initial capacity for the Map * @return the new Map instance */ @SuppressWarnings("unchecked") protected Map<String, Object> createAttributeMap(int attributeCount) { return ListOrderedMap.decorate(new CaseInsensitiveMap(attributeCount > 0 ? attributeCount : 1)); }
From source file:org.jasig.cas.ticket.registry.support.kryo.KryoTranscoderTests.java
@Test public void verifyEncodeDecodeTGTWithListOrderedMap() throws Exception { final Credential userPassCredential = new UsernamePasswordCredential(USERNAME, PASSWORD); @SuppressWarnings("unchecked") final TicketGrantingTicket expectedTGT = new MockTicketGrantingTicket(TGT_ID, userPassCredential, ListOrderedMap.decorate(this.principalAttributes)); expectedTGT.grantServiceTicket(ST_ID, null, null, false); assertEquals(expectedTGT, transcoder.decode(transcoder.encode(expectedTGT))); }
From source file:org.jasig.services.persondir.support.CaseInsensitiveAttributeNamedPersonImpl.java
@SuppressWarnings("unchecked") @Override//from w w w .j a va 2 s .c o m protected Map<String, List<Object>> createImmutableAttributeMap(int size) { return ListOrderedMap.decorate(new CaseInsensitiveMap(size > 0 ? size : 1)); }
From source file:org.jasig.services.persondir.support.jdbc.ColumnMapParameterizedRowMapper.java
/** * Create a Map instance to be used as column map. * <br/>//from w w w.jav a2 s . c o m * By default, a linked case-insensitive Map will be created * * @param columnCount the column count, to be used as initial capacity for the Map * @return the new Map instance */ @SuppressWarnings("unchecked") protected Map<String, Object> createColumnMap(int columnCount) { return ListOrderedMap.decorate(new CaseInsensitiveMap(columnCount > 0 ? columnCount : 1)); }
From source file:org.pentaho.commons.connection.PentahoDataTransmuter.java
/** * This method takes a column of data, and turns it into multiple columns based on the values within the column. The * measure column specified is then distributed among the newly created columns. Sparse data is handled by populating * missing cells with nulls. This version of the method also takes two additional parameters - the column to sort the * new columns by, and a formatter for that column. * /*from w w w. j a v a2 s . co m*/ * @param source * The starting IPentahoResultSet * @param columnToPivot * The column that becomes multiple columns * @param measureColumn * The measures column to distribute to the new columns created * @param columnToSortColumnsBy * The column to use to sort the newly created columns by * @param pivotDataFormatter * If the column to pivot requires formatting, this is the formatter to use * @param sortDataFormatter * The formatter to use to convert the sort column to a string * @param orderedMaps * If true, will sort the new column names alphabetically. If false, the colums will be created in the order * of appearance in the rows. * @return IPentahoResultSet containing crosstabbed data. * * @author mbatchelor * * Assumptions: a- This only works with one dimension going across. This won't do multi-level crosstabbing. * * b- All column numbers given are assumed to be ZERO based. So, the first column is 0. * * c- If a columnToSortColumnsBy column is specified (>=0), the orderedMaps flag will be set to true * regardless of the passed in value. * * d- For now, we assume that the column to sort by is removed from the dataset. In the future, this will not * be an assumption. * * TODO: Update method to make removal of sort-by column optional. 
* * Example: * * Starting Resultset ================== Month|Vendor|Rank|Counts Jan |A-A-A |2 |92 Jan |Acme |3 |200 Jan * |Ajax |4 |163 Feb |Acme |3 |27 Feb |Ajax |4 |102 Mar |Donn |1 |427 Mar |A-A-A|2 |301 Mar |Acme |3 |82 * * Could Become ============ parameters: (startingResultSet, 1, 3, 2, null, decimalFormatObject, true) * * Month|Donn|A-A-A|Acme|Ajax Jan |null|92 |200 |163 Feb |null|null |27 |102 Mar |427 |301 |82 |null * * */ @SuppressWarnings({ "unchecked" }) public static IPentahoResultSet crossTab(IPentahoResultSet source, int columnToPivot, int measureColumn, int columnToSortColumnsBy, Format pivotDataFormatter, Format sortDataFormatter, boolean orderedMaps) { // System.out.println("*********************Before********************"); // System.out.println(dump(source)); // First, do some error checking... if (source == null) { throw new IllegalArgumentException(Messages.getString("PentahoDataTransmuter.ERROR_0002_NULL_DATASET")); //$NON-NLS-1$ } int sourceColumnCount = source.getColumnCount(); if (columnToPivot > sourceColumnCount) { throw new IllegalArgumentException( Messages.getString("PentahoDataTransmuter.ERROR_0003_INVALID_PIVOT_COLUMN")); //$NON-NLS-1$ } if (measureColumn > sourceColumnCount) { throw new IllegalArgumentException( Messages.getString("PentahoDataTransmuter.ERROR_0004_INVALID_MEASURE_COLUMN")); //$NON-NLS-1$ } if (columnToSortColumnsBy > sourceColumnCount) { throw new IllegalArgumentException( Messages.getString("PentahoDataTransmuter.ERROR_0005_INVALID_SORT_COLUMN")); //$NON-NLS-1$ } // Now, setup so variables and such final String sortPrefixSeparator = "\t"; //$NON-NLS-1$ Map<Object, Object> rowMap = null, newHeadersMap = null; // Force orderedMaps to true if we're sorting using a column in the // input. // See assumption 'c' in the comment-block above. if (columnToSortColumnsBy >= 0) { orderedMaps = true; } if (orderedMaps) { // If we're using ordered maps, then our maps become TreeMaps. 
rowMap = new TreeMap<Object, Object>(); // Map of the current row newHeadersMap = new TreeMap<Object, Object>(); // New header columns map } else { // Use Apache ListOrderedMap so that columns become ordered by their // position in the data. rowMap = ListOrderedMap.decorate(new HashMap()); newHeadersMap = ListOrderedMap.decorate(new HashMap()); } List<String> columnHeaders = new ArrayList<String>(); // All column headers // Create column headers of the known columns IPentahoMetaData origMetaData = source.getMetaData(); Object[][] origColHeaders = origMetaData.getColumnHeaders(); for (int i = 0; i < origColHeaders[0].length; i++) { if ((i != columnToPivot) && (i != measureColumn)) { columnHeaders.add(origColHeaders[0][i].toString()); } } // Now, we have the starting column headers. Time to start iterating // over the data. Object colPivotData, colMeasureData, cellData, colToSortByRaw; Object[] rowData = source.next(); String columnPrefix = null; Map<Object, Object> currentMap = null; while (rowData != null) { colPivotData = rowData[columnToPivot]; // The data we're pivoting to columns if (colPivotData == null) { throw new IllegalArgumentException( Messages.getString("PentahoDataTransmuter.ERROR_0006_CANNOT_PIVOT_NULL_DATA")); //$NON-NLS-1$ } colMeasureData = rowData[measureColumn]; // The value data we're // using as the final set. if (columnToSortColumnsBy >= 0) { colToSortByRaw = rowData[columnToSortColumnsBy]; if (colToSortByRaw == null) { throw new IllegalArgumentException( Messages.getString("PentahoDataTransmuter.ERROR_0007_CANNOT_SORT_NULL_DATA")); //$NON-NLS-1$ } if (sortDataFormatter != null) { columnPrefix = sortDataFormatter.format(colToSortByRaw); } else { columnPrefix = colToSortByRaw.toString(); } } currentMap = rowMap; // Start at the top... for (int i = 0; i < rowData.length; i++) { if ((i != columnToPivot) && (i != measureColumn) && (i != columnToSortColumnsBy)) { // I'm on a data row (like a column header). Find it in the // row map. 
cellData = currentMap.get(rowData[i]); if (cellData == null) { // Add the column to the current map of maps Map newColumnMap = null; if (orderedMaps) { newColumnMap = new TreeMap(); } else { newColumnMap = ListOrderedMap.decorate(new HashMap()); } currentMap.put(rowData[i], newColumnMap); currentMap = newColumnMap; } else { // Found something here - it should be a map. currentMap = (Map) cellData; } } } // Done iterating over columns creating other columns. Now, create // (or locate) pivoted data as a column String formattedPivotData = null; if (pivotDataFormatter != null) { if (pivotDataFormatter instanceof MessageFormat) { formattedPivotData = pivotDataFormatter.format(new Object[] { colPivotData }); } else { formattedPivotData = pivotDataFormatter.format(colPivotData); } } else { formattedPivotData = colPivotData.toString(); } // Do column sorting based on another input column. if (columnToSortColumnsBy >= 0) { formattedPivotData = columnPrefix + sortPrefixSeparator + formattedPivotData; } // For this row, look for the pivoted data in newHeaders. 
Object header = newHeadersMap.get(formattedPivotData); if (header == null) { // Create a map containing just the new column headers newHeadersMap.put(formattedPivotData, ""); //$NON-NLS-1$ } // Put the measure data in the final spot in the map currentMap.put(formattedPivotData, colMeasureData); // Get next row rowData = source.next(); } // Add the new headers to the columnHeaders list Iterator hIt = newHeadersMap.keySet().iterator(); while (hIt.hasNext()) { columnHeaders.add(hIt.next().toString()); } // Create each individual row ArrayList rows = new ArrayList(); // The uniqueItems collections allows me to handle null/missing values Collection uniqueItems = rowMap.keySet(); // For each unique item outer-column, iterate and create the rows // recursively Iterator it = uniqueItems.iterator(); List newCurRow = new ArrayList(); while (it.hasNext()) { // Iterate over each unique value in the outermost map recurseCreateRow(it.next(), rowMap, rows, newCurRow, newHeadersMap); newCurRow.clear(); } // Now, if there was a sort-column specified, we need to remove the // prefix from the // column header before creating the final set of headers. if (columnToSortColumnsBy >= 0) { String aHeader; int tabIdx; for (int i = 0; i < columnHeaders.size(); i++) { aHeader = columnHeaders.get(i); tabIdx = aHeader.indexOf(sortPrefixSeparator); if (tabIdx >= 0) { columnHeaders.set(i, aHeader.substring(tabIdx + 1)); } } } // Create the final resultset. IPentahoResultSet result = MemoryResultSet.createFromLists(columnHeaders, rows); // System.out.println("*************************After***********************"); // System.out.println(dump(result)); return result; }
From source file:org.pentaho.commons.connection.PentahoDataTransmuter.java
@SuppressWarnings({ "unchecked" }) public static IPentahoResultSet crossTabOrdered(IPentahoResultSet source, int columnToPivot, int measureColumn, int columnToSortColumnsBy, Format pivotDataFormatter, Format sortDataFormatter, boolean orderedMaps, int uniqueRowIdentifierColumn) { // System.out.println("*********************Before********************"); // System.out.println(dump(source)); // First, do some error checking... if (source == null) { throw new IllegalArgumentException(Messages.getString("PentahoDataTransmuter.ERROR_0002_NULL_DATASET")); //$NON-NLS-1$ }/*from w ww.j a v a 2s . c om*/ int sourceColumnCount = source.getColumnCount(); if (columnToPivot > sourceColumnCount) { throw new IllegalArgumentException( Messages.getString("PentahoDataTransmuter.ERROR_0003_INVALID_PIVOT_COLUMN")); //$NON-NLS-1$ } if (measureColumn > sourceColumnCount) { throw new IllegalArgumentException( Messages.getString("PentahoDataTransmuter.ERROR_0004_INVALID_MEASURE_COLUMN")); //$NON-NLS-1$ } if (columnToSortColumnsBy > sourceColumnCount) { throw new IllegalArgumentException( Messages.getString("PentahoDataTransmuter.ERROR_0005_INVALID_SORT_COLUMN")); //$NON-NLS-1$ } if (uniqueRowIdentifierColumn > sourceColumnCount) { throw new IllegalArgumentException( Messages.getString("PentahoDataTransmuter.ERROR_0008_INVALID_UNIQUE_COLUMN")); //$NON-NLS-1$ } // Now, setup so variables and such final String sortPrefixSeparator = "\t"; //$NON-NLS-1$ Map newHeadersMap = null; Map uniqueColumnIdentifierMap = null; if (uniqueRowIdentifierColumn >= 0) { uniqueColumnIdentifierMap = new HashMap(); } int uniqueRowIdentifierColumnPostShift = -1; // Force orderedMaps to true if we're sorting using a column in the // input. // See assumption 'c' in the comment-block above. if (columnToSortColumnsBy >= 0) { orderedMaps = true; } if (orderedMaps) { // If we're using ordered maps, then our maps become TreeMaps. 
newHeadersMap = new TreeMap(); // New header columns map } else { // Use Apache ListOrderedMap so that columns become ordered by their // position in the data. newHeadersMap = ListOrderedMap.decorate(new HashMap()); } List columnHeaders = new ArrayList(); // All column headers // Create column headers of the known columns IPentahoMetaData origMetaData = source.getMetaData(); Object[][] origColHeaders = origMetaData.getColumnHeaders(); for (int i = 0; i < origColHeaders[0].length; i++) { if ((i != columnToPivot) && (i != measureColumn) && ((i != columnToSortColumnsBy))) { columnHeaders.add(origColHeaders[0][i].toString()); if (i == uniqueRowIdentifierColumn) { uniqueRowIdentifierColumnPostShift = columnHeaders.size() - 1; } } } int baseColumnsCount = columnHeaders.size(); // Now, we have the starting column headers. Time to start iterating // over the data. Object colPivotData, colMeasureData, colToSortByRaw; Object[] rowData = source.next(); String columnPrefix = null; /* * First, find out what the new columns will be - this will traverse the dataset gathering the unique values for the * column containing the values that will become the new columns. 
*/ Map newColumnHeadersRaw = new HashMap(); Integer placeHolder = new Integer(0); while (rowData != null) { colPivotData = rowData[columnToPivot]; // The data we're pivoting to columns if (colPivotData == null) { throw new IllegalArgumentException( Messages.getString("PentahoDataTransmuter.ERROR_0006_CANNOT_PIVOT_NULL_DATA")); //$NON-NLS-1$ } // newColumnHeadersRaw.add(colPivotData); if (columnToSortColumnsBy >= 0) { colToSortByRaw = rowData[columnToSortColumnsBy]; if (colToSortByRaw == null) { throw new IllegalArgumentException( Messages.getString("PentahoDataTransmuter.ERROR_0007_CANNOT_SORT_NULL_DATA")); //$NON-NLS-1$ } if (sortDataFormatter != null) { columnPrefix = sortDataFormatter.format(colToSortByRaw); } else { columnPrefix = colToSortByRaw.toString(); } } if (!newColumnHeadersRaw.containsKey(colPivotData)) { newColumnHeadersRaw.put(colPivotData, placeHolder); // Do column sorting based on another input column. String formattedPivotData = formatPivotData(colPivotData, pivotDataFormatter); if (columnToSortColumnsBy >= 0) { formattedPivotData = columnPrefix + sortPrefixSeparator + formattedPivotData; } newHeadersMap.put(formattedPivotData, colPivotData); } rowData = source.next(); } source.beforeFirst(); // Now, we have all the new headers. Next, update the rawHeaders with the // target column number. Iterator it = newHeadersMap.entrySet().iterator(); int columnIndex = columnHeaders.size(); // start adding columns where the fixed columns leave off. while (it.hasNext()) { Map.Entry me = (Map.Entry) it.next(); newColumnHeadersRaw.put(me.getValue(), new Integer(columnIndex)); columnHeaders.add(formatPivotData(me.getValue(), pivotDataFormatter)); columnIndex++; } // OK - we now know the new column headers, and the place they'll // appear in all the rows. Now, it's time to construct each row. 
int columnCount = columnHeaders.size(); int rowPos; MemoryResultSet mrs = new MemoryResultSet(); MemoryMetaData md = new MemoryMetaData(columnHeaders); mrs.setMetaData(md); Object[] thisRow = new Object[baseColumnsCount]; Object[] currentRow = new Object[columnCount]; rowData = source.next(); boolean isFirstRow = true; while (rowData != null) { colMeasureData = rowData[measureColumn]; // The value data we're rowPos = 0; for (int i = 0; i < rowData.length; i++) { if ((i != columnToPivot) && (i != measureColumn) && (i != columnToSortColumnsBy)) { // This is data - put it in the correct spot in the row thisRow[rowPos] = rowData[i]; rowPos++; } } // OK - we got the base data. Is this a new row, or a continuation // of the previous row. boolean newRow = true; Object uniqueRowIdentifierValue = null; Integer previousRowNumber = null; // First, did they provide us with a hint. if (uniqueRowIdentifierColumn >= 0) { uniqueRowIdentifierValue = rowData[uniqueRowIdentifierColumn] != null ? rowData[uniqueRowIdentifierColumn] : "_NULL_VALUE_"; //$NON-NLS-1$ previousRowNumber = (Integer) uniqueColumnIdentifierMap.get(uniqueRowIdentifierValue); if (previousRowNumber != null) { addIfNeeded(currentRow, mrs, uniqueColumnIdentifierMap, uniqueRowIdentifierColumnPostShift); currentRow = mrs.getDataRow(previousRowNumber.intValue()); newRow = false; } } newRow = (newRow && !isFirstRow && isNewRow(thisRow, currentRow)); if (newRow) { addIfNeeded(currentRow, mrs, uniqueColumnIdentifierMap, uniqueRowIdentifierColumnPostShift); // Create new current row - the row inprogress. currentRow = new Object[columnCount]; // Now, copy thisRow to currentRow. 
System.arraycopy(thisRow, 0, currentRow, 0, thisRow.length); } else if (isFirstRow) { System.arraycopy(thisRow, 0, currentRow, 0, thisRow.length); } isFirstRow = false; colPivotData = rowData[columnToPivot]; // The data we're pivoting to columns Integer targetColumn = (Integer) newColumnHeadersRaw.get(colPivotData); currentRow[targetColumn.intValue()] = colMeasureData; // Get next row rowData = source.next(); } addIfNeeded(currentRow, mrs, uniqueColumnIdentifierMap, uniqueRowIdentifierColumnPostShift); // System.out.println("*************************After***********************"); // System.out.println(dump(mrs)); return mrs; }