List of usage examples for org.apache.mahout.common.Parameters — constructor:
public Parameters()
From source file:com.cg.mapreduce.fpgrowth.mahout.fpm.FPGrowthDriver.java
License:Apache License
/** * Run TopK FPGrowth given the input file, *//* www. ja v a 2 s. c o m*/ @Override public int run(String[] args) throws Exception { addInputOption(); addOutputOption(); addOption("minSupport", "s", "(Optional) The minimum number of times a co-occurrence must be present." + " Default Value: 3", "3"); addOption("maxHeapSize", "k", "(Optional) Maximum Heap Size k, to denote the requirement to mine top K items." + " Default value: 50", "50"); addOption("numGroups", "g", "(Optional) Number of groups the features should be divided in the map-reduce version." + " Doesn't work in sequential version Default Value:" + PFPGrowth.NUM_GROUPS_DEFAULT, Integer.toString(PFPGrowth.NUM_GROUPS_DEFAULT)); addOption("splitterPattern", "regex", "Regular Expression pattern used to split given string transaction into" + " itemsets. Default value splits comma separated itemsets. Default Value:" + " \"[ ,\\t]*[,|\\t][ ,\\t]*\" ", "[ ,\t]*[,|\t][ ,\t]*"); addOption("numTreeCacheEntries", "tc", "(Optional) Number of entries in the tree cache to prevent duplicate" + " tree building. (Warning) a first level conditional FP-Tree might consume a lot of memory, " + "so keep this value small, but big enough to prevent duplicate tree building. " + "Default Value:5 Recommended Values: [5-10]", "5"); addOption("method", "method", "Method of processing: sequential|mapreduce", "sequential"); addOption("encoding", "e", "(Optional) The file encoding. 
Default value: UTF-8", "UTF-8"); addFlag("useFPG2", "2", "Use an alternate FPG implementation"); if (parseArguments(args) == null) { return -1; } Parameters params = new Parameters(); if (hasOption("minSupport")) { String minSupportString = getOption("minSupport"); params.set("minSupport", minSupportString); } if (hasOption("maxHeapSize")) { String maxHeapSizeString = getOption("maxHeapSize"); params.set("maxHeapSize", maxHeapSizeString); } if (hasOption("numGroups")) { String numGroupsString = getOption("numGroups"); params.set("numGroups", numGroupsString); } if (hasOption("numTreeCacheEntries")) { String numTreeCacheString = getOption("numTreeCacheEntries"); params.set("treeCacheSize", numTreeCacheString); } if (hasOption("splitterPattern")) { String patternString = getOption("splitterPattern"); params.set("splitPattern", patternString); } String encoding = "UTF-8"; if (hasOption("encoding")) { encoding = getOption("encoding"); } params.set("encoding", encoding); if (hasOption("useFPG2")) { params.set(PFPGrowth.USE_FPG2, "true"); } Path inputDir = getInputPath(); Path outputDir = getOutputPath(); params.set("input", inputDir.toString()); params.set("output", outputDir.toString()); String classificationMethod = getOption("method"); if ("sequential".equalsIgnoreCase(classificationMethod)) { runFPGrowth(params); } else if ("mapreduce".equalsIgnoreCase(classificationMethod)) { Configuration conf = new Configuration(); HadoopUtil.delete(conf, outputDir); PFPGrowth.runPFPGrowth(params); } return 0; }
From source file:com.cg.mapreduce.myfpgrowth.FPGrowthDriver.java
License:Apache License
/** * Run TopK FPGrowth given the input file, *///w ww . ja v a 2s .co m @Override public int run(String[] args) throws Exception { addInputOption(); addOutputOption(); addOption("minSupport", "s", "(Optional) The minimum number of times a co-occurrence must be present." + " Default Value: 3", "3"); addOption("maxHeapSize", "k", "(Optional) Maximum Heap Size k, to denote the requirement to mine top K items." + " Default value: 50", "50"); addOption("numGroups", "g", "(Optional) Number of groups the features should be divided in the map-reduce version." + " Doesn't work in sequential version Default Value:" + PFPGrowth.NUM_GROUPS_DEFAULT, Integer.toString(PFPGrowth.NUM_GROUPS_DEFAULT)); addOption("splitterPattern", "regex", "Regular Expression pattern used to split given string transaction into" + " itemsets. Default value splits comma separated itemsets. Default Value:" + " \"[ ,\\t]*[,|\\t][ ,\\t]*\" ", "[ ,\t]*[,|\t][ ,\t]*"); addOption("numTreeCacheEntries", "tc", "(Optional) Number of entries in the tree cache to prevent duplicate" + " tree building. (Warning) a first level conditional FP-Tree might consume a lot of memory, " + "so keep this value small, but big enough to prevent duplicate tree building. " + "Default Value:5 Recommended Values: [5-10]", "5"); addOption("method", "method", "Method of processing: sequential|mapreduce", "sequential"); addOption("encoding", "e", "(Optional) The file encoding. 
Default value: UTF-8", "UTF-8"); addFlag("useFPG2", "2", "Use an alternate FPG implementation"); if (parseArguments(args) == null) { return -1; } Parameters params = new Parameters(); if (hasOption("minSupport")) { String minSupportString = getOption("minSupport"); params.set("minSupport", minSupportString); } if (hasOption("maxHeapSize")) { String maxHeapSizeString = getOption("maxHeapSize"); params.set("maxHeapSize", maxHeapSizeString); } if (hasOption("numGroups")) { String numGroupsString = getOption("numGroups"); params.set("numGroups", numGroupsString); } if (hasOption("numTreeCacheEntries")) { String numTreeCacheString = getOption("numTreeCacheEntries"); params.set("treeCacheSize", numTreeCacheString); } if (hasOption("splitterPattern")) { String patternString = getOption("splitterPattern"); params.set("splitPattern", patternString); } String encoding = "UTF-8"; if (hasOption("encoding")) { encoding = getOption("encoding"); } params.set("encoding", encoding); if (hasOption("useFPG2")) { params.set(PFPGrowth.USE_FPG2, "true"); } Path inputDir = getInputPath(); Path outputDir = getOutputPath(); params.set("input", inputDir.toString()); params.set("output", outputDir.toString()); Configuration conf = new Configuration(); HadoopUtil.delete(conf, outputDir); PFPGrowth.runPFPGrowth(params); return 0; }
From source file:com.skp.experiment.fpm.pfpgrowth.FPGrowthDriver.java
License:Apache License
/** * Run TopK FPGrowth given the input file, *//*w w w. j av a2s. c o m*/ @Override public int run(String[] args) throws Exception { addInputOption(); addOutputOption(); addOption("minSupport", "s", "(Optional) The minimum number of times a co-occurrence must be present." + " Default Value: 3", "3"); addOption("maxHeapSize", "k", "(Optional) Maximum Heap Size k, to denote the requirement to mine top K items." + " Default value: 50", "50"); addOption("numGroups", "g", "(Optional) Number of groups the features should be divided in the map-reduce version." + " Doesn't work in sequential version Default Value:" + PFPGrowth.NUM_GROUPS_DEFAULT, Integer.toString(PFPGrowth.NUM_GROUPS_DEFAULT)); addOption("splitterPattern", "regex", "Regular Expression pattern used to split given string transaction into" + " itemsets. Default value splits comma separated itemsets. Default Value:" + " \"[ ,\\t]*[,|\\t][ ,\\t]*\" ", "[ ,\t]*[,|\t][ ,\t]*"); addOption("numTreeCacheEntries", "tc", "(Optional) Number of entries in the tree cache to prevent duplicate" + " tree building. (Warning) a first level conditional FP-Tree might consume a lot of memory, " + "so keep this value small, but big enough to prevent duplicate tree building. " + "Default Value:5 Recommended Values: [5-10]", "5"); addOption("method", "method", "Method of processing: sequential|mapreduce", "sequential"); addOption("encoding", "e", "(Optional) The file encoding. 
Default value: UTF-8", "UTF-8"); addFlag("useFPG2", "2", "Use an alternate FPG implementation"); if (parseArguments(args) == null) { return -1; } Parameters params = new Parameters(); if (hasOption("minSupport")) { String minSupportString = getOption("minSupport"); params.set("minSupport", minSupportString); } if (hasOption("maxHeapSize")) { String maxHeapSizeString = getOption("maxHeapSize"); params.set("maxHeapSize", maxHeapSizeString); } if (hasOption("numGroups")) { String numGroupsString = getOption("numGroups"); params.set("numGroups", numGroupsString); } if (hasOption("numTreeCacheEntries")) { String numTreeCacheString = getOption("numTreeCacheEntries"); params.set("treeCacheSize", numTreeCacheString); } if (hasOption("splitterPattern")) { String patternString = getOption("splitterPattern"); params.set("splitPattern", patternString); } String encoding = "UTF-8"; if (hasOption("encoding")) { encoding = getOption("encoding"); } params.set("encoding", encoding); if (hasOption("useFPG2")) { params.set(PFPGrowth.USE_FPG2, "true"); } Path inputDir = getInputPath(); Path outputDir = getOutputPath(); params.set("input", inputDir.toString()); params.set("output", outputDir.toString()); String classificationMethod = getOption("method"); if ("sequential".equalsIgnoreCase(classificationMethod)) { runFPGrowth(params); } else if ("mapreduce".equalsIgnoreCase(classificationMethod)) { //Configuration conf = new Configuration(); HadoopUtil.delete(getConf(), outputDir); PFPGrowth.runPFPGrowth(getConf(), params); } return 0; }
From source file:de.mpii.fsm.driver.FsmDriver.java
License:Apache License
/** * (non-Javadoc)/* w ww . j av a2 s. c om*/ * @see org.apache.hadoop.util.Tool#run(java.lang.String[]) * * Add the appropriate options here. Execute the MG-FSM algorithm * according to the parameters specified at run time. * * @param String[] args * @return int */ @Override public int run(String[] args) throws Exception { /* Here parameters that will be available to the user * during run time are specified and intialized. */ /* Hadooop-config options */ addOutputOption(); /*User-interesting options*/ addOption("input", "i", "(Optional) Specify the path from where the input is to be read" + "\n NOTE: This option can not be used with -(r)esume option.", null); addOption("support", "s", "(Optional) Minimum support (sigma) " + "\nDefault Value: 1\n", FsmConfig.SIGMA_DEFAULT_STRING); addOption("gamma", "g", "(Optional) Maximum allowed for mining frequent sequences (gamma)" + " by MG-FSM " + "\nDefault Value: 2\n", FsmConfig.GAMMA_DEFAULT_STRING); addOption("lambda", "l", "(Optional) Maximum length for mining frequent sequences (lambda)" + "\nDefault Value: 5\n", FsmConfig.LAMBDA_DEFAULT_STRING); addOption("execMode", "m", "Method of execution viz. s -(s)equential or d -(d)istributed" + "\nDefault Value: (s)-sequential\n", FsmConfig.DEFAULT_EXEC_MODE); addOption("type", "t", "(Optional) Specify the mining mode." + "\nExpected values for input:" + "\n1. a -(a)ll\n2. m -(m)aximal \n3. c -(c)losed" + "\nDefault Value : a -(a)ll\n", FsmConfig.DEFAULT_TYPE); /* keepFiles default value is null. * It will be set to a temporary location, in case * no path is specified.*/ addOption("keepFiles", "k", "(Optional) Keep the intermediary files " + "for later use or runs. The files stored are:" + "\n1. Dictionary \n2. 
Encoded Sequences \n " + "Specify the intermediate path where to keep these files :", null); /* resume points to the location where the * intermediary files are located*/ addOption("resume", "r", "(Optional) Resume running further " + "runs of the MG-FSM algorithm on" + " already encoded transaction file located in the folder specified in input.\n", null); /*Developer-interesting options*/ addOption("partitionSize", "p", "(Optional) Explicitly specify the partition size." + "\nDefault Value: 10000", FsmConfig.DEFAULT_PARTITION_SIZE); addOption("indexing", "id", "(Optional) Specify the indexing mode." + "\nExpected values for input:" + "\n1. none\n2. minmax \n3. full" + "\nDefault Value : full\n", FsmConfig.DEFAULT_INDEXING_METHOD); /* split flag is false by default*/ addFlag("split", "sp", "(Optional) Explicitly specify " + "whether or not to allow split by setting this flag."); addOption("numReducers", "N", "(Optional) Number of reducers to be used by MG-FSM. Default value: 90 ", "90"); /*------------------------------------------------------------ * ERROR CHECKS *------------------------------------------------------------*/ /* Parse the arguments received from * the user during run-time.*/ if (parseArguments(args) == null) { System.out.println("\n------------\n" + " E R R O R " + "\n------------\n"); System.out.println("One of the mandatory options is NOT specified"); System.out.println("e.g. the input option MUST be specified."); //Return a non-zero exit status to indicate failure return 1; } Parameters params = new Parameters(); if (hasOption("tempDir")) { String tempDirPath = getOption("tempDir"); params.set("tempDir", tempDirPath); } if (hasOption("input")) { String inputString = getOption("input"); params.set("input", inputString); } else { params.set("input", null); } if (hasOption("support")) { String supportString = getOption("support"); /* * Checks & constraints on the value that can * be assigned to support, gamma, & lambda. 
* * NOTE: refer [1] */ if (Integer.parseInt(supportString) < 1) { System.out.println("Value of support should be greater than or equal to 1"); //Return a non-zero exit status to indicate failure return (1); } params.set("support", supportString); } if (hasOption("gamma")) { String gammaString = getOption("gamma"); if (Integer.parseInt(gammaString) < 0) { System.out.println("Value of gap should be greater than or equal to 0"); //Return a non-zero exit status to indicate failure return (1); } params.set("gamma", gammaString); } if (hasOption("lambda")) { String lambdaString = getOption("lambda"); if (Integer.parseInt(lambdaString) < 2) { System.out.println("Value of length should be greater than or equal to 2"); //Return a non-zero exit status to indicate failure return (1); } params.set("lambda", lambdaString); } if (hasOption("execMode")) { String modeString = getOption("execMode"); params.set("execMode", modeString); } if (hasOption("type")) { String modeString = getOption("type"); params.set("type", modeString); } if (hasOption("indexing")) { String indexingString = getOption("indexing"); params.set("indexing", indexingString); } if (hasOption("partitionSize")) { String partitionString = getOption("partitionSize"); params.set("partitionSize", partitionString); } if (hasOption("split")) { params.set("split", "true"); } else { params.set("split", "false"); } if (hasOption("keepFiles")) { String keepFilesString = getOption("keepFiles"); params.set("keepFiles", keepFilesString); } else { params.set("keepFiles", null); } if (hasOption("resume")) { String resumeString = getOption("resume"); params.set("resume", resumeString); } else { params.set("resume", null); } if (hasOption("numReducers")) { String numReducersString = getOption("numReducers"); params.set("numReducers", numReducersString); } else { params.set("numReducers", null); } Path inputDir = null; Path outputDir = getOutputPath(); /* --------------------------------------------------------------------- * 
ERROR CHECKS ON COMBINATION OF OPTIONS SUPPLIED TO THE DRIVER * --------------------------------------------------------------------*/ //Complain if the '-(t)ype' is equal to '-(m)aximal' or '-(c)losed' and //the 'tempDir' is not specified /*if((params.get("tempDir")==null||params.get("tempDir").contentEquals("temp"))&& ((params.get("type").toCharArray()[0]=='m')||(params.get("type").toCharArray()[0]=='c'))){ System.out .println("If -(t)ype is -(m)aximal or -(c)losed then a -tempDir path must be specified"); }*/ if ((params.get("resume") != null) && (params.get("keepFiles") != null)) { System.out.println("-(r)esume & -(k)eepFiles are mutually exclusive options"); System.out.println("Exiting..."); //Return a non-zero exit status to indicate failure return (1); } if ((params.get("input") != null) && (params.get("resume") != null)) { System.out.println("-(r)esume & -(i)nput are mutually exclusive options"); System.out.println("Exiting..."); //Return a non-zero exit status to indicate failure return (1); } if ((params.get("input") == null) && (params.get("resume") == null)) { System.out.println("At least one option from -(i)nput or -(r)esume must be specified"); System.out.println("Exiting..."); //Return a non-zero exit status to indicate failure return (1); } else { if (params.get("input") != null) { inputDir = new Path(params.get("input")); } else { inputDir = new Path(params.get("resume")); } } /* --------------------------------------------------------------------- * Checks to make sure the i/o paths * exist and are consistent. 
* -------------------------------------------------------------------- */ Configuration conf = new Configuration(); FileSystem fs = FileSystem.get(conf); //If the output paths exist clean them up if (fs.exists(outputDir)) { System.out.println("Deleting existing output path"); fs.delete(outputDir, true); } //Create the necessary output paths afresh now fs.mkdirs(outputDir); //Complain if the input path doesn't exist if (!fs.exists(inputDir)) { System.out.println("\n------------\n" + " E R R O R " + "\n------------\n"); System.out.println("Input path does not exist OR input option not specified. Exiting..."); //Return a non-zero exit status to indicate failure return (1); } if (inputDir.toString().compareTo(outputDir.toString()) == 0) { System.out.println("\n------------\n" + " E R R O R " + "\n------------\n"); System.out.println("The input and output path can NOT be same." + "\nThe output path is deleted prior to running the Hadoop jobs." + "\nHence, the input would be also deleted if paths are same." + "\nExiting..."); //Return a non-zero exit status to indicate failure return (1); } params.set("input", inputDir.toString()); params.set("output", outputDir.toString()); /*--------------------------------------------------------------------- * END OF ERROR CHECKS * --------------------------------------------------------------------*/ /* Execute the FSM Job depending upon the parameters specified. */ String executionMethod = getOption("execMode"); //Set the resume and keepFiles flags in the commonConfig. //Also, set the intermediateOutput path accordingly. 
if (params.get("resume") != null) commonConfig.setResumeOption(true); else commonConfig.setResumeOption(false); if (params.get("keepFiles") != null) { commonConfig.setKeepFilesOption(true); Path intermediateDir = new Path(params.get("keepFiles")); if (fs.exists(intermediateDir)) { fs.delete(intermediateDir, true); } commonConfig.setIntermediatePath(params.get("keepFiles")); } else { File intermediateOutputPath = File.createTempFile("MG_FSM_INTRM_OP_", ""); //Below JDK 7 we are only allowed to create temporary files. //Hence, turn the file into a directory in temporary folder. intermediateOutputPath.delete(); intermediateOutputPath.mkdir(); commonConfig.setIntermediatePath(intermediateOutputPath.getAbsolutePath().toString()); System.out.println("The intermediate output will be written \n" + "to this temporary path :" + intermediateOutputPath); commonConfig.setKeepFilesOption(false); } //Set the 'tempDir' if its null if (params.get("tempDir") == null || params.get("tempDir").contentEquals("temp")) { File tempOutputPath = File.createTempFile("MG_FSM_TEMP_OP_", ""); tempOutputPath.delete(); //tempOutputPath.mkdir(); commonConfig.setTmpPath(tempOutputPath.getAbsolutePath().toString()); System.out.println("The temporary output associated with the internal map -reduce\n" + "jobs will be written to this temporary path :" + commonConfig.getTmpPath()); } else { commonConfig.setTmpPath(params.get("tempDir")); } //Set the input and output paths of the commonConfig commonConfig.setInputPath(params.get("input")); commonConfig.setOutputPath(params.get("output")); commonConfig.setDictionaryPath( commonConfig.getIntermediatePath().concat("/" + Constants.OUTPUT_DICTIONARY_FILE_PATH)); //Supply the rest of the algorithm specific options to commonConfig commonConfig.setSigma(Integer.parseInt(params.get("support"))); commonConfig.setGamma(Integer.parseInt(params.get("gamma"))); commonConfig.setLambda(Integer.parseInt(params.get("lambda"))); 
commonConfig.setPartitionSize(Long.parseLong(params.get("partitionSize"))); commonConfig.setAllowSplits(Boolean.parseBoolean(params.get("splits"))); if (params.get("numReducers") != null) { commonConfig.setNumberOfReducers(Integer.parseInt(params.get("numReducers"))); } switch (params.get("type").toCharArray()[0]) { case 'a': { commonConfig.setType(FsmConfig.Type.ALL); break; } case 'm': { commonConfig.setType(FsmConfig.Type.MAXIMAL); break; } case 'c': { commonConfig.setType(FsmConfig.Type.CLOSED); break; } default: { commonConfig.setType(FsmConfig.Type.ALL); break; } } switch (params.get("indexing").toCharArray()[0]) { case 'n': { commonConfig.setIndexingMethod(FsmConfig.IndexingMethod.NONE); break; } case 'm': { commonConfig.setIndexingMethod(FsmConfig.IndexingMethod.MINMAX); break; } case 'f': { commonConfig.setIndexingMethod(FsmConfig.IndexingMethod.FULL); break; } default: { commonConfig.setIndexingMethod(FsmConfig.IndexingMethod.FULL); break; } } //SEQUENTIAL EXECUTION MODE if ("s".equalsIgnoreCase(executionMethod)) { SequentialMode mySequentialMiner; mySequentialMiner = new SequentialMode(commonConfig); // If we are dealing with a fresh set of transactions // we need to do encode & then mine. if (!commonConfig.isResumeOption()) { mySequentialMiner.createDictionary(commonConfig.getInputPath()); mySequentialMiner.createIdToItemMap(); //If the input path is a corpus //runSeqJob will recursively call encodeAndMine() //on all the files to bring together a encoded sequences file //and consequently call the sequences miner on each of these //encoded sequences mySequentialMiner.runSeqJob(new File(commonConfig.getInputPath())); } /* * If the transactions are encoded from previous runs, then run * the following set of functions for reading the encoded transactions * and then directly mine them for frequent sequences. 
*/ else { mySequentialMiner.setIdToItemMap(new Dictionary().readDictionary( commonConfig.getInputPath().concat("/" + Constants.OUTPUT_DICTIONARY_FILE_PATH))); mySequentialMiner.encodeAndMine(mySequentialMiner.getCommonConfig().getInputPath()); } } //DISTRIBUTED EXECUTION MODE else if ("d".equalsIgnoreCase(executionMethod)) { DistributedMode myDistributedMiner = new DistributedMode(commonConfig); /*Execute the appropriate job based on whether we need to * encode the input sequences or not. */ if (!commonConfig.isResumeOption()) myDistributedMiner.runJobs(); else myDistributedMiner.resumeJobs(); } //END OF EXECUTING FSM JOB //Return a zero exit status to indicate successful completion return 0; }
From source file:it.polito.dbdmg.searum.Searum.java
License:Apache License
/**
 * CLI entry point for SEARUM: reads the positional arguments, fills a
 * Mahout {@link Parameters} bag and launches the PFPGrowth-based
 * association-rule mining pipeline via {@code ARM.runPFPGrowth}.
 */
public static void main(String[] args) throws ClassNotFoundException, IOException, InterruptedException {
    if (args.length < ARG_LEN) {
        System.err.println(
            "Usage: Searum <input_file> <output_directory> <discretize (true|false)> <min_sup (0.0|1.0)> [<min_confidence (0.0|1.0)>]");
        System.exit(-1);
    }
    /* Setting Parameters */
    // NOTE(review): values are read from args[1]..args[5], while the usage
    // string starts at <input_file>; presumably args[0] carries a command or
    // job name — confirm against the caller.
    String input = args[1];
    String output = args[2];
    Integer enableDiscretization = (args[3].equals("true")) ? 1 : 0;
    Integer enableRules;
    // Double.valueOf replaces the deprecated `new Double(String)` constructor.
    Double minSupport = Double.valueOf(args[4]);
    Double minConfidence = null;
    // NOTE(review): looks like leftover debug output — kept for output
    // compatibility; consider removing.
    System.err.println(ARG_LEN);
    if (args.length == (ARG_LEN + 1)) {
        // Extra argument present: rule generation with a confidence threshold.
        enableRules = Integer.valueOf(1);
        minConfidence = Double.valueOf(args[5]);
    } else {
        enableRules = Integer.valueOf(0);
    }
    String splitPattern = "[\\ ]";

    Parameters params = new Parameters();
    params.set("minSupport", minSupport.toString());
    if (enableRules.intValue() == 1) {
        params.set("minConfidence", minConfidence.toString());
    }
    params.set("splitPattern", splitPattern);
    params.set("input", input);
    params.set("output", output);
    params.set("enableDiscretization", enableDiscretization.toString());
    params.set("enableRules", enableRules.toString());
    params.set("maxHeapSize", maxHeapSize.toString());
    params.set("numGroups", numGroups.toString());

    log.info("========================| SEARUM |=======================");
    log.info("=== A cloud-based Service for Association RUle Mining ===");
    log.info("============== Developed by Luigi Grimaudo ==============");
    log.info("Input file: " + input);
    log.info("Output directory: " + output);
    log.info("MinSupp: " + minSupport.toString());
    if (enableRules.intValue() == 1) {
        log.info("MinConf: " + minConfidence.toString());
    }
    ARM.runPFPGrowth(params);
}
From source file:tv.icntv.grade.film.recommend.CorrelateJob.java
License:Apache License
private Parameters getParameter(String strings) { Parameters parameters = new Parameters(); String[] values = strings.split("--"); for (String v : values) { String[] kvs = v.split("="); if (null == kvs || kvs.length != 2) { continue; }// w w w .jav a2 s . co m parameters.set(kvs[0], kvs[1]); } return parameters; //To change body of created methods use File | Settings | File Templates. }
From source file:tv.icntv.recommend.algorithm.CorrelateJob.java
License:Apache License
private Parameters getParameter(String strings) { Parameters parameters = new Parameters(); String[] values = strings.split(split); for (String v : values) { String[] kvs = v.split("="); if (null == kvs || kvs.length != 2) { continue; }//from w w w. j a va2 s . co m parameters.set(kvs[0], kvs[1]); } return parameters; }