Usage examples for org.apache.commons.math.stat.descriptive.DescriptiveStatistics: the DescriptiveStatistics() constructor
public DescriptiveStatistics()
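Before the project examples below, a minimal self-contained sketch of the no-argument constructor (the class name Example and the sample values are mine, purely illustrative):

import org.apache.commons.math.stat.descriptive.DescriptiveStatistics;

public class Example {
    public static void main(String[] args) {
        // Unbounded storage: every added value is kept until clear() is called.
        DescriptiveStatistics stats = new DescriptiveStatistics();
        for (double v : new double[] { 2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0 }) {
            stats.addValue(v);
        }
        System.out.println(stats.getMean());              // 5.0
        System.out.println(stats.getStandardDeviation()); // sample standard deviation
        System.out.println(stats.getPercentile(50));      // median
    }
}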
From source file:fantail.algorithms.RankingWithBinaryPCT.java
private double getMedian(Instances data, int attIndex) throws Exception {
    DescriptiveStatistics stats = new DescriptiveStatistics();
    for (int i = 0; i < data.numInstances(); i++) {
        Instance inst = (Instance) data.instance(i);
        stats.addValue(inst.value(attIndex));
    }
    double median = stats.getPercentile(50);
    return median;
}
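getPercentile(50) is the idiomatic Commons Math median. Worth knowing: for an even number of values the default percentile estimation interpolates between the two middle values. A quick illustrative check (the values are hypothetical):

DescriptiveStatistics stats = new DescriptiveStatistics();
for (double v : new double[] { 1.0, 2.0, 3.0, 4.0 }) {
    stats.addValue(v);
}
// The default estimation interpolates, so this prints 2.5 rather than 2.0 or 3.0.
System.out.println(stats.getPercentile(50));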
From source file:de.tudarmstadt.ukp.experiments.argumentation.sequence.feature.coreference.CoreferenceFeatures.java
@Override
protected List<Feature> extract(JCas jCas, Sentence sentence, String sentencePrefix)
        throws TextClassificationException {
    List<List<CoreferenceLink>> coreferenceChains = extractCoreferenceChains(jCas);

    FrequencyDistribution<String> featuresAcrossAllChains = new FrequencyDistribution<>();
    DescriptiveStatistics chainLength = new DescriptiveStatistics();
    DescriptiveStatistics distanceToPreviousSentence = new DescriptiveStatistics();
    DescriptiveStatistics distanceToNextSentence = new DescriptiveStatistics();
    DescriptiveStatistics interSentencesCorLinks = new DescriptiveStatistics();

    for (List<CoreferenceLink> chain : coreferenceChains) {
        SortedMap<Integer, List<CoreferenceLink>> sentencesAndLinks =
                extractSentencesAndLinksFromChain(chain, jCas);
        int currentSentencePos = getCurrentSentencePos(jCas, sentence);

        log.debug(sentencesAndLinks.keySet() + ", current " + currentSentencePos);

        // is the sentence part of a chain that spans multiple sentences?
        boolean partOfChain = sentencesAndLinks.containsKey(currentSentencePos)
                && sentencesAndLinks.size() > 1;

        if (partOfChain) {
            log.debug(chainToString(chain));
            featuresAcrossAllChains.inc(FN_PART_OF_CHAIN);

            // starts the chain?
            if (sentencesAndLinks.firstKey().equals(currentSentencePos)) {
                featuresAcrossAllChains.inc(FN_STARTS_THE_CHAIN);
            } else if (sentencesAndLinks.lastKey().equals(currentSentencePos)) {
                // ends the chain?
                featuresAcrossAllChains.inc(FN_ENDS_THE_CHAIN);
            } else {
                // in the middle of the chain
                featuresAcrossAllChains.inc(FN_IN_THE_MIDDLE_OF_CHAIN);
            }

            // length of the chain
            chainLength.addValue(sentencesAndLinks.size());

            List<CoreferenceLink> currentSentenceLinks = sentencesAndLinks.get(currentSentencePos);
            CoreferenceLink currentSentenceFirstLink = currentSentenceLinks.get(0);
            CoreferenceLink currentSentenceLastLink =
                    currentSentenceLinks.get(currentSentenceLinks.size() - 1);

            // transition from the previous link, i.e. NOMINAL -> PRONOMINAL
            if (!sentencesAndLinks.firstKey().equals(currentSentencePos)) {
                // find the previous sentence in the chain
                List<CoreferenceLink> previousSentenceLinks = null;
                int prevSentNo = currentSentencePos;
                while (previousSentenceLinks == null && prevSentNo >= 0) {
                    prevSentNo--;
                    if (sentencesAndLinks.containsKey(prevSentNo)) {
                        previousSentenceLinks = sentencesAndLinks.get(prevSentNo);
                    }
                }
                if (previousSentenceLinks == null) {
                    throw new IllegalStateException("Oops :))");
                }

                // distance to previous sentence
                distanceToPreviousSentence.addValue(currentSentencePos - prevSentNo);

                // get the last link from the previous sentence
                CoreferenceLink prevSentenceLastLink =
                        previousSentenceLinks.get(previousSentenceLinks.size() - 1);

                // add type - type transition
                String prevSentenceLastLinkReferenceType = prevSentenceLastLink.getReferenceType();
                String currentSentenceFirstLinkReferenceType = currentSentenceFirstLink.getReferenceType();
                String transitionType = prevSentenceLastLinkReferenceType + GLUE
                        + currentSentenceFirstLinkReferenceType;
                featuresAcrossAllChains.addSample(FN_TRANSITION_IN_TYPE_TYPE + transitionType, 1);

                // add type - token transition
                String glueCoreferenceCurrentSentence = glueCoreferenceLinkTokens(currentSentenceFirstLink);
                String typeToken = prevSentenceLastLinkReferenceType + GLUE + glueCoreferenceCurrentSentence;
                featuresAcrossAllChains.addSample(FN_TRANSITION_IN_TYPE_TOKEN + typeToken, 1);

                // add token - type transition
                String glueCoreferencePrevSentence = glueCoreferenceLinkTokens(prevSentenceLastLink);
                String tokenType = glueCoreferencePrevSentence + GLUE + currentSentenceFirstLinkReferenceType;
                featuresAcrossAllChains.addSample(FN_TRANSITION_IN_TOKEN_TYPE + tokenType, 1);

                // add token - token transition
                String tokenToken = glueCoreferencePrevSentence + GLUE + glueCoreferenceCurrentSentence;
                featuresAcrossAllChains.addSample(FN_TRANSITION_IN_TOKEN_TOKEN + tokenToken, 1);

                // exact matching token-token reference?
                if (glueCoreferencePrevSentence.equals(glueCoreferenceCurrentSentence)) {
                    featuresAcrossAllChains.addSample(FN_TRANSITION_IN_TOKEN_TOKEN_MATCH, 1);
                }
            }

            // transition to the next link, i.e. NOMINAL -> PRONOMINAL
            if (!sentencesAndLinks.lastKey().equals(currentSentencePos)) {
                // find the next sentence in the chain
                List<CoreferenceLink> nextSentenceLinks = null;
                int nextSentNo = currentSentencePos;
                while (nextSentenceLinks == null && nextSentNo <= sentencesAndLinks.lastKey()) {
                    nextSentNo++;
                    if (sentencesAndLinks.containsKey(nextSentNo)) {
                        nextSentenceLinks = sentencesAndLinks.get(nextSentNo);
                    }
                }
                if (nextSentenceLinks == null) {
                    throw new IllegalStateException("Oops :))");
                }

                // distance to next sentence
                distanceToNextSentence.addValue(nextSentNo - currentSentencePos);

                // get the first link from the next sentence
                CoreferenceLink nextSentenceFirstLink = nextSentenceLinks.get(0);

                // add type - type transition
                String currentSentenceLastLinkReferenceType = currentSentenceLastLink.getReferenceType();
                String nextSentenceFirstLinkReferenceType = nextSentenceFirstLink.getReferenceType();
                String transitionType = currentSentenceLastLinkReferenceType + GLUE
                        + nextSentenceFirstLinkReferenceType;
                featuresAcrossAllChains.addSample(FN_TRANSITION_OUT_TYPE_TYPE + transitionType, 1);

                // add token - type transition
                String glueCoreferenceCurrentSent = glueCoreferenceLinkTokens(currentSentenceLastLink);
                String typeToken = glueCoreferenceCurrentSent + GLUE + nextSentenceFirstLinkReferenceType;
                featuresAcrossAllChains.addSample(FN_TRANSITION_OUT_TOKEN_TYPE + typeToken, 1);

                // add type - token transition
                String glueCoreferenceNextSent = glueCoreferenceLinkTokens(nextSentenceFirstLink);
                String tokenType = currentSentenceLastLinkReferenceType + GLUE + glueCoreferenceNextSent;
                featuresAcrossAllChains.addSample(FN_TRANSITION_OUT_TYPE_TOKEN + tokenType, 1);

                // add token - token transition
                String tokenToken = glueCoreferenceCurrentSent + GLUE + glueCoreferenceNextSent;
                featuresAcrossAllChains.addSample(FN_TRANSITION_OUT_TOKEN_TOKEN + tokenToken, 1);

                // exact matching token-token reference?
                if (glueCoreferenceNextSent.equals(glueCoreferenceCurrentSent)) {
                    featuresAcrossAllChains.addSample(FN_TRANSITION_OUT_TOKEN_TOKEN_MATCH, 1);
                }
            }
        }

        // number of inter-sentence coreference links
        if (sentencesAndLinks.containsKey(currentSentencePos)) {
            int coreferenceLinks = sentencesAndLinks.get(currentSentencePos).size();
            interSentencesCorLinks.addValue(coreferenceLinks);
        }

        /*
        List<Integer> positions = positionsOfSentenceInCurrentChain(chain, sentence);

        // ok, we're in a chain
        if (!positions.isEmpty()) {
            log.debug(printChain(chain));
            log.debug(sentence.getCoveredText());
            log.debug(positions);
            Integer lastPosition = positions.get(positions.size() - 1);
            Integer firstPosition = positions.get(0);

            if (lastPosition == positions.size() - 1) {
                log.debug("Last sentence of chain");
            }

            log.debug("-----");
        }
        */
    }

    List<Feature> result = new ArrayList<>();

    log.debug(featuresAcrossAllChains);

    if (distanceToNextSentence.getN() > 0) {
        log.debug("Next: " + distanceToNextSentence);

        result.add(new Feature(sentencePrefix + FEATURE_NAME + FN_DIST_TO_NEXT_MIN,
                distanceToNextSentence.getMin()));
        result.add(new Feature(sentencePrefix + FEATURE_NAME + FN_DIST_TO_NEXT_MAX,
                distanceToNextSentence.getMax()));
        result.add(new Feature(sentencePrefix + FEATURE_NAME + FN_DIST_TO_NEXT_AVG,
                distanceToNextSentence.getMean()));
    }

    if (distanceToPreviousSentence.getN() > 0) {
        log.debug("Prev: " + distanceToPreviousSentence);

        result.add(new Feature(sentencePrefix + FEATURE_NAME + FN_DIST_TO_PREV_MIN,
                distanceToPreviousSentence.getMin()));
        result.add(new Feature(sentencePrefix + FEATURE_NAME + FN_DIST_TO_PREV_MAX,
                distanceToPreviousSentence.getMax()));
        result.add(new Feature(sentencePrefix + FEATURE_NAME + FN_DIST_TO_PREV_AVG,
                distanceToPreviousSentence.getMean()));
    }

    if (interSentencesCorLinks.getN() > 0) {
        result.add(new Feature(sentencePrefix + FEATURE_NAME + FN_INTER_SENT_COR_MIN,
                interSentencesCorLinks.getMin()));
        result.add(new Feature(sentencePrefix + FEATURE_NAME + FN_INTER_SENT_COR_MAX,
                interSentencesCorLinks.getMax()));
        result.add(new Feature(sentencePrefix + FEATURE_NAME + FN_INTER_SENT_COR_AVG,
                interSentencesCorLinks.getMean()));
    }

    log.debug("----");

    for (String feat : featuresAcrossAllChains.getKeys()) {
        // binary feature
        result.add(new Feature(sentencePrefix + FEATURE_NAME + feat, 1));
    }

    return result;
}
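The getN() > 0 guards above matter because an empty DescriptiveStatistics does not throw; to the best of my reading of Commons Math, the aggregate accessors simply return NaN when no values have been added. A quick illustrative sketch:

DescriptiveStatistics empty = new DescriptiveStatistics();
System.out.println(empty.getN());    // 0
System.out.println(empty.getMin());  // NaN
System.out.println(empty.getMean()); // NaN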
From source file:com.linkedin.pinot.tools.PinotZKChanger.java
protected void printSegmentAssignment(Map<String, Map<String, String>> mapping) throws Exception {
    StringWriter sw = new StringWriter();
    objectMapper.writerWithDefaultPrettyPrinter().writeValue(sw, mapping);
    LOGGER.info(sw.toString());
    Map<String, List<String>> serverToSegmentMapping = new TreeMap<>();
    for (String segment : mapping.keySet()) {
        Map<String, String> serverToStateMap = mapping.get(segment);
        for (String server : serverToStateMap.keySet()) {
            if (!serverToSegmentMapping.containsKey(server)) {
                serverToSegmentMapping.put(server, new ArrayList<String>());
            }
            serverToSegmentMapping.get(server).add(segment);
        }
    }
    DescriptiveStatistics stats = new DescriptiveStatistics();
    for (String server : serverToSegmentMapping.keySet()) {
        List<String> list = serverToSegmentMapping.get(server);
        LOGGER.info("server " + server + " has " + list.size() + " segments");
        stats.addValue(list.size());
    }
    LOGGER.info("Segment Distribution stat");
    LOGGER.info(stats.toString());
}
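stats.toString() gives a compact multi-line summary; based on my reading of the Commons Math implementation (exact formatting may vary by version), it reports n, min, max, mean, std dev, median, skewness, and kurtosis:

DescriptiveStatistics stats = new DescriptiveStatistics();
for (double v : new double[] { 10, 12, 14 }) {
    stats.addValue(v);
}
// Prints a block roughly like:
//   DescriptiveStatistics:
//   n: 3
//   min: 10.0
//   max: 14.0
//   mean: 12.0
//   std dev: 2.0
//   ...
System.out.println(stats);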
From source file:info.raack.appliancedetection.evaluation.model.EvaluationGroup.java
private void calculateEvaluationMetrics(
        Map<ApplianceEnergyConsumptionDetectionAlgorithm, List<Evaluation>> evaluationInfo) {
    for (ApplianceEnergyConsumptionDetectionAlgorithm algorithm : evaluationInfo.keySet()) {
        // do wattage stats
        DescriptiveStatistics stats = new DescriptiveStatistics();

        List<Evaluation> evaluationList = evaluationInfo.get(algorithm);
        if (evaluationList.size() == 0) {
            throw new IllegalArgumentException(
                    "No evaluations for " + algorithm + " in simulation group " + simulationGroup);
        }
        for (Evaluation evaluation : evaluationList) {
            // calculation produces watts
            stats.addValue((double) (evaluation.getOverallEnergyError())
                    * (3600.0 / (double) (evaluation.getSimulation().getDurationInSeconds())));
        }
        errorMetrics.put(algorithm,
                new Double[] { stats.getMean(), stats.getPercentile(50), stats.getStandardDeviation() });

        // stats = new DescriptiveStatistics();

        evaluationList = evaluationInfo.get(algorithm);
        if (evaluationList.size() == 0) {
            throw new IllegalArgumentException(
                    "No evaluations for " + algorithm + " in simulation group " + simulationGroup);
        }
        for (Evaluation evaluation : evaluationList) {
            stats.addValue((double) (evaluation.getOverallAccuracy()));
        }
        accuracyErrorMetrics.put(algorithm,
                new Double[] { stats.getMean(), stats.getPercentile(50), stats.getStandardDeviation() });

        // stats = new DescriptiveStatistics();

        evaluationList = evaluationInfo.get(algorithm);
        if (evaluationList.size() == 0) {
            throw new IllegalArgumentException(
                    "No evaluations for " + algorithm + " in simulation group " + simulationGroup);
        }
        for (Evaluation evaluation : evaluationList) {
            stats.addValue((double) (evaluation.getStateTransitionAccuracy()));
        }
        stateTransitionAccuracyErrorMetrics.put(algorithm,
                new Double[] { stats.getMean(), stats.getPercentile(50), stats.getStandardDeviation() });

        // stats = new DescriptiveStatistics();

        evaluationList = evaluationInfo.get(algorithm);
        if (evaluationList.size() == 0) {
            throw new IllegalArgumentException(
                    "No evaluations for " + algorithm + " in simulation group " + simulationGroup);
        }
        for (Evaluation evaluation : evaluationList) {
            stats.addValue((double) (evaluation.getStateTransitionRecall()));
        }
        stateTransitionRecallErrorMetrics.put(algorithm,
                new Double[] { stats.getMean(), stats.getPercentile(50), stats.getStandardDeviation() });

        // stats = new DescriptiveStatistics();

        evaluationList = evaluationInfo.get(algorithm);
        if (evaluationList.size() == 0) {
            throw new IllegalArgumentException(
                    "No evaluations for " + algorithm + " in simulation group " + simulationGroup);
        }
        for (Evaluation evaluation : evaluationList) {
            stats.addValue((double) (evaluation.getStateTransitionPrecision()));
        }
        stateTransitionPrecisionErrorMetrics.put(algorithm,
                new Double[] { stats.getMean(), stats.getPercentile(50), stats.getStandardDeviation() });
    }
}
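Note the commented-out re-initializations: as written, each later metric accumulates into the same stats instance on top of the earlier values. If separate distributions per metric are intended (an assumption on my part, not something the source states), DescriptiveStatistics.clear() or a fresh instance between passes would isolate them:

// Sketch: reset the accumulator before the next metric pass
// (assumes cross-metric accumulation above is unintended).
stats.clear(); // discards all previously added values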
From source file:de.tudarmstadt.ukp.experiments.dip.wp1.documents.Step11GoldDataStatistics.java
public static void statistics3(File inputDir, File outputDir) throws IOException {
    PrintWriter pw = new PrintWriter(new FileWriter(new File(outputDir, "stats3.csv")));
    pw.println("qID\tagreementMean\tagreementStdDev\tqueryText");

    // iterate over query containers
    for (File f : FileUtils.listFiles(inputDir, new String[] { "xml" }, false)) {
        QueryResultContainer queryResultContainer = QueryResultContainer
                .fromXML(FileUtils.readFileToString(f, "utf-8"));

        DescriptiveStatistics statistics = new DescriptiveStatistics();
        for (QueryResultContainer.SingleRankedResult rankedResult : queryResultContainer.rankedResults) {
            Double observedAgreement = rankedResult.observedAgreement;
            if (observedAgreement != null) {
                statistics.addValue(observedAgreement);
            }
        }

        pw.printf(Locale.ENGLISH, "%s\t%.3f\t%.3f\t%s%n", queryResultContainer.qID, statistics.getMean(),
                statistics.getStandardDeviation(), queryResultContainer.query);
    }
    pw.close();
}
From source file:com.joliciel.talismane.other.corpus.CorpusStatistics.java
@Override
public void onNextParseConfiguration(ParseConfiguration parseConfiguration, Writer writer) {
    sentenceCount++;
    sentenceLengthStats.addValue(parseConfiguration.getPosTagSequence().size());

    for (PosTaggedToken posTaggedToken : parseConfiguration.getPosTagSequence()) {
        if (posTaggedToken.getTag().equals(PosTag.ROOT_POS_TAG))
            continue;

        Token token = posTaggedToken.getToken();

        String word = token.getOriginalText();
        words.add(word);
        if (referenceWords != null) {
            if (!referenceWords.contains(word))
                unknownTokenCount++;
        }
        if (alphanumeric.matcher(token.getOriginalText()).find()) {
            String lowercase = word.toLowerCase(TalismaneSession.getLocale());
            lowerCaseWords.add(lowercase);
            alphanumericCount++;
            if (referenceLowercaseWords != null) {
                if (!referenceLowercaseWords.contains(lowercase))
                    unknownAlphanumericCount++;
            }
        }

        tokenCount++;

        Integer countObj = posTagCounts.get(posTaggedToken.getTag().getCode());
        int count = countObj == null ? 0 : countObj.intValue();
        count++;
        posTagCounts.put(posTaggedToken.getTag().getCode(), count);
    }

    int maxDepth = 0;
    DescriptiveStatistics avgSyntaxDepthForSentenceStats = new DescriptiveStatistics();

    for (DependencyArc arc : parseConfiguration.getDependencies()) {
        Integer countObj = depLabelCounts.get(arc.getLabel());
        int count = countObj == null ? 0 : countObj.intValue();
        count++;
        depLabelCounts.put(arc.getLabel(), count);

        totalDepCount++;

        if (arc.getHead().getTag().equals(PosTag.ROOT_POS_TAG)
                && (arc.getLabel() == null || arc.getLabel().length() == 0)) {
            // do nothing for unattached stuff (e.g. punctuation)
        } else if (arc.getLabel().equals("ponct")) {
            // do nothing for punctuation
        } else {
            int depth = 0;
            DependencyArc theArc = arc;
            while (theArc != null && !theArc.getHead().getTag().equals(PosTag.ROOT_POS_TAG)) {
                theArc = parseConfiguration.getGoverningDependency(theArc.getHead());
                depth++;
            }
            if (depth > maxDepth)
                maxDepth = depth;
            syntaxDepthStats.addValue(depth);
            avgSyntaxDepthForSentenceStats.addValue(depth);

            int distance = Math
                    .abs(arc.getHead().getToken().getIndex() - arc.getDependent().getToken().getIndex());
            syntaxDistanceStats.addValue(distance);
        }

        maxSyntaxDepthStats.addValue(maxDepth);
        if (avgSyntaxDepthForSentenceStats.getN() > 0)
            avgSyntaxDepthStats.addValue(avgSyntaxDepthForSentenceStats.getMean());
    }

    // we cheat a little bit by only allowing each arc to count once:
    // there could be a situation where there are two independent non-projective arcs
    // crossing the same mother arc, but we prefer here to underestimate,
    // as this phenomenon is quite rare.
    Set<DependencyArc> nonProjectiveArcs = new HashSet<DependencyArc>();
    int i = 0;
    for (DependencyArc arc : parseConfiguration.getDependencies()) {
        i++;
        if (arc.getHead().getTag().equals(PosTag.ROOT_POS_TAG)
                && (arc.getLabel() == null || arc.getLabel().length() == 0))
            continue;
        if (nonProjectiveArcs.contains(arc))
            continue;

        int headIndex = arc.getHead().getToken().getIndex();
        int depIndex = arc.getDependent().getToken().getIndex();
        int startIndex = headIndex < depIndex ? headIndex : depIndex;
        int endIndex = headIndex >= depIndex ? headIndex : depIndex;

        int j = 0;
        for (DependencyArc otherArc : parseConfiguration.getDependencies()) {
            j++;
            if (j <= i)
                continue;
            if (otherArc.getHead().getTag().equals(PosTag.ROOT_POS_TAG)
                    && (otherArc.getLabel() == null || otherArc.getLabel().length() == 0))
                continue;
            if (nonProjectiveArcs.contains(otherArc))
                continue;

            int headIndex2 = otherArc.getHead().getToken().getIndex();
            int depIndex2 = otherArc.getDependent().getToken().getIndex();
            int startIndex2 = headIndex2 < depIndex2 ? headIndex2 : depIndex2;
            int endIndex2 = headIndex2 >= depIndex2 ? headIndex2 : depIndex2;

            boolean nonProjective = false;
            if (startIndex2 < startIndex && endIndex2 > startIndex && endIndex2 < endIndex) {
                nonProjective = true;
            } else if (startIndex2 > startIndex && startIndex2 < endIndex && endIndex2 > endIndex) {
                nonProjective = true;
            }
            if (nonProjective) {
                nonProjectiveArcs.add(arc);
                nonProjectiveArcs.add(otherArc);
                nonProjectiveCount++;
                LOG.debug("Non-projective arcs in sentence: " + parseConfiguration.getSentence().getText());
                LOG.debug(arc.toString());
                LOG.debug(otherArc.toString());
                break;
            }
        }
    }
}
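The two boolean branches above encode span crossing: two arcs are mutually non-projective when one starts strictly inside the other's span and ends strictly outside it. A standalone sketch of that predicate (the method name is mine, not from the project):

// True iff span (s2, e2) partially overlaps span (s1, e1), i.e. the arcs cross.
static boolean arcsCross(int s1, int e1, int s2, int e2) {
    return (s2 < s1 && e2 > s1 && e2 < e1)   // enters from the left, ends inside
        || (s2 > s1 && s2 < e1 && e2 > e1);  // starts inside, exits to the right
}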
From source file:de.mpicbg.knime.hcs.base.nodes.preproc.OutlierRemoval.java
@Override
protected BufferedDataTable[] execute(BufferedDataTable[] inData, ExecutionContext exec) throws Exception {

    BufferedDataTable input = inData[0];
    DataTableSpec inputSpec = input.getDataTableSpec();

    // Get the parameters and make sure they are all double-value columns
    List<Attribute> parameter = new ArrayList<Attribute>();
    for (String item : parameterNames.getIncludeList()) {
        Attribute attribute = new InputTableAttribute(item, input);
        if (attribute.getType().isCompatible(DoubleValue.class)) {
            parameter.add(attribute);
        } else {
            logger.warn("The parameter '" + attribute.getName()
                    + "' will not be considered for outlier removal, since it is not compatible with double.");
        }
    }

    // Get the groups defined by the nominal column.
    Attribute groupingAttribute = new InputTableAttribute(this.groupingColumn.getStringValue(), input);
    Map<Object, List<DataRow>> subsets = AttributeUtils.splitRowsGeneric(input, groupingAttribute);

    // Initialize
    BufferedDataContainer keepContainer = exec.createDataContainer(inputSpec);
    BufferedDataContainer discartContainer = exec.createDataContainer(inputSpec);
    int S = subsets.size();
    int s = 1;

    // Outlier analysis for each subset
    for (Object key : subsets.keySet()) {

        // Get the subset having all constraints in common
        List<DataRow> rowSubset = subsets.get(key);

        // Get the valid values
        RealMatrix data = extractMatrix(rowSubset, parameter);
        int N = data.getColumnDimension();
        int M = data.getRowDimension();
        if (M == 0) {
            logger.warn("The group '" + key + "' has no valid values and will be removed entirely.");
        } else {

            // Determine upper and lower outlier bounds
            double[] lowerBound = new double[N];
            double[] upperBound = new double[N];
            if (method.getStringValue().equals("Boxplot")) {
                for (int c = 0; c < N; ++c) {
                    RealVector vect = data.getColumnVector(c);
                    DescriptiveStatistics stats = new DescriptiveStatistics();
                    for (double value : vect.getData()) {
                        stats.addValue(value);
                    }
                    double lowerQuantile = stats.getPercentile(25);
                    // note: 85 as in the original source; a standard Tukey boxplot would use the 75th percentile
                    double upperQuantile = stats.getPercentile(85);
                    double whisker = factor.getDoubleValue() * Math.abs(lowerQuantile - upperQuantile);
                    lowerBound[c] = lowerQuantile - whisker;
                    upperBound[c] = upperQuantile + whisker;
                }
            } else {
                for (int c = 0; c < N; ++c) {
                    RealVector vect = data.getColumnVector(c);
                    double mean = StatUtils.mean(vect.getData());
                    double sd = Math.sqrt(StatUtils.variance(vect.getData()));
                    lowerBound[c] = mean - factor.getDoubleValue() * sd;
                    upperBound[c] = mean + factor.getDoubleValue() * sd;
                }
            }

            // Remove the outliers
            if (rule.getBooleanValue()) {
                // The row is only discarded if it is an outlier in every parameter.
                for (DataRow row : rowSubset) {
                    int c = 0;
                    for (Attribute column : parameter) {
                        DataCell valueCell = row.getCell(((InputTableAttribute) column).getColumnIndex());
                        // a missing value is treated as a data point inside the bounds
                        if (valueCell.isMissing()) {
                            continue;
                        }
                        Double value = ((DoubleValue) valueCell).getDoubleValue();
                        if ((value != null) && (lowerBound[c] <= value) && (value <= upperBound[c])) {
                            break;
                        } else {
                            c++;
                        }
                    }
                    if (c != N) {
                        keepContainer.addRowToTable(row);
                    } else {
                        discartContainer.addRowToTable(row);
                    }
                }
            } else {
                // The row is discarded if it is an outlier in at least one parameter.
                for (DataRow row : rowSubset) {
                    int c = 0;
                    for (Attribute column : parameter) {
                        DataCell valueCell = row.getCell(((InputTableAttribute) column).getColumnIndex());
                        // a missing value is treated as a data point inside the bounds
                        if (valueCell.isMissing()) {
                            c++;
                            continue;
                        }
                        Double value = ((DoubleValue) valueCell).getDoubleValue();
                        if ((value != null) && (lowerBound[c] <= value) && (value <= upperBound[c])) {
                            c++;
                        } else {
                            break;
                        }
                    }
                    if (c == N) {
                        keepContainer.addRowToTable(row);
                    } else {
                        discartContainer.addRowToTable(row);
                    }
                }
            }
        }
        BufTableUtils.updateProgress(exec, s++, S);
    }

    keepContainer.close();
    discartContainer.close();
    return new BufferedDataTable[] { keepContainer.getTable(), discartContainer.getTable() };
}
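For reference, a minimal standalone sketch of conventional Tukey boxplot fences with DescriptiveStatistics, using the usual 25th/75th percentiles and a factor of 1.5 (the values are illustrative):

DescriptiveStatistics stats = new DescriptiveStatistics(
        new double[] { 4, 5, 5, 6, 7, 8, 9, 30 }); // 30 is the intended outlier
double q1 = stats.getPercentile(25);
double q3 = stats.getPercentile(75);
double iqr = q3 - q1;
double lowerFence = q1 - 1.5 * iqr;
double upperFence = q3 + 1.5 * iqr;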
From source file:fantail.algorithms.BinaryART.java
private double getMedian(Instances data, int attIndex) throws Exception {
    if (false) {
        return getMedian2(data, attIndex); // added 07-july 2013; actually they are the same
        // removed 17/07/2013
    }
    DescriptiveStatistics stats = new DescriptiveStatistics();
    for (int i = 0; i < data.numInstances(); i++) {
        Instance inst = (Instance) data.instance(i);
        stats.addValue(inst.value(attIndex));
    }
    double median = stats.getPercentile(50);
    return median;
}
From source file:net.sourceforge.jags.model.ModelTest.java
@Test
public void testObservedStochasticNode() throws MathException {
    double[] data = normalSample();
    Node mu = model.addConstantNode(new int[] { 1 }, new double[] { 1 });
    Node tau = model.addConstantNode(new int[] { 1 }, new double[] { .001 });
    Node n = model.addStochasticNode("dnorm", new Node[] { mu, tau }, null, null, new double[] { 0 });
    model.initialize(true);
    model.update(1000);
    int N = 1000;
    model.stopAdapting();
    Monitor m = model.addTraceMonitor(n);
    model.update(N);
    assertEquals(N, m.dim()[1]); // Iterations dimension
    DescriptiveStatistics stats = new DescriptiveStatistics();
    for (double v : m.value(0)) {
        stats.addValue(v);
    }
    TTest test = new TTestImpl();
    assertFalse(test.tTest(0, m.value(0), 0.05));
}
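The boolean overload used above is easy to misread; as far as I recall the Commons Math 2.x semantics, tTest(mu0, sample, alpha) returns true iff the null hypothesis that the sample mean equals mu0 can be rejected at significance level alpha. A sketch with made-up data (the call declares MathException):

TTest test = new TTestImpl();
double[] sample = { 0.1, -0.2, 0.05, 0.0, -0.1 };
// false means: no evidence at the 5% level that the mean differs from 0,
// which is exactly what the assertFalse above checks.
boolean rejected = test.tTest(0.0, sample, 0.05);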
From source file:com.linkedin.pinot.perf.QueryRunner.java
/**
 * Use multiple threads to run queries as fast as possible.
 *
 * Start {numThreads} worker threads to send queries (blocking call) back to back, and use the main thread to
 * collect the statistic information and log it periodically.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numThreads number of threads sending queries.
 * @throws Exception
 */
@SuppressWarnings("InfiniteLoopStatement")
public static void multiThreadedsQueryRunner(PerfBenchmarkDriverConf conf, String queryFile,
        final int numThreads) throws Exception {
    final long randomSeed = 123456789L;
    final Random random = new Random(randomSeed);
    final int reportIntervalMillis = 3000;

    final List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }

    final int numQueries = queries.size();
    final PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    final AtomicInteger counter = new AtomicInteger(0);
    final AtomicLong totalResponseTime = new AtomicLong(0L);
    final ExecutorService executorService = Executors.newFixedThreadPool(numThreads);

    final DescriptiveStatistics stats = new DescriptiveStatistics();
    final CountDownLatch latch = new CountDownLatch(numThreads);

    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Runnable() {
            @Override
            public void run() {
                for (int j = 0; j < numQueries; j++) {
                    String query = queries.get(random.nextInt(numQueries));
                    long startTime = System.currentTimeMillis();
                    try {
                        driver.postQuery(query);
                        long clientTime = System.currentTimeMillis() - startTime;
                        synchronized (stats) {
                            stats.addValue(clientTime);
                        }
                        counter.getAndIncrement();
                        totalResponseTime.getAndAdd(clientTime);
                    } catch (Exception e) {
                        LOGGER.error("Caught exception while running query: {}", query, e);
                        return;
                    }
                }
                latch.countDown();
            }
        });
    }
    executorService.shutdown();

    int iter = 0;
    long startTime = System.currentTimeMillis();
    while (latch.getCount() > 0) {
        Thread.sleep(reportIntervalMillis);
        double timePassedSeconds = ((double) (System.currentTimeMillis() - startTime)) / MILLIS_PER_SECOND;
        int count = counter.get();
        double avgResponseTime = ((double) totalResponseTime.get()) / count;
        LOGGER.info("Time Passed: {}s, Query Executed: {}, QPS: {}, Avg Response Time: {}ms",
                timePassedSeconds, count, count / timePassedSeconds, avgResponseTime);
        iter++;
        if (iter % 10 == 0) {
            printStats(stats);
        }
    }
    printStats(stats);
}
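Worth noting: DescriptiveStatistics itself is not thread-safe, hence the synchronized (stats) block around addValue above. Commons Math also ships SynchronizedDescriptiveStatistics, which would make the explicit lock unnecessary; a sketch of that alternative (my substitution, not Pinot's code):

import org.apache.commons.math.stat.descriptive.SynchronizedDescriptiveStatistics;

// Thread-safe drop-in: addValue and the accessors are internally synchronized,
// so worker threads can call stats.addValue(clientTime) without an external lock.
final SynchronizedDescriptiveStatistics stats = new SynchronizedDescriptiveStatistics();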