List of usage examples for the org.apache.commons.math3.distribution.WeibullDistribution constructor
public WeibullDistribution(double alpha, double beta) throws NotStrictlyPositiveException
From source file:jasima.core.random.continuous.DblWeibull.java
public DblWeibull(double shape, double scale) { super(); setDistribution(new WeibullDistribution(shape, scale)); }
From source file:jasima.core.random.continuous.DblWeibull.java
/** * Sets the shape parameter for this distribution. * /*from w ww . j a va 2s . com*/ * @param shape * The shape parameter to use. * @throws NotStrictlyPositiveException * If the parameter value was {@code <=0.0}. */ public void setShape(double shape) throws NotStrictlyPositiveException { setDistribution(new WeibullDistribution(shape, dist.getScale())); }
From source file:jasima.core.random.continuous.DblWeibull.java
/** * Sets the scale parameter for this distribution. * /*from w w w . jav a2s .c o m*/ * @param scale * The scale parameter to use. * @throws NotStrictlyPositiveException * If the parameter value was {@code <=0.0}. */ public void setScale(double scale) { setDistribution(new WeibullDistribution(dist.getShape(), scale)); }
From source file:bes.injector.InjectorBurnTest.java
/**
 * Stress test: drives the injector's executors through progressively heavier load and
 * asserts that every submitted batch of tasks completes by its computed deadline.
 *
 * @param intervalNanos how long (nanos) to run at each load multiplier before raising it
 * @param loadIncrement how much to raise the load multiplier at each step
 */
private void testPromptnessOfExecution(long intervalNanos, float loadIncrement)
        throws InterruptedException, ExecutionException, TimeoutException {
    final int executorCount = 4;
    int threadCount = 8;
    int maxQueued = 1024;
    // per-task run time (nanos) sampled from Weibull(shape=3, scale=200000)
    final WeibullDistribution workTime = new WeibullDistribution(3, 200000);
    final long minWorkTime = TimeUnit.MICROSECONDS.toNanos(1);
    final long maxWorkTime = TimeUnit.MILLISECONDS.toNanos(1);
    final int[] threadCounts = new int[executorCount];
    // per-executor distribution for how many tasks go into a batch
    final WeibullDistribution[] workCount = new WeibullDistribution[executorCount];
    final ExecutorService[] executors = new ExecutorService[executorCount];
    final Injector injector = new Injector("");
    // each successive executor gets double the threads and double the queue capacity
    for (int i = 0; i < executors.length; i++) {
        executors[i] = injector.newExecutor(threadCount, maxQueued);
        threadCounts[i] = threadCount;
        workCount[i] = new WeibullDistribution(2, maxQueued);
        threadCount *= 2;
        maxQueued *= 2;
    }
    long runs = 0;
    long events = 0;
    // batches whose completion we have not yet confirmed, ordered by deadline
    final TreeSet<Batch> pending = new TreeSet<Batch>();
    // bit i set => executor i currently has an outstanding batch
    final BitSet executorsWithWork = new BitSet(executorCount);
    long until = 0;
    // basic idea is to go through different levels of load on the executor service; initially is all small batches
    // (mostly within max queue size) of very short operations, moving to progressively larger batches
    // (beyond max queued size), and longer operations
    for (float multiplier = 0f; multiplier < 2.01f;) {
        if (System.nanoTime() > until) {
            System.out.println(String.format("Completed %.0fK batches with %.1fM events",
                    runs * 0.001f, events * 0.000001f));
            events = 0;
            until = System.nanoTime() + intervalNanos;
            multiplier += loadIncrement;
            System.out.println(String.format("Running for %ds with load multiplier %.1f",
                    TimeUnit.NANOSECONDS.toSeconds(intervalNanos), multiplier));
        }

        // wait a random amount of time so we submit new tasks in various stages of
        // the pending batches' lifetimes
        long timeout;
        if (pending.isEmpty())
            timeout = 0;
        else if (Math.random() > 0.98)
            timeout = Long.MAX_VALUE; // occasionally wait for everything to drain
        else if (pending.size() == executorCount)
            timeout = pending.first().timeout;
        else
            timeout = (long) (Math.random() * pending.last().timeout);

        // retire any batches that finish before the chosen timeout elapses
        while (!pending.isEmpty() && timeout > System.nanoTime()) {
            Batch first = pending.first();
            boolean complete = false;
            try {
                // wait on the batch's futures in descending order
                for (Result result : first.results.descendingSet())
                    result.future.get(timeout - System.nanoTime(), TimeUnit.NANOSECONDS);
                complete = true;
            } catch (TimeoutException e) {
                // waiting budget exhausted; fall through to the deadline check below
            }
            if (!complete && System.nanoTime() > first.timeout) {
                // past the batch's deadline: every task must already be done
                for (Result result : first.results)
                    if (!result.future.isDone())
                        throw new AssertionError();
                complete = true;
            }
            if (complete) {
                pending.pollFirst();
                executorsWithWork.clear(first.executorIndex);
            }
        }

        // if we've emptied the executors, give all our threads an opportunity to spin down
        if (timeout == Long.MAX_VALUE) {
            try {
                Thread.sleep(10);
            } catch (InterruptedException e) {
            }
        }

        // submit a random batch to the first free executor service
        int executorIndex = executorsWithWork.nextClearBit(0);
        if (executorIndex >= executorCount)
            continue; // all executors busy; loop around and wait again
        executorsWithWork.set(executorIndex);
        ExecutorService executor = executors[executorIndex];
        TreeSet<Result> results = new TreeSet<Result>();
        int count = (int) (workCount[executorIndex].sample() * multiplier);
        long targetTotalElapsed = 0;
        long start = System.nanoTime();
        // half the time, task durations share a common base so they correlate within the batch
        long baseTime;
        if (Math.random() > 0.5)
            baseTime = 2 * (long) (workTime.sample() * multiplier);
        else
            baseTime = 0;
        for (int j = 0; j < count; j++) {
            long time;
            if (baseTime == 0)
                time = (long) (workTime.sample() * multiplier);
            else
                time = (long) (baseTime * Math.random());
            // clamp each task's duration to [minWorkTime, maxWorkTime]
            if (time < minWorkTime)
                time = minWorkTime;
            if (time > maxWorkTime)
                time = maxWorkTime;
            targetTotalElapsed += time;
            Future<?> future = executor.submit(new WaitTask(time));
            results.add(new Result(future, System.nanoTime() + time));
        }
        // deadline: ideal perfectly-parallel elapsed time plus a 100ms grace period
        long end = start + (long) Math.ceil(targetTotalElapsed / (double) threadCounts[executorIndex])
                + TimeUnit.MILLISECONDS.toNanos(100L);
        long now = System.nanoTime();
        // after warm-up, submission itself must not already overshoot the deadline
        if (runs++ > executorCount && now > end)
            throw new AssertionError();
        events += results.size();
        pending.add(new Batch(results, end, executorIndex));
    }
}
From source file:org.apache.cassandra.concurrent.LongSharedExecutorPoolTest.java
/**
 * Stress test: drives SharedExecutorPool executors through progressively heavier load
 * and asserts that every submitted batch of tasks completes by its computed deadline.
 *
 * @param intervalNanos how long (nanos) to run at each load multiplier before raising it
 * @param loadIncrement how much to raise the load multiplier at each step
 */
private void testPromptnessOfExecution(long intervalNanos, float loadIncrement)
        throws InterruptedException, ExecutionException {
    final int executorCount = 4;
    int threadCount = 8;
    int maxQueued = 1024;
    // per-task run time (nanos) sampled from Weibull(shape=3, scale=200000)
    final WeibullDistribution workTime = new WeibullDistribution(3, 200000);
    final long minWorkTime = TimeUnit.MICROSECONDS.toNanos(1);
    final long maxWorkTime = TimeUnit.MILLISECONDS.toNanos(1);
    final int[] threadCounts = new int[executorCount];
    // per-executor distribution for how many tasks go into a batch
    final WeibullDistribution[] workCount = new WeibullDistribution[executorCount];
    final ExecutorService[] executors = new ExecutorService[executorCount];
    // each successive executor gets double the threads and double the queue capacity
    for (int i = 0; i < executors.length; i++) {
        executors[i] = SharedExecutorPool.SHARED.newExecutor(threadCount, maxQueued, "test" + i, "test" + i);
        threadCounts[i] = threadCount;
        workCount[i] = new WeibullDistribution(2, maxQueued);
        threadCount *= 2;
        maxQueued *= 2;
    }
    long runs = 0;
    long events = 0;
    // batches whose completion we have not yet confirmed, ordered by deadline
    final TreeSet<Batch> pending = new TreeSet<>();
    // bit i set => executor i currently has an outstanding batch
    final BitSet executorsWithWork = new BitSet(executorCount);
    long until = 0;
    // basic idea is to go through different levels of load on the executor service; initially is all small batches
    // (mostly within max queue size) of very short operations, moving to progressively larger batches
    // (beyond max queued size), and longer operations
    for (float multiplier = 0f; multiplier < 2.01f;) {
        if (System.nanoTime() > until) {
            System.out.println(String.format("Completed %.0fK batches with %.1fM events",
                    runs * 0.001f, events * 0.000001f));
            events = 0;
            until = System.nanoTime() + intervalNanos;
            multiplier += loadIncrement;
            System.out.println(String.format("Running for %ds with load multiplier %.1f",
                    TimeUnit.NANOSECONDS.toSeconds(intervalNanos), multiplier));
        }

        // wait a random amount of time so we submit new tasks in various stages of
        // the pending batches' lifetimes
        long timeout;
        if (pending.isEmpty())
            timeout = 0;
        else if (Math.random() > 0.98)
            timeout = Long.MAX_VALUE; // occasionally wait for everything to drain
        else if (pending.size() == executorCount)
            timeout = pending.first().timeout;
        else
            timeout = (long) (Math.random() * pending.last().timeout);

        // retire any batches that finish before the chosen timeout elapses
        while (!pending.isEmpty() && timeout > System.nanoTime()) {
            Batch first = pending.first();
            boolean complete = false;
            try {
                // wait on the batch's futures in descending order
                for (Result result : first.results.descendingSet())
                    result.future.get(timeout - System.nanoTime(), TimeUnit.NANOSECONDS);
                complete = true;
            } catch (TimeoutException e) {
                // waiting budget exhausted; fall through to the deadline check below
            }
            if (!complete && System.nanoTime() > first.timeout) {
                // past the batch's deadline: every task must already be done
                for (Result result : first.results)
                    if (!result.future.isDone())
                        throw new AssertionError();
                complete = true;
            }
            if (complete) {
                pending.pollFirst();
                executorsWithWork.clear(first.executorIndex);
            }
        }

        // if we've emptied the executors, give all our threads an opportunity to spin down
        if (timeout == Long.MAX_VALUE)
            Uninterruptibles.sleepUninterruptibly(10, TimeUnit.MILLISECONDS);

        // submit a random batch to the first free executor service
        int executorIndex = executorsWithWork.nextClearBit(0);
        if (executorIndex >= executorCount)
            continue; // all executors busy; loop around and wait again
        executorsWithWork.set(executorIndex);
        ExecutorService executor = executors[executorIndex];
        TreeSet<Result> results = new TreeSet<>();
        int count = (int) (workCount[executorIndex].sample() * multiplier);
        long targetTotalElapsed = 0;
        long start = System.nanoTime();
        // half the time, task durations share a common base so they correlate within the batch
        long baseTime;
        if (Math.random() > 0.5)
            baseTime = 2 * (long) (workTime.sample() * multiplier);
        else
            baseTime = 0;
        for (int j = 0; j < count; j++) {
            long time;
            if (baseTime == 0)
                time = (long) (workTime.sample() * multiplier);
            else
                time = (long) (baseTime * Math.random());
            // clamp each task's duration to [minWorkTime, maxWorkTime]
            if (time < minWorkTime)
                time = minWorkTime;
            if (time > maxWorkTime)
                time = maxWorkTime;
            targetTotalElapsed += time;
            Future<?> future = executor.submit(new WaitTask(time));
            results.add(new Result(future, System.nanoTime() + time));
        }
        // deadline: ideal perfectly-parallel elapsed time plus a 100ms grace period
        long end = start + (long) Math.ceil(targetTotalElapsed / (double) threadCounts[executorIndex])
                + TimeUnit.MILLISECONDS.toNanos(100L);
        long now = System.nanoTime();
        // after warm-up, submission itself must not already overshoot the deadline
        if (runs++ > executorCount && now > end)
            throw new AssertionError();
        events += results.size();
        pending.add(new Batch(results, end, executorIndex));
    }
}
From source file:org.apache.solr.client.solrj.io.eval.WeibullDistributionEvaluator.java
/**
 * Builds a {@link WeibullDistribution} from the two evaluated operands.
 *
 * @param first  the shape parameter; must be a non-null {@link Number}
 * @param second the scale parameter; must be a non-null {@link Number}
 * @return a new {@link WeibullDistribution} with the given shape and scale
 * @throws IOException if either operand is null or not a Number
 */
@Override
public Object doWork(Object first, Object second) throws IOException {
    if (null == first) {
        throw new IOException(String.format(Locale.ROOT,
                "Invalid expression %s - null found for the first value",
                toExpression(constructingFactory)));
    }
    if (null == second) {
        throw new IOException(String.format(Locale.ROOT,
                "Invalid expression %s - null found for the second value",
                toExpression(constructingFactory)));
    }
    // Validate operand types up front so a bad expression produces an informative
    // IOException instead of an opaque ClassCastException.
    if (!(first instanceof Number)) {
        throw new IOException(String.format(Locale.ROOT,
                "Invalid expression %s - found type %s for the first value, expecting a Number",
                toExpression(constructingFactory), first.getClass().getSimpleName()));
    }
    if (!(second instanceof Number)) {
        throw new IOException(String.format(Locale.ROOT,
                "Invalid expression %s - found type %s for the second value, expecting a Number",
                toExpression(constructingFactory), second.getClass().getSimpleName()));
    }
    Number shape = (Number) first;
    Number scale = (Number) second;
    return new WeibullDistribution(shape.doubleValue(), scale.doubleValue());
}
From source file:org.lightjason.trafficsimulation.math.EDistributionFactory.java
/** * generate the distribution/*from www .j a v a 2 s . c o m*/ * * @param p_args distribution arguments * @return the distribution */ public final AbstractRealDistribution generate(final double... p_args) { switch (this) { case BETA: return new BetaDistribution(p_args[0], p_args[1]); case CAUCHY: return new CauchyDistribution(p_args[0], p_args[1]); case CHI_SQUARED: return new ChiSquaredDistribution(p_args[0]); case EXPONENTIAL: return new ExponentialDistribution(p_args[0]); case F: return new FDistribution(p_args[0], p_args[1]); case GAMMA: return new GammaDistribution(p_args[0], p_args[1]); case GUMBEL: return new GumbelDistribution(p_args[0], p_args[1]); case LAPLACE: return new LaplaceDistribution(p_args[0], p_args[1]); case LEVY: return new LevyDistribution(p_args[0], p_args[1]); case LOGISTIC: return new LogisticDistribution(p_args[0], p_args[1]); case LOG_NORMAL: return new LogNormalDistribution(p_args[0], p_args[1]); case NAKAGAMI: return new NakagamiDistribution(p_args[0], p_args[1]); case NORMAL: return new NormalDistribution(p_args[0], p_args[1]); case PARETO: return new ParetoDistribution(p_args[0], p_args[1]); case T: return new TDistribution(p_args[0]); case TRIANGULAR: return new TriangularDistribution(p_args[0], p_args[1], p_args[2]); case UNIFORM: return new UniformRealDistribution(p_args[0], p_args[1]); case WEIBULL: return new WeibullDistribution(p_args[0], p_args[1]); default: throw new RuntimeException(MessageFormat.format("not set {0}", this)); } }
From source file:org.workflowsim.failure.FailureGenerator.java
/**
 * Builds the {@link RealDistribution} used to model failures, based on the
 * globally configured failure distribution type.
 *
 * @param alpha rate-like parameter; {@code 1.0 / alpha} is used as the first
 *              constructor argument (mean for LOGNORMAL/NORMAL, scale for
 *              WEIBULL/GAMMA)
 * @param beta  second parameter (std for NORMAL; shape for WEIBULL/GAMMA)
 * @return the distribution, or {@code null} if the configured type is not handled
 */
protected static RealDistribution getDistribution(double alpha, double beta) {
    RealDistribution distribution = null;
    switch (FailureParameters.getFailureDistribution()) {
    case LOGNORMAL:
        distribution = new LogNormalDistribution(1.0 / alpha, beta);
        break;
    case WEIBULL:
        distribution = new WeibullDistribution(beta, 1.0 / alpha);
        break;
    case GAMMA:
        distribution = new GammaDistribution(beta, 1.0 / alpha);
        break;
    case NORMAL:
        //beta is the std, 1.0/alpha is the mean
        distribution = new NormalDistribution(1.0 / alpha, beta);
        break;
    default:
        // unrecognized type: caller receives null
        break;
    }
    return distribution;
}
From source file:org.workflowsim.utils.DistributionGenerator.java
/** * Gets the RealDistribution with two parameters * * @param scale the first param scale/* w ww . j a v a 2 s . com*/ * @param shape the second param shape * @return the RealDistribution Object */ public RealDistribution getDistribution(double scale, double shape) { RealDistribution distribution = null; switch (this.dist) { case LOGNORMAL: distribution = new LogNormalDistribution(scale, shape); break; case WEIBULL: distribution = new WeibullDistribution(shape, scale); break; case GAMMA: distribution = new GammaDistribution(shape, scale); break; case NORMAL: //shape is the std, scale is the mean distribution = new NormalDistribution(scale, shape); break; default: break; } return distribution; }