Example usage for com.google.common.util.concurrent RateLimiter acquire

Introduction

On this page you can find usage examples for com.google.common.util.concurrent RateLimiter.acquire().

Prototype

public double acquire() 

Document

Acquires a single permit from this RateLimiter, blocking until the request can be granted, and returns the time spent sleeping to enforce the rate, in seconds (0.0 if no waiting was required).
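
As a minimal sketch (not drawn from the examples below; the class name and rate are arbitrary), a RateLimiter is typically created with a target permits-per-second rate, and acquire() is called before each rate-limited operation:

import com.google.common.util.concurrent.RateLimiter;

public class RateLimiterSketch {
    public static void main(String[] args) {
        // Allow roughly five operations per second across this limiter.
        RateLimiter rateLimiter = RateLimiter.create(5.0);

        for (int i = 0; i < 20; i++) {
            // Blocks until a permit is available; returns the seconds spent waiting.
            double waited = rateLimiter.acquire();
            System.out.printf("operation %d (waited %.3f s)%n", i, waited);
        }
    }
}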

Usage

From source file: webindex.data.LoadHdfs.java

public static void main(String[] args) throws Exception {

    if (args.length != 1) {
        log.error("Usage: LoadHdfs <dataDir>");
        System.exit(1);
    }
    final String dataDir = args[0];
    IndexEnv.validateDataDir(dataDir);

    final String hadoopConfDir = IndexEnv.getHadoopConfDir();
    final WebIndexConfig webIndexConfig = WebIndexConfig.load();
    final int rateLimit = webIndexConfig.getLoadRateLimit();
    final String appName = webIndexConfig.fluoApp;

    List<String> loadPaths = new ArrayList<>();
    FileSystem hdfs = IndexEnv.getHDFS();
    RemoteIterator<LocatedFileStatus> listIter = hdfs.listFiles(new Path(dataDir), true);
    while (listIter.hasNext()) {
        LocatedFileStatus status = listIter.next();
        if (status.isFile()) {
            loadPaths.add(status.getPath().toString());
        }
    }

    log.info("Loading {} files into Fluo from {}", loadPaths.size(), dataDir);

    SparkConf sparkConf = new SparkConf().setAppName("webindex-load-hdfs");
    try (JavaSparkContext ctx = new JavaSparkContext(sparkConf)) {

        JavaRDD<String> paths = ctx.parallelize(loadPaths, loadPaths.size());

        paths.foreachPartition(iter -> {
            final FluoConfiguration fluoConfig = new FluoConfiguration(new File("fluo-conn.properties"));
            fluoConfig.setApplicationName(appName);
            final RateLimiter rateLimiter = rateLimit > 0 ? RateLimiter.create(rateLimit) : null;
            FileSystem fs = IndexEnv.getHDFS(hadoopConfDir);
            try (FluoClient client = FluoFactory.newClient(fluoConfig);
                    LoaderExecutor le = client.newLoaderExecutor()) {
                iter.forEachRemaining(path -> {
                    Path filePath = new Path(path);
                    try {
                        if (fs.exists(filePath)) {
                            FSDataInputStream fsin = fs.open(filePath);
                            ArchiveReader reader = WARCReaderFactory.get(filePath.getName(), fsin, true);
                            for (ArchiveRecord record : reader) {
                                Page page = ArchiveUtil.buildPageIgnoreErrors(record);
                                if (page.getOutboundLinks().size() > 0) {
                                    log.info("Loading page {} with {} links", page.getUrl(),
                                            page.getOutboundLinks().size());
                                    if (rateLimiter != null) {
                                        rateLimiter.acquire();
                                    }
                                    le.execute(PageLoader.updatePage(page));
                                }
                            }
                        }
                    } catch (IOException e) {
                        log.error("Exception while processing {}", path, e);
                    }
                });
            }
        });
    }
}
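
The example above makes rate limiting optional: a limiter is created only when the configured rate is positive, and acquire() is guarded by a null check. A reduced sketch of just that pattern (the item type and handler are hypothetical) might look like this:

import com.google.common.util.concurrent.RateLimiter;

public class OptionalRateLimit {

    public static void process(Iterable<String> items, int rateLimit) {
        // A non-positive rate disables limiting entirely.
        final RateLimiter rateLimiter = rateLimit > 0 ? RateLimiter.create(rateLimit) : null;

        for (String item : items) {
            if (rateLimiter != null) {
                rateLimiter.acquire(); // block until the next permit is available
            }
            handle(item);
        }
    }

    private static void handle(String item) {
        System.out.println("processing " + item);
    }
}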

From source file: com.yahoo.pulsar.testclient.PerformanceProducer.java

public static void main(String[] args) throws Exception {
    final Arguments arguments = new Arguments();
    JCommander jc = new JCommander(arguments);
    jc.setProgramName("pulsar-perf-producer");

    try {
        jc.parse(args);
    } catch (ParameterException e) {
        System.out.println(e.getMessage());
        jc.usage();
        System.exit(-1);
    }

    if (arguments.help) {
        jc.usage();
        System.exit(-1);
    }

    if (arguments.destinations.size() != 1) {
        System.out.println("Only one topic name is allowed");
        jc.usage();
        System.exit(-1);
    }

    if (arguments.confFile != null) {
        Properties prop = new Properties(System.getProperties());
        prop.load(new FileInputStream(arguments.confFile));

        if (arguments.serviceURL == null) {
            arguments.serviceURL = prop.getProperty("brokerServiceUrl");
        }

        if (arguments.serviceURL == null) {
            arguments.serviceURL = prop.getProperty("webServiceUrl");
        }

        // fallback to previous-version serviceUrl property to maintain backward-compatibility
        if (arguments.serviceURL == null) {
            arguments.serviceURL = prop.getProperty("serviceUrl", "http://localhost:8080/");
        }

        if (arguments.authPluginClassName == null) {
            arguments.authPluginClassName = prop.getProperty("authPlugin", null);
        }

        if (arguments.authParams == null) {
            arguments.authParams = prop.getProperty("authParams", null);
        }
    }

    arguments.testTime = TimeUnit.SECONDS.toMillis(arguments.testTime);

    // Dump config variables
    ObjectMapper m = new ObjectMapper();
    ObjectWriter w = m.writerWithDefaultPrettyPrinter();
    log.info("Starting Pulsar perf producer with config: {}", w.writeValueAsString(arguments));

    // Read payload data from file if needed
    byte payloadData[];
    if (arguments.payloadFilename != null) {
        payloadData = Files.readAllBytes(Paths.get(arguments.payloadFilename));
    } else {
        payloadData = new byte[arguments.msgSize];
    }

    // Now processing command line arguments
    String prefixTopicName = arguments.destinations.get(0);
    List<Future<Producer>> futures = Lists.newArrayList();

    EventLoopGroup eventLoopGroup;
    if (SystemUtils.IS_OS_LINUX) {
        eventLoopGroup = new EpollEventLoopGroup(Runtime.getRuntime().availableProcessors(),
                new DefaultThreadFactory("pulsar-perf-producer"));
    } else {
        eventLoopGroup = new NioEventLoopGroup(Runtime.getRuntime().availableProcessors(),
                new DefaultThreadFactory("pulsar-perf-producer"));
    }

    ClientConfiguration clientConf = new ClientConfiguration();
    clientConf.setConnectionsPerBroker(arguments.maxConnections);
    clientConf.setStatsInterval(arguments.statsIntervalSeconds, TimeUnit.SECONDS);
    if (isNotBlank(arguments.authPluginClassName)) {
        clientConf.setAuthentication(arguments.authPluginClassName, arguments.authParams);
    }

    PulsarClient client = new PulsarClientImpl(arguments.serviceURL, clientConf, eventLoopGroup);

    ProducerConfiguration producerConf = new ProducerConfiguration();
    producerConf.setSendTimeout(0, TimeUnit.SECONDS);
    producerConf.setCompressionType(arguments.compression);
    // enable round robin message routing if it is a partitioned topic
    producerConf.setMessageRoutingMode(MessageRoutingMode.RoundRobinPartition);
    if (arguments.batchTime > 0) {
        producerConf.setBatchingMaxPublishDelay(arguments.batchTime, TimeUnit.MILLISECONDS);
        producerConf.setBatchingEnabled(true);
        producerConf.setMaxPendingMessages(arguments.msgRate);
    }

    for (int i = 0; i < arguments.numTopics; i++) {
        String topic = (arguments.numTopics == 1) ? prefixTopicName
                : String.format("%s-%d", prefixTopicName, i);
        log.info("Adding {} publishers on destination {}", arguments.numProducers, topic);

        for (int j = 0; j < arguments.numProducers; j++) {
            futures.add(client.createProducerAsync(topic, producerConf));
        }
    }

    final List<Producer> producers = Lists.newArrayListWithCapacity(futures.size());
    for (Future<Producer> future : futures) {
        producers.add(future.get());
    }

    log.info("Created {} producers", producers.size());

    Runtime.getRuntime().addShutdownHook(new Thread() {
        public void run() {
            printAggregatedStats();
        }
    });

    Collections.shuffle(producers);
    AtomicBoolean isDone = new AtomicBoolean();

    executor.submit(() -> {
        try {
            RateLimiter rateLimiter = RateLimiter.create(arguments.msgRate);

            long startTime = System.currentTimeMillis();

            // Send messages on all topics/producers
            long totalSent = 0;
            while (true) {
                for (Producer producer : producers) {
                    if (arguments.testTime > 0) {
                        if (System.currentTimeMillis() - startTime > arguments.testTime) {
                            log.info("------------------- DONE -----------------------");
                            printAggregatedStats();
                            isDone.set(true);
                            Thread.sleep(5000);
                            System.exit(0);
                        }
                    }

                    if (arguments.numMessages > 0) {
                        if (totalSent++ >= arguments.numMessages) {
                            log.info("------------------- DONE -----------------------");
                            printAggregatedStats();
                            isDone.set(true);
                            Thread.sleep(5000);
                            System.exit(0);
                        }
                    }
                    rateLimiter.acquire();

                    final long sendTime = System.nanoTime();

                    producer.sendAsync(payloadData).thenRun(() -> {
                        messagesSent.increment();
                        bytesSent.add(payloadData.length);

                        long latencyMicros = NANOSECONDS.toMicros(System.nanoTime() - sendTime);
                        recorder.recordValue(latencyMicros);
                        cumulativeRecorder.recordValue(latencyMicros);
                    }).exceptionally(ex -> {
                        log.warn("Write error on message", ex);
                        System.exit(-1);
                        return null;
                    });
                }
            }
        } catch (Throwable t) {
            log.error("Got error", t);
        }
    });

    // Print report stats
    long oldTime = System.nanoTime();

    Histogram reportHistogram = null;

    String statsFileName = "perf-producer-" + System.currentTimeMillis() + ".hgrm";
    log.info("Dumping latency stats to {}", statsFileName);

    PrintStream histogramLog = new PrintStream(new FileOutputStream(statsFileName), false);
    HistogramLogWriter histogramLogWriter = new HistogramLogWriter(histogramLog);

    // Some log header bits
    histogramLogWriter.outputLogFormatVersion();
    histogramLogWriter.outputLegend();

    while (true) {
        try {
            Thread.sleep(10000);
        } catch (InterruptedException e) {
            break;
        }

        if (isDone.get()) {
            break;
        }

        long now = System.nanoTime();
        double elapsed = (now - oldTime) / 1e9;

        double rate = messagesSent.sumThenReset() / elapsed;
        double throughput = bytesSent.sumThenReset() / elapsed / 1024 / 1024 * 8;

        reportHistogram = recorder.getIntervalHistogram(reportHistogram);

        log.info(
                "Throughput produced: {}  msg/s --- {} Mbit/s --- Latency: mean: {} ms - med: {} - 95pct: {} - 99pct: {} - 99.9pct: {} - 99.99pct: {} - Max: {}",
                throughputFormat.format(rate), throughputFormat.format(throughput),
                dec.format(reportHistogram.getMean() / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(50) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(95) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(99) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(99.9) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(99.99) / 1000.0),
                dec.format(reportHistogram.getMaxValue() / 1000.0));

        histogramLogWriter.outputIntervalHistogram(reportHistogram);
        reportHistogram.reset();

        oldTime = now;
    }

    client.close();
}

From source file: org.apache.pulsar.testclient.PerformanceProducer.java

public static void main(String[] args) throws Exception {
    final Arguments arguments = new Arguments();
    JCommander jc = new JCommander(arguments);
    jc.setProgramName("pulsar-perf-producer");

    try {
        jc.parse(args);
    } catch (ParameterException e) {
        System.out.println(e.getMessage());
        jc.usage();
        System.exit(-1);
    }

    if (arguments.help) {
        jc.usage();
        System.exit(-1);
    }

    if (arguments.destinations.size() != 1) {
        System.out.println("Only one topic name is allowed");
        jc.usage();
        System.exit(-1);
    }

    if (arguments.confFile != null) {
        Properties prop = new Properties(System.getProperties());
        prop.load(new FileInputStream(arguments.confFile));

        if (arguments.serviceURL == null) {
            arguments.serviceURL = prop.getProperty("brokerServiceUrl");
        }

        if (arguments.serviceURL == null) {
            arguments.serviceURL = prop.getProperty("webServiceUrl");
        }

        // fallback to previous-version serviceUrl property to maintain backward-compatibility
        if (arguments.serviceURL == null) {
            arguments.serviceURL = prop.getProperty("serviceUrl", "http://localhost:8080/");
        }

        if (arguments.authPluginClassName == null) {
            arguments.authPluginClassName = prop.getProperty("authPlugin", null);
        }

        if (arguments.authParams == null) {
            arguments.authParams = prop.getProperty("authParams", null);
        }
    }

    arguments.testTime = TimeUnit.SECONDS.toMillis(arguments.testTime);

    // Dump config variables
    ObjectMapper m = new ObjectMapper();
    ObjectWriter w = m.writerWithDefaultPrettyPrinter();
    log.info("Starting Pulsar perf producer with config: {}", w.writeValueAsString(arguments));

    // Read payload data from file if needed
    byte payloadData[];
    if (arguments.payloadFilename != null) {
        payloadData = Files.readAllBytes(Paths.get(arguments.payloadFilename));
    } else {
        payloadData = new byte[arguments.msgSize];
    }

    // Now processing command line arguments
    String prefixTopicName = arguments.destinations.get(0);
    List<Future<Producer>> futures = Lists.newArrayList();

    EventLoopGroup eventLoopGroup;
    if (SystemUtils.IS_OS_LINUX) {
        eventLoopGroup = new EpollEventLoopGroup(Runtime.getRuntime().availableProcessors(),
                new DefaultThreadFactory("pulsar-perf-producer"));
    } else {
        eventLoopGroup = new NioEventLoopGroup(Runtime.getRuntime().availableProcessors(),
                new DefaultThreadFactory("pulsar-perf-producer"));
    }

    ClientConfiguration clientConf = new ClientConfiguration();
    clientConf.setConnectionsPerBroker(arguments.maxConnections);
    clientConf.setStatsInterval(arguments.statsIntervalSeconds, TimeUnit.SECONDS);
    if (isNotBlank(arguments.authPluginClassName)) {
        clientConf.setAuthentication(arguments.authPluginClassName, arguments.authParams);
    }

    PulsarClient client = new PulsarClientImpl(arguments.serviceURL, clientConf, eventLoopGroup);

    ProducerConfiguration producerConf = new ProducerConfiguration();
    producerConf.setSendTimeout(0, TimeUnit.SECONDS);
    producerConf.setCompressionType(arguments.compression);
    // enable round robin message routing if it is a partitioned topic
    producerConf.setMessageRoutingMode(MessageRoutingMode.RoundRobinPartition);
    if (arguments.batchTime > 0) {
        producerConf.setBatchingMaxPublishDelay(arguments.batchTime, TimeUnit.MILLISECONDS);
        producerConf.setBatchingEnabled(true);
        producerConf.setMaxPendingMessages(arguments.msgRate);
    }

    // Block if queue is full else we will start seeing errors in sendAsync
    producerConf.setBlockIfQueueFull(true);

    for (int i = 0; i < arguments.numTopics; i++) {
        String topic = (arguments.numTopics == 1) ? prefixTopicName
                : String.format("%s-%d", prefixTopicName, i);
        log.info("Adding {} publishers on destination {}", arguments.numProducers, topic);

        for (int j = 0; j < arguments.numProducers; j++) {
            futures.add(client.createProducerAsync(topic, producerConf));
        }
    }

    final List<Producer> producers = Lists.newArrayListWithCapacity(futures.size());
    for (Future<Producer> future : futures) {
        producers.add(future.get());
    }

    log.info("Created {} producers", producers.size());

    Runtime.getRuntime().addShutdownHook(new Thread() {
        public void run() {
            printAggregatedStats();
        }
    });

    Collections.shuffle(producers);
    AtomicBoolean isDone = new AtomicBoolean();

    executor.submit(() -> {
        try {
            RateLimiter rateLimiter = RateLimiter.create(arguments.msgRate);

            long startTime = System.currentTimeMillis();

            // Send messages on all topics/producers
            long totalSent = 0;
            while (true) {
                for (Producer producer : producers) {
                    if (arguments.testTime > 0) {
                        if (System.currentTimeMillis() - startTime > arguments.testTime) {
                            log.info("------------------- DONE -----------------------");
                            printAggregatedStats();
                            isDone.set(true);
                            Thread.sleep(5000);
                            System.exit(0);
                        }
                    }

                    if (arguments.numMessages > 0) {
                        if (totalSent++ >= arguments.numMessages) {
                            log.info("------------------- DONE -----------------------");
                            printAggregatedStats();
                            isDone.set(true);
                            Thread.sleep(5000);
                            System.exit(0);
                        }
                    }
                    rateLimiter.acquire();

                    final long sendTime = System.nanoTime();

                    producer.sendAsync(payloadData).thenRun(() -> {
                        messagesSent.increment();
                        bytesSent.add(payloadData.length);

                        long latencyMicros = NANOSECONDS.toMicros(System.nanoTime() - sendTime);
                        recorder.recordValue(latencyMicros);
                        cumulativeRecorder.recordValue(latencyMicros);
                    }).exceptionally(ex -> {
                        log.warn("Write error on message", ex);
                        System.exit(-1);
                        return null;
                    });
                }
            }
        } catch (Throwable t) {
            log.error("Got error", t);
        }
    });

    // Print report stats
    long oldTime = System.nanoTime();

    Histogram reportHistogram = null;

    String statsFileName = "perf-producer-" + System.currentTimeMillis() + ".hgrm";
    log.info("Dumping latency stats to {}", statsFileName);

    PrintStream histogramLog = new PrintStream(new FileOutputStream(statsFileName), false);
    HistogramLogWriter histogramLogWriter = new HistogramLogWriter(histogramLog);

    // Some log header bits
    histogramLogWriter.outputLogFormatVersion();
    histogramLogWriter.outputLegend();

    while (true) {
        try {
            Thread.sleep(10000);
        } catch (InterruptedException e) {
            break;
        }

        if (isDone.get()) {
            break;
        }

        long now = System.nanoTime();
        double elapsed = (now - oldTime) / 1e9;

        double rate = messagesSent.sumThenReset() / elapsed;
        double throughput = bytesSent.sumThenReset() / elapsed / 1024 / 1024 * 8;

        reportHistogram = recorder.getIntervalHistogram(reportHistogram);

        log.info(
                "Throughput produced: {}  msg/s --- {} Mbit/s --- Latency: mean: {} ms - med: {} - 95pct: {} - 99pct: {} - 99.9pct: {} - 99.99pct: {} - Max: {}",
                throughputFormat.format(rate), throughputFormat.format(throughput),
                dec.format(reportHistogram.getMean() / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(50) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(95) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(99) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(99.9) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(99.99) / 1000.0),
                dec.format(reportHistogram.getMaxValue() / 1000.0));

        histogramLogWriter.outputIntervalHistogram(reportHistogram);
        reportHistogram.reset();

        oldTime = now;
    }

    client.close();
}

From source file: org.apache.bookkeeper.zookeeper.BkZooWorker.java

/**
 * Execute a sync zookeeper operation with a given retry policy.
 *
 * @param client
 *          ZooKeeper client.
 * @param proc
 *          Synchronous zookeeper operation wrapped in a {@link Callable}.
 * @param retryPolicy
 *          Retry policy to execute the synchronous operation.
 * @param rateLimiter
 *          Rate limiter for zookeeper calls
 * @param statsLogger
 *          Stats Logger for zookeeper client.
 * @return result of the zookeeper operation
 * @throws KeeperException any non-recoverable exception, or a recoverable exception after all retries are exhausted.
 * @throws InterruptedException if the operation is interrupted.
 */
public static <T> T syncCallWithRetries(BkZooKeeperClient client, ZooCallable<T> proc, RetryPolicy retryPolicy,
        RateLimiter rateLimiter, OpStatsLogger statsLogger) throws KeeperException, InterruptedException {
    T result = null;
    boolean isDone = false;
    int attempts = 0;
    long startTimeNanos = MathUtils.nowInNano();
    while (!isDone) {
        try {
            if (null != client) {
                client.waitForConnection();
            }
            logger.debug("Execute {} at {} retry attempt.", proc, attempts);
            if (null != rateLimiter) {
                rateLimiter.acquire();
            }
            result = proc.call();
            isDone = true;
            statsLogger.registerSuccessfulEvent(MathUtils.elapsedMicroSec(startTimeNanos),
                    TimeUnit.MICROSECONDS);
        } catch (KeeperException e) {
            ++attempts;
            boolean rethrow = true;
            long elapsedTime = MathUtils.elapsedMSec(startTimeNanos);
            if (((null != client && isRecoverableException(e)) || null == client)
                    && retryPolicy.allowRetry(attempts, elapsedTime)) {
                rethrow = false;
            }
            if (rethrow) {
                statsLogger.registerFailedEvent(MathUtils.elapsedMicroSec(startTimeNanos),
                        TimeUnit.MICROSECONDS);
                logger.debug("Stopped executing {} after {} attempts.", proc, attempts);
                throw e;
            }
            TimeUnit.MILLISECONDS.sleep(retryPolicy.nextRetryWaitTime(attempts, elapsedTime));
        }
    }
    return result;
}

From source file: org.apache.bookkeeper.zookeeper.ZooWorker.java

/**
 * Execute a sync zookeeper operation with a given retry policy.
 *
 * @param client
 *          ZooKeeper client.
 * @param proc
 *          Synchronous zookeeper operation wrapped in a {@link Callable}.
 * @param retryPolicy
 *          Retry policy to execute the synchronous operation.
 * @param rateLimiter
 *          Rate limiter for zookeeper calls
 * @param statsLogger
 *          Stats Logger for zookeeper client.
 * @return result of the zookeeper operation
 * @throws KeeperException any non-recoverable exception, or a recoverable exception after all retries are exhausted.
 * @throws InterruptedException if the operation is interrupted.
 */
public static <T> T syncCallWithRetries(ZooKeeperClient client, ZooCallable<T> proc, RetryPolicy retryPolicy,
        RateLimiter rateLimiter, OpStatsLogger statsLogger) throws KeeperException, InterruptedException {
    T result = null;
    boolean isDone = false;
    int attempts = 0;
    long startTimeNanos = MathUtils.nowInNano();
    while (!isDone) {
        try {
            if (null != client) {
                client.waitForConnection();
            }
            logger.debug("Execute {} at {} retry attempt.", proc, attempts);
            if (null != rateLimiter) {
                rateLimiter.acquire();
            }
            result = proc.call();
            isDone = true;
            statsLogger.registerSuccessfulEvent(MathUtils.elapsedMicroSec(startTimeNanos),
                    TimeUnit.MICROSECONDS);
        } catch (KeeperException e) {
            ++attempts;
            boolean rethrow = true;
            long elapsedTime = MathUtils.elapsedMSec(startTimeNanos);
            if (((null != client && isRecoverableException(e)) || null == client)
                    && retryPolicy.allowRetry(attempts, elapsedTime)) {
                rethrow = false;
            }
            if (rethrow) {
                statsLogger.registerFailedEvent(MathUtils.elapsedMicroSec(startTimeNanos),
                        TimeUnit.MICROSECONDS);
                logger.debug("Stopped executing {} after {} attempts.", proc, attempts);
                throw e;
            }
            TimeUnit.MILLISECONDS.sleep(retryPolicy.nextRetryWaitTime(attempts, elapsedTime));
        }
    }
    return result;
}
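
Both ZooWorker variants above call acquire() before every attempt, so retries are paced by the same limiter as first attempts. A reduced sketch of that shape (the Callable operation and the fixed attempt cap stand in for the real retry policy) could be:

import java.util.concurrent.Callable;

import com.google.common.util.concurrent.RateLimiter;

public class RateLimitedRetry {

    // Retries the operation up to maxAttempts times, pacing every attempt with the limiter.
    public static <T> T callWithRetries(Callable<T> operation, RateLimiter rateLimiter, int maxAttempts)
            throws Exception {
        Exception lastFailure = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            if (rateLimiter != null) {
                rateLimiter.acquire(); // every attempt, including retries, waits for a permit
            }
            try {
                return operation.call();
            } catch (Exception e) {
                lastFailure = e;
            }
        }
        throw lastFailure;
    }
}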

From source file: com.google.pubsub.clients.common.LoadTestRunner.java

private static void runTest(StartRequest request) {
    log.info("Request received, starting up server.");
    ListeningExecutorService executor = MoreExecutors
            .listeningDecorator(Executors.newFixedThreadPool(request.getMaxOutstandingRequests() + 10));

    final RateLimiter rateLimiter = RateLimiter.create(request.getRequestRate());
    final Semaphore outstandingTestLimiter = new Semaphore(request.getMaxOutstandingRequests(), false);

    final long toSleep = request.getStartTime().getSeconds() * 1000 - System.currentTimeMillis();
    if (toSleep > 0) {
        try {
            Thread.sleep(toSleep);
        } catch (InterruptedException e) {
            log.error("Interrupted sleeping, starting test now.");
        }
    }

    stopwatch.start();
    while (shouldContinue(request)) {
        outstandingTestLimiter.acquireUninterruptibly();
        rateLimiter.acquire();
        executor.submit(task).addListener(outstandingTestLimiter::release, executor);
    }
    stopwatch.stop();
    executor.shutdownNow();
    finished.set(true);
    log.info("Load test complete.");
}
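
The load-test runner above pairs a Semaphore, which caps how many requests are outstanding at once, with a RateLimiter, which caps how fast new requests are issued. A minimal, self-contained sketch of that combination (the pool size, rate, and task body are placeholders) is:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;

import com.google.common.util.concurrent.RateLimiter;

public class ThrottledSubmitter {
    public static void main(String[] args) {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        RateLimiter rateLimiter = RateLimiter.create(100.0); // at most ~100 submissions per second
        Semaphore outstanding = new Semaphore(50);           // at most 50 tasks in flight

        for (int i = 0; i < 1000; i++) {
            outstanding.acquireUninterruptibly(); // bound concurrency
            rateLimiter.acquire();                // bound submission rate
            executor.submit(() -> {
                try {
                    // ... perform the actual request here ...
                } finally {
                    outstanding.release();
                }
            });
        }
        executor.shutdown();
    }
}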

From source file: com.nestedbird.modules.ratelimiter.RateLimiterAspect.java

/**
 * Handle soft rate limiting; this pauses the current thread.
 *
 * @param key       the method's key
 * @param joinPoint the join point
 * @param limit     the limit annotation data
 */
private void handleSoftRateLimiting(final String key, final JoinPoint joinPoint, final RateLimit limit) {
    if (limit.value() > 0) {
        final RateLimiter limiter = limiterMap.computeIfAbsent(key, name -> RateLimiter.create(limit.value()));
        limiter.acquire();
    }
}
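
The aspect above keeps one limiter per key in a map, so each annotated method is throttled independently and its limiter is created lazily on first use. Stripped down to just that mechanism (the class and method names here are invented), the pattern looks like:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

import com.google.common.util.concurrent.RateLimiter;

public class KeyedRateLimiters {

    private final ConcurrentMap<String, RateLimiter> limiters = new ConcurrentHashMap<>();
    private final double permitsPerSecond;

    public KeyedRateLimiters(double permitsPerSecond) {
        this.permitsPerSecond = permitsPerSecond;
    }

    // Blocks until the limiter associated with the key grants a permit,
    // creating that limiter on first use.
    public void acquireFor(String key) {
        limiters.computeIfAbsent(key, k -> RateLimiter.create(permitsPerSecond)).acquire();
    }
}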

From source file: org.eclipse.buildship.ui.view.execution.UpdateExecutionPageJob.java

@Override
protected IStatus run(IProgressMonitor monitor) {
    RateLimiter rateLimiter = RateLimiter.create(MAX_UPDATES_PER_SECOND);
    while (this.running || !this.queue.isEmpty()) {
        rateLimiter.acquire();

        List<ProgressEvent> events = Lists.newArrayList();
        this.queue.drainTo(events);

        Display display = PlatformUI.getWorkbench().getDisplay();
        if (!display.isDisposed()) {
            display.syncExec(new UpdateExecutionPageContent(this.page, events));
        }
    }

    return Status.OK_STATUS;
}

From source file: org.opennms.netmgt.discovery.actors.Discoverer.java

public DiscoveryResults discover(DiscoveryJob job) {
    // Track the results of this particular job
    final PingResponseTracker tracker = new PingResponseTracker();

    // Filter out any entries where getAddress() == null
    List<IPPollAddress> addresses = StreamSupport.stream(job.getAddresses().spliterator(), false)
            .filter(j -> j.getAddress() != null).collect(Collectors.toList());

    // Expect callbacks for all of the addresses before issuing any pings
    addresses.stream().map(a -> a.getAddress()).forEach(a -> tracker.expectCallbackFor(a));

    // Use a RateLimiter to limit the ping packets per second that we send
    RateLimiter limiter = RateLimiter.create(job.getPacketsPerSecond());

    // Issue all of the pings
    addresses.stream().forEach(a -> {
        limiter.acquire();
        ping(a, tracker);
    });

    // Don't bother waiting if there aren't any addresses
    if (!addresses.isEmpty()) {
        // Wait for the pings to complete
        try {
            tracker.getLatch().await();
        } catch (InterruptedException e) {
            throw Throwables.propagate(e);
        }
    }

    // We're done
    return new DiscoveryResults(tracker.getResponses(), job.getForeignSource(), job.getLocation());
}

From source file: com.amazonaws.service.apigateway.importer.impl.sdk.ApiGatewaySdkApiImporter.java

protected List<Resource> buildResourceList(RestApi api) {
    List<Resource> resourceList = new ArrayList<>();

    Resources resources = api.getResources();
    resourceList.addAll(resources.getItem());

    LOG.debug("Building list of resources. Stack trace: ", new Throwable());

    final RateLimiter rl = RateLimiter.create(2);
    while (resources._isLinkAvailable("next")) {
        rl.acquire();
        resources = resources.getNext();
        resourceList.addAll(resources.getItem());
    }

    return resourceList;
}