Example usage for com.google.common.util.concurrent RateLimiter create

Introduction

This page lists example usages of RateLimiter.create from com.google.common.util.concurrent.

Prototype

public static RateLimiter create(double permitsPerSecond)

Document

Creates a RateLimiter with the specified stable throughput, given as "permits per second" (commonly referred to as QPS, queries per second).
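
Before the full examples below, here is a minimal, self-contained sketch of the create-then-acquire pattern; the class name and the 5.0 permits-per-second value are illustrative only and do not come from any of the source files.

import com.google.common.util.concurrent.RateLimiter;

public class RateLimiterSketch {
    public static void main(String[] args) {
        // Allow roughly 5 permits per second; create() takes the stable rate as a double.
        RateLimiter limiter = RateLimiter.create(5.0);

        for (int i = 0; i < 10; i++) {
            // acquire() blocks until a permit is available and returns the time spent waiting, in seconds.
            double waited = limiter.acquire();
            System.out.printf("request %d issued after waiting %.3f s%n", i, waited);
        }
    }
}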

Usage

From source file:com.trulia.stail.Stail.java

public static void main(String[] args) {
    final Stail stail = new Stail();

    JCommander jct = new JCommander(stail);
    jct.setProgramName("stail");
    try {
        jct.parse(args);

        AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
        if (stail.profile != null) {
            credentialsProvider = new ProfileCredentialsProvider(stail.profile);
        }

        if (stail.role != null) {
            credentialsProvider = new STSAssumeRoleSessionCredentialsProvider.Builder(stail.role, "stail")
                    .withStsClient(AWSSecurityTokenServiceClientBuilder.standard()
                            .withCredentials(credentialsProvider).build())
                    .build();
        }

        AmazonKinesis client = AmazonKinesisClientBuilder.standard().withRegion(stail.region)
                .withCredentials(credentialsProvider).build();

        // prepare the initial shard iterators at the LATEST position
        Map<Shard, String> shardIterators = getShardIterators(client, stail.stream, stail.start);

        IRecordProcessor processor = stail.json ? new JSONRecordProcessor() : new RawRecordProcessor();

        Map<Shard, RateLimiter> rateLimiters = new HashMap<>();
        shardIterators.keySet()
                .forEach(shard -> rateLimiters.put(shard, RateLimiter.create(MAX_SHARD_THROUGHPUT)));

        long end = Strings.isNullOrEmpty(stail.duration) ? Long.MAX_VALUE
                : System.currentTimeMillis() + Duration.parse(stail.duration).toMillis();

        Set<String> reshardedShards = new HashSet<>();

        Map<Shard, String> sequenceNumbers = new HashMap<>();

        while (System.currentTimeMillis() < end) {
            if (!reshardedShards.isEmpty()) {
                // get the new list of shards
                List<Shard> shards = getShards(client, stail.stream);
                for (Shard shard : shards) {
                    if (!Strings.isNullOrEmpty(shard.getParentShardId())
                            && reshardedShards.contains(shard.getParentShardId())) {
                        // the old shard was split, so we need to consume this new shard from the beginning
                        shardIterators.put(shard, getOldestShardIterator(client, stail.stream, shard));
                    } else if (!Strings.isNullOrEmpty(shard.getAdjacentParentShardId())
                            && reshardedShards.contains(shard.getAdjacentParentShardId())) {
                        // the old shards were merged into a new shard
                        shardIterators.put(shard, getOldestShardIterator(client, stail.stream, shard));
                    }
                }

                reshardedShards.clear();
            }

            for (Shard shard : Lists.newArrayList(shardIterators.keySet())) {
                String shardIterator = shardIterators.remove(shard);

                GetRecordsRequest getRecordsRequest = new GetRecordsRequest();
                getRecordsRequest.setShardIterator(shardIterator);
                getRecordsRequest.setLimit(BATCH_SIZE);

                try {
                    GetRecordsResult getRecordsResult = client.getRecords(getRecordsRequest);
                    List<Record> records = getRecordsResult.getRecords();
                    processor.processRecords(records, null);

                    shardIterator = getRecordsResult.getNextShardIterator();

                    if (records.isEmpty()) {
                        // nothing on the stream yet, so let's wait a bit to see if something appears
                        TimeUnit.SECONDS.sleep(1);
                    } else {
                        int bytesRead = records.stream()
                                .mapToInt(record -> record.getData().position()).sum();

                        sequenceNumbers.put(shard, records.get(records.size() - 1).getSequenceNumber());

                        // optionally sleep if we have hit the limit for this shard
                        rateLimiters.get(shard).acquire(bytesRead);
                    }

                    if (!Strings.isNullOrEmpty(shardIterator)) {
                        shardIterators.put(shard, shardIterator);
                    } else {
                        reshardedShards.add(shard.getShardId());
                    }
                } catch (ProvisionedThroughputExceededException e) {
                    logger.warn("tripped the max throughput.  Backing off: {}", e.getMessage());
                    TimeUnit.SECONDS.sleep(6); // we tripped the max throughput.  Back off

                    // add the original iterator back into the map so we can try it again
                    shardIterators.put(shard, shardIterator);
                } catch (ExpiredIteratorException e) {
                    logger.debug("Iterator expired", e);

                    String sequenceNumber = sequenceNumbers.get(shard);
                    if (sequenceNumber == null) {
                        logger.warn("No previously known sequence number for {}.  Moving to LATEST",
                                shard.getShardId());
                        shardIterators.put(shard, getShardIterator(client, stail.stream, shard, null));
                    } else {
                        shardIterators.put(shard,
                                getShardIteratorAtSequenceNumber(client, stail.stream, shard, sequenceNumber));
                    }
                }
            }
        }
    } catch (ParameterException e) {
        jct.usage();
        System.exit(1);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        System.exit(2);
    }
}

From source file:co.paralleluniverse.photon.Photon.java

public static void main(final String[] args) throws InterruptedException, IOException {

    final Options options = new Options();
    options.addOption("rate", true, "Requests per second (default " + rateDefault + ")");
    options.addOption("duration", true,
            "Minimum test duration in seconds: will wait for <duration> * <rate> requests to terminate or, if progress check enabled, no progress after <duration> (default "
                    + durationDefault + ")");
    options.addOption("maxconnections", true,
            "Maximum number of open connections (default " + maxConnectionsDefault + ")");
    options.addOption("timeout", true,
            "Connection and read timeout in millis (default " + timeoutDefault + ")");
    options.addOption("print", true,
            "Print cycle in millis, 0 to disable intermediate statistics (default " + printCycleDefault + ")");
    options.addOption("check", true,
            "Progress check cycle in millis, 0 to disable progress check (default " + checkCycleDefault + ")");
    options.addOption("stats", false, "Print full statistics when finish (default false)");
    options.addOption("minmax", false, "Print min/mean/stddev/max stats when finish (default false)");
    options.addOption("name", true, "Test name to print in the statistics (default '" + testNameDefault + "')");
    options.addOption("help", false, "Print help");

    try {
        final CommandLine cmd = new BasicParser().parse(options, args);
        final String[] ar = cmd.getArgs();
        if (cmd.hasOption("help") || ar.length != 1)
            printUsageAndExit(options);

        final String url = ar[0];

        final int timeout = Integer.parseInt(cmd.getOptionValue("timeout", timeoutDefault));
        final int maxConnections = Integer
                .parseInt(cmd.getOptionValue("maxconnections", maxConnectionsDefault));
        final int duration = Integer.parseInt(cmd.getOptionValue("duration", durationDefault));
        final int printCycle = Integer.parseInt(cmd.getOptionValue("print", printCycleDefault));
        final int checkCycle = Integer.parseInt(cmd.getOptionValue("check", checkCycleDefault));
        final String testName = cmd.getOptionValue("name", testNameDefault);
        final int rate = Integer.parseInt(cmd.getOptionValue("rate", rateDefault));

        final MetricRegistry metrics = new MetricRegistry();
        final Meter requestMeter = metrics.meter("request");
        final Meter responseMeter = metrics.meter("response");
        final Meter errorsMeter = metrics.meter("errors");
        final Logger log = LoggerFactory.getLogger(Photon.class);
        final ConcurrentHashMap<String, AtomicInteger> errors = new ConcurrentHashMap<>();
        final HttpGet request = new HttpGet(url);
        final StripedTimeSeries<Long> sts = new StripedTimeSeries<>(30000, false);
        final StripedHistogram sh = new StripedHistogram(60000, 5);

        log.info("name: " + testName + " url:" + url + " rate:" + rate + " duration:" + duration
                + " maxconnections:" + maxConnections + ", " + "timeout:" + timeout);
        final DefaultConnectingIOReactor ioreactor = new DefaultConnectingIOReactor(IOReactorConfig.custom()
                .setConnectTimeout(timeout).setIoThreadCount(10).setSoTimeout(timeout).build());

        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            final List<ExceptionEvent> events = ioreactor.getAuditLog();
            if (events != null)
                events.stream().filter(event -> event != null).forEach(event -> {
                    System.err.println(
                            "Apache Async HTTP Client I/O Reactor Error Time: " + event.getTimestamp());
                    //noinspection ThrowableResultOfMethodCallIgnored
                    if (event.getCause() != null)
                        //noinspection ThrowableResultOfMethodCallIgnored
                        event.getCause().printStackTrace();
                });
            if (cmd.hasOption("stats"))
                printFinishStatistics(errorsMeter, sts, sh, testName);
            if (!errors.keySet().isEmpty())
                errors.entrySet().stream()
                        .forEach(p -> log.info(testName + " " + p.getKey() + " " + p.getValue() + "ms"));
            System.out.println(
                    testName + " responseTime(90%): " + sh.getHistogramData().getValueAtPercentile(90) + "ms");
            if (cmd.hasOption("minmax")) {
                final HistogramData hd = sh.getHistogramData();
                System.out.format("%s %8s%8s%8s%8s\n", testName, "min", "mean", "sd", "max");
                System.out.format("%s %8d%8.2f%8.2f%8d\n", testName, hd.getMinValue(), hd.getMean(),
                        hd.getStdDeviation(), hd.getMaxValue());
            }
        }));

        final PoolingNHttpClientConnectionManager mngr = new PoolingNHttpClientConnectionManager(ioreactor);
        mngr.setDefaultMaxPerRoute(maxConnections);
        mngr.setMaxTotal(maxConnections);
        final CloseableHttpAsyncClient ahc = HttpAsyncClientBuilder.create().setConnectionManager(mngr)
                .setDefaultRequestConfig(RequestConfig.custom().setLocalAddress(null).build()).build();
        try (final CloseableHttpClient client = new FiberHttpClient(ahc)) {
            final int num = duration * rate;

            final CountDownLatch cdl = new CountDownLatch(num);
            final Semaphore sem = new Semaphore(maxConnections);
            final RateLimiter rl = RateLimiter.create(rate);

            spawnStatisticsThread(printCycle, cdl, log, requestMeter, responseMeter, errorsMeter, testName);

            for (int i = 0; i < num; i++) {
                rl.acquire();
                if (sem.availablePermits() == 0)
                    log.debug("Maximum connections count reached, waiting...");
                sem.acquireUninterruptibly();

                new Fiber<Void>(() -> {
                    requestMeter.mark();
                    final long start = System.nanoTime();
                    try {
                        try (final CloseableHttpResponse ignored = client.execute(request)) {
                            responseMeter.mark();
                        } catch (final Throwable t) {
                            markError(errorsMeter, errors, t);
                        }
                    } catch (final Throwable t) {
                        markError(errorsMeter, errors, t);
                    } finally {
                        final long now = System.nanoTime();
                        final long millis = TimeUnit.NANOSECONDS.toMillis(now - start);
                        sts.record(start, millis);
                        sh.recordValue(millis);
                        sem.release();
                        cdl.countDown();
                    }
                }).start();
            }
            spawnProgressCheckThread(log, duration, checkCycle, cdl);
            cdl.await();
        }
    } catch (final ParseException ex) {
        System.err.println("Parsing failed.  Reason: " + ex.getMessage());
    }
}

From source file:org.apache.pulsar.testclient.ManagedLedgerWriter.java

public static void main(String[] args) throws Exception {

    final Arguments arguments = new Arguments();
    JCommander jc = new JCommander(arguments);
    jc.setProgramName("pulsar-perf-producer");

    try {
        jc.parse(args);
    } catch (ParameterException e) {
        System.out.println(e.getMessage());
        jc.usage();
        System.exit(-1);
    }

    if (arguments.help) {
        jc.usage();
        System.exit(-1);
    }

    arguments.testTime = TimeUnit.SECONDS.toMillis(arguments.testTime);

    // Dump config variables
    ObjectMapper m = new ObjectMapper();
    ObjectWriter w = m.writerWithDefaultPrettyPrinter();
    log.info("Starting Pulsar managed-ledger perf writer with config: {}", w.writeValueAsString(arguments));

    byte[] payloadData = new byte[arguments.msgSize];
    ByteBuf payloadBuffer = PooledByteBufAllocator.DEFAULT.directBuffer(arguments.msgSize);
    payloadBuffer.writerIndex(arguments.msgSize);

    // Now processing command line arguments
    String managedLedgerPrefix = "test-" + DigestUtils.sha1Hex(UUID.randomUUID().toString()).substring(0, 5);

    ClientConfiguration bkConf = new ClientConfiguration();
    bkConf.setUseV2WireProtocol(true);
    bkConf.setAddEntryTimeout(30);
    bkConf.setReadEntryTimeout(30);
    bkConf.setThrottleValue(0);
    bkConf.setNumChannelsPerBookie(arguments.maxConnections);
    bkConf.setZkServers(arguments.zookeeperServers);

    ManagedLedgerFactoryConfig mlFactoryConf = new ManagedLedgerFactoryConfig();
    mlFactoryConf.setMaxCacheSize(0);
    ManagedLedgerFactory factory = new ManagedLedgerFactoryImpl(bkConf, mlFactoryConf);

    ManagedLedgerConfig mlConf = new ManagedLedgerConfig();
    mlConf.setEnsembleSize(arguments.ensembleSize);
    mlConf.setWriteQuorumSize(arguments.writeQuorum);
    mlConf.setAckQuorumSize(arguments.ackQuorum);
    mlConf.setMinimumRolloverTime(10, TimeUnit.MINUTES);
    mlConf.setMetadataEnsembleSize(arguments.ensembleSize);
    mlConf.setMetadataWriteQuorumSize(arguments.writeQuorum);
    mlConf.setMetadataAckQuorumSize(arguments.ackQuorum);
    mlConf.setDigestType(arguments.digestType);
    mlConf.setMaxSizePerLedgerMb(2048);

    List<CompletableFuture<ManagedLedger>> futures = new ArrayList<>();

    for (int i = 0; i < arguments.numManagedLedgers; i++) {
        String name = String.format("%s-%03d", managedLedgerPrefix, i);
        CompletableFuture<ManagedLedger> future = new CompletableFuture<>();
        futures.add(future);
        factory.asyncOpen(name, mlConf, new OpenLedgerCallback() {

            @Override
            public void openLedgerComplete(ManagedLedger ledger, Object ctx) {
                future.complete(ledger);
            }

            @Override
            public void openLedgerFailed(ManagedLedgerException exception, Object ctx) {
                future.completeExceptionally(exception);
            }
        }, null);
    }

    List<ManagedLedger> managedLedgers = futures.stream().map(CompletableFuture::join)
            .collect(Collectors.toList());

    log.info("Created {} managed ledgers", managedLedgers.size());

    Runtime.getRuntime().addShutdownHook(new Thread() {
        public void run() {
            printAggregatedStats();
        }
    });

    Collections.shuffle(managedLedgers);
    AtomicBoolean isDone = new AtomicBoolean();

    List<List<ManagedLedger>> managedLedgersPerThread = Lists.partition(managedLedgers,
            Math.max(1, managedLedgers.size() / arguments.numThreads));

    for (int i = 0; i < arguments.numThreads; i++) {
        List<ManagedLedger> managedLedgersForThisThread = managedLedgersPerThread.get(i);
        int numManagedLedgersForThisThread = managedLedgersForThisThread.size();
        long numMessagesForThisThread = arguments.numMessages / arguments.numThreads;
        int maxOutstandingForThisThread = arguments.maxOutstanding;

        executor.submit(() -> {
            try {
                final double msgRate = arguments.msgRate / (double) arguments.numThreads;
                final RateLimiter rateLimiter = RateLimiter.create(msgRate);

                // Acquire 1 sec worth of messages to have a slower ramp-up
                rateLimiter.acquire((int) msgRate);
                final long startTime = System.currentTimeMillis();

                final Semaphore semaphore = new Semaphore(maxOutstandingForThisThread);

                final AddEntryCallback addEntryCallback = new AddEntryCallback() {
                    @Override
                    public void addComplete(Position position, Object ctx) {
                        long sendTime = (Long) (ctx);
                        messagesSent.increment();
                        bytesSent.add(payloadData.length);

                        long latencyMicros = NANOSECONDS.toMicros(System.nanoTime() - sendTime);
                        recorder.recordValue(latencyMicros);
                        cumulativeRecorder.recordValue(latencyMicros);

                        semaphore.release();
                    }

                    @Override
                    public void addFailed(ManagedLedgerException exception, Object ctx) {
                        log.warn("Write error on message", exception);
                        System.exit(-1);
                    }
                };

                // Send messages on all topics/producers
                long totalSent = 0;
                while (true) {
                    for (int j = 0; j < numManagedLedgersForThisThread; j++) {
                        if (arguments.testTime > 0) {
                            if (System.currentTimeMillis() - startTime > arguments.testTime) {
                                log.info("------------------- DONE -----------------------");
                                printAggregatedStats();
                                isDone.set(true);
                                Thread.sleep(5000);
                                System.exit(0);
                            }
                        }

                        if (numMessagesForThisThread > 0) {
                            if (totalSent++ >= numMessagesForThisThread) {
                                log.info("------------------- DONE -----------------------");
                                printAggregatedStats();
                                isDone.set(true);
                                Thread.sleep(5000);
                                System.exit(0);
                            }
                        }

                        semaphore.acquire();
                        rateLimiter.acquire();

                        final long sendTime = System.nanoTime();
                        managedLedgersForThisThread.get(j).asyncAddEntry(payloadBuffer, addEntryCallback,
                                sendTime);
                    }
                }
            } catch (Throwable t) {
                log.error("Got error", t);
            }
        });
    }

    // Print report stats
    long oldTime = System.nanoTime();

    Histogram reportHistogram = null;

    while (true) {
        try {
            Thread.sleep(10000);
        } catch (InterruptedException e) {
            break;
        }

        if (isDone.get()) {
            break;
        }

        long now = System.nanoTime();
        double elapsed = (now - oldTime) / 1e9;

        double rate = messagesSent.sumThenReset() / elapsed;
        double throughput = bytesSent.sumThenReset() / elapsed / 1024 / 1024 * 8;

        reportHistogram = recorder.getIntervalHistogram(reportHistogram);

        log.info(
                "Throughput produced: {}  msg/s --- {} Mbit/s --- Latency: mean: {} ms - med: {} - 95pct: {} - 99pct: {} - 99.9pct: {} - 99.99pct: {} - Max: {}",
                throughputFormat.format(rate), throughputFormat.format(throughput),
                dec.format(reportHistogram.getMean() / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(50) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(95) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(99) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(99.9) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(99.99) / 1000.0),
                dec.format(reportHistogram.getMaxValue() / 1000.0));

        reportHistogram.reset();

        oldTime = now;
    }

    factory.shutdown();
}

From source file:com.android.tools.idea.diagnostics.crash.UploadRateLimiter.java

static UploadRateLimiter create(double qps) {
    return new UploadRateLimiter() {
        private final RateLimiter myRateLimiter = RateLimiter.create(qps);

        @Override
        public boolean tryAcquire() {
            return myRateLimiter.tryAcquire();
        }
    };
}

From source file:com.yahoo.pulsar.testclient.PerformanceProducer.java

public static void main(String[] args) throws Exception {
    final Arguments arguments = new Arguments();
    JCommander jc = new JCommander(arguments);
    jc.setProgramName("pulsar-perf-producer");

    try {
        jc.parse(args);
    } catch (ParameterException e) {
        System.out.println(e.getMessage());
        jc.usage();
        System.exit(-1);
    }

    if (arguments.help) {
        jc.usage();
        System.exit(-1);
    }

    if (arguments.destinations.size() != 1) {
        System.out.println("Only one topic name is allowed");
        jc.usage();
        System.exit(-1);
    }

    if (arguments.confFile != null) {
        Properties prop = new Properties(System.getProperties());
        prop.load(new FileInputStream(arguments.confFile));

        if (arguments.serviceURL == null) {
            arguments.serviceURL = prop.getProperty("brokerServiceUrl");
        }

        if (arguments.serviceURL == null) {
            arguments.serviceURL = prop.getProperty("webServiceUrl");
        }

        // fallback to previous-version serviceUrl property to maintain backward-compatibility
        if (arguments.serviceURL == null) {
            arguments.serviceURL = prop.getProperty("serviceUrl", "http://localhost:8080/");
        }

        if (arguments.authPluginClassName == null) {
            arguments.authPluginClassName = prop.getProperty("authPlugin", null);
        }

        if (arguments.authParams == null) {
            arguments.authParams = prop.getProperty("authParams", null);
        }
    }

    arguments.testTime = TimeUnit.SECONDS.toMillis(arguments.testTime);

    // Dump config variables
    ObjectMapper m = new ObjectMapper();
    ObjectWriter w = m.writerWithDefaultPrettyPrinter();
    log.info("Starting Pulsar perf producer with config: {}", w.writeValueAsString(arguments));

    // Read payload data from file if needed
    byte[] payloadData;
    if (arguments.payloadFilename != null) {
        payloadData = Files.readAllBytes(Paths.get(arguments.payloadFilename));
    } else {
        payloadData = new byte[arguments.msgSize];
    }

    // Now processing command line arguments
    String prefixTopicName = arguments.destinations.get(0);
    List<Future<Producer>> futures = Lists.newArrayList();

    EventLoopGroup eventLoopGroup;
    if (SystemUtils.IS_OS_LINUX) {
        eventLoopGroup = new EpollEventLoopGroup(Runtime.getRuntime().availableProcessors(),
                new DefaultThreadFactory("pulsar-perf-producer"));
    } else {
        eventLoopGroup = new NioEventLoopGroup(Runtime.getRuntime().availableProcessors(),
                new DefaultThreadFactory("pulsar-perf-producer"));
    }

    ClientConfiguration clientConf = new ClientConfiguration();
    clientConf.setConnectionsPerBroker(arguments.maxConnections);
    clientConf.setStatsInterval(arguments.statsIntervalSeconds, TimeUnit.SECONDS);
    if (isNotBlank(arguments.authPluginClassName)) {
        clientConf.setAuthentication(arguments.authPluginClassName, arguments.authParams);
    }

    PulsarClient client = new PulsarClientImpl(arguments.serviceURL, clientConf, eventLoopGroup);

    ProducerConfiguration producerConf = new ProducerConfiguration();
    producerConf.setSendTimeout(0, TimeUnit.SECONDS);
    producerConf.setCompressionType(arguments.compression);
    // enable round robin message routing if it is a partitioned topic
    producerConf.setMessageRoutingMode(MessageRoutingMode.RoundRobinPartition);
    if (arguments.batchTime > 0) {
        producerConf.setBatchingMaxPublishDelay(arguments.batchTime, TimeUnit.MILLISECONDS);
        producerConf.setBatchingEnabled(true);
        producerConf.setMaxPendingMessages(arguments.msgRate);
    }

    for (int i = 0; i < arguments.numTopics; i++) {
        String topic = (arguments.numTopics == 1) ? prefixTopicName
                : String.format("%s-%d", prefixTopicName, i);
        log.info("Adding {} publishers on destination {}", arguments.numProducers, topic);

        for (int j = 0; j < arguments.numProducers; j++) {
            futures.add(client.createProducerAsync(topic, producerConf));
        }
    }

    final List<Producer> producers = Lists.newArrayListWithCapacity(futures.size());
    for (Future<Producer> future : futures) {
        producers.add(future.get());
    }

    log.info("Created {} producers", producers.size());

    Runtime.getRuntime().addShutdownHook(new Thread() {
        public void run() {
            printAggregatedStats();
        }
    });

    Collections.shuffle(producers);
    AtomicBoolean isDone = new AtomicBoolean();

    executor.submit(() -> {
        try {
            RateLimiter rateLimiter = RateLimiter.create(arguments.msgRate);

            long startTime = System.currentTimeMillis();

            // Send messages on all topics/producers
            long totalSent = 0;
            while (true) {
                for (Producer producer : producers) {
                    if (arguments.testTime > 0) {
                        if (System.currentTimeMillis() - startTime > arguments.testTime) {
                            log.info("------------------- DONE -----------------------");
                            printAggregatedStats();
                            isDone.set(true);
                            Thread.sleep(5000);
                            System.exit(0);
                        }
                    }

                    if (arguments.numMessages > 0) {
                        if (totalSent++ >= arguments.numMessages) {
                            log.info("------------------- DONE -----------------------");
                            printAggregatedStats();
                            isDone.set(true);
                            Thread.sleep(5000);
                            System.exit(0);
                        }
                    }
                    rateLimiter.acquire();

                    final long sendTime = System.nanoTime();

                    producer.sendAsync(payloadData).thenRun(() -> {
                        messagesSent.increment();
                        bytesSent.add(payloadData.length);

                        long latencyMicros = NANOSECONDS.toMicros(System.nanoTime() - sendTime);
                        recorder.recordValue(latencyMicros);
                        cumulativeRecorder.recordValue(latencyMicros);
                    }).exceptionally(ex -> {
                        log.warn("Write error on message", ex);
                        System.exit(-1);
                        return null;
                    });
                }
            }
        } catch (Throwable t) {
            log.error("Got error", t);
        }
    });

    // Print report stats
    long oldTime = System.nanoTime();

    Histogram reportHistogram = null;

    String statsFileName = "perf-producer-" + System.currentTimeMillis() + ".hgrm";
    log.info("Dumping latency stats to {}", statsFileName);

    PrintStream histogramLog = new PrintStream(new FileOutputStream(statsFileName), false);
    HistogramLogWriter histogramLogWriter = new HistogramLogWriter(histogramLog);

    // Some log header bits
    histogramLogWriter.outputLogFormatVersion();
    histogramLogWriter.outputLegend();

    while (true) {
        try {
            Thread.sleep(10000);
        } catch (InterruptedException e) {
            break;
        }

        if (isDone.get()) {
            break;
        }

        long now = System.nanoTime();
        double elapsed = (now - oldTime) / 1e9;

        double rate = messagesSent.sumThenReset() / elapsed;
        double throughput = bytesSent.sumThenReset() / elapsed / 1024 / 1024 * 8;

        reportHistogram = recorder.getIntervalHistogram(reportHistogram);

        log.info(
                "Throughput produced: {}  msg/s --- {} Mbit/s --- Latency: mean: {} ms - med: {} - 95pct: {} - 99pct: {} - 99.9pct: {} - 99.99pct: {} - Max: {}",
                throughputFormat.format(rate), throughputFormat.format(throughput),
                dec.format(reportHistogram.getMean() / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(50) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(95) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(99) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(99.9) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(99.99) / 1000.0),
                dec.format(reportHistogram.getMaxValue() / 1000.0));

        histogramLogWriter.outputIntervalHistogram(reportHistogram);
        reportHistogram.reset();

        oldTime = now;
    }

    client.close();
}

From source file:com.pinterest.pinlater.QueueRateLimiter.java

public static IFace create(double maxRequestsPerSecond) {
    if (maxRequestsPerSecond <= 0.0) {
        return ALLOW_NONE;
    }

    final RateLimiter rateLimiter = RateLimiter.create(maxRequestsPerSecond);
    return new IFace() {
        @Override
        public boolean allowDequeue(int numJobs) {
            return rateLimiter.tryAcquire(numJobs);
        }

        @Override
        public double getRate() {
            return rateLimiter.getRate();
        }
    };
}

From source file:com.pinterest.secor.util.RateLimitUtil.java

public static void configure(SecorConfig config) {
    // Lazy initialization of the rate limiter would have to be in a synchronized block so
    // creating it here makes things simple.
    mRateLimiter = RateLimiter.create(config.getMessagesPerSecond());
}

From source file:org.apache.pulsar.testclient.PerformanceProducer.java

public static void main(String[] args) throws Exception {
    final Arguments arguments = new Arguments();
    JCommander jc = new JCommander(arguments);
    jc.setProgramName("pulsar-perf-producer");

    try {
        jc.parse(args);
    } catch (ParameterException e) {
        System.out.println(e.getMessage());
        jc.usage();
        System.exit(-1);
    }

    if (arguments.help) {
        jc.usage();
        System.exit(-1);
    }

    if (arguments.destinations.size() != 1) {
        System.out.println("Only one topic name is allowed");
        jc.usage();
        System.exit(-1);
    }

    if (arguments.confFile != null) {
        Properties prop = new Properties(System.getProperties());
        prop.load(new FileInputStream(arguments.confFile));

        if (arguments.serviceURL == null) {
            arguments.serviceURL = prop.getProperty("brokerServiceUrl");
        }

        if (arguments.serviceURL == null) {
            arguments.serviceURL = prop.getProperty("webServiceUrl");
        }

        // fallback to previous-version serviceUrl property to maintain backward-compatibility
        if (arguments.serviceURL == null) {
            arguments.serviceURL = prop.getProperty("serviceUrl", "http://localhost:8080/");
        }

        if (arguments.authPluginClassName == null) {
            arguments.authPluginClassName = prop.getProperty("authPlugin", null);
        }

        if (arguments.authParams == null) {
            arguments.authParams = prop.getProperty("authParams", null);
        }
    }

    arguments.testTime = TimeUnit.SECONDS.toMillis(arguments.testTime);

    // Dump config variables
    ObjectMapper m = new ObjectMapper();
    ObjectWriter w = m.writerWithDefaultPrettyPrinter();
    log.info("Starting Pulsar perf producer with config: {}", w.writeValueAsString(arguments));

    // Read payload data from file if needed
    byte[] payloadData;
    if (arguments.payloadFilename != null) {
        payloadData = Files.readAllBytes(Paths.get(arguments.payloadFilename));
    } else {
        payloadData = new byte[arguments.msgSize];
    }

    // Now processing command line arguments
    String prefixTopicName = arguments.destinations.get(0);
    List<Future<Producer>> futures = Lists.newArrayList();

    EventLoopGroup eventLoopGroup;
    if (SystemUtils.IS_OS_LINUX) {
        eventLoopGroup = new EpollEventLoopGroup(Runtime.getRuntime().availableProcessors(),
                new DefaultThreadFactory("pulsar-perf-producer"));
    } else {
        eventLoopGroup = new NioEventLoopGroup(Runtime.getRuntime().availableProcessors(),
                new DefaultThreadFactory("pulsar-perf-producer"));
    }

    ClientConfiguration clientConf = new ClientConfiguration();
    clientConf.setConnectionsPerBroker(arguments.maxConnections);
    clientConf.setStatsInterval(arguments.statsIntervalSeconds, TimeUnit.SECONDS);
    if (isNotBlank(arguments.authPluginClassName)) {
        clientConf.setAuthentication(arguments.authPluginClassName, arguments.authParams);
    }

    PulsarClient client = new PulsarClientImpl(arguments.serviceURL, clientConf, eventLoopGroup);

    ProducerConfiguration producerConf = new ProducerConfiguration();
    producerConf.setSendTimeout(0, TimeUnit.SECONDS);
    producerConf.setCompressionType(arguments.compression);
    // enable round robin message routing if it is a partitioned topic
    producerConf.setMessageRoutingMode(MessageRoutingMode.RoundRobinPartition);
    if (arguments.batchTime > 0) {
        producerConf.setBatchingMaxPublishDelay(arguments.batchTime, TimeUnit.MILLISECONDS);
        producerConf.setBatchingEnabled(true);
        producerConf.setMaxPendingMessages(arguments.msgRate);
    }

    // Block if queue is full else we will start seeing errors in sendAsync
    producerConf.setBlockIfQueueFull(true);

    for (int i = 0; i < arguments.numTopics; i++) {
        String topic = (arguments.numTopics == 1) ? prefixTopicName
                : String.format("%s-%d", prefixTopicName, i);
        log.info("Adding {} publishers on destination {}", arguments.numProducers, topic);

        for (int j = 0; j < arguments.numProducers; j++) {
            futures.add(client.createProducerAsync(topic, producerConf));
        }
    }

    final List<Producer> producers = Lists.newArrayListWithCapacity(futures.size());
    for (Future<Producer> future : futures) {
        producers.add(future.get());
    }

    log.info("Created {} producers", producers.size());

    Runtime.getRuntime().addShutdownHook(new Thread() {
        public void run() {
            printAggregatedStats();
        }
    });

    Collections.shuffle(producers);
    AtomicBoolean isDone = new AtomicBoolean();

    executor.submit(() -> {
        try {
            RateLimiter rateLimiter = RateLimiter.create(arguments.msgRate);

            long startTime = System.currentTimeMillis();

            // Send messages on all topics/producers
            long totalSent = 0;
            while (true) {
                for (Producer producer : producers) {
                    if (arguments.testTime > 0) {
                        if (System.currentTimeMillis() - startTime > arguments.testTime) {
                            log.info("------------------- DONE -----------------------");
                            printAggregatedStats();
                            isDone.set(true);
                            Thread.sleep(5000);
                            System.exit(0);
                        }
                    }

                    if (arguments.numMessages > 0) {
                        if (totalSent++ >= arguments.numMessages) {
                            log.info("------------------- DONE -----------------------");
                            printAggregatedStats();
                            isDone.set(true);
                            Thread.sleep(5000);
                            System.exit(0);
                        }
                    }
                    rateLimiter.acquire();

                    final long sendTime = System.nanoTime();

                    producer.sendAsync(payloadData).thenRun(() -> {
                        messagesSent.increment();
                        bytesSent.add(payloadData.length);

                        long latencyMicros = NANOSECONDS.toMicros(System.nanoTime() - sendTime);
                        recorder.recordValue(latencyMicros);
                        cumulativeRecorder.recordValue(latencyMicros);
                    }).exceptionally(ex -> {
                        log.warn("Write error on message", ex);
                        System.exit(-1);
                        return null;
                    });
                }
            }
        } catch (Throwable t) {
            log.error("Got error", t);
        }
    });

    // Print report stats
    long oldTime = System.nanoTime();

    Histogram reportHistogram = null;

    String statsFileName = "perf-producer-" + System.currentTimeMillis() + ".hgrm";
    log.info("Dumping latency stats to {}", statsFileName);

    PrintStream histogramLog = new PrintStream(new FileOutputStream(statsFileName), false);
    HistogramLogWriter histogramLogWriter = new HistogramLogWriter(histogramLog);

    // Some log header bits
    histogramLogWriter.outputLogFormatVersion();
    histogramLogWriter.outputLegend();

    while (true) {
        try {
            Thread.sleep(10000);
        } catch (InterruptedException e) {
            break;
        }

        if (isDone.get()) {
            break;
        }

        long now = System.nanoTime();
        double elapsed = (now - oldTime) / 1e9;

        double rate = messagesSent.sumThenReset() / elapsed;
        double throughput = bytesSent.sumThenReset() / elapsed / 1024 / 1024 * 8;

        reportHistogram = recorder.getIntervalHistogram(reportHistogram);

        log.info(
                "Throughput produced: {}  msg/s --- {} Mbit/s --- Latency: mean: {} ms - med: {} - 95pct: {} - 99pct: {} - 99.9pct: {} - 99.99pct: {} - Max: {}",
                throughputFormat.format(rate), throughputFormat.format(throughput),
                dec.format(reportHistogram.getMean() / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(50) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(95) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(99) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(99.9) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(99.99) / 1000.0),
                dec.format(reportHistogram.getMaxValue() / 1000.0));

        histogramLogWriter.outputIntervalHistogram(reportHistogram);
        reportHistogram.reset();

        oldTime = now;
    }

    client.close();
}

From source file:com.hiido.hefty.io.Throttle.java

public Throttle(long maxRatePerSecond) {
    this.rateLimiter = RateLimiter.create(maxRatePerSecond);
}

From source file:com.amazonaws.dynamodb.bootstrap.DynamoDBTableScan.java

/**
 * Initializes the RateLimiter and sets the AmazonDynamoDBClient.
 */
public DynamoDBTableScan(double rateLimit, AmazonDynamoDBClient client) {
    rateLimiter = RateLimiter.create(rateLimit);
    this.client = client;
}