Example usage for com.google.common.util.concurrent RateLimiter acquire

Introduction

This page collects example usages of com.google.common.util.concurrent.RateLimiter#acquire(int permits), drawn from open-source projects.

Prototype

public double acquire(int permits) 

Document

Acquires the given number of permits from this RateLimiter, blocking until the request can be granted. The return value is the time spent sleeping to enforce the rate, in seconds (0.0 if the call did not have to wait).
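
Before the project examples below, here is a minimal, self-contained sketch of the call; the rate of 5.0 permits per second, the permit count of 2, and the loop bound are arbitrary values chosen for illustration.

import com.google.common.util.concurrent.RateLimiter;

public class RateLimiterAcquireDemo {
    public static void main(String[] args) {
        // Allow roughly 5 permits per second (illustrative value).
        RateLimiter rateLimiter = RateLimiter.create(5.0);

        for (int i = 0; i < 10; i++) {
            // Block until 2 permits are available; the return value is the
            // time spent sleeping to enforce the rate, in seconds.
            double waitedSeconds = rateLimiter.acquire(2);
            System.out.printf("request %d waited %.3f s%n", i, waitedSeconds);
        }
    }
}

Because acquire blocks the calling thread, it belongs on threads that can tolerate sleeping; Guava also provides non-blocking tryAcquire variants for callers that cannot.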

Usage

From source file:org.apache.pulsar.testclient.ManagedLedgerWriter.java

public static void main(String[] args) throws Exception {

    final Arguments arguments = new Arguments();
    JCommander jc = new JCommander(arguments);
    jc.setProgramName("pulsar-perf-producer");

    try {
        jc.parse(args);
    } catch (ParameterException e) {
        System.out.println(e.getMessage());
        jc.usage();
        System.exit(-1);
    }

    if (arguments.help) {
        jc.usage();
        System.exit(-1);
    }

    arguments.testTime = TimeUnit.SECONDS.toMillis(arguments.testTime);

    // Dump config variables
    ObjectMapper m = new ObjectMapper();
    ObjectWriter w = m.writerWithDefaultPrettyPrinter();
    log.info("Starting Pulsar managed-ledger perf writer with config: {}", w.writeValueAsString(arguments));

    byte[] payloadData = new byte[arguments.msgSize];
    ByteBuf payloadBuffer = PooledByteBufAllocator.DEFAULT.directBuffer(arguments.msgSize);
    payloadBuffer.writerIndex(arguments.msgSize);

    // Now processing command line arguments
    String managedLedgerPrefix = "test-" + DigestUtils.sha1Hex(UUID.randomUUID().toString()).substring(0, 5);

    ClientConfiguration bkConf = new ClientConfiguration();
    bkConf.setUseV2WireProtocol(true);
    bkConf.setAddEntryTimeout(30);
    bkConf.setReadEntryTimeout(30);
    bkConf.setThrottleValue(0);
    bkConf.setNumChannelsPerBookie(arguments.maxConnections);
    bkConf.setZkServers(arguments.zookeeperServers);

    ManagedLedgerFactoryConfig mlFactoryConf = new ManagedLedgerFactoryConfig();
    mlFactoryConf.setMaxCacheSize(0);
    ManagedLedgerFactory factory = new ManagedLedgerFactoryImpl(bkConf, mlFactoryConf);

    ManagedLedgerConfig mlConf = new ManagedLedgerConfig();
    mlConf.setEnsembleSize(arguments.ensembleSize);
    mlConf.setWriteQuorumSize(arguments.writeQuorum);
    mlConf.setAckQuorumSize(arguments.ackQuorum);
    mlConf.setMinimumRolloverTime(10, TimeUnit.MINUTES);
    mlConf.setMetadataEnsembleSize(arguments.ensembleSize);
    mlConf.setMetadataWriteQuorumSize(arguments.writeQuorum);
    mlConf.setMetadataAckQuorumSize(arguments.ackQuorum);
    mlConf.setDigestType(arguments.digestType);
    mlConf.setMaxSizePerLedgerMb(2048);

    List<CompletableFuture<ManagedLedger>> futures = new ArrayList<>();

    for (int i = 0; i < arguments.numManagedLedgers; i++) {
        String name = String.format("%s-%03d", managedLedgerPrefix, i);
        CompletableFuture<ManagedLedger> future = new CompletableFuture<>();
        futures.add(future);
        factory.asyncOpen(name, mlConf, new OpenLedgerCallback() {

            @Override
            public void openLedgerComplete(ManagedLedger ledger, Object ctx) {
                future.complete(ledger);
            }

            @Override
            public void openLedgerFailed(ManagedLedgerException exception, Object ctx) {
                future.completeExceptionally(exception);
            }
        }, null);
    }

    List<ManagedLedger> managedLedgers = futures.stream().map(CompletableFuture::join)
            .collect(Collectors.toList());

    log.info("Created {} managed ledgers", managedLedgers.size());

    Runtime.getRuntime().addShutdownHook(new Thread() {
        public void run() {
            printAggregatedStats();
        }
    });

    Collections.shuffle(managedLedgers);
    AtomicBoolean isDone = new AtomicBoolean();

    List<List<ManagedLedger>> managedLedgersPerThread = Lists.partition(managedLedgers,
            Math.max(1, managedLedgers.size() / arguments.numThreads));

    for (int i = 0; i < arguments.numThreads; i++) {
        List<ManagedLedger> managedLedgersForThisThread = managedLedgersPerThread.get(i);
        int numManagedLedgersForThisThread = managedLedgersForThisThread.size();
        long numMessagesForThisThread = arguments.numMessages / arguments.numThreads;
        int maxOutstandingForThisThread = arguments.maxOutstanding;

        executor.submit(() -> {
            try {
                final double msgRate = arguments.msgRate / (double) arguments.numThreads;
                final RateLimiter rateLimiter = RateLimiter.create(msgRate);

                // Acquire 1 sec worth of messages to have a slower ramp-up
                rateLimiter.acquire((int) msgRate);
                final long startTime = System.currentTimeMillis();

                final Semaphore semaphore = new Semaphore(maxOutstandingForThisThread);

                final AddEntryCallback addEntryCallback = new AddEntryCallback() {
                    @Override
                    public void addComplete(Position position, Object ctx) {
                        long sendTime = (Long) (ctx);
                        messagesSent.increment();
                        bytesSent.add(payloadData.length);

                        long latencyMicros = NANOSECONDS.toMicros(System.nanoTime() - sendTime);
                        recorder.recordValue(latencyMicros);
                        cumulativeRecorder.recordValue(latencyMicros);

                        semaphore.release();
                    }

                    @Override
                    public void addFailed(ManagedLedgerException exception, Object ctx) {
                        log.warn("Write error on message", exception);
                        System.exit(-1);
                    }
                };

                // Send messages on all topics/producers
                long totalSent = 0;
                while (true) {
                    for (int j = 0; j < numManagedLedgersForThisThread; j++) {
                        if (arguments.testTime > 0) {
                            if (System.currentTimeMillis() - startTime > arguments.testTime) {
                                log.info("------------------- DONE -----------------------");
                                printAggregatedStats();
                                isDone.set(true);
                                Thread.sleep(5000);
                                System.exit(0);
                            }
                        }

                        if (numMessagesForThisThread > 0) {
                            if (totalSent++ >= numMessagesForThisThread) {
                                log.info("------------------- DONE -----------------------");
                                printAggregatedStats();
                                isDone.set(true);
                                Thread.sleep(5000);
                                System.exit(0);
                            }
                        }

                        semaphore.acquire();
                        rateLimiter.acquire();

                        final long sendTime = System.nanoTime();
                        managedLedgersForThisThread.get(j).asyncAddEntry(payloadBuffer, addEntryCallback,
                                sendTime);
                    }
                }
            } catch (Throwable t) {
                log.error("Got error", t);
            }
        });
    }

    // Print report stats
    long oldTime = System.nanoTime();

    Histogram reportHistogram = null;

    while (true) {
        try {
            Thread.sleep(10000);
        } catch (InterruptedException e) {
            break;
        }

        if (isDone.get()) {
            break;
        }

        long now = System.nanoTime();
        double elapsed = (now - oldTime) / 1e9;

        double rate = messagesSent.sumThenReset() / elapsed;
        double throughput = bytesSent.sumThenReset() / elapsed / 1024 / 1024 * 8;

        reportHistogram = recorder.getIntervalHistogram(reportHistogram);

        log.info(
                "Throughput produced: {}  msg/s --- {} Mbit/s --- Latency: mean: {} ms - med: {} - 95pct: {} - 99pct: {} - 99.9pct: {} - 99.99pct: {} - Max: {}",
                throughputFormat.format(rate), throughputFormat.format(throughput),
                dec.format(reportHistogram.getMean() / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(50) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(95) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(99) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(99.9) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(99.99) / 1000.0),
                dec.format(reportHistogram.getMaxValue() / 1000.0));

        reportHistogram.reset();

        oldTime = now;
    }

    factory.shutdown();
}
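
The example above pairs the RateLimiter with a Semaphore: the limiter paces the send rate, the semaphore caps the number of outstanding asynchronous writes, and the completion callback releases a slot. Note also the warm-up call rateLimiter.acquire((int) msgRate) before the loop, which drains one second's worth of permits so the run ramps up gradually instead of opening with a burst. A minimal sketch of the pattern follows; AsyncSender and sendAsync are hypothetical stand-ins for the managed-ledger write API, and the rates are illustrative.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Semaphore;

import com.google.common.util.concurrent.RateLimiter;

public class RateLimitedAsyncWriter {

    // Hypothetical asynchronous write API, used only for illustration.
    interface AsyncSender {
        CompletableFuture<Void> sendAsync(byte[] payload);
    }

    private final RateLimiter rateLimiter = RateLimiter.create(1000.0); // target msgs/sec (illustrative)
    private final Semaphore outstanding = new Semaphore(1000);          // max in-flight writes (illustrative)

    void sendAll(AsyncSender sender, byte[] payload, long numMessages) throws InterruptedException {
        for (long i = 0; i < numMessages; i++) {
            outstanding.acquire(); // bound the number of in-flight requests
            rateLimiter.acquire(); // pace the send rate: one permit per message
            sender.sendAsync(payload)
                    .whenComplete((ignored, error) -> outstanding.release()); // free a slot either way
        }
    }
}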

From source file:com.themodernway.server.core.servlet.CoreSpringDispatcherServlet.java

@Override
public void acquire(final int units) {
    if (units > 0) {
        final RateLimiter rate = getRateLimiter();

        if (null != rate) {
            rate.acquire(units);
        }
    }
}

From source file:org.apache.cassandra.stress.operations.userdefined.TokenRangeQuery.java

public boolean ready(WorkManager workManager, RateLimiter rateLimiter) {
    tokenRangeIterator.update();

    if (tokenRangeIterator.exhausted() && currentState.get() == null)
        return false;

    int numLeft = workManager.takePermits(1);
    if (rateLimiter != null && numLeft > 0)
        rateLimiter.acquire(numLeft);

    return numLeft > 0;
}

From source file:org.apache.bookkeeper.tools.perf.dlog.PerfWriter.java

void write(List<DistributedLogManager> logs, double writeRate, int maxOutstandingBytesForThisThread,
        long numRecordsForThisThread, long numBytesForThisThread) throws Exception {
    log.info(
            "Write thread started with : logs = {}, rate = {},"
                    + " num records = {}, num bytes = {}, max outstanding bytes = {}",
            logs.stream().map(l -> l.getStreamName()).collect(Collectors.toList()), writeRate,
            numRecordsForThisThread, numBytesForThisThread, maxOutstandingBytesForThisThread);

    List<CompletableFuture<AsyncLogWriter>> writerFutures = logs.stream()
            .map(manager -> manager.openAsyncLogWriter()).collect(Collectors.toList());
    List<AsyncLogWriter> writers = result(FutureUtils.collect(writerFutures));

    long txid = writers.stream().mapToLong(writer -> writer.getLastTxId()).max().orElse(0L);
    txid = Math.max(0L, txid);

    RateLimiter limiter;
    if (writeRate > 0) {
        limiter = RateLimiter.create(writeRate);
    } else {
        limiter = null;
    }
    final Semaphore semaphore;
    if (maxOutstandingBytesForThisThread > 0) {
        semaphore = new Semaphore(maxOutstandingBytesForThisThread);
    } else {
        semaphore = null;
    }

    // Acquire 1 second worth of records to have a slower ramp-up
    if (limiter != null) {
        limiter.acquire((int) writeRate);
    }

    long totalWritten = 0L;
    long totalBytesWritten = 0L;
    final int numLogs = logs.size();
    while (true) {
        for (int i = 0; i < numLogs; i++) {
            if (numRecordsForThisThread > 0 && totalWritten >= numRecordsForThisThread) {
                markPerfDone();
            }
            if (numBytesForThisThread > 0 && totalBytesWritten >= numBytesForThisThread) {
                markPerfDone();
            }
            if (null != semaphore) {
                semaphore.acquire(payload.length);
            }

            totalWritten++;
            totalBytesWritten += payload.length;
            if (null != limiter) {
                limiter.acquire(payload.length);
            }
            final long sendTime = System.nanoTime();
            writers.get(i).write(new LogRecord(++txid, Unpooled.wrappedBuffer(payload))).thenAccept(dlsn -> {
                if (null != semaphore) {
                    semaphore.release(payload.length);
                }

                recordsWritten.increment();
                bytesWritten.add(payload.length);

                long latencyMicros = TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - sendTime);
                recorder.recordValue(latencyMicros);
                cumulativeRecorder.recordValue(latencyMicros);
            }).exceptionally(cause -> {
                log.warn("Error at writing records", cause);
                System.exit(-1);
                return null;
            });
        }
    }
}
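
Note the unit of the permits in this example: each record acquires payload.length permits from the limiter (and the semaphore is likewise sized and drained in bytes), so both the write rate and the outstanding-work bound are expressed in bytes rather than in records.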

From source file:org.apache.cassandra.stress.operations.PartitionOperation.java

public boolean ready(WorkManager permits, RateLimiter rateLimiter) {
    int partitionCount = (int) spec.partitionCount.next();
    if (partitionCount <= 0)
        return false;
    partitionCount = permits.takePermits(partitionCount);
    if (partitionCount <= 0)
        return false;

    int i = 0;
    boolean success = true;
    for (; i < partitionCount && success; i++) {
        if (i >= partitionCache.size())
            partitionCache.add(PartitionIterator.get(spec.partitionGenerator, spec.seedManager));

        success = false;
        while (!success) {
            Seed seed = spec.seedManager.next(this);
            if (seed == null)
                break;

            success = reset(seed, partitionCache.get(i));
        }
    }
    partitionCount = i;

    if (rateLimiter != null)
        rateLimiter.acquire(partitionCount);

    partitions = partitionCache.subList(0, partitionCount);
    return !partitions.isEmpty();
}

From source file:com.amazonaws.reinvent2015.practicaldynamodb.parallelscan.SegmentScannerFunctionHandler.java

@Override
public Object handleRequest(SegmentScannerInput input, Context context) {
    context.getLogger().log("Input: " + input.toJson() + "\n");
    context.getLogger().log("Start scanning segment " + input.getSegment() + "\n");

    DynamoDB dynamodb = new DynamoDB(Regions.US_WEST_2);

    // update tracking table in DynamoDB stating that we're in progress
    dynamodb.getTable(FUNCTION_TRACKER_TABLE_NAME).putItem(
            new Item().withPrimaryKey(SEGMENT, input.getSegment()).withString(STATUS, STATUS_IN_PROGRESS));

    ScanSpec scanSpec = new ScanSpec().withMaxPageSize(MAX_PAGE_SIZE).withSegment(input.getSegment())
            .withTotalSegments(input.getTotalSegments()).withConsistentRead(true)
            .withMaxResultSize(MAX_RESULT_SIZE).withReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL);

    // if resuming an in-progress segment, specify the start key here
    if (input.getStartScore() != null) {
        scanSpec.withExclusiveStartKey(SCORE_ID, input.getStartScore());
    }

    RateLimiter rateLimiter = RateLimiter.create(input.getMaxConsumedCapacity());

    Map<String, AttributeValue> lastEvaluatedKey = null;
    Table scoresTable = dynamodb.getTable(SCORE_TABLE_NAME);

    for (Page<Item, ScanOutcome> scanResultPage : scoresTable.scan(scanSpec).pages()) {
        // process items
        for (Item item : scanResultPage) {
            DataTransformer.HIGH_SCORES_BY_DATE_TRANSFORMER.transform(item, dynamodb);
        }

        /*
         * After reading each page, we acquire permits from the RateLimiter
         * equal to the capacity the scan reported as consumed.
         *
         * For more information on using RateLimiter with DynamoDB scans,
         * see "Rate Limited Scans in Amazon DynamoDB"
         * on the AWS Java Development Blog:
         * https://java.awsblog.com/post/Tx3VAYQIZ3Q0ZVW
         */
        ScanResult scanResult = scanResultPage.getLowLevelResult().getScanResult();
        lastEvaluatedKey = scanResult.getLastEvaluatedKey();
        double consumedCapacity = scanResult.getConsumedCapacity().getCapacityUnits();
        rateLimiter.acquire((int) Math.round(consumedCapacity));

        // forego processing additional pages if we're running out of time
        if (context.getRemainingTimeInMillis() < REMAINING_TIME_CUTOFF) {
            break;
        }
    }

    if (lastEvaluatedKey != null && !lastEvaluatedKey.isEmpty()) {
        Entry<String, AttributeValue> entry = lastEvaluatedKey.entrySet().iterator().next();
        String lastScoreId = entry.getValue().getS();

        dynamodb.getTable(FUNCTION_TRACKER_TABLE_NAME)
                .putItem(new Item().withPrimaryKey(SEGMENT, input.getSegment())
                        .withString(STATUS, STATUS_INCOMPLETE).withString(LAST_SCORE_ID, lastScoreId));
        return false;
    }

    // update tracking table in DynamoDB stating that we're done
    dynamodb.getTable(FUNCTION_TRACKER_TABLE_NAME)
            .putItem(new Item().withPrimaryKey(SEGMENT, input.getSegment()).withString(STATUS, STATUS_DONE));

    context.getLogger().log("Finish scanning segment " + input.getSegment() + "\n");
    return true;
}
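
The design point worth noting here is that the permit count tracks the actual cost of each page: the scan acquires one permit per consumed read capacity unit reported by DynamoDB, so the limiter's rate maps directly onto the table's provisioned throughput rather than onto a request count.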

From source file:com.rapid7.diskstorage.dynamodb.DynamoDBDelegate.java

private void timedThrottle(String apiName, RateLimiter limiter, String tableName, int permits) {
    if (limiter == null) {
        throw new IllegalArgumentException("limiter for " + apiName + " on table " + tableName + " was null");
    }
    final Timer.Context throttleTimerCtxt = getTimerContext(String.format("%sThrottling", apiName), tableName);
    try {
        limiter.acquire(permits);
    } finally {
        throttleTimerCtxt.stop();
    }
}
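
This last example wraps the blocking acquire call in a metrics timer, stopping it in a finally block, so the time callers spend throttled is recorded as its own measurement keyed by API name and table.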