Example usage for com.google.common.util.concurrent.RateLimiter.create

Introduction

This page lists example usages of com.google.common.util.concurrent.RateLimiter.create.

Prototype



public static RateLimiter create(double permitsPerSecond) 

Document

Creates a RateLimiter with the specified stable throughput, given as "permits per second" (commonly referred to as QPS, queries per second).
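
As a quick illustration, here is a minimal, self-contained sketch (the class name, rate, and loop count are illustrative and not taken from any of the projects below). The limiter hands out permits at the configured steady rate, and acquire() blocks the caller until the next permit is available:

import com.google.common.util.concurrent.RateLimiter;

public class RateLimiterSketch {
    public static void main(String[] args) {
        // Hand out at most 5 permits per second
        RateLimiter limiter = RateLimiter.create(5.0);

        for (int i = 0; i < 10; i++) {
            // acquire() blocks until a permit is available and
            // returns the time (in seconds) spent waiting for it
            double waited = limiter.acquire();
            System.out.println("request " + i + " waited " + waited + "s");
        }
    }
}

The examples below follow the same pattern: create a limiter at the desired rate, then call acquire() (optionally with a permit count) before each rate-limited operation.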

Usage

From source file:org.opennms.netmgt.discovery.actors.Discoverer.java

public DiscoveryResults discover(DiscoveryJob job) {
    // Track the results of this particular job
    final PingResponseTracker tracker = new PingResponseTracker();

    // Filter out any entries where getAddress() == null
    List<IPPollAddress> addresses = StreamSupport.stream(job.getAddresses().spliterator(), false)
            .filter(j -> j.getAddress() != null).collect(Collectors.toList());

    // Expect callbacks for all of the addresses before issuing any pings
    addresses.stream().map(a -> a.getAddress()).forEach(a -> tracker.expectCallbackFor(a));

    // Use a RateLimiter to limit the ping packets per second that we send
    RateLimiter limiter = RateLimiter.create(job.getPacketsPerSecond());

    // Issue all of the pings
    addresses.stream().forEach(a -> {
        limiter.acquire();
        ping(a, tracker);
    });

    // Don't bother waiting if there aren't any addresses
    if (!addresses.isEmpty()) {
        // Wait for the pings to complete
        try {
            tracker.getLatch().await();
        } catch (InterruptedException e) {
            throw Throwables.propagate(e);
        }
    }

    // We're done
    return new DiscoveryResults(tracker.getResponses(), job.getForeignSource(), job.getLocation());
}

From source file:org.apache.bookkeeper.server.service.ScrubberService.java

public ScrubberService(StatsLogger logger, BookieConfiguration conf, LedgerStorage ledgerStorage) {
    super(NAME, conf, logger);
    this.executor = Executors.newSingleThreadScheduledExecutor(new DefaultThreadFactory("ScrubThread"));

    this.scrubPeriod = conf.getServerConf().getLocalScrubPeriod();
    checkArgument(scrubPeriod > 0, "localScrubInterval must be > 0 for ScrubberService to be used");

    double rateLimit = conf.getServerConf().getLocalScrubRateLimit();
    this.scrubRateLimiter = rateLimit == 0 ? Optional.empty() : Optional.of(RateLimiter.create(rateLimit));

    this.ledgerStorage = ledgerStorage;

    this.scrubCounter = logger.getOpStatsLogger(RUN_DURATION);
    this.errorCounter = logger.getCounter(DETECTED_SCRUB_ERRORS);
    this.fatalErrorCounter = logger.getCounter(DETECTED_FATAL_SCRUB_ERRORS);
}

From source file:com.streamsets.pipeline.stage.destination.http.HttpClientTarget.java

@Override
protected List<ConfigIssue> init() {
    List<ConfigIssue> issues = super.init();
    int rateLimit = conf.rateLimit > 0 ? conf.rateLimit : Integer.MAX_VALUE;
    rateLimiter = RateLimiter.create(rateLimit);
    errorRecordHandler = new DefaultErrorRecordHandler(getContext());
    this.httpClientCommon.init(issues, getContext());
    if (issues.size() == 0) {
        conf.dataGeneratorFormatConfig.init(getContext(), conf.dataFormat, Groups.HTTP.name(),
                HttpClientCommon.DATA_FORMAT_CONFIG_PREFIX, issues);
        generatorFactory = conf.dataGeneratorFormatConfig.getDataGeneratorFactory();
    }
    return issues;
}

From source file:com.amazonaws.reinvent2015.practicaldynamodb.parallelscan.SegmentScannerFunctionHandler.java

@Override
public Object handleRequest(SegmentScannerInput input, Context context) {
    context.getLogger().log("Input: " + input.toJson() + "\n");
    context.getLogger().log("Start scanning segment " + input.getSegment() + "\n");

    DynamoDB dynamodb = new DynamoDB(Regions.US_WEST_2);

    // update tracking table in DynamoDB stating that we're in progress
    dynamodb.getTable(FUNCTION_TRACKER_TABLE_NAME).putItem(
            new Item().withPrimaryKey(SEGMENT, input.getSegment()).withString(STATUS, STATUS_IN_PROGRESS));

    ScanSpec scanSpec = new ScanSpec().withMaxPageSize(MAX_PAGE_SIZE).withSegment(input.getSegment())
            .withTotalSegments(input.getTotalSegments()).withConsistentRead(true)
            .withMaxResultSize(MAX_RESULT_SIZE).withReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL);

    // if resuming an in-progress segment, specify the start key here
    if (input.getStartScore() != null) {
        scanSpec.withExclusiveStartKey(SCORE_ID, input.getStartScore());
    }

    RateLimiter rateLimiter = RateLimiter.create(input.getMaxConsumedCapacity());

    Map<String, AttributeValue> lastEvaluatedKey = null;
    Table scoresTable = dynamodb.getTable(SCORE_TABLE_NAME);

    for (Page<Item, ScanOutcome> scanResultPage : scoresTable.scan(scanSpec).pages()) {
        // process items
        for (Item item : scanResultPage) {
            DataTransformer.HIGH_SCORES_BY_DATE_TRANSFORMER.transform(item, dynamodb);
        }

        /*
         * After reading each page, we acquire permits from the RateLimiter
         * equal to the capacity consumed by that page of the scan.
         *
         * For more information on using RateLimiter with DynamoDB scans,
         * see "Rate Limited Scans in Amazon DynamoDB"
         * on the AWS Java Development Blog:
         * https://java.awsblog.com/post/Tx3VAYQIZ3Q0ZVW
         */
        ScanResult scanResult = scanResultPage.getLowLevelResult().getScanResult();
        lastEvaluatedKey = scanResult.getLastEvaluatedKey();
        double consumedCapacity = scanResult.getConsumedCapacity().getCapacityUnits();
        rateLimiter.acquire((int) Math.round(consumedCapacity));

        // forego processing additional pages if we're running out of time
        if (context.getRemainingTimeInMillis() < REMAINING_TIME_CUTOFF) {
            break;
        }
    }

    if (lastEvaluatedKey != null && !lastEvaluatedKey.isEmpty()) {
        Entry<String, AttributeValue> entry = lastEvaluatedKey.entrySet().iterator().next();
        String lastScoreId = entry.getValue().getS();

        dynamodb.getTable(FUNCTION_TRACKER_TABLE_NAME)
                .putItem(new Item().withPrimaryKey(SEGMENT, input.getSegment())
                        .withString(STATUS, STATUS_INCOMPLETE).withString(LAST_SCORE_ID, lastScoreId));
        return false;
    }

    // update tracking table in DynamoDB stating that we're done
    dynamodb.getTable(FUNCTION_TRACKER_TABLE_NAME)
            .putItem(new Item().withPrimaryKey(SEGMENT, input.getSegment()).withString(STATUS, STATUS_DONE));

    context.getLogger().log("Finish scanning segment " + input.getSegment() + "\n");
    return true;
}

From source file:org.apache.s4.comm.staging.ThrottlingThreadPoolExecutorService.java

/**
 * @param parallelism
 *            Maximum number of threads in the pool
 * @param rate
 *            Rate limit, in permits per second
 * @param threadName
 *            Naming scheme
 * @param workQueueSize
 *            Queue capacity
 * @param classLoader
 *            ClassLoader used as contextClassLoader for spawned threads
 */
public ThrottlingThreadPoolExecutorService(int parallelism, int rate, String threadName, int workQueueSize,
        final ClassLoader classLoader) {
    super();
    this.parallelism = parallelism;
    this.streamName = threadName;
    this.classLoader = classLoader;
    this.workQueueSize = workQueueSize;
    this.droppingMeter = Metrics.newMeter(getClass(), "throttling-dropping", "throttling-dropping",
            TimeUnit.SECONDS);
    ThreadFactory threadFactory = new ThreadFactoryBuilder().setDaemon(true).setNameFormat(threadName)
            .setThreadFactory(new ThreadFactory() {

                @Override
                public Thread newThread(Runnable r) {
                    Thread t = new Thread(r);
                    t.setContextClassLoader(classLoader);
                    return t;
                }
            }).build();
    rateLimitedPermits = RateLimiter.create(rate);
    workQueue = new ArrayBlockingQueue<Runnable>(workQueueSize + parallelism);
    ThreadPoolExecutor eventProcessingExecutor = new ThreadPoolExecutor(parallelism, parallelism, 60,
            TimeUnit.SECONDS, workQueue, threadFactory, new RejectedExecutionHandler() {

                @Override
                public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
                    droppingMeter.mark();
                }
            });
    eventProcessingExecutor.allowCoreThreadTimeOut(true);
    executorDelegatee = MoreExecutors.listeningDecorator(eventProcessingExecutor);

}

From source file:alluxio.worker.file.DefaultFileSystemWorker.java

/**
 * Creates a new instance of {@link FileSystemWorker}.
 *
 * @param blockWorker the block worker handle
 * @param workerId a reference to the id of this worker
 * @throws IOException if an I/O error occurs
 */
public DefaultFileSystemWorker(BlockWorker blockWorker, AtomicReference<Long> workerId) throws IOException {
    super(Executors.newFixedThreadPool(3, ThreadFactoryUtils.build("file-system-worker-heartbeat-%d", true)));
    mWorkerId = workerId;
    mSessions = new Sessions();
    mUnderFileSystemManager = new UnderFileSystemManager();
    mFileDataManager = new FileDataManager(Preconditions.checkNotNull(blockWorker),
            RateLimiter.create(Configuration.getBytes(PropertyKey.WORKER_FILE_PERSIST_RATE_LIMIT)));

    // Setup AbstractMasterClient
    mFileSystemMasterWorkerClient = new FileSystemMasterClient(
            NetworkAddressUtils.getConnectAddress(ServiceType.MASTER_RPC));

    // Setup session cleaner
    mSessionCleaner = new SessionCleaner(mSessions, mUnderFileSystemManager);

    mServiceHandler = new FileSystemWorkerClientServiceHandler(this);
}

From source file:com.hubrick.vertx.kafka.consumer.KafkaConsumerManager.java

public KafkaConsumerManager(Vertx vertx, KafkaConsumer<String, String> consumer,
        KafkaConsumerConfiguration configuration, KafkaConsumerHandler handler) {
    this.vertx = vertx;
    this.consumer = consumer;
    this.configuration = configuration;
    this.handler = handler;
    this.rateLimiter = configuration.getMessagesPerSecond() > 0.0D
            ? Optional.of(RateLimiter.create(configuration.getMessagesPerSecond()))
            : Optional.empty();
}

From source file:org.apache.bookkeeper.tools.cli.commands.bookieid.SearchReplaceBookieIdCommand.java

@Override
protected void run(BookKeeper bk, Flags flags) throws Exception {
    try (BookKeeperAdmin admin = new BookKeeperAdmin((org.apache.bookkeeper.client.BookKeeper) bk)) {
        LedgerManager ledgerManager = ((org.apache.bookkeeper.client.BookKeeper) bk).getLedgerManager();
        long i = 0;

        BookieSocketAddress fromAddr = new BookieSocketAddress(flags.from);
        BookieSocketAddress toAddr = new BookieSocketAddress(flags.to);
        System.out.println(String.format("Replacing bookie id %s with %s in metadata", fromAddr, toAddr));
        RateLimiter limiter = RateLimiter.create(flags.rate);
        for (Long lid : admin.listLedgers()) {
            Versioned<LedgerMetadata> md = ledgerManager.readLedgerMetadata(lid).get();
            if (md.getValue().getAllEnsembles().entrySet().stream()
                    .anyMatch(e -> e.getValue().contains(fromAddr))) {
                limiter.acquire();

                LedgerMetadataBuilder builder = LedgerMetadataBuilder.from(md.getValue());
                md.getValue().getAllEnsembles().entrySet().stream().filter(e -> e.getValue().contains(fromAddr))
                        .forEach(e -> {
                            List<BookieSocketAddress> ensemble = new ArrayList<>(e.getValue());
                            ensemble.replaceAll((a) -> {
                                if (a.equals(fromAddr)) {
                                    return toAddr;
                                } else {
                                    return a;
                                }
                            });
                            builder.replaceEnsembleEntry(e.getKey(), ensemble);
                        });
                LedgerMetadata newMeta = builder.build();
                if (flags.verbose) {
                    System.out.println("Replacing ledger " + lid + " metadata ...");
                    System.out.println(md.getValue().toSafeString());
                    System.out.println("with ...");
                    System.out.println(newMeta.toSafeString());
                }
                i++;
                if (!flags.dryRun) {
                    ledgerManager.writeLedgerMetadata(lid, newMeta, md.getVersion()).get();
                }
            }
            if (i >= flags.max) {
                System.out.println("Max number of ledgers processed, exiting");
                break;
            }
        }
        System.out.println("Replaced bookie ID in " + i + " ledgers");
    }
}

From source file:org.opennms.netmgt.icmp.proxy.PingSweepRpcModule.java

@Override
public CompletableFuture<PingSweepResponseDTO> execute(PingSweepRequestDTO request) {
    final Pinger pinger = pingerFactory.getInstance();
    final PingSweepResultTracker tracker = new PingSweepResultTracker();

    String location = request.getLocation();
    int packetSize = request.getPacketSize();
    List<IPPollRange> ranges = new ArrayList<>();
    for (IPRangeDTO dto : request.getIpRanges()) {
        IPPollRange pollRange = new IPPollRange(null, location, dto.getBegin(), dto.getEnd(), dto.getTimeout(),
                dto.getRetries());
        ranges.add(pollRange);
    }

    // Use a RateLimiter to limit the ping packets per second that we send
    RateLimiter limiter = RateLimiter.create(request.getPacketsPerSecond());

    List<IPPollAddress> addresses = StreamSupport.stream(getAddresses(ranges).spliterator(), false)
            .filter(j -> j.getAddress() != null).collect(Collectors.toList());

    return CompletableFuture.supplyAsync(() -> {
        addresses.stream().forEach(pollAddress -> {
            try {
                tracker.expectCallbackFor(pollAddress.getAddress());
                limiter.acquire();
                pinger.ping(pollAddress.getAddress(), pollAddress.getTimeout(), pollAddress.getRetries(),
                        packetSize, 1, tracker);
            } catch (Exception e) {
                tracker.handleError(pollAddress.getAddress(), null, e);
                tracker.completeExceptionally(e);
            }
        });

        try {
            tracker.getLatch().await();
        } catch (InterruptedException e) {
            throw Throwables.propagate(e);
        }
        tracker.complete();
        return tracker.getResponse();
    }, executor);

}

From source file:c3.ops.priam.aws.S3FileSystem.java

@Inject
public S3FileSystem(Provider<AbstractBackupPath> pathProvider, ICompression compress,
        final IConfiguration config, ICredential cred) {
    this.pathProvider = pathProvider;
    this.compress = compress;
    this.config = config;
    int threads = config.getMaxBackupUploadThreads();
    LinkedBlockingQueue<Runnable> queue = new LinkedBlockingQueue<Runnable>(threads);
    this.executor = new BlockingSubmitThreadPoolExecutor(threads, queue, UPLOAD_TIMEOUT);
    double throttleLimit = config.getUploadThrottle();
    rateLimiter = RateLimiter.create(throttleLimit < 1 ? Double.MAX_VALUE : throttleLimit);

    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    String mbeanName = MBEAN_NAME;
    try {
        mbs.registerMBean(this, new ObjectName(mbeanName));
    } catch (Exception e) {
        logger.warn("Fail to register " + mbeanName);
        //throw new RuntimeException(e);
    }

    s3Client = new AmazonS3Client(cred.getAwsCredentialProvider());
    s3Client.setEndpoint(getS3Endpoint());
}