Example usage for com.google.common.util.concurrent RateLimiter create


Introduction

This page collects usage examples of com.google.common.util.concurrent.RateLimiter.create drawn from real-world open-source projects.

Prototype



public static RateLimiter create(double permitsPerSecond) 


Document

Creates a RateLimiter with the specified stable throughput, given as "permits per second" (commonly referred to as QPS, queries per second).
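
As a quick orientation before the project examples below, here is a minimal, self-contained sketch (not drawn from any of the listed projects) of the create/acquire pattern: acquire() blocks just long enough to keep callers at or below the configured rate.

import com.google.common.util.concurrent.RateLimiter;

public class RateLimiterSketch {
    public static void main(String[] args) {
        // Allow an average of 5 permits per second.
        RateLimiter limiter = RateLimiter.create(5.0);

        for (int i = 0; i < 10; i++) {
            // Blocks until a permit is available; returns the seconds spent waiting.
            double waited = limiter.acquire();
            System.out.printf("request %d granted after %.3f s%n", i, waited);
        }
    }
}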

Usage

From source file:org.apache.cassandra.db.BatchlogManager.java

private void replayAllFailedBatches() throws ExecutionException, InterruptedException {
    logger.trace("Started replayAllFailedBatches");

    // rate limit is in bytes per second. Uses Double.MAX_VALUE if disabled (set to 0 in cassandra.yaml).
    // max rate is scaled by the number of nodes in the cluster (same as for HHOM - see CASSANDRA-5272).
    int throttleInKB = DatabaseDescriptor.getBatchlogReplayThrottleInKB()
            / StorageService.instance.getTokenMetadata().getAllEndpoints().size();
    RateLimiter rateLimiter = RateLimiter.create(throttleInKB == 0 ? Double.MAX_VALUE : throttleInKB * 1024);

    UntypedResultSet page = executeInternal(
            String.format("SELECT id, data, written_at, version FROM %s.%s LIMIT %d", SystemKeyspace.NAME,
                    SystemKeyspace.BATCHLOG, PAGE_SIZE));

    while (!page.isEmpty()) {
        UUID id = processBatchlogPage(page, rateLimiter);

        if (page.size() < PAGE_SIZE)
            break; // we've exhausted the batchlog, next query would be empty.

        page = executeInternal(String.format(
                "SELECT id, data, written_at, version FROM %s.%s WHERE token(id) > token(?) LIMIT %d",
                SystemKeyspace.NAME, SystemKeyspace.BATCHLOG, PAGE_SIZE), id);
    }

    cleanup();

    logger.trace("Finished replayAllFailedBatches");
}
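
processBatchlogPage is not reproduced on this page. As a hedged sketch only (not the actual Cassandra implementation; sendMutation is an assumed helper), a consumer of the bytes-per-second limiter created above would acquire one permit per byte before sending:

// Hypothetical consumer of the bytes-per-second limiter; not the real processBatchlogPage.
private void replayThrottled(byte[] serializedMutation, RateLimiter rateLimiter) {
    // One permit corresponds to one byte, matching RateLimiter.create(throttleInKB * 1024) above.
    rateLimiter.acquire(serializedMutation.length);
    sendMutation(serializedMutation); // assumed helper that ships the batch to its replicas
}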

From source file:org.restcomm.protocols.ss7.map.load.Client.java

protected void initializeStack(IpChannelType ipChannelType) throws Exception {

    this.rateLimiterObj = RateLimiter.create(MAXCONCURRENTDIALOGS); // rate limiter: at most MAXCONCURRENTDIALOGS new dialogs per second

    this.initSCTP(ipChannelType);

    // Initialize M3UA first
    this.initM3UA();

    // Initialize SCCP
    this.initSCCP();

    // Initialize TCAP
    this.initTCAP();

    // Initialize MAP
    this.initMAP();

    // Set 5: Finally, start the ASP
    this.clientM3UAMgmt.startAsp("ASP1");

    this.csvWriter = new CsvWriter("map");
    this.csvWriter.addCounter(CREATED_DIALOGS);
    this.csvWriter.addCounter(SUCCESSFUL_DIALOGS);
    this.csvWriter.addCounter(ERROR_DIALOGS);
    this.csvWriter.start(TEST_START_DELAY, PRINT_WRITER_PERIOD);
}
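
The load-test send loop that consumes rateLimiterObj is not shown above. The following is an assumed sketch of how such a loop typically throttles dialog creation to MAXCONCURRENTDIALOGS per second (running and createAndSendDialog are hypothetical names):

// Assumed client loop; not part of the restcomm source shown above.
while (running) {
    rateLimiterObj.acquire();  // blocks so new dialogs are started at the configured rate
    createAndSendDialog();     // hypothetical helper that opens and sends one MAP dialog
}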

From source file:com.rapid7.diskstorage.dynamodb.Client.java

public Client(com.thinkaurelius.titan.diskstorage.configuration.Configuration config) {
    String credentialsClassName = config.get(Constants.DYNAMODB_CREDENTIALS_CLASS_NAME);
    Class<?> clazz;
    try {
        clazz = Class.forName(credentialsClassName);
    } catch (ClassNotFoundException e) {
        throw new IllegalArgumentException(VALIDATE_CREDENTIALS_CLASS_NAME, e);
    }

    String[] credentialsConstructorArgsValues = config.get(Constants.DYNAMODB_CREDENTIALS_CONSTRUCTOR_ARGS);
    final List<String> filteredArgList = new ArrayList<String>();
    for (Object obj : credentialsConstructorArgsValues) {
        final String str = obj.toString();
        if (!str.isEmpty()) {
            filteredArgList.add(str);
        }
    }

    AWSCredentialsProvider credentialsProvider;
    if (AWSCredentials.class.isAssignableFrom(clazz)) {
        AWSCredentials credentials = createCredentials(clazz,
                filteredArgList.toArray(new String[filteredArgList.size()]));
        credentialsProvider = new StaticCredentialsProvider(credentials);
    } else if (AWSCredentialsProvider.class.isAssignableFrom(clazz)) {
        credentialsProvider = createCredentialsProvider(clazz, credentialsConstructorArgsValues);
    } else {
        throw new IllegalArgumentException(VALIDATE_CREDENTIALS_CLASS_NAME);
    }
    //begin adaptation of constructor at
    //https://github.com/buka/titan/blob/master/src/main/java/com/thinkaurelius/titan/diskstorage/dynamodb/DynamoDBClient.java#L77
    ClientConfiguration clientConfig = new ClientConfiguration();
    clientConfig.withConnectionTimeout(config.get(Constants.DYNAMODB_CLIENT_CONN_TIMEOUT)) //
            .withConnectionTTL(config.get(Constants.DYNAMODB_CLIENT_CONN_TTL)) //
            .withMaxConnections(config.get(Constants.DYNAMODB_CLIENT_MAX_CONN)) //
            .withMaxErrorRetry(config.get(Constants.DYNAMODB_CLIENT_MAX_ERROR_RETRY)) //
            .withGzip(config.get(Constants.DYNAMODB_CLIENT_USE_GZIP)) //
            .withReaper(config.get(Constants.DYNAMODB_CLIENT_USE_REAPER)) //
            .withUserAgent(config.get(Constants.DYNAMODB_CLIENT_USER_AGENT)) //
            .withSocketTimeout(config.get(Constants.DYNAMODB_CLIENT_SOCKET_TIMEOUT)) //
            .withSocketBufferSizeHints( //
                    config.get(Constants.DYNAMODB_CLIENT_SOCKET_BUFFER_SEND_HINT), //
                    config.get(Constants.DYNAMODB_CLIENT_SOCKET_BUFFER_RECV_HINT)) //
            .withProxyDomain(config.get(Constants.DYNAMODB_CLIENT_PROXY_DOMAIN)) //
            .withProxyWorkstation(config.get(Constants.DYNAMODB_CLIENT_PROXY_WORKSTATION)) //
            .withProxyHost(config.get(Constants.DYNAMODB_CLIENT_PROXY_HOST)) //
            .withProxyPort(config.get(Constants.DYNAMODB_CLIENT_PROXY_PORT)) //
            .withProxyUsername(config.get(Constants.DYNAMODB_CLIENT_PROXY_USERNAME)) //
            .withProxyPassword(config.get(Constants.DYNAMODB_CLIENT_PROXY_PASSWORD)); //

    forceConsistentRead = config.get(Constants.DYNAMODB_FORCE_CONSISTENT_READ);
    //end adaptation of constructor at
    //https://github.com/buka/titan/blob/master/src/main/java/com/thinkaurelius/titan/diskstorage/dynamodb/DynamoDBClient.java#L77
    enableParallelScan = config.get(Constants.DYNAMODB_ENABLE_PARALLEL_SCAN);
    prefix = config.get(Constants.DYNAMODB_TABLE_PREFIX);
    final String metricsPrefix = config.get(Constants.DYNAMODB_METRICS_PREFIX);

    final long maxRetries = config.get(Constants.DYNAMODB_MAX_SELF_THROTTLED_RETRIES);
    if (maxRetries < 0) {
        throw new IllegalArgumentException(
                Constants.DYNAMODB_MAX_SELF_THROTTLED_RETRIES.getName() + " must be at least 0");
    }
    final long retryMillis = config.get(Constants.DYNAMODB_INITIAL_RETRY_MILLIS);
    if (retryMillis <= 0) {
        throw new IllegalArgumentException(
                Constants.DYNAMODB_INITIAL_RETRY_MILLIS.getName() + " must be at least 1");
    }
    final double controlPlaneRate = config.get(Constants.DYNAMODB_CONTROL_PLANE_RATE);
    if (controlPlaneRate < 0) {
        throw new IllegalArgumentException("must have a positive control plane rate");
    }
    final RateLimiter controlPlaneRateLimiter = RateLimiter.create(controlPlaneRate);

    final Map<String, RateLimiter> readRateLimit = new HashMap<>();
    final Map<String, RateLimiter> writeRateLimit = new HashMap<>();

    Set<String> storeNames = new HashSet<String>(Constants.REQUIRED_BACKEND_STORES);
    storeNames.addAll(config.getContainedNamespaces(Constants.DYNAMODB_STORES_NAMESPACE));
    for (String storeName : storeNames) {
        setupStore(config, prefix, readRateLimit, writeRateLimit, storeName);
    }

    endpoint = TitanConfigUtil.getNullableConfigValue(config, Constants.DYNAMODB_CLIENT_ENDPOINT);
    delegate = new DynamoDBDelegate(endpoint, credentialsProvider, clientConfig, config, readRateLimit,
            writeRateLimit, maxRetries, retryMillis, prefix, metricsPrefix, controlPlaneRateLimiter);
}
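
setupStore is not included on this page. As an illustrative sketch only (the signature and configuration keys are assumptions, not the plugin's actual code), a per-store setup typically resolves a configured read and write capacity and registers one limiter per table in each map:

// Hypothetical per-store limiter registration; real configuration handling may differ.
private static void registerStoreLimiters(String prefix, String storeName,
        double readCapacity, double writeCapacity,
        Map<String, RateLimiter> readRateLimit, Map<String, RateLimiter> writeRateLimit) {
    final String tableName = prefix + "_" + storeName;
    readRateLimit.put(tableName, RateLimiter.create(readCapacity));
    writeRateLimit.put(tableName, RateLimiter.create(writeCapacity));
}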

From source file:org.jmxtrans.embedded.samples.graphite.GraphiteDataInjector.java

public void setMaxGraphiteDataPointsPerSecond(int maxGraphiteDataPointsPerSecond) {
    this.rateLimiter = RateLimiter.create(maxGraphiteDataPointsPerSecond);
}
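
Recreating the limiter in the setter, as above, is the simplest approach; Guava's RateLimiter also exposes setRate(double) for adjusting an existing instance in place. A sketch of that alternative, assuming the same rateLimiter field:

// Alternative sketch: reuse the existing limiter instead of replacing it.
public void setMaxGraphiteDataPointsPerSecond(int maxGraphiteDataPointsPerSecond) {
    if (this.rateLimiter == null) {
        this.rateLimiter = RateLimiter.create(maxGraphiteDataPointsPerSecond);
    } else {
        this.rateLimiter.setRate(maxGraphiteDataPointsPerSecond);
    }
}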

From source file:com.streamsets.pipeline.stage.origin.jdbc.CommonSourceConfigBean.java

public RateLimiter creatQueryRateLimiter() {
    final BigDecimal rateLimit = new BigDecimal(queriesPerSecond);
    if (rateLimit.signum() < 1) {
        // negative or zero value; no rate limit
        return null;
    } else {
        return RateLimiter.create(rateLimit.doubleValue());
    }
}
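
Because the method returns null when no limit is configured, callers have to guard the acquire call. A minimal caller-side sketch (hypothetical; configBean and runQuery are assumed names, not StreamSets code):

// Hypothetical caller: throttle only when a limiter was actually created.
RateLimiter queryRateLimiter = configBean.creatQueryRateLimiter();
if (queryRateLimiter != null) {
    queryRateLimiter.acquire(); // block until the next query is allowed to run
}
runQuery();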

From source file:com.twitter.aurora.scheduler.async.AsyncModule.java

@Override
protected void configure() {
    // Don't worry about clean shutdown, these can be daemon and cleanup-free.
    final ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(ASYNC_WORKER_THREADS.get(),
            new ThreadFactoryBuilder().setNameFormat("AsyncProcessor-%d").setDaemon(true).build());
    Stats.exportSize("timeout_queue_size", executor.getQueue());
    Stats.export(new StatImpl<Long>("async_tasks_completed") {
        @Override
        public Long read() {
            return executor.getCompletedTaskCount();
        }
    });

    // AsyncModule itself is not a subclass of PrivateModule because TaskEventModule internally uses
    // a MultiBinder, which cannot span multiple injectors.
    binder().install(new PrivateModule() {
        @Override
        protected void configure() {
            bind(new TypeLiteral<Amount<Long, Time>>() {
            }).toInstance(TRANSIENT_TASK_STATE_TIMEOUT.get());
            bind(ScheduledExecutorService.class).toInstance(executor);

            bind(TaskTimeout.class).in(Singleton.class);
            requireBinding(StatsProvider.class);
            expose(TaskTimeout.class);
        }
    });
    PubsubEventModule.bindSubscriber(binder(), TaskTimeout.class);

    binder().install(new PrivateModule() {
        @Override
        protected void configure() {
            bind(TaskGroupsSettings.class).toInstance(new TaskGroupsSettings(
                    new TruncatedBinaryBackoff(INITIAL_SCHEDULE_DELAY.get(), MAX_SCHEDULE_DELAY.get()),
                    RateLimiter.create(MAX_SCHEDULE_ATTEMPTS_PER_SEC.get())));

            bind(RescheduleCalculatorImpl.RescheduleCalculatorSettings.class)
                    .toInstance(new RescheduleCalculatorImpl.RescheduleCalculatorSettings(
                            new TruncatedBinaryBackoff(INITIAL_FLAPPING_DELAY.get(), MAX_FLAPPING_DELAY.get()),
                            FLAPPING_THRESHOLD.get(), MAX_RESCHEDULING_DELAY.get()));

            bind(RescheduleCalculator.class).to(RescheduleCalculatorImpl.class).in(Singleton.class);
            bind(SchedulingAction.class).to(TaskScheduler.class);
            bind(TaskScheduler.class).in(Singleton.class);
            if (ENABLE_PREEMPTOR.get()) {
                bind(Preemptor.class).to(PreemptorImpl.class);
                bind(PreemptorImpl.class).in(Singleton.class);
                LOG.info("Preemptor Enabled.");
            } else {
                bind(Preemptor.class).toInstance(NULL_PREEMPTOR);
                LOG.warning("Preemptor Disabled.");
            }
            bind(new TypeLiteral<Amount<Long, Time>>() {
            }).annotatedWith(PreemptionDelay.class).toInstance(PREEMPTION_DELAY.get());
            bind(TaskGroups.class).in(Singleton.class);
            expose(TaskGroups.class);
        }
    });
    PubsubEventModule.bindSubscriber(binder(), TaskGroups.class);

    binder().install(new PrivateModule() {
        @Override
        protected void configure() {
            bind(OfferReturnDelay.class).to(RandomJitterReturnDelay.class);
            bind(ScheduledExecutorService.class).toInstance(executor);
            bind(OfferQueue.class).to(OfferQueueImpl.class);
            bind(OfferQueueImpl.class).in(Singleton.class);
            expose(OfferQueue.class);
        }
    });
    PubsubEventModule.bindSubscriber(binder(), OfferQueue.class);

    binder().install(new PrivateModule() {
        @Override
        protected void configure() {
            // TODO(ksweeney): Create a configuration validator module so this can be injected.
            // TODO(William Farner): Revert this once large task counts are cheap a la hierarchical store
            bind(Integer.class).annotatedWith(PruneThreshold.class).toInstance(100);
            bind(new TypeLiteral<Amount<Long, Time>>() {
            }).annotatedWith(PruneThreshold.class).toInstance(HISTORY_PRUNE_THRESHOLD.get());
            bind(ScheduledExecutorService.class).toInstance(executor);

            bind(HistoryPruner.class).in(Singleton.class);
            expose(HistoryPruner.class);
        }
    });
    PubsubEventModule.bindSubscriber(binder(), HistoryPruner.class);
}
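
The limiter built from MAX_SCHEDULE_ATTEMPTS_PER_SEC is handed to TaskGroupsSettings and consumed inside TaskGroups, which is not shown here. As a hedged sketch of the pattern only (not the Aurora implementation; hasPendingTasks and attemptSchedulingRound are hypothetical), a scheduling loop would take a permit before each attempt:

// Assumed scheduling loop; illustrates how the schedule-attempt limiter is consumed.
void scheduleLoop(RateLimiter scheduleAttemptLimiter) {
    while (hasPendingTasks()) {               // hypothetical predicate
        scheduleAttemptLimiter.acquire();     // cap the global scheduling attempt rate
        attemptSchedulingRound();             // hypothetical helper for one attempt
    }
}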

From source file:com.amazon.janusgraph.diskstorage.dynamodb.Client.java

public Client(org.janusgraph.diskstorage.configuration.Configuration config) {
    final String credentialsClassName = config.get(Constants.DYNAMODB_CREDENTIALS_CLASS_NAME);
    final Class<?> clazz;
    try {
        clazz = Class.forName(credentialsClassName);
    } catch (ClassNotFoundException e) {
        throw new IllegalArgumentException(VALIDATE_CREDENTIALS_CLASS_NAME, e);
    }

    final String[] credentialsConstructorArgsValues = config
            .get(Constants.DYNAMODB_CREDENTIALS_CONSTRUCTOR_ARGS);
    final List<String> filteredArgList = new ArrayList<>();
    for (Object obj : credentialsConstructorArgsValues) {
        final String str = obj.toString();
        if (!str.isEmpty()) {
            filteredArgList.add(str);
        }
    }

    final AWSCredentialsProvider credentialsProvider;
    if (AWSCredentials.class.isAssignableFrom(clazz)) {
        AWSCredentials credentials = createCredentials(clazz,
                filteredArgList.toArray(new String[filteredArgList.size()]));
        credentialsProvider = new AWSStaticCredentialsProvider(credentials);
    } else if (AWSCredentialsProvider.class.isAssignableFrom(clazz)) {
        credentialsProvider = createCredentialsProvider(clazz, credentialsConstructorArgsValues);
    } else {
        throw new IllegalArgumentException(VALIDATE_CREDENTIALS_CLASS_NAME);
    }
    //begin adaptation of constructor at
    //https://github.com/buka/titan/blob/master/src/main/java/com/thinkaurelius/titan/diskstorage/dynamodb/DynamoDBClient.java#L77
    ClientConfiguration clientConfig = new ClientConfiguration();
    clientConfig.withConnectionTimeout(config.get(Constants.DYNAMODB_CLIENT_CONN_TIMEOUT))
            .withConnectionTTL(config.get(Constants.DYNAMODB_CLIENT_CONN_TTL))
            .withMaxConnections(config.get(Constants.DYNAMODB_CLIENT_MAX_CONN))
            .withMaxErrorRetry(config.get(Constants.DYNAMODB_CLIENT_MAX_ERROR_RETRY))
            .withGzip(config.get(Constants.DYNAMODB_CLIENT_USE_GZIP))
            .withReaper(config.get(Constants.DYNAMODB_CLIENT_USE_REAPER))
            .withUserAgentSuffix(config.get(Constants.DYNAMODB_CLIENT_USER_AGENT))
            .withSocketTimeout(config.get(Constants.DYNAMODB_CLIENT_SOCKET_TIMEOUT))
            .withSocketBufferSizeHints(config.get(Constants.DYNAMODB_CLIENT_SOCKET_BUFFER_SEND_HINT),
                    config.get(Constants.DYNAMODB_CLIENT_SOCKET_BUFFER_RECV_HINT))
            .withProxyDomain(config.get(Constants.DYNAMODB_CLIENT_PROXY_DOMAIN))
            .withProxyWorkstation(config.get(Constants.DYNAMODB_CLIENT_PROXY_WORKSTATION))
            .withProxyHost(config.get(Constants.DYNAMODB_CLIENT_PROXY_HOST))
            .withProxyPort(config.get(Constants.DYNAMODB_CLIENT_PROXY_PORT))
            .withProxyUsername(config.get(Constants.DYNAMODB_CLIENT_PROXY_USERNAME))
            .withProxyPassword(config.get(Constants.DYNAMODB_CLIENT_PROXY_PASSWORD));
    forceConsistentRead = config.get(Constants.DYNAMODB_FORCE_CONSISTENT_READ);
    //end adaptation of constructor at
    //https://github.com/buka/titan/blob/master/src/main/java/com/thinkaurelius/titan/diskstorage/dynamodb/DynamoDBClient.java#L77
    enableParallelScan = config.get(Constants.DYNAMODB_ENABLE_PARALLEL_SCAN);
    prefix = config.get(Constants.DYNAMODB_TABLE_PREFIX);
    final String metricsPrefix = config.get(Constants.DYNAMODB_METRICS_PREFIX);

    final long maxRetries = config.get(Constants.DYNAMODB_MAX_SELF_THROTTLED_RETRIES);
    if (maxRetries < 0) {
        throw new IllegalArgumentException(
                Constants.DYNAMODB_MAX_SELF_THROTTLED_RETRIES.getName() + " must be at least 0");
    }
    final long retryMillis = config.get(Constants.DYNAMODB_INITIAL_RETRY_MILLIS);
    if (retryMillis <= 0) {
        throw new IllegalArgumentException(
                Constants.DYNAMODB_INITIAL_RETRY_MILLIS.getName() + " must be at least 1");
    }
    final double controlPlaneRate = config.get(Constants.DYNAMODB_CONTROL_PLANE_RATE);
    if (controlPlaneRate < 0) {
        throw new IllegalArgumentException("must have a positive control plane rate");
    }
    final RateLimiter controlPlaneRateLimiter = RateLimiter.create(controlPlaneRate);

    final Map<String, RateLimiter> readRateLimit = new HashMap<>();
    final Map<String, RateLimiter> writeRateLimit = new HashMap<>();

    final Set<String> storeNames = new HashSet<>(Constants.REQUIRED_BACKEND_STORES);
    storeNames.add(config.get(GraphDatabaseConfiguration.IDS_STORE_NAME));
    storeNames.addAll(config.getContainedNamespaces(Constants.DYNAMODB_STORES_NAMESPACE));
    storeNames.forEach(storeName -> setupStore(config, prefix, readRateLimit, writeRateLimit, storeName));

    endpoint = JanusGraphConfigUtil.getNullableConfigValue(config, Constants.DYNAMODB_CLIENT_ENDPOINT);
    signingRegion = JanusGraphConfigUtil.getNullableConfigValue(config,
            Constants.DYNAMODB_CLIENT_SIGNING_REGION);
    delegate = new DynamoDBDelegate(endpoint, signingRegion, credentialsProvider, clientConfig, config,
            readRateLimit, writeRateLimit, maxRetries, retryMillis, prefix, metricsPrefix,
            controlPlaneRateLimiter);
}

From source file:org.apache.cassandra.batchlog.BatchlogManager.java

private void replayFailedBatches() {
    logger.trace("Started replayFailedBatches");

    // rate limit is in bytes per second. Uses Double.MAX_VALUE if disabled (set to 0 in cassandra.yaml).
    // max rate is scaled by the number of nodes in the cluster (same as for HHOM - see CASSANDRA-5272).
    int endpointsCount = StorageService.instance.getTokenMetadata().getAllEndpoints().size();
    if (endpointsCount <= 0) {
        logger.trace("Replay cancelled as there are no peers in the ring.");
        return;
    }
    int throttleInKB = DatabaseDescriptor.getBatchlogReplayThrottleInKB() / endpointsCount;
    RateLimiter rateLimiter = RateLimiter.create(throttleInKB == 0 ? Double.MAX_VALUE : throttleInKB * 1024);

    UUID limitUuid = UUIDGen.maxTimeUUID(System.currentTimeMillis() - getBatchlogTimeout());
    ColumnFamilyStore store = Keyspace.open(SystemKeyspace.NAME).getColumnFamilyStore(SystemKeyspace.BATCHES);
    int pageSize = calculatePageSize(store);
    // There cannot be any live content where token(id) <= token(lastReplayedUuid) as every processed batch is
    // deleted, but the tombstoned content may still be present in the tables. To avoid walking over it we specify
    // token(id) > token(lastReplayedUuid) as part of the query.
    String query = String.format(
            "SELECT id, mutations, version FROM %s.%s WHERE token(id) > token(?) AND token(id) <= token(?)",
            SystemKeyspace.NAME, SystemKeyspace.BATCHES);
    UntypedResultSet batches = executeInternalWithPaging(query, pageSize, lastReplayedUuid, limitUuid);
    processBatchlogEntries(batches, pageSize, rateLimiter);
    lastReplayedUuid = limitUuid;
    logger.trace("Finished replayFailedBatches");
}

From source file:org.mobicents.protocols.ss7.map.load.Client.java

protected void initializeStack(IpChannelType ipChannelType) throws Exception {

    this.rateLimiterObj = RateLimiter.create(MAXCONCURRENTDIALOGS); // rate limiter: at most MAXCONCURRENTDIALOGS new dialogs per second

    this.initSCTP(ipChannelType);

    // Initialize M3UA first
    this.initM3UA();

    // Initialize SCCP
    this.initSCCP();

    // Initialize TCAP
    this.initTCAP();

    // Initialize MAP
    this.initMAP();

    // Set 5: Finally, start the ASP
    this.clientM3UAMgmt.startAsp("ASP1");
}

From source file:com.linkedin.pinot.broker.queryquota.TableQueryQuotaManager.java

/**
 * Create a rate limiter for a table.
 * @param tableName table name with table type.
 * @param brokerResource broker resource which stores all the broker states of each table.
 * @param quotaConfig quota config of the table.
 */
private void createRateLimiter(String tableName, ExternalView brokerResource, QuotaConfig quotaConfig) {
    if (quotaConfig == null || Strings.isNullOrEmpty(quotaConfig.getMaxQueriesPerSecond())) {
        LOGGER.info("No qps config specified for table: {}", tableName);
        return;
    }

    if (brokerResource == null) {
        LOGGER.warn("Failed to init qps quota for table {}. No broker resource connected!", tableName);
        return;
    }

    Map<String, String> stateMap = brokerResource.getStateMap(tableName);
    int otherOnlineBrokerCount = 0;

    // If stateMap is null, that means this broker is the first broker for this table.
    if (stateMap != null) {
        for (Map.Entry<String, String> state : stateMap.entrySet()) {
            if (!_helixManager.getInstanceName().equals(state.getKey()) && state.getValue()
                    .equals(CommonConstants.Helix.StateModel.SegmentOnlineOfflineStateModel.ONLINE)) {
                otherOnlineBrokerCount++;
            }
        }
    }
    LOGGER.info("The number of online brokers for table {} is {}", tableName, otherOnlineBrokerCount + 1);
    //int onlineCount = otherOnlineBrokerCount + 1;

    // FIXME We use fixed rate for the 1st version.
    int onlineCount = 1;

    // Get the dynamic rate
    double overallRate;
    if (quotaConfig.isMaxQueriesPerSecondValid()) {
        overallRate = Double.parseDouble(quotaConfig.getMaxQueriesPerSecond());
    } else {
        LOGGER.error("Failed to init qps quota: error when parsing qps quota: {} for table: {}",
                quotaConfig.getMaxQueriesPerSecond(), tableName);
        return;
    }

    double perBrokerRate = overallRate / onlineCount;
    QueryQuotaConfig queryQuotaConfig = new QueryQuotaConfig(RateLimiter.create(perBrokerRate),
            new HitCounter(TIME_RANGE_IN_SECOND));
    _rateLimiterMap.put(tableName, queryQuotaConfig);
    LOGGER.info(
            "Rate limiter for table: {} has been initialized. Overall rate: {}. Per-broker rate: {}. Number of online broker instances: {}",
            tableName, overallRate, perBrokerRate, onlineCount);
}
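
The per-table limiter built above is later consulted on the query path. As a hedged sketch (getRateLimiter() on QueryQuotaConfig is an assumed accessor, and this is not the actual Pinot broker code), an over-quota check would typically use tryAcquire() so that excess queries are rejected rather than delayed:

// Hypothetical quota check on the broker query path.
public boolean tryAcquireQuota(String tableName) {
    QueryQuotaConfig queryQuotaConfig = _rateLimiterMap.get(tableName);
    if (queryQuotaConfig == null) {
        return true; // no quota configured for this table
    }
    // Rejects immediately when the per-broker QPS budget for this table is exhausted.
    return queryQuotaConfig.getRateLimiter().tryAcquire();
}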