Example usage for com.google.common.util.concurrent RateLimiter create

Introduction

On this page you can find example usages of com.google.common.util.concurrent.RateLimiter#create, collected from open-source projects.

Prototype

public static RateLimiter create(double permitsPerSecond)

Documentation

Creates a RateLimiter with the specified stable throughput, given as "permits per second" (commonly referred to as QPS, queries per second).
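
For orientation, here is a minimal standalone sketch (not taken from the projects below) that creates a limiter and blocks on acquire(); the rate of 2.0 permits per second is an arbitrary example value:

import com.google.common.util.concurrent.RateLimiter;

public class RateLimiterDemo {
    public static void main(String[] args) {
        // Release at most 2 permits per second (arbitrary example rate).
        RateLimiter limiter = RateLimiter.create(2.0);
        for (int i = 0; i < 5; i++) {
            // acquire() blocks until a permit is available and returns
            // the time spent sleeping, in seconds.
            double waited = limiter.acquire();
            System.out.printf("request %d waited %.2fs%n", i, waited);
        }
    }
}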

Usage

From source file: org.entrystore.ldcache.cache.impl.CacheImpl.java

void throttle(URI uri) {
    String hostname = java.net.URI.create(uri.stringValue()).getHost();
    try {
        rateLimiters.get(hostname, new Callable<RateLimiter>() {
            @Override
            public RateLimiter call() throws Exception {
                return RateLimiter.create(rateLimit);
            }
        }).acquire();
    } catch (ExecutionException e) {
        log.error(e.getMessage());
    }
}
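
In this snippet a Guava cache keyed by hostname lazily creates one RateLimiter per remote host, so each host is throttled independently; acquire() then blocks the calling thread until that host's limiter releases a permit.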

From source file: com.netflix.ndbench.core.NdBenchDriver.java

private void checkAndInitRateLimit(AtomicReference<RateLimiter> rateLimiter, int property, String prop) {
    RateLimiter oldLimiter = rateLimiter.get();
    if (oldLimiter == null) {
        Logger.info("Setting rate Limit for: " + prop + " to: " + property);
        rateLimiter.set(RateLimiter.create(property));
        return;
    }

    int oldLimit = Double.valueOf(oldLimiter.getRate()).intValue();
    int newLimit = property;
    if (oldLimit != newLimit) {
        Logger.info("Updating rate Limit for: " + prop + " to: " + newLimit);
        rateLimiter.set(RateLimiter.create(newLimit));
    }
}
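
Note that this driver swaps in a brand-new RateLimiter whenever the configured limit changes; Guava's RateLimiter.setRate(double) could alternatively adjust the existing instance in place.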

From source file: org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.java

public ManagedLedgerImpl(ManagedLedgerFactoryImpl factory, BookKeeper bookKeeper, MetaStore store,
        ManagedLedgerConfig config, ScheduledExecutorService scheduledExecutor,
        OrderedSafeExecutor orderedExecutor, final String name) {
    this.factory = factory;
    this.bookKeeper = bookKeeper;
    this.config = config;
    this.store = store;
    this.name = name;
    this.scheduledExecutor = scheduledExecutor;
    this.executor = orderedExecutor;
    TOTAL_SIZE_UPDATER.set(this, 0);
    NUMBER_OF_ENTRIES_UPDATER.set(this, 0);
    ENTRIES_ADDED_COUNTER_UPDATER.set(this, 0);
    STATE_UPDATER.set(this, State.None);
    this.ledgersStat = null;
    this.mbean = new ManagedLedgerMBeanImpl(this);
    this.entryCache = factory.getEntryCacheManager().getEntryCache(this);
    this.waitingCursors = Queues.newConcurrentLinkedQueue();
    this.uninitializedCursors = Maps.newHashMap();
    this.updateCursorRateLimit = RateLimiter.create(1);

    // Get the next rollover time. Add a random value of up to 5% to avoid rolling over multiple ledgers at the same time
    this.maximumRolloverTimeMs = (long) (config.getMaximumRolloverTimeMs()
            * (1 + random.nextDouble() * 5 / 100.0));
}

From source file: org.apache.bookkeeper.zookeeper.BkZooKeeperClient.java

BkZooKeeperClient(String connectString, int sessionTimeoutMs, ZooKeeperWatcherBase watcherManager,
        RetryPolicy connectRetryPolicy, RetryPolicy operationRetryPolicy, StatsLogger statsLogger,
        int retryExecThreadCount, double rate, boolean allowReadOnlyMode) throws IOException {
    super(connectString, sessionTimeoutMs, watcherManager, allowReadOnlyMode);
    this.connectString = connectString;
    this.sessionTimeoutMs = sessionTimeoutMs;
    this.allowReadOnlyMode = allowReadOnlyMode;
    this.watcherManager = watcherManager;
    this.connectRetryPolicy = connectRetryPolicy;
    this.operationRetryPolicy = operationRetryPolicy;
    this.rateLimiter = rate > 0 ? RateLimiter.create(rate) : null;
    this.retryExecutor = Executors.newScheduledThreadPool(retryExecThreadCount,
            new ThreadFactoryBuilder().setNameFormat("ZKC-retry-executor-%d").build());
    this.connectExecutor = Executors.newSingleThreadExecutor(
            new ThreadFactoryBuilder().setNameFormat("ZKC-connect-executor-%d").build());
    // added itself to the watcher
    watcherManager.addChildWatcher(this);

    // Stats
    StatsLogger scopedStatsLogger = statsLogger.scope("zk");
    createClientStats = scopedStatsLogger.getOpStatsLogger("create_client");
    createStats = scopedStatsLogger.getOpStatsLogger("create");
    getStats = scopedStatsLogger.getOpStatsLogger("get_data");
    setStats = scopedStatsLogger.getOpStatsLogger("set_data");
    deleteStats = scopedStatsLogger.getOpStatsLogger("delete");
    getChildrenStats = scopedStatsLogger.getOpStatsLogger("get_children");
    existsStats = scopedStatsLogger.getOpStatsLogger("exists");
    multiStats = scopedStatsLogger.getOpStatsLogger("multi");
    getACLStats = scopedStatsLogger.getOpStatsLogger("get_acl");
    setACLStats = scopedStatsLogger.getOpStatsLogger("set_acl");
    syncStats = scopedStatsLogger.getOpStatsLogger("sync");
}
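
Here a non-positive rate disables throttling altogether: the limiter stays null, so callers must null-check rateLimiter before trying to acquire a permit.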

From source file: com.streamsets.datacollector.execution.runner.common.ProductionPipelineRunner.java

public void setRateLimit(Long rateLimit) {
    this.rateLimit = rateLimit;
    rateLimiter = RateLimiter.create(rateLimit.doubleValue());
}

From source file: com.spotify.styx.StyxScheduler.java

@Override
public void create(Environment environment) {
    final Config config = environment.config();
    final Closer closer = environment.closer();

    final Thread.UncaughtExceptionHandler uncaughtExceptionHandler = (thread, throwable) -> LOG
            .error("Thread {} threw {}", thread, throwable);
    final ThreadFactory schedulerTf = new ThreadFactoryBuilder().setDaemon(true)
            .setNameFormat("styx-scheduler-%d").setUncaughtExceptionHandler(uncaughtExceptionHandler).build();
    final ThreadFactory eventTf = new ThreadFactoryBuilder().setDaemon(true)
            .setNameFormat("styx-event-worker-%d").setUncaughtExceptionHandler(uncaughtExceptionHandler)
            .build();
    final ThreadFactory dockerRunnerTf = new ThreadFactoryBuilder().setDaemon(true)
            .setNameFormat("styx-docker-runner-%d").setUncaughtExceptionHandler(uncaughtExceptionHandler)
            .build();

    final ScheduledExecutorService executor = executorFactory.create(3, schedulerTf);
    final ExecutorService eventWorker = Executors.newFixedThreadPool(16, eventTf);
    final ExecutorService dockerRunnerExecutor = Executors.newSingleThreadExecutor(dockerRunnerTf);
    closer.register(executorCloser("scheduler", executor));
    closer.register(executorCloser("event-worker", eventWorker));
    closer.register(executorCloser("docker-runner", dockerRunnerExecutor));

    final Stats stats = statsFactory.apply(environment);
    final WorkflowCache workflowCache = new InMemWorkflowCache();
    final Storage storage = instrument(Storage.class, storageFactory.apply(environment), stats, time);

    warmUpCache(workflowCache, storage);

    final QueuedStateManager stateManager = closer.register(new QueuedStateManager(time, eventWorker, storage));

    final Config staleStateTtlConfig = config.getConfig(STYX_STALE_STATE_TTL_CONFIG);
    final TimeoutConfig timeoutConfig = TimeoutConfig.createFromConfig(staleStateTtlConfig);

    final Supplier<String> dockerId = new CachedSupplier<>(storage::globalDockerRunnerId, time);
    final DockerRunner routingDockerRunner = DockerRunner.routing(
            id -> dockerRunnerFactory.create(id, environment, stateManager, executor, stats), dockerId);
    final DockerRunner dockerRunner = instrument(DockerRunner.class, routingDockerRunner, stats, time);
    final Publisher publisher = publisherFactory.apply(environment);

    final RateLimiter submissionRateLimiter = RateLimiter.create(DEFAULT_SUBMISSION_RATE_PER_SEC);

    final OutputHandler[] outputHandlers = new OutputHandler[] { transitionLogger(""),
            new DockerRunnerHandler(dockerRunner, stateManager, storage, submissionRateLimiter,
                    dockerRunnerExecutor),
            new TerminationHandler(retryUtil, stateManager), new MonitoringHandler(time, stats),
            new PublisherHandler(publisher), new ExecutionDescriptionHandler(storage, stateManager) };
    final StateFactory stateFactory = (workflowInstance) -> RunState.fresh(workflowInstance, time,
            outputHandlers);

    final TriggerListener trigger = new StateInitializingTrigger(stateFactory, stateManager, storage);
    final TriggerManager triggerManager = new TriggerManager(trigger, time, storage, stats);

    final WorkflowInitializer workflowInitializer = new WorkflowInitializer(storage, time);
    final Consumer<Workflow> workflowRemoveListener = workflowRemoved(storage);
    final Consumer<Workflow> workflowChangeListener = workflowChanged(workflowCache, workflowInitializer, stats,
            stateManager);

    final Scheduler scheduler = new Scheduler(time, timeoutConfig, stateManager, workflowCache, storage,
            trigger);

    restoreState(storage, outputHandlers, stateManager);
    startTriggerManager(triggerManager, executor);
    startScheduleSources(environment, executor, workflowChangeListener, workflowRemoveListener);
    startScheduler(scheduler, executor);
    startRuntimeConfigUpdate(storage, executor, submissionRateLimiter);
    setupMetrics(stateManager, workflowCache, storage, submissionRateLimiter, stats);

    final SchedulerResource schedulerResource = new SchedulerResource(stateManager, trigger, storage, time);

    environment.routingEngine().registerAutoRoute(Route.sync("GET", "/ping", rc -> "pong"))
            .registerRoutes(schedulerResource.routes());

    this.stateManager = stateManager;
    this.scheduler = scheduler;
    this.triggerManager = triggerManager;
}
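
The single submissionRateLimiter instance is shared between the DockerRunnerHandler, the runtime-config updater, and the metrics setup, so the submission rate can be both observed and re-tuned while the scheduler is running.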

From source file: com.github.jcustenborder.kafka.connect.simulator.SimulatorSourceTask.java

@Override
public void start(Map<String, String> map) {
    this.config = new SimulatorSourceConnectorConfig(map);
    this.rateLimiter = RateLimiter.create(this.config.rateLimit);
    this.fairy = Fairy.create();

    this.keySetters = new ArrayList<>();
    SchemaBuilder keySchemaBuilder = SchemaBuilder.struct();
    keySchemaBuilder.name(this.config.keySchemaName);
    for (String field : this.config.keyFields) {
        configureField(field, keySchemaBuilder, keySetters);
    }
    this.keySchema = keySchemaBuilder.build();

    this.valueSetters = new ArrayList<>();
    SchemaBuilder valueSchemaBuilder = SchemaBuilder.struct();
    valueSchemaBuilder.name(this.config.valueSchemaName);
    for (String field : this.config.valueFields) {
        configureField(field, valueSchemaBuilder, valueSetters);
    }
    this.valueSchema = valueSchemaBuilder.build();

}

From source file: com.datastax.loader.CqlDelimLoad.java

private void setup() {
    // Connect to Cassandra
    Cluster.Builder clusterBuilder = Cluster.builder().addContactPoint(host).withPort(port)
            .withProtocolVersion(ProtocolVersion.V2) // Should be V3, but issues for now....
            .withLoadBalancingPolicy(new TokenAwarePolicy(new DCAwareRoundRobinPolicy(), true));
    if (null != username)
        clusterBuilder = clusterBuilder.withCredentials(username, password);
    cluster = clusterBuilder.build();
    session = cluster.newSession();
    if (0 < rate)
        rateLimiter = RateLimiter.create(rate);
}
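
As in the BookKeeper client above, a rate of zero or less simply leaves rateLimiter null, disabling client-side throttling.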

From source file: io.warp10.continuum.ThrottlingManager.java

/**
 * Validate the ingestion of datapoints against the DDP limit
 *
 * We tolerate inputs only if they wouldn't incur a wait greater than 2 seconds
 * 
 * @param producer
 * @param owner
 * @param application
 * @param count
 * @param maxwait Max wait per datapoint
 */
public static void checkDDP(Metadata metadata, String producer, String owner, String application, int count,
        long maxwait) throws WarpException {
    if (!loaded) {
        return;
    }

    //
    // Extract RateLimiter
    //

    RateLimiter producerLimiter = producerRateLimiters.get(producer);
    RateLimiter applicationLimiter = applicationRateLimiters.get(application);

    //
    // TODO(hbs): store per producer/per app maxwait values? Extract them from the throttling file?
    //

    long appMaxWait = maxwait;
    long producerMaxWait = maxwait;

    // -1.0 as the default rate means do not enforce DDP limit
    if (null == producerLimiter && null == applicationLimiter && -1.0D == DEFAULT_RATE_PRODUCER) {
        return;
    } else if (null == producerLimiter) {
        // Create a rate limiter with the default rate      
        producerLimiter = RateLimiter.create(Math.max(MINIMUM_RATE_LIMIT, DEFAULT_RATE_PRODUCER));
        producerRateLimiters.put(producer, producerLimiter);
    }

    // Check per application limiter
    if (null != applicationLimiter) {
        synchronized (applicationLimiter) {
            if (!applicationLimiter.tryAcquire(count, appMaxWait * count, TimeUnit.MILLISECONDS)) {
                StringBuilder sb = new StringBuilder();
                sb.append("Storing data for ");
                if (null != metadata) {
                    GTSHelper.metadataToString(sb, metadata.getName(), metadata.getLabels());
                }
                sb.append(" would incur a wait greater than ");
                sb.append(appMaxWait);
                sb.append(
                        " ms per datapoint due to your Daily Data Points limit being already exceeded for application '"
                                + application + "'. Current max rate is " + applicationLimiter.getRate()
                                + " datapoints/s.");

                Map<String, String> labels = new HashMap<String, String>();
                labels.put(SensisionConstants.SENSISION_LABEL_APPLICATION, application);
                Sensision.update(SensisionConstants.SENSISION_CLASS_CONTINUUM_THROTTLING_RATE_PER_APP, labels,
                        1);
                Sensision.update(SensisionConstants.SENSISION_CLASS_CONTINUUM_THROTTLING_RATE_PER_APP_GLOBAL,
                        Sensision.EMPTY_LABELS, 1);

                throw new WarpException(sb.toString());
            }
        }
    }

    synchronized (producerLimiter) {
        if (!producerLimiter.tryAcquire(count, producerMaxWait * count, TimeUnit.MILLISECONDS)) {
            StringBuilder sb = new StringBuilder();
            sb.append("Storing data for ");
            if (null != metadata) {
                GTSHelper.metadataToString(sb, metadata.getName(), metadata.getLabels());
            }
            sb.append(" would incur a wait greater than ");
            sb.append(producerMaxWait);
            sb.append(
                    " ms per datapoint due to your Daily Data Points limit being already exceeded. Current maximum rate is "
                            + producerLimiter.getRate() + " datapoints/s.");

            Map<String, String> labels = new HashMap<String, String>();
            labels.put(SensisionConstants.SENSISION_LABEL_PRODUCER, producer);
            Sensision.update(SensisionConstants.SENSISION_CLASS_CONTINUUM_THROTTLING_RATE, labels, 1);
            Sensision.update(SensisionConstants.SENSISION_CLASS_CONTINUUM_THROTTLING_RATE_GLOBAL,
                    Sensision.EMPTY_LABELS, 1);

            throw new WarpException(sb.toString());
        }
    }
}
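
The method above relies on tryAcquire with a timeout rather than a blocking acquire(): a batch is rejected outright once admitting it would exceed the per-datapoint wait budget. A minimal sketch of that pattern, assuming an illustrative limit of 100 permits per second and a 2-second budget:

import java.util.concurrent.TimeUnit;

import com.google.common.util.concurrent.RateLimiter;

public class TryAcquireDemo {
    public static void main(String[] args) {
        RateLimiter limiter = RateLimiter.create(100.0); // assumed rate
        int count = 250; // size of the incoming batch (assumed)
        // Admit the batch only if all permits can be obtained within
        // 2 seconds; otherwise fail fast instead of blocking.
        if (!limiter.tryAcquire(count, 2000, TimeUnit.MILLISECONDS)) {
            throw new IllegalStateException("rate limit exceeded for batch of " + count);
        }
        System.out.println("batch of " + count + " admitted");
    }
}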

From source file: com.tinspx.util.concurrent.LimitedExecutorService.java

public static Supplier<RateLimiter> rateLimiterSupplier(final double permitsPerSecond) {
    checkArgument(permitsPerSecond > 0.0 && !Double.isNaN(permitsPerSecond),
            "permitsPerSecond must be positive (%s)", permitsPerSecond);
    return new Supplier<RateLimiter>() {
        @Override
        public RateLimiter get() {
            return RateLimiter.create(permitsPerSecond);
        }
    };
}
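
Deferring construction behind a Supplier means each consumer (for example, each executor built with this supplier) receives its own independent limiter rather than sharing a single global rate.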