Example usage for com.google.common.collect EvictingQueue create

Introduction

This page collects example usages of com.google.common.collect.EvictingQueue#create(int) from open source projects.

Prototype

public static <E> EvictingQueue<E> create(int maxSize) 

Document

Creates and returns a new evicting queue that will hold up to maxSize elements.
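
A minimal, self-contained sketch of that behavior (not taken from any of the projects below): once the queue is full, each add silently evicts the element at the head.

import com.google.common.collect.EvictingQueue;

public class EvictingQueueDemo {
    public static void main(String[] args) {
        EvictingQueue<String> queue = EvictingQueue.create(3);
        queue.add("a");
        queue.add("b");
        queue.add("c");
        queue.add("d"); // capacity reached, so "a" is evicted from the head
        System.out.println(queue); // prints [b, c, d]
    }
}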

Usage

From source file: org.apache.storm.scheduler.blacklist.BlacklistScheduler.java

@Override
public void prepare(Map conf) {
    LOG.info("Preparing black list scheduler");
    underlyingScheduler.prepare(conf);
    this.conf = conf;

    toleranceTime = ObjectReader.getInt(this.conf.get(DaemonConfig.BLACKLIST_SCHEDULER_TOLERANCE_TIME),
            DEFAULT_BLACKLIST_SCHEDULER_TOLERANCE_TIME);
    toleranceCount = ObjectReader.getInt(this.conf.get(DaemonConfig.BLACKLIST_SCHEDULER_TOLERANCE_COUNT),
            DEFAULT_BLACKLIST_SCHEDULER_TOLERANCE_COUNT);
    resumeTime = ObjectReader.getInt(this.conf.get(DaemonConfig.BLACKLIST_SCHEDULER_RESUME_TIME),
            DEFAULT_BLACKLIST_SCHEDULER_RESUME_TIME);

    String reporterClassName = ObjectReader.getString(this.conf.get(DaemonConfig.BLACKLIST_SCHEDULER_REPORTER),
            LogReporter.class.getName());
    reporter = (IReporter) initializeInstance(reporterClassName, "blacklist reporter");

    String strategyClassName = ObjectReader.getString(this.conf.get(DaemonConfig.BLACKLIST_SCHEDULER_STRATEGY),
            DefaultBlacklistStrategy.class.getName());
    blacklistStrategy = (IBlacklistStrategy) initializeInstance(strategyClassName, "blacklist strategy");

    nimbusMonitorFreqSecs = ObjectReader.getInt(this.conf.get(DaemonConfig.NIMBUS_MONITOR_FREQ_SECS));
    blacklistStrategy.prepare(this.conf);

    windowSize = toleranceTime / nimbusMonitorFreqSecs;
    badSupervisorsToleranceSlidingWindow = EvictingQueue.create(windowSize);
    cachedSupervisors = new HashMap<>();
    blacklistHost = new HashSet<>();

    StormMetricsRegistry.registerGauge("nimbus:num-blacklisted-supervisor", new Callable<Integer>() {
        @Override
        public Integer call() throws Exception {
            //nimbus:num-blacklisted-supervisor + non-blacklisted supervisors = nimbus:num-supervisors
            return blacklistHost.size();
        }
    });
}
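
Here the queue capacity is derived from configuration: toleranceTime / nimbusMonitorFreqSecs yields one slot per monitoring round within the tolerance window, so old rounds age out automatically. A hedged sketch of that sizing logic (the names and values are illustrative, not Storm's):

import com.google.common.collect.EvictingQueue;
import java.util.HashSet;
import java.util.Set;

public class ToleranceWindowSketch {
    public static void main(String[] args) {
        int toleranceTimeSecs = 300; // keep five minutes of history
        int monitorFreqSecs = 10;    // one scheduling round every ten seconds
        int windowSize = toleranceTimeSecs / monitorFreqSecs; // 30 slots

        EvictingQueue<Set<String>> badSupervisorsWindow = EvictingQueue.create(windowSize);
        Set<String> badThisRound = new HashSet<>();
        badThisRound.add("supervisor-1");
        badSupervisorsWindow.add(badThisRound); // rounds older than the window fall off
    }
}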

From source file: org.hawkular.datamining.forecast.AutomaticForecaster.java

@Override
public void update(Update update) {
    synchronized (selectModelLock) {
        if (update.getWindowSize() != null && !update.getWindowSize().equals(config.getWindowsSize())) {
            EvictingQueue<DataPoint> newWindow = EvictingQueue.create(update.getWindowSize());
            newWindow.addAll(window);
            window = newWindow;
        }

        if (update.getConceptDriftStrategy() != null) {
            update.getConceptDriftStrategy().forecaster = this;
        }

        config.update(update);
        selectBestModel(Collections.emptyList());
    }
}
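
An EvictingQueue's capacity is fixed at creation, so "resizing" means creating a new queue and copying the old contents, as the method above does. If the new capacity is smaller, addAll keeps only the most recent elements, since the oldest are evicted as the newer ones are added. A generic sketch of the pattern:

import com.google.common.collect.EvictingQueue;

public class ResizeSketch {
    static <E> EvictingQueue<E> resize(EvictingQueue<E> old, int newCapacity) {
        EvictingQueue<E> resized = EvictingQueue.create(newCapacity);
        resized.addAll(old); // preserves insertion order; evicts the oldest if too small
        return resized;
    }

    public static void main(String[] args) {
        EvictingQueue<Integer> window = EvictingQueue.create(5);
        for (int i = 1; i <= 5; i++) {
            window.add(i);
        }
        System.out.println(resize(window, 3)); // prints [3, 4, 5]
    }
}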

From source file: com.searchcode.app.util.LoggerWrapper.java

public LoggerWrapper() {
    this.LOGCOUNT = Singleton.getHelpers().tryParseInt(
            (String) Properties.getProperties().getOrDefault(Values.LOG_COUNT, Values.DEFAULT_LOG_COUNT),
            Values.DEFAULT_LOG_COUNT);
    this.LOGLEVEL = (String) Properties.getProperties().getOrDefault(Values.LOG_LEVEL,
            Values.DEFAULT_LOG_LEVEL);
    this.LOGPATH = Singleton.getHelpers().getLogPath();

    if (this.LOGLEVEL.equals("OFF")) {
        this.LOGSENABLED = false;
    }

    switch (this.LOGLEVEL.toUpperCase()) {
    case "INFO":
        this.LOGLEVELENUM = Level.INFO;
        break;
    case "FINE":
        this.LOGLEVELENUM = Level.FINE;
        break;
    case "WARNING":
        this.LOGLEVELENUM = Level.WARNING;
        break;
    case "SEVERE":
    default:
        this.LOGLEVELENUM = Level.SEVERE;
        break;
    }

    if (this.LOGPATH.equals("STDOUT")) {
        this.LOGSTDOUT = true;
        this.LOGSENABLED = false;
        this.LOGLEVEL = "OFF";
    }

    if (!this.LOGLEVEL.equals("OFF")) {
        try {
            this.LOGPATH += "searchcode-server-%g.log";
            Handler handler = new FileHandler(this.LOGPATH, this.BYTESLOGSIZE, this.LOGCOUNT);

            handler.setFormatter(new SimpleFormatter());

            logger = Logger.getLogger(Values.EMPTYSTRING);
            logger.addHandler(handler);

            switch (this.LOGLEVEL.toUpperCase()) {
            case "INFO":
                handler.setLevel(Level.INFO);
                this.logger.setLevel(Level.INFO);
                break;
            case "FINE":
                handler.setLevel(Level.FINE);
                this.logger.setLevel(Level.FINE);
                break;
            case "WARNING":
                handler.setLevel(Level.WARNING);
                this.logger.setLevel(Level.WARNING);
                break;
            case "SEVERE":
            default:
                handler.setLevel(Level.SEVERE);
                this.logger.setLevel(Level.SEVERE);
                break;
            }

        } catch (IOException ex) {
            this.logger = Logger.getLogger(Values.EMPTYSTRING);
            this.logger.setLevel(Level.WARNING);

            this.logger.warning("\n//////////////////////////////////////////////////////////////////////\n"
                    + "// Unable to write to logging file"
                    + (!this.LOGPATH.isEmpty() ? ": " + this.LOGPATH : ".") + "\n"
                    + "// Logs will be written to STDOUT.\n"
                    + "//////////////////////////////////////////////////////////////////////\n");
        }
    }

    this.allCache = EvictingQueue.create(1000);
    this.infoRecentCache = EvictingQueue.create(1000);
    this.warningRecentCache = EvictingQueue.create(1000);
    this.severeRecentCache = EvictingQueue.create(1000);
    this.searchLog = EvictingQueue.create(1000);
    this.apiLog = EvictingQueue.create(1000);
}
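
Each of these queues caps an in-memory log cache at the 1000 most recent entries. One caveat worth noting: EvictingQueue is not thread-safe, so a logger written to from multiple threads needs external synchronization, for example Guava's Queues.synchronizedQueue wrapper. A hedged sketch, assuming concurrent writers:

import com.google.common.collect.EvictingQueue;
import com.google.common.collect.Queues;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;

public class RecentLogCacheSketch {
    // Synchronized view over the evicting queue.
    private final Queue<String> recent = Queues.synchronizedQueue(EvictingQueue.<String>create(1000));

    public void log(String line) {
        recent.add(line); // the oldest line is dropped once 1000 entries exist
    }

    public List<String> snapshot() {
        synchronized (recent) { // iteration requires holding the wrapper's lock
            return new ArrayList<>(recent);
        }
    }
}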

From source file: io.geobit.chain.util.Performance.java

public void add(T t) {
    blacklist.put(t, 0);
    ko.put(t, 0L);
    ok.put(t, 0L);
    EvictingQueue<Long> q = EvictingQueue.create(10000);
    timers.put(t, q);

}

From source file: org.elasticsearch.search.aggregations.reducers.movavg.MovAvgReducer.java

@Override
public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
    InternalHistogram histo = (InternalHistogram) aggregation;
    List<? extends InternalHistogram.Bucket> buckets = histo.getBuckets();
    InternalHistogram.Factory<? extends InternalHistogram.Bucket> factory = histo.getFactory();

    List newBuckets = new ArrayList<>();
    EvictingQueue<Double> values = EvictingQueue.create(this.window);

    long lastKey = 0;
    long interval = Long.MAX_VALUE;
    Object currentKey;

    for (InternalHistogram.Bucket bucket : buckets) {
        Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy);
        currentKey = bucket.getKey();

        if (!(thisBucketValue == null || thisBucketValue.equals(Double.NaN))) {
            values.offer(thisBucketValue);

            double movavg = model.next(values);

            List<InternalAggregation> aggs = new ArrayList<>(
                    Lists.transform(bucket.getAggregations().asList(), FUNCTION));
            aggs.add(new InternalSimpleValue(name(), movavg, formatter, new ArrayList<Reducer>(), metaData()));
            InternalHistogram.Bucket newBucket = factory.createBucket(currentKey, bucket.getDocCount(),
                    new InternalAggregations(aggs), bucket.getKeyed(), bucket.getFormatter());
            newBuckets.add(newBucket);

        } else {
            newBuckets.add(bucket);
        }

        if (predict > 0) {
            if (currentKey instanceof Number) {
                interval = Math.min(interval, ((Number) bucket.getKey()).longValue() - lastKey);
                lastKey = ((Number) bucket.getKey()).longValue();
            } else if (currentKey instanceof DateTime) {
                interval = Math.min(interval, ((DateTime) bucket.getKey()).getMillis() - lastKey);
                lastKey = ((DateTime) bucket.getKey()).getMillis();
            } else {
                throw new AggregationExecutionException(
                        "Expected key of type Number or DateTime but got [" + currentKey + "]");
            }
        }

    }

    if (buckets.size() > 0 && predict > 0) {

        boolean keyed;
        ValueFormatter formatter;
        keyed = buckets.get(0).getKeyed();
        formatter = buckets.get(0).getFormatter();

        double[] predictions = model.predict(values, predict);
        for (int i = 0; i < predictions.length; i++) {
            List<InternalAggregation> aggs = new ArrayList<>();
            aggs.add(new InternalSimpleValue(name(), predictions[i], formatter, new ArrayList<Reducer>(),
                    metaData()));
            InternalHistogram.Bucket newBucket = factory.createBucket(lastKey + (interval * (i + 1)), 0,
                    new InternalAggregations(aggs), keyed, formatter);
            newBuckets.add(newBucket);
        }
    }

    return factory.create(newBuckets, histo);
}
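
The reducer feeds each non-gap bucket value into a fixed-size EvictingQueue, so the model only ever sees the last windowful of values. Stripped of the Elasticsearch plumbing, the core moving-average mechanic looks roughly like this sketch:

import com.google.common.collect.EvictingQueue;

public class MovingAverageSketch {
    public static void main(String[] args) {
        EvictingQueue<Double> values = EvictingQueue.create(3); // window of three
        for (double v : new double[] {1, 2, 3, 4, 5}) {
            values.offer(v); // values that fell out of the window are evicted
            double sum = 0;
            for (double d : values) {
                sum += d;
            }
            System.out.printf("avg of %s = %.2f%n", values, sum / values.size());
        }
    }
}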

From source file: com.eucalyptus.util.metrics.ThruputMetrics.java

private static void addDataPointNoThread(MonitoredAction action, long newDataPoint) {
    try (final LockResource lock = LockResource.lock(storageLock.writeLock())) {
        if (data.containsKey(action)) {
            data.get(action).add(new DataPoint(newDataPoint));
        } else {
            EvictingQueue<DataPoint> newQ = EvictingQueue.create(MetricsConfiguration.METRICS_COLLECTION_SIZE);
            newQ.add(new DataPoint(newDataPoint));
            data.put(action, newQ);
        }
    }

    if (LOG.isTraceEnabled()) {
        StringBuilder sb = new StringBuilder(action.name);
        sb.append("=");
        sb.append(newDataPoint);
        LOG.trace(sb.toString());
    }
}
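
The containsKey/get/put sequence above is safe only because it runs under a write lock. Outside such a lock, a ConcurrentHashMap with computeIfAbsent is a common alternative for lazily creating one bounded queue per key. A sketch of that variant (not the Eucalyptus code; note that each EvictingQueue still needs its own synchronization for concurrent adds):

import com.google.common.collect.EvictingQueue;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class PerKeyWindowsSketch {
    private static final int COLLECTION_SIZE = 500; // illustrative capacity
    private final ConcurrentMap<String, Queue<Long>> data = new ConcurrentHashMap<>();

    public void addDataPoint(String action, long value) {
        Queue<Long> queue = data.computeIfAbsent(action, k -> EvictingQueue.create(COLLECTION_SIZE));
        synchronized (queue) { // EvictingQueue itself is not thread-safe
            queue.add(value);
        }
    }
}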

From source file: org.elasticsearch.search.aggregations.pipeline.movavg.MovAvgPipelineAggregator.java

@Override
public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
    InternalHistogram histo = (InternalHistogram) aggregation;
    List<? extends InternalHistogram.Bucket> buckets = histo.getBuckets();
    InternalHistogram.Factory<? extends InternalHistogram.Bucket> factory = histo.getFactory();

    List newBuckets = new ArrayList<>();
    EvictingQueue<Double> values = EvictingQueue.create(this.window);

    long lastValidKey = 0;
    int lastValidPosition = 0;
    int counter = 0;

    for (InternalHistogram.Bucket bucket : buckets) {
        Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy);

        // Default is to reuse existing bucket.  Simplifies the rest of the logic,
        // since we only change newBucket if we can add to it
        InternalHistogram.Bucket newBucket = bucket;

        if (!(thisBucketValue == null || thisBucketValue.equals(Double.NaN))) {
            values.offer(thisBucketValue);

            // Some models (e.g. HoltWinters) have certain preconditions that must be met
            if (model.hasValue(values.size())) {
                double movavg = model.next(values);

                List<InternalAggregation> aggs = new ArrayList<>(
                        Lists.transform(bucket.getAggregations().asList(), AGGREGATION_TRANFORM_FUNCTION));
                aggs.add(new InternalSimpleValue(name(), movavg, formatter, new ArrayList<PipelineAggregator>(),
                        metaData()));
                newBucket = factory.createBucket(bucket.getKey(), bucket.getDocCount(),
                        new InternalAggregations(aggs), bucket.getKeyed(), bucket.getFormatter());
            }

            if (predict > 0) {
                if (bucket.getKey() instanceof Number) {
                    lastValidKey = ((Number) bucket.getKey()).longValue();
                } else if (bucket.getKey() instanceof DateTime) {
                    lastValidKey = ((DateTime) bucket.getKey()).getMillis();
                } else {
                    throw new AggregationExecutionException(
                            "Expected key of type Number or DateTime but got [" + lastValidKey + "]");
                }
                lastValidPosition = counter;
            }
        }
        counter += 1;
        newBuckets.add(newBucket);

    }

    if (buckets.size() > 0 && predict > 0) {

        boolean keyed;
        ValueFormatter formatter;
        keyed = buckets.get(0).getKeyed();
        formatter = buckets.get(0).getFormatter();

        double[] predictions = model.predict(values, predict);
        for (int i = 0; i < predictions.length; i++) {

            List<InternalAggregation> aggs;
            long newKey = histo.getRounding().nextRoundingValue(lastValidKey);

            if (lastValidPosition + i + 1 < newBuckets.size()) {
                InternalHistogram.Bucket bucket = (InternalHistogram.Bucket) newBuckets
                        .get(lastValidPosition + i + 1);

                // Get the existing aggs in the bucket so we don't clobber data
                aggs = new ArrayList<>(
                        Lists.transform(bucket.getAggregations().asList(), AGGREGATION_TRANFORM_FUNCTION));
                aggs.add(new InternalSimpleValue(name(), predictions[i], formatter,
                        new ArrayList<PipelineAggregator>(), metaData()));

                InternalHistogram.Bucket newBucket = factory.createBucket(newKey, 0,
                        new InternalAggregations(aggs), keyed, formatter);

                // Overwrite the existing bucket with the new version
                newBuckets.set(lastValidPosition + i + 1, newBucket);

            } else {
                // Not seen before, create fresh
                aggs = new ArrayList<>();
                aggs.add(new InternalSimpleValue(name(), predictions[i], formatter,
                        new ArrayList<PipelineAggregator>(), metaData()));

                InternalHistogram.Bucket newBucket = factory.createBucket(newKey, 0,
                        new InternalAggregations(aggs), keyed, formatter);

                // Since this is a new bucket, simply append it
                newBuckets.add(newBucket);
            }
            lastValidKey = newKey;
        }
    }

    return factory.create(newBuckets, histo);
}

From source file: org.owasp.webgoat.plugin.ForgedReviews.java

@RequestMapping(method = RequestMethod.POST)
@ResponseBody
public AttackResult createNewReview(String reviewText, Integer stars, String validateReq,
        HttpServletRequest request) throws IOException {

    String host = (request.getHeader("host") == null) ? "NULL" : request.getHeader("host");
    //        String origin = (req.getHeader("origin") == null) ? "NULL" : req.getHeader("origin");
    //        Integer serverPort = (req.getServerPort() < 1) ? 0 : req.getServerPort();
    //        String serverName = (req.getServerName() == null) ? "NULL" : req.getServerName();
    String referer = (request.getHeader("referer") == null) ? "NULL" : request.getHeader("referer");
    String[] refererArr = referer.split("/");

    EvictingQueue<Review> reviews = userReviews.getOrDefault(webSession.getUserName(),
            EvictingQueue.create(100));
    Review review = new Review();

    review.setText(reviewText);
    review.setDateTime(DateTime.now().toString(fmt));
    review.setUser(webSession.getUserName());
    review.setStars(stars);

    reviews.add(review);
    userReviews.put(webSession.getUserName(), reviews);
    //short-circuit
    if (validateReq == null || !validateReq.equals(weakAntiCSRF)) {
        return trackProgress(failed().feedback("csrf-you-forgot-something").build());
    }
    //we have the spoofed files
    if (!"NULL".equals(referer) && refererArr[2].equals(host)) {
        return trackProgress(failed().feedback("csrf-same-host").build());
    } else {
        return trackProgress(success().feedback("csrf-review.success").build()); //feedback("xss-stored-comment-failure")
    }
}
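
Note the getOrDefault idiom above: getOrDefault does not store the default queue in the map, which is why the code puts the queue back after adding the review. Reduced to its essentials (a sketch, not WebGoat's code):

import com.google.common.collect.EvictingQueue;
import java.util.HashMap;
import java.util.Map;

public class PerUserReviewsSketch {
    private final Map<String, EvictingQueue<String>> userReviews = new HashMap<>();

    public void addReview(String user, String review) {
        // getOrDefault returns but does not insert the default, hence the explicit put.
        EvictingQueue<String> reviews = userReviews.getOrDefault(user, EvictingQueue.create(100));
        reviews.add(review); // oldest review drops off once 100 is reached
        userReviews.put(user, reviews);
    }
}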

From source file: org.codice.ddf.platform.logging.LoggingService.java

private EvictingQueue<LogEvent> createNewEvictingQueue(int newMaxLogEvents) {
    EvictingQueue<LogEvent> evictingQueue = EvictingQueue.create(newMaxLogEvents);
    evictingQueue.addAll(logEvents);
    return evictingQueue;
}

From source file: com.streamsets.pipeline.stage.processor.aggregation.aggregator.AggregatorDataProvider.java

/**
 * Creates an AggregatorDataProvider for a family of Aggregators that will close data windows together (atomically)
 *
 * @param windowsToKeep number of data windows to keep in memory, including the live one.
 */
public AggregatorDataProvider(int windowsToKeep, WindowType windowType) {
    Utils.checkArgument(windowsToKeep > 0, "windows to keep must be greater than zero");
    aggregators = new HashSet<>();
    dataWindowQueue = EvictingQueue.create(windowsToKeep);
    dataWindowList = Collections.emptyList();
    this.windowType = windowType;
}