Example usage for com.google.common.collect EvictingQueue add

Introduction

This page shows example usages of com.google.common.collect.EvictingQueue.add collected from open-source projects.

Prototype

@Override
public boolean add(E e) 

Document

Adds the given element to this queue.
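
The following is a minimal, standalone sketch (not taken from the projects below) illustrating the method's contract: add always returns true, and when the queue is already full the element at the head is evicted to make room for the new one.

import com.google.common.collect.EvictingQueue;

public class EvictingQueueAddExample {
    public static void main(String[] args) {
        // Capacity of 2: the third add evicts the oldest element instead of failing.
        EvictingQueue<String> queue = EvictingQueue.create(2);
        queue.add("a");
        queue.add("b");
        boolean accepted = queue.add("c"); // always returns true
        System.out.println(queue);         // [b, c]
        System.out.println(accepted);      // true
    }
}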

Usage

From source file:com.eucalyptus.util.metrics.ThruputMetrics.java
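
Under a write lock, this method appends a DataPoint to the action's existing queue, or creates a fixed-capacity EvictingQueue (METRICS_COLLECTION_SIZE) and adds the first point to it.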

private static void addDataPointNoThread(MonitoredAction action, long newDataPoint) {
    try (final LockResource lock = LockResource.lock(storageLock.writeLock())) {
        if (data.containsKey(action)) {
            data.get(action).add(new DataPoint(newDataPoint));
        } else {
            EvictingQueue<DataPoint> newQ = EvictingQueue.create(MetricsConfiguration.METRICS_COLLECTION_SIZE);
            newQ.add(new DataPoint(newDataPoint));
            data.put(action, newQ);
        }
    }

    if (LOG.isTraceEnabled()) {
        StringBuilder sb = new StringBuilder(action.name);
        sb.append("=");
        sb.append(newDataPoint);
        LOG.trace(sb.toString());
    }
}

From source file:com.eucalyptus.util.metrics.ThruputMetrics.java
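
Here the storage is resized: for each action a new EvictingQueue with the new capacity is created, and only the most recent data points that fit are re-added.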

public static void changeSize(int newSize) {
    try (final LockResource lock = LockResource.lock(storageLock.writeLock())) {
        for (MonitoredAction action : MonitoredAction.values()) {
            if (data.containsKey(action)) {
                EvictingQueue<DataPoint> newQ = EvictingQueue.create(newSize);
                DataPoint[] values = data.get(action).toArray(new DataPoint[0]);
                for (int i = values.length > newSize ? values.length - newSize : 0; i < values.length; i++)
                    newQ.add(values[i]);
                data.put(action, newQ);
            }
        }
    }
}

From source file:io.geobit.chain.util.Performance.java
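
On a successful call, the measured latency is added to the target's EvictingQueue of timings; a null latency is treated as an error and updates the failure and blacklist counters instead.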

public void record(T t, Long millis) {
    Integer blackListRound = blacklist.get(t);
    if (millis == null) {
        Long error = ko.get(t);
        ko.put(t, error + 1);
        blacklist.put(t, blackListRound + StaticNumbers.PENALTY_ROUND);
    } else {
        Long good = ok.get(t);
        ok.put(t, good + 1);
        EvictingQueue<Long> times = timers.get(t);
        times.add(millis);
        int penalty = millis.intValue() / 1000; /* 1 penalty round per second of latency */
        if (penalty > 0) {
            blacklist.put(t, blackListRound + Math.min(penalty, StaticNumbers.PENALTY_ROUND));
        }
    }
}

From source file:org.owasp.webgoat.plugin.StoredXssComments.java
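
At most 100 comments are kept per user: the newly parsed comment is added to the user's EvictingQueue, which is then stored back in the userComments map.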

@RequestMapping(method = RequestMethod.POST)
@ResponseBody
public AttackResult createNewComment(@RequestBody String commentStr) throws IOException {

    Comment comment = parseJson(commentStr);

    EvictingQueue<Comment> comments = userComments.getOrDefault(webSession.getUserName(),
            EvictingQueue.create(100));
    comment.setDateTime(DateTime.now().toString(fmt));
    comment.setUser(webSession.getUserName());

    comments.add(comment);
    userComments.put(webSession.getUserName(), comments);

    if (comment.getText().contains(phoneHomeString)) {
        return (success().feedback("xss-stored-comment-success").build());
    } else {
        return (failed().feedback("xss-stored-comment-failure").build());
    }
}

From source file:org.hawkular.datamining.forecast.models.WeightedMovingAverage.java
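
The queue acts as a sliding window over the series: each value is added, and once remainingCapacity() reaches zero a weighted sum over the window produces the smoothed point.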

public List<DataPoint> learn() {

    List<DataPoint> result = new ArrayList<>(dataPoints.size());
    EvictingQueue<Double> window = EvictingQueue.create(weights.length);

    int endHalf = weights.length / 2;

    // pad the beginning with empty (null-valued) points
    for (int i = 0; i < (weights.length - endHalf) - 1; i++) {
        result.add(new DataPoint(null, dataPoints.get(i).getTimestamp()));
    }

    for (int i = 0; i < dataPoints.size(); i++) {
        window.add(dataPoints.get(i).getValue());

        if (window.remainingCapacity() == 0) {
            Iterator<Double> iterator = window.iterator();
            int counter = 0;
            double sum = 0;
            while (iterator.hasNext()) {
                double value = iterator.next();
                sum += value * weights[counter++];
            }

            result.add(new DataPoint(sum, dataPoints.get(i - endHalf).getTimestamp()));
        }
    }

    // pad the end with empty (null-valued) points
    for (int i = result.size(); i < dataPoints.size(); i++) {
        result.add(new DataPoint(null, dataPoints.get(i).getTimestamp()));
    }

    return result;
}

From source file:org.owasp.webgoat.plugin.ForgedReviews.java
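
As in the comments example above, reviews are held per user in an EvictingQueue capped at 100 entries; the new review is added before the CSRF checks are applied.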

@RequestMapping(method = RequestMethod.POST)
@ResponseBody
public AttackResult createNewReview(String reviewText, Integer stars, String validateReq,
        HttpServletRequest request) throws IOException {

    String host = (request.getHeader("host") == null) ? "NULL" : request.getHeader("host");
    //        String origin = (req.getHeader("origin") == null) ? "NULL" : req.getHeader("origin");
    //        Integer serverPort = (req.getServerPort() < 1) ? 0 : req.getServerPort();
    //        String serverName = (req.getServerName() == null) ? "NULL" : req.getServerName();
    String referer = (request.getHeader("referer") == null) ? "NULL" : request.getHeader("referer");
    String[] refererArr = referer.split("/");

    EvictingQueue<Review> reviews = userReviews.getOrDefault(webSession.getUserName(),
            EvictingQueue.create(100));
    Review review = new Review();

    review.setText(reviewText);
    review.setDateTime(DateTime.now().toString(fmt));
    review.setUser(webSession.getUserName());
    review.setStars(stars);

    reviews.add(review);
    userReviews.put(webSession.getUserName(), reviews);
    //short-circuit
    if (validateReq == null || !validateReq.equals(weakAntiCSRF)) {
        return trackProgress(failed().feedback("csrf-you-forgot-something").build());
    }
    //we have the spoofed files
    if (!"NULL".equals(referer) && refererArr[2].equals(host)) {
        return trackProgress(failed().feedback("csrf-same-host").build());
    } else {
        return trackProgress(success().feedback("csrf-review.success").build()); //feedback("xss-stored-comment-failure")
    }
}

From source file:fr.norad.jmxzabbix.core.JmxToZabbixDaemon.java
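
Collected JMX metrics are added to a bounded in-memory queue, so unsent requests accumulate only up to the configured maximum before being pushed to Zabbix.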

@Override
public void run() {
    EvictingQueue<ZabbixRequest> queue = EvictingQueue.create(config.getInMemoryMaxQueueSize());
    while (!interruptFlag) {
        try {
            Thread.sleep(config.getPushIntervalSecond() * 1000);
            if (isNullOrEmpty(config.getZabbix().getHost())) {
                continue;
            }
            ZabbixRequest metrics = new JmxMetrics(config.getJmx(), config.getServerName()).getMetrics();
            if (metrics != null) {
                queue.add(metrics);
            }
            new ZabbixClient(config.getZabbix()).send(queue);
        } catch (Exception e) {
            e.printStackTrace(System.err);
        }
    }

}

From source file:fr.norad.jmxzabbix.core.ZabbixClient.java
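
Requests are polled from the queue and sent one by one; if sending fails, the current request is added back to the queue so it can be retried on a later push.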

public void send(EvictingQueue<ZabbixRequest> dataQueue) throws IOException {
    if (dataQueue.isEmpty()) {
        return;
    }
    try (Socket zabbix = new Socket(config.getHost(), config.getPort());
            OutputStream out = zabbix.getOutputStream();
            InputStream in = zabbix.getInputStream()) {

        zabbix.setSoTimeout(config.getTimeoutSecond() * 1000);

        ZabbixRequest current = null;
        while (!dataQueue.isEmpty()) {
            try {
                current = dataQueue.poll();
                LOGGER.debug("sending request" + current);
                asClientWriteRequest(out, current);
                out.flush();
                LOGGER.debug("request sent");
                LOGGER.debug("reading response");
                ZabbixResponse zabbixResponse = asClientReadResponse(in);
                LOGGER.debug("response read" + zabbixResponse);
            } catch (Exception e) {
                if (current != null) {
                    dataQueue.add(current);
                }
                throw new IllegalStateException("Communication with Zabbix goes wrong", e);
            }
        }
    } catch (ConnectException e) {
        throw new IOException("Cannot connect to " + config.getHost() + ':' + config.getPort());
    }
}

From source file:com.streamsets.datacollector.execution.alerts.DataRuleEvaluator.java
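
Every evaluated record is added to a per-rule EvictingQueue of sampled records, sized by the rule's retention setting and capped by a configured maximum cache size.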

public void evaluateRule(List<Record> sampleRecords, String lane,
        Map<String, EvictingQueue<SampledRecord>> ruleToSampledRecordsMap) {

    if (dataRuleDefinition.isEnabled() && sampleRecords != null && sampleRecords.size() > 0) {

        // initializing the ElVar context for the duration of the rule evaluation to be able to have a 'rule' context
        // for the alert:info() EL.
        ELVariables elVars = new ELVariables();
        elVars.addContextVariable(PIPELINE_CONTEXT, pipelineELContext);
        elVars.addContextVariable(RULE_ID_CONTEXT, dataRuleDefinition.getId());

        //cache all sampled records for this data rule definition in an evicting queue
        EvictingQueue<SampledRecord> sampledRecords = ruleToSampledRecordsMap.get(dataRuleDefinition.getId());
        if (sampledRecords == null) {
            int maxSize = configuration.get(Constants.SAMPLED_RECORDS_MAX_CACHE_SIZE_KEY,
                    Constants.SAMPLED_RECORDS_MAX_CACHE_SIZE_DEFAULT);
            int size = dataRuleDefinition.getSamplingRecordsToRetain();
            if (size > maxSize) {
                size = maxSize;
            }
            sampledRecords = EvictingQueue.create(size);
            ruleToSampledRecordsMap.put(dataRuleDefinition.getId(), sampledRecords);
        }
        //Meter
        //evaluate sample set of records for condition
        int matchingRecordCount = 0;
        int evaluatedRecordCount = 0;
        List<String> alertTextForMatchRecords = new ArrayList<>();
        for (Record r : sampleRecords) {
            evaluatedRecordCount++;
            //evaluate
            boolean success = evaluate(elVars, r, dataRuleDefinition.getCondition(),
                    dataRuleDefinition.getId());
            if (success) {
                alertTextForMatchRecords.add(resolveAlertText(elVars, dataRuleDefinition));
                sampledRecords.add(new SampledRecord(r, true));
                matchingRecordCount++;
            } else {
                sampledRecords.add(new SampledRecord(r, false));
            }
        }

        if (dataRuleDefinition.isAlertEnabled()) {
            //Keep the counters and meters ready before execution
            //batch record counter - cumulative sum of records per batch
            Counter evaluatedRecordCounter = MetricsConfigurator.getCounter(metrics,
                    LaneResolver.getPostFixedLaneForObserver(lane));
            if (evaluatedRecordCounter == null) {
                evaluatedRecordCounter = MetricsConfigurator.createCounter(metrics,
                        LaneResolver.getPostFixedLaneForObserver(lane), name, rev);
                if (metricRegistryJson != null) {
                    CounterJson counterJson = metricRegistryJson.getCounters()
                            .get(LaneResolver.getPostFixedLaneForObserver(lane)
                                    + MetricsConfigurator.COUNTER_SUFFIX);
                    evaluatedRecordCounter.inc(counterJson.getCount());
                }
            }
            //counter for the matching records - cumulative sum of records that match criteria
            Counter matchingRecordCounter = MetricsConfigurator.getCounter(metrics,
                    USER_PREFIX + dataRuleDefinition.getId());
            if (matchingRecordCounter == null) {
                matchingRecordCounter = MetricsConfigurator.createCounter(metrics,
                        USER_PREFIX + dataRuleDefinition.getId(), name, rev);
                if (metricRegistryJson != null) {
                    CounterJson counterJson = metricRegistryJson.getCounters()
                            .get(USER_PREFIX + dataRuleDefinition.getId() + MetricsConfigurator.COUNTER_SUFFIX);
                    matchingRecordCounter.inc(counterJson.getCount());
                }
            }

            evaluatedRecordCounter.inc(evaluatedRecordCount);
            matchingRecordCounter.inc(matchingRecordCount);

            double threshold;
            try {
                threshold = Double.parseDouble(dataRuleDefinition.getThresholdValue());
            } catch (NumberFormatException e) {
                //Soft error for now as we don't want this alert to stop other rules
                LOG.error("Error interpreting threshold '{}' as a number",
                        dataRuleDefinition.getThresholdValue(), e);
                return;
            }
            switch (dataRuleDefinition.getThresholdType()) {
            case COUNT:
                if (matchingRecordCounter.getCount() > threshold) {
                    if (dataRuleDefinition instanceof DriftRuleDefinition) {
                        if (isStatAggregationEnabled()) {
                            createAndEnqueDataRuleRecord(dataRuleDefinition, evaluatedRecordCount,
                                    matchingRecordCount, alertTextForMatchRecords);
                        } else {
                            for (String alertText : alertTextForMatchRecords) {
                                alertManager.alert(matchingRecordCounter.getCount(), emailIds,
                                        AlertManagerHelper.cloneRuleWithResolvedAlertText(dataRuleDefinition,
                                                alertText));
                            }
                        }
                    } else if (dataRuleDefinition instanceof DataRuleDefinition) {
                        if (isStatAggregationEnabled()) {
                            createAndEnqueDataRuleRecord(dataRuleDefinition, evaluatedRecordCount,
                                    matchingRecordCount,
                                    Arrays.asList(resolveAlertText(elVars, dataRuleDefinition)));
                        } else {
                            alertManager.alert(matchingRecordCounter.getCount(), emailIds,
                                    AlertManagerHelper.cloneRuleWithResolvedAlertText(dataRuleDefinition,
                                            resolveAlertText(elVars, dataRuleDefinition)));
                        }
                    } else {
                        throw new RuntimeException(Utils.format("Unexpected RuleDefinition class '{}'",
                                dataRuleDefinition.getClass().getName()));
                    }
                }
                break;
            case PERCENTAGE:
                if ((matchingRecordCounter.getCount() * 100.0 / evaluatedRecordCounter.getCount()) > threshold
                        && evaluatedRecordCounter.getCount() >= dataRuleDefinition.getMinVolume()) {
                    if (isStatAggregationEnabled()) {
                        createAndEnqueDataRuleRecord(dataRuleDefinition, evaluatedRecordCount,
                                matchingRecordCount, alertTextForMatchRecords);
                    } else {
                        alertManager.alert(matchingRecordCounter.getCount(), emailIds,
                                AlertManagerHelper.cloneRuleWithResolvedAlertText(dataRuleDefinition,
                                        resolveAlertText(elVars, dataRuleDefinition)));
                    }
                }
                break;
            }
        }

        if (dataRuleDefinition.isMeterEnabled() && matchingRecordCount > 0) {
            Meter meter = MetricsConfigurator.getMeter(metrics, USER_PREFIX + dataRuleDefinition.getId());
            if (meter == null) {
                meter = MetricsConfigurator.createMeter(metrics, USER_PREFIX + dataRuleDefinition.getId(), name,
                        rev);
            }
            meter.mark(matchingRecordCount);
        }
    }
}

From source file:org.elasticsearch.search.aggregations.pipeline.serialdiff.SerialDiffPipelineAggregator.java
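
The queue serves as a fixed-length lag window: each bucket value is added, and peek() supplies the value from lag buckets earlier to compute the serial difference.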

@Override
public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
    InternalHistogram histo = (InternalHistogram) aggregation;
    List<? extends InternalHistogram.Bucket> buckets = histo.getBuckets();
    InternalHistogram.Factory<? extends InternalHistogram.Bucket> factory = histo.getFactory();

    List newBuckets = new ArrayList<>();
    EvictingQueue<Double> lagWindow = EvictingQueue.create(lag);
    int counter = 0;

    for (InternalHistogram.Bucket bucket : buckets) {
        Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy);
        InternalHistogram.Bucket newBucket = bucket;

        counter += 1;

        // Still under the initial lag period, add nothing and move on
        Double lagValue;
        if (counter <= lag) {
            lagValue = Double.NaN;
        } else {
            lagValue = lagWindow.peek(); // Peek here, because we rely on add'ing to always move the window
        }

        // Normalize null's to NaN
        if (thisBucketValue == null) {
            thisBucketValue = Double.NaN;
        }

        // Both have values, calculate diff and replace the "empty" bucket
        if (!Double.isNaN(thisBucketValue) && !Double.isNaN(lagValue)) {
            double diff = thisBucketValue - lagValue;

            List<InternalAggregation> aggs = new ArrayList<>(
                    Lists.transform(bucket.getAggregations().asList(), AGGREGATION_TRANFORM_FUNCTION));
            aggs.add(new InternalSimpleValue(name(), diff, formatter, new ArrayList<PipelineAggregator>(),
                    metaData()));
            newBucket = factory.createBucket(bucket.getKey(), bucket.getDocCount(),
                    new InternalAggregations(aggs), bucket.getKeyed(), bucket.getFormatter());
        }

        newBuckets.add(newBucket);
        lagWindow.add(thisBucketValue);

    }
    return factory.create(newBuckets, histo);
}