Example usage for java.util.concurrent.atomic AtomicLong set

Introduction

This page collects example usages of the java.util.concurrent.atomic.AtomicLong#set method, drawn from open source projects.

Prototype

public final void set(long newValue) 

Document

Sets the value to newValue, with memory effects as specified by VarHandle#setVolatile.
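
Before the project-level examples below, here is a minimal, self-contained sketch (class and variable names are ours, not from any of the projects below) illustrating this contract: set performs an unconditional volatile store, so the written value is immediately visible to other threads.

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongSetExample {
    public static void main(String[] args) throws InterruptedException {
        final AtomicLong counter = new AtomicLong(10L);

        Thread reader = new Thread(() -> {
            // Spin until the volatile write below becomes visible to this thread.
            while (counter.get() != 42L) {
                Thread.onSpinWait();
            }
            System.out.println("observed " + counter.get());
        });
        reader.start();

        // Unlike compareAndSet, set does not depend on the current value;
        // it is typically used to reset or publish a value rather than to
        // perform an atomic read-modify-write.
        counter.set(42L);
        reader.join();
    }
}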

Usage

From source file:org.hyperic.hq.measurement.server.session.DataManagerImpl.java

private CharSequence getRawDataSql(Integer[] mids, long begin, long end, AtomicLong publishedInterval) {
    if ((mids == null) || (mids.length == 0)) {
        return "";
    }
    if (log.isDebugEnabled()) {
        log.debug("gathering data from begin=" + TimeUtil.toString(begin) + ", end=" + TimeUtil.toString(end));
    }
    final HQDialect dialect = measurementDAO.getHQDialect();
    // XXX I don't like adding the sql hint, when we start testing against mysql 5.5 we should
    //     re-evaluate if this is necessary
    // 1) we shouldn't have to tell the db to explicitly use the Primary key for these
    //    queries, it should just know because we update stats every few hours
    // 2) we only want to use the primary key for bigger queries.  Our tests show
    //    that the primary key performance is very consistent for large queries and smaller
    //    queries.  But for smaller queries the measurement_id index is more effective
    final String hint = (dialect.getMetricDataHint().isEmpty() || (mids.length < 1000)) ? ""
            : " " + dialect.getMetricDataHint();
    final String sql = new StringBuilder(1024 + (mids.length * 5))
            .append("SELECT count(*) as cnt, sum(value) as sumvalue, ")
            .append("min(value) as minvalue, max(value) as maxvalue, timestamp").append(" FROM :table")
            .append(hint).append(" WHERE timestamp BETWEEN ").append(begin).append(" AND ").append(end)
            .append(MeasTabManagerUtil.getMeasInStmt(mids, true)).append(" GROUP BY timestamp").toString();
    final String[] tables = getDataTables(begin, end, false);
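    // publishedInterval acts as an out-parameter: when the query resolves to a
    // single rollup table, report that table's data interval back to the caller.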
    if ((publishedInterval != null) && (tables.length == 1)) {
        if (tables[0].equals(TAB_DATA_1H)) {
            publishedInterval.set(HOUR);
        } else if (tables[0].equals(TAB_DATA_6H)) {
            publishedInterval.set(HOUR * 6);
        } else if (tables[0].equals(TAB_DATA_1D)) {
            publishedInterval.set(HOUR * 24);
        }
    }
    final StringBuilder sqlBuf = new StringBuilder(128 * tables.length);
    for (int i = 0; i < tables.length; i++) {
        sqlBuf.append(sql.replace(":table", tables[i]));
        if (i < (tables.length - 1)) {
            sqlBuf.append(" UNION ALL ");
        }
    }
    return sqlBuf;
}

From source file:io.warp10.continuum.egress.EgressFetchHandler.java

private static void rawDump(PrintWriter pw, GTSDecoderIterator iter, boolean dedup, boolean signed,
        long timespan, AtomicReference<Metadata> lastMeta, AtomicLong lastCount, boolean sortMeta)
        throws IOException {

    String name = null;
    Map<String, String> labels = null;

    StringBuilder sb = new StringBuilder();

    Metadata lastMetadata = lastMeta.get();
    long currentCount = lastCount.get();

    while (iter.hasNext()) {
        GTSDecoder decoder = iter.next();

        if (dedup) {
            decoder = decoder.dedup();
        }

        if (!decoder.next()) {
            continue;
        }

        long toDecodeCount = Long.MAX_VALUE;

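        // A negative timespan means "fetch at most -timespan values"; currentCount
        // tracks how many have already been returned for this series across calls.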
        if (timespan < 0) {
            Metadata meta = decoder.getMetadata();
            if (!meta.equals(lastMetadata)) {
                lastMetadata = meta;
                currentCount = 0;
            }
            toDecodeCount = Math.max(0, -timespan - currentCount);
        }

        GTSEncoder encoder = decoder.getEncoder(true);

        //
        // Only display the class + labels if they have changed since the previous GTS
        //

        Map<String, String> lbls = decoder.getLabels();

        //
        // Compute the name
        //

        name = decoder.getName();
        labels = lbls;
        sb.setLength(0);
        GTSHelper.encodeName(sb, name);
        sb.append("{");
        boolean first = true;

        if (sortMeta) {
            lbls = new TreeMap<String, String>(lbls);
        }

        for (Entry<String, String> entry : lbls.entrySet()) {
            //
            // Skip owner/producer labels and any other 'private' labels
            //
            if (!signed) {
                if (Constants.PRODUCER_LABEL.equals(entry.getKey())) {
                    continue;
                }
                if (Constants.OWNER_LABEL.equals(entry.getKey())) {
                    continue;
                }
            }

            if (!first) {
                sb.append(",");
            }
            GTSHelper.encodeName(sb, entry.getKey());
            sb.append("=");
            GTSHelper.encodeName(sb, entry.getValue());
            first = false;
        }
        sb.append("}");

        if (encoder.getCount() > toDecodeCount) {
            // We have too much data, shrink the encoder
            GTSEncoder enc = new GTSEncoder();
            enc.safeSetMetadata(decoder.getMetadata());
            while (decoder.next() && toDecodeCount > 0) {
                enc.addValue(decoder.getTimestamp(), decoder.getLocation(), decoder.getElevation(),
                        decoder.getValue());
                toDecodeCount--;
            }
            encoder = enc;
        }

        if (timespan < 0) {
            currentCount += encoder.getCount();
        }

        if (encoder.size() > 0) {
            pw.print(encoder.getBaseTimestamp());
            pw.print("//");
            pw.print(encoder.getCount());
            pw.print(" ");
            pw.print(sb.toString());
            pw.print(" ");

            //pw.println(new String(OrderPreservingBase64.encode(encoder.getBytes())));
            OrderPreservingBase64.encodeToWriter(encoder.getBytes(), pw);
            pw.write('\r');
            pw.write('\n');
        }
    }

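    // Write the last seen metadata and running count back into the
    // caller-supplied atomics so a subsequent call can resume the count.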
    lastMeta.set(lastMetadata);
    lastCount.set(currentCount);
}

From source file:io.warp10.continuum.egress.EgressFetchHandler.java

private static void wrapperDump(PrintWriter pw, GTSDecoderIterator iter, boolean dedup, boolean signed,
        byte[] fetchPSK, long timespan, AtomicReference<Metadata> lastMeta, AtomicLong lastCount)
        throws IOException {

    if (!signed) {
        throw new IOException("Unsigned request.");
    }

    // Labels for Sensision
    Map<String, String> labels = new HashMap<String, String>();

    StringBuilder sb = new StringBuilder();

    Metadata lastMetadata = lastMeta.get();
    long currentCount = lastCount.get();

    while (iter.hasNext()) {
        GTSDecoder decoder = iter.next();

        if (dedup) {
            decoder = decoder.dedup();
        }

        if (!decoder.next()) {
            continue;
        }

        long toDecodeCount = Long.MAX_VALUE;

        if (timespan < 0) {
            Metadata meta = decoder.getMetadata();
            if (!meta.equals(lastMetadata)) {
                lastMetadata = meta;
                currentCount = 0;
            }
            toDecodeCount = Math.max(0, -timespan - currentCount);
        }

        GTSEncoder encoder = decoder.getEncoder(true);

        if (encoder.getCount() > toDecodeCount) {
            // We have too much data, shrink the encoder
            GTSEncoder enc = new GTSEncoder();
            enc.safeSetMetadata(decoder.getMetadata());
            while (decoder.next() && toDecodeCount > 0) {
                enc.addValue(decoder.getTimestamp(), decoder.getLocation(), decoder.getElevation(),
                        decoder.getValue());
                toDecodeCount--;
            }
            encoder = enc;
        }

        if (timespan < 0) {
            currentCount += encoder.getCount();
        }

        if (encoder.size() <= 0) {
            continue;
        }

        //
        // Build a GTSWrapper
        //

        GTSWrapper wrapper = GTSWrapperHelper.fromGTSEncoderToGTSWrapper(encoder, true);

        //      GTSWrapper wrapper = new GTSWrapper();
        //      wrapper.setBase(encoder.getBaseTimestamp());
        //      wrapper.setMetadata(encoder.getMetadata());
        //      wrapper.setCount(encoder.getCount());
        //      wrapper.setEncoded(encoder.getBytes());

        //
        // Serialize the wrapper
        //

        TSerializer serializer = new TSerializer(new TCompactProtocol.Factory());
        byte[] data = null;

        try {
            data = serializer.serialize(wrapper);
        } catch (TException te) {
            throw new IOException(te);
        }

        //
        // Output is GTSWrapperId <WSP> HASH <WSP> GTSWrapper
        //

        pw.write(Hex.encodeHex(GTSWrapperHelper.getId(wrapper)));

        pw.write(' ');

        if (null != fetchPSK) {
            //
            // Compute HMac for the wrapper
            //

            long hash = SipHashInline.hash24(fetchPSK, data);

            //
            // Output the MAC before the data, as hex digits
            //
            pw.write(Hex.encodeHex(Longs.toByteArray(hash)));
        } else {
            pw.write('-');
        }

        pw.write(' ');

        //
        // Base64 encode the wrapper
        //

        OrderPreservingBase64.encodeToWriter(data, pw);
        pw.write('\r');
        pw.write('\n');

        //
        // Sensision metrics
        //

        labels.clear();
        labels.put(SensisionConstants.SENSISION_LABEL_APPLICATION,
                wrapper.getMetadata().getLabels().get(Constants.APPLICATION_LABEL));

        Sensision.update(SensisionConstants.SENSISION_CLASS_CONTINUUM_SFETCH_WRAPPERS, Sensision.EMPTY_LABELS,
                1);
        Sensision.update(SensisionConstants.SENSISION_CLASS_CONTINUUM_SFETCH_WRAPPERS_PERAPP, labels, 1);

        Sensision.update(SensisionConstants.SENSISION_CLASS_CONTINUUM_SFETCH_WRAPPERS_SIZE,
                Sensision.EMPTY_LABELS, data.length);
        Sensision.update(SensisionConstants.SENSISION_CLASS_CONTINUUM_SFETCH_WRAPPERS_SIZE_PERAPP, labels,
                data.length);

        Sensision.update(SensisionConstants.SENSISION_CLASS_CONTINUUM_SFETCH_WRAPPERS_DATAPOINTS,
                Sensision.EMPTY_LABELS, wrapper.getCount());
        Sensision.update(SensisionConstants.SENSISION_CLASS_CONTINUUM_SFETCH_WRAPPERS_DATAPOINTS_PERAPP, labels,
                wrapper.getCount());

    }

    lastMeta.set(lastMetadata);
    lastCount.set(currentCount);
}

From source file:org.apache.usergrid.services.notifications.QueueListener.java

private void execute(int threadNumber) {

    if (Thread.currentThread().isDaemon()) {
        Thread.currentThread().setDaemon(true);
    }

    Thread.currentThread().setName(getClass().getSimpleName() + "_Push-"
            + RandomStringUtils.randomAlphanumeric(4) + "-" + threadNumber);

    final AtomicInteger consecutiveExceptions = new AtomicInteger();

    if (logger.isTraceEnabled()) {
        logger.trace("QueueListener: Starting execute process.");
    }

    Meter meter = metricsService.getMeter(QueueListener.class, "execute.commit");
    com.codahale.metrics.Timer timer = metricsService.getTimer(QueueListener.class, "execute.dequeue");

    if (logger.isTraceEnabled()) {
        logger.trace("getting from queue {} ", queueName);
    }

    LegacyQueueScope queueScope = new LegacyQueueScopeImpl(queueName,
            LegacyQueueScope.RegionImplementation.LOCAL);
    LegacyQueueManager legacyQueueManager = queueManagerFactory.getQueueManager(queueScope);

    final AtomicLong runCount = new AtomicLong(0);

    // run until there are no more active jobs
    while (true) {

        if (sleepBetweenRuns > 0) {
            if (logger.isTraceEnabled()) {
                logger.trace("sleep between rounds...sleep...{}", sleepBetweenRuns);
            }
            try {
                Thread.sleep(sleepBetweenRuns);
            } catch (InterruptedException ignored) {
            }
        }

        Timer.Context timerContext = timer.time();
        rx.Observable.from(legacyQueueManager.getMessages(MAX_TAKE, ApplicationQueueMessage.class))
                .buffer(MAX_TAKE).doOnNext(messages -> {

                    try {
                        if (logger.isTraceEnabled()) {
                            logger.trace("retrieved batch of {} messages from queue {}", messages.size(),
                                    queueName);
                        }

                        if (messages.size() > 0) {
                            HashMap<UUID, List<LegacyQueueMessage>> messageMap = new HashMap<>(messages.size());

                            //group messages into hash map by app id
                            for (LegacyQueueMessage message : messages) {
                                //TODO: stop copying around this area as it gets notification specific.
                                ApplicationQueueMessage queueMessage = (ApplicationQueueMessage) message
                                        .getBody();
                                UUID applicationId = queueMessage.getApplicationId();

                                // Groups queue messages by application Id,
                                // (they are all probably going to the same place)
                                if (!messageMap.containsKey(applicationId)) {
                                    //For each app id it sends the set.
                                    List<LegacyQueueMessage> lqms = new ArrayList<LegacyQueueMessage>();
                                    lqms.add(message);
                                    messageMap.put(applicationId, lqms);
                                } else {
                                    messageMap.get(applicationId).add(message);
                                }
                            }

                            long now = System.currentTimeMillis();
                            Observable merge = null;

                            //send each set of app ids together
                            for (Map.Entry<UUID, List<LegacyQueueMessage>> entry : messageMap.entrySet()) {
                                UUID applicationId = entry.getKey();

                                ApplicationQueueManager manager = applicationQueueManagerCache
                                        .getApplicationQueueManager(emf.getEntityManager(applicationId),
                                                legacyQueueManager,
                                                new JobScheduler(smf.getServiceManager(applicationId),
                                                        emf.getEntityManager(applicationId)),
                                                metricsService, properties);

                                if (logger.isTraceEnabled()) {
                                    logger.trace("send batch for app {} of {} messages", entry.getKey(),
                                            entry.getValue().size());
                                }
                                Observable current = manager.sendBatchToProviders(entry.getValue(), queueName);

                                if (merge == null)
                                    merge = current;
                                else {
                                    merge = Observable.merge(merge, current);
                                }
                            }

                            if (merge != null) {
                                merge.toBlocking().lastOrDefault(null);
                            }
                            legacyQueueManager.commitMessages(messages);

                            meter.mark(messages.size());
                            if (logger.isTraceEnabled()) {
                                logger.trace("sent batch {} messages duration {} ms", messages.size(),
                                        System.currentTimeMillis() - now);
                            }

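                            // Every consecutiveCallsToRemoveDevices batches, sweep
                            // the cached queue managers for inactive devices, then
                            // reset the batch counter.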
                            if (runCount.incrementAndGet() % consecutiveCallsToRemoveDevices == 0) {
                                for (ApplicationQueueManager aqm : applicationQueueManagerCache.asMap()
                                        .values()) {
                                    try {
                                        aqm.asyncCheckForInactiveDevices();
                                    } catch (Exception inactiveDeviceException) {
                                        logger.error("Inactive Device Get failed", inactiveDeviceException);
                                    }
                                }
                                //clear everything
                                runCount.set(0);
                            }
                        } else {
                            if (logger.isTraceEnabled()) {
                                logger.trace("no messages...sleep...{}", sleepWhenNoneFound);
                            }
                            try {
                                Thread.sleep(sleepWhenNoneFound);
                            } catch (InterruptedException e) {
                                // noop
                            }
                        }
                        timerContext.stop();
                        //send to the providers
                        consecutiveExceptions.set(0);
                    } catch (Exception ex) {
                        logger.error("failed to dequeue", ex);

                        // clear the queue name cache b/c tests might have wiped the keyspace
                        legacyQueueManager.clearQueueNameCache();
                        try {
                            long sleeptime = sleepWhenNoneFound * consecutiveExceptions.incrementAndGet();
                            long maxSleep = 15000;
                            sleeptime = sleeptime > maxSleep ? maxSleep : sleeptime;
                            logger.info("sleeping due to failures {} ms", sleeptime);
                            Thread.sleep(sleeptime);
                        } catch (InterruptedException ie) {
                            if (logger.isTraceEnabled()) {
                                logger.trace("sleep interrupted");
                            }
                        }
                    }
                }).toBlocking().lastOrDefault(null);

    }
}

From source file:org.apache.hadoop.hbase.regionserver.Store.java

private Path internalFlushCache(final SortedSet<KeyValue> set, final long logCacheFlushId,
        TimeRangeTracker snapshotTimeRangeTracker, AtomicLong flushedSize, MonitoredTask status)
        throws IOException {
    StoreFile.Writer writer;
    // Find the smallest read point across all the Scanners.
    long smallestReadPoint = region.getSmallestReadPoint();
    long flushed = 0;
    Path pathName;
    // Don't flush if there are no entries.
    if (set.size() == 0) {
        return null;
    }
    // Use a store scanner to find which rows to flush.
    // Note that we need to retain deletes, hence
    // treat this as a minor compaction.
    InternalScanner scanner = null;
    KeyValueScanner memstoreScanner = new CollectionBackedScanner(set, this.comparator);
    if (getHRegion().getCoprocessorHost() != null) {
        scanner = getHRegion().getCoprocessorHost().preFlushScannerOpen(this, memstoreScanner);
    }
    if (scanner == null) {
        Scan scan = new Scan();
        scan.setMaxVersions(scanInfo.getMaxVersions());
        scanner = new StoreScanner(this, scanInfo, scan, Collections.singletonList(memstoreScanner),
                ScanType.MINOR_COMPACT, this.region.getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);
    }
    if (getHRegion().getCoprocessorHost() != null) {
        InternalScanner cpScanner = getHRegion().getCoprocessorHost().preFlush(this, scanner);
        // NULL scanner returned from coprocessor hooks means skip normal processing
        if (cpScanner == null) {
            return null;
        }
        scanner = cpScanner;
    }
    try {
        int compactionKVMax = conf.getInt(HConstants.COMPACTION_KV_MAX, 10);
        // TODO:  We can fail in the below block before we complete adding this
        // flush to list of store files.  Add cleanup of anything put on filesystem
        // if we fail.
        synchronized (flushLock) {
            status.setStatus("Flushing " + this + ": creating writer");
            // A. Write the map out to the disk
            writer = createWriterInTmp(set.size());
            writer.setTimeRangeTracker(snapshotTimeRangeTracker);
            pathName = writer.getPath();
            try {
                List<KeyValue> kvs = new ArrayList<KeyValue>();
                boolean hasMore;
                do {
                    // fetch the next batch of KeyValues from the scanner
                    hasMore = scanner.next(kvs, compactionKVMax);
                    if (!kvs.isEmpty()) {
                        for (KeyValue kv : kvs) {
                            // If we know that this KV is going to be included always, then let us
                            // set its memstoreTS to 0. This will help us save space when writing to disk.
                            if (kv.getMemstoreTS() <= smallestReadPoint) {
                                // let us not change the original KV. It could be in the memstore
                                // changing its memstoreTS could affect other threads/scanners.
                                kv = kv.shallowCopy();
                                kv.setMemstoreTS(0);
                            }
                            writer.append(kv);
                            flushed += this.memstore.heapSizeChange(kv, true);
                        }
                        kvs.clear();
                    }
                } while (hasMore);
            } finally {
                // Write out the log sequence number that corresponds to this output
                // hfile.  The hfile is current up to and including logCacheFlushId.
                status.setStatus("Flushing " + this + ": appending metadata");
                writer.appendMetadata(logCacheFlushId, false);
                status.setStatus("Flushing " + this + ": closing flushed file");
                writer.close();
            }
        }
    } finally {
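        // Report the flushed byte count through the caller-supplied AtomicLong,
        // even if the scan above failed.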
        flushedSize.set(flushed);
        scanner.close();
    }
    if (LOG.isInfoEnabled()) {
        LOG.info("Flushed " + ", sequenceid=" + logCacheFlushId + ", memsize="
                + StringUtils.humanReadableInt(flushed) + ", into tmp file " + pathName);
    }
    return pathName;
}

From source file:io.warp10.continuum.egress.EgressFetchHandler.java

/**
 * Output a text version of fetched data. Deduplication is done on the fly so we don't decode twice.
 */
private static void textDump(PrintWriter pw, GTSDecoderIterator iter, long now, long timespan, boolean raw,
        boolean dedup, boolean signed, boolean showAttributes, AtomicReference<Metadata> lastMeta,
        AtomicLong lastCount, boolean sortMeta) throws IOException {

    String name = null;
    Map<String, String> labels = null;

    StringBuilder sb = new StringBuilder();

    Metadata lastMetadata = lastMeta.get();
    long currentCount = lastCount.get();

    while (iter.hasNext()) {
        GTSDecoder decoder = iter.next();

        if (!decoder.next()) {
            continue;
        }

        long toDecodeCount = Long.MAX_VALUE;

        if (timespan < 0) {
            Metadata meta = decoder.getMetadata();
            if (!meta.equals(lastMetadata)) {
                lastMetadata = meta;
                currentCount = 0;
            }
            toDecodeCount = Math.max(0, -timespan - currentCount);
        }

        //
        // Only display the class + labels if they have changed since the previous GTS
        //

        Map<String, String> lbls = decoder.getLabels();

        //
        // Compute the new name
        //

        boolean displayName = false;

        if (null == name || (!name.equals(decoder.getName()) || !labels.equals(lbls))) {
            displayName = true;
            name = decoder.getName();
            labels = lbls;
            sb.setLength(0);
            GTSHelper.encodeName(sb, name);
            sb.append("{");
            boolean first = true;

            if (sortMeta) {
                lbls = new TreeMap<String, String>(lbls);
            }

            for (Entry<String, String> entry : lbls.entrySet()) {
                //
                // Skip owner/producer labels and any other 'private' labels
                //
                if (!signed) {
                    if (Constants.PRODUCER_LABEL.equals(entry.getKey())) {
                        continue;
                    }
                    if (Constants.OWNER_LABEL.equals(entry.getKey())) {
                        continue;
                    }
                }

                if (!first) {
                    sb.append(",");
                }
                GTSHelper.encodeName(sb, entry.getKey());
                sb.append("=");
                GTSHelper.encodeName(sb, entry.getValue());
                first = false;
            }
            sb.append("}");

            if (showAttributes) {
                Metadata meta = decoder.getMetadata();
                if (meta.getAttributesSize() > 0) {

                    if (sortMeta) {
                        meta.setAttributes(new TreeMap<String, String>(meta.getAttributes()));
                    }

                    GTSHelper.labelsToString(sb, meta.getAttributes());
                } else {
                    sb.append("{}");
                }
            }
        }

        long timestamp = 0L;
        long location = GeoTimeSerie.NO_LOCATION;
        long elevation = GeoTimeSerie.NO_ELEVATION;
        Object value = null;

        boolean dup = true;

        long decoded = 0;

        do {

            if (toDecodeCount == decoded) {
                break;
            }

            // FIXME(hbs): only display the results which match the authorized (according to token) timerange and geo zones

            //
            // Filter out any value not in the time range
            //

            long newTimestamp = decoder.getTimestamp();

            if (newTimestamp > now || (timespan >= 0 && newTimestamp <= (now - timespan))) {
                continue;
            }

            //
            // TODO(hbs): filter out values with no location or outside the selected geozone when a geozone was set
            //

            long newLocation = decoder.getLocation();
            long newElevation = decoder.getElevation();
            Object newValue = decoder.getValue();

            dup = true;

            if (dedup) {
                if (location != newLocation || elevation != newElevation) {
                    dup = false;
                } else {
                    if (null == newValue) {
                        // Treat nulls as non-duplicates (shouldn't happen)
                        dup = false;
                    } else if (newValue instanceof Number) {
                        if (!((Number) newValue).equals(value)) {
                            dup = false;
                        }
                    } else if (newValue instanceof String) {
                        if (!((String) newValue).equals(value)) {
                            dup = false;
                        }
                    } else if (newValue instanceof Boolean) {
                        if (!((Boolean) newValue).equals(value)) {
                            dup = false;
                        }
                    }
                }
            }

            decoded++;

            location = newLocation;
            elevation = newElevation;
            timestamp = newTimestamp;
            value = newValue;

            if (raw) {
                if (!dedup || !dup) {
                    pw.println(GTSHelper.tickToString(sb, timestamp, location, elevation, value));
                }
            } else {
                // Display the name only if we have at least one value to display
                // We force 'dup' to be false when we must show the name
                if (displayName) {
                    pw.println(GTSHelper.tickToString(sb, decoder.getTimestamp(), decoder.getLocation(),
                            decoder.getElevation(), decoder.getValue()));
                    displayName = false;
                    dup = false;
                } else {
                    if (!dedup || !dup) {
                        pw.print("=");
                        pw.println(GTSHelper.tickToString(timestamp, location, elevation, value));
                    }
                }
            }
        } while (decoder.next());

        // Update currentCount
        if (timespan < 0) {
            currentCount += decoded;
        }

        // Print any remaining value
        if (dedup && dup) {
            if (raw) {
                pw.println(GTSHelper.tickToString(sb, timestamp, location, elevation, value));
            } else {
                pw.print("=");
                pw.println(GTSHelper.tickToString(timestamp, location, elevation, value));
            }
        }

        //
        // If displayName is still true it means we should have displayed the name but no value matched,
        // so set name to null so we correctly display the name for the next decoder if it has values
        //

        if (displayName) {
            name = null;
        }
    }

    lastMeta.set(lastMetadata);
    lastCount.set(currentCount);
}

From source file:io.warp10.continuum.egress.EgressFetchHandler.java

private static void jsonDump(PrintWriter pw, GTSDecoderIterator iter, long now, long timespan, boolean dedup,
        boolean signed, AtomicReference<Metadata> lastMeta, AtomicLong lastCount) throws IOException {

    String name = null;
    Map<String, String> labels = null;

    pw.print("[");

    boolean hasValues = false;

    Metadata lastMetadata = lastMeta.get();
    long currentCount = lastCount.get();

    try {
        StringBuilder sb = new StringBuilder();

        JsonSerializer serializer = new JsonSerializerFactory().create();

        boolean firstgts = true;

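        // Per-request random mask applied to the labels id below, presumably so
        // internal ids are not exposed verbatim in the JSON output.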
        long mask = (long) (Math.random() * Long.MAX_VALUE);

        while (iter.hasNext()) {
            GTSDecoder decoder = iter.next();

            if (dedup) {
                decoder = decoder.dedup();
            }

            if (!decoder.next()) {
                continue;
            }

            long toDecodeCount = Long.MAX_VALUE;

            if (timespan < 0) {
                Metadata meta = decoder.getMetadata();
                if (!meta.equals(lastMetadata)) {
                    lastMetadata = meta;
                    currentCount = 0;
                }
                toDecodeCount = Math.max(0, -timespan - currentCount);
            }

            //
            // Only display the class + labels if they have changed since the previous GTS
            //

            Map<String, String> lbls = decoder.getLabels();

            //
            // Compute the new name
            //

            boolean displayName = false;

            if (null == name || (!name.equals(decoder.getName()) || !labels.equals(lbls))) {
                displayName = true;
                name = decoder.getName();
                labels = lbls;
                sb.setLength(0);

                sb.append("{\"c\":");

                //sb.append(gson.toJson(name));
                sb.append(serializer.serialize(name));

                boolean first = true;

                sb.append(",\"l\":{");

                for (Entry<String, String> entry : lbls.entrySet()) {
                    //
                    // Skip owner/producer labels and any other 'private' labels
                    //
                    if (!signed) {
                        if (Constants.PRODUCER_LABEL.equals(entry.getKey())) {
                            continue;
                        }
                        if (Constants.OWNER_LABEL.equals(entry.getKey())) {
                            continue;
                        }
                    }

                    if (!first) {
                        sb.append(",");
                    }

                    //sb.append(gson.toJson(entry.getKey()));
                    sb.append(serializer.serialize(entry.getKey()));
                    sb.append(":");
                    //sb.append(gson.toJson(entry.getValue()));
                    sb.append(serializer.serialize(entry.getValue()));
                    first = false;
                }
                sb.append("}");

                sb.append(",\"a\":{");

                first = true;
                for (Entry<String, String> entry : decoder.getMetadata().getAttributes().entrySet()) {
                    if (!first) {
                        sb.append(",");
                    }

                    //sb.append(gson.toJson(entry.getKey()));
                    sb.append(serializer.serialize(entry.getKey()));
                    sb.append(":");
                    //sb.append(gson.toJson(entry.getValue()));
                    sb.append(serializer.serialize(entry.getValue()));
                    first = false;
                }

                sb.append("}");
                sb.append(",\"i\":\"");
                sb.append(decoder.getLabelsId() & mask);
                sb.append("\",\"v\":[");
            }

            long decoded = 0L;

            do {

                if (toDecodeCount == decoded) {
                    break;
                }

                // FIXME(hbs): only display the results which match the authorized (according to token) timerange and geo zones

                //
                // Filter out any value not in the time range
                //

                if (decoder.getTimestamp() > now
                        || (timespan >= 0 && decoder.getTimestamp() <= (now - timespan))) {
                    continue;
                }

                decoded++;

                //
                // TODO(hbs): filter out values with no location or outside the selected geozone when a geozone was set
                //

                // Display the name only if we have at least one value to display
                if (displayName) {
                    if (!firstgts) {
                        pw.print("]},");
                    }
                    pw.print(sb.toString());
                    firstgts = false;
                    displayName = false;
                } else {
                    pw.print(",");
                }
                hasValues = true;
                pw.print("[");
                pw.print(decoder.getTimestamp());
                if (GeoTimeSerie.NO_LOCATION != decoder.getLocation()) {
                    double[] latlon = GeoXPLib.fromGeoXPPoint(decoder.getLocation());
                    pw.print(",");
                    pw.print(latlon[0]);
                    pw.print(",");
                    pw.print(latlon[1]);
                }
                if (GeoTimeSerie.NO_ELEVATION != decoder.getElevation()) {
                    pw.print(",");
                    pw.print(decoder.getElevation());
                }
                pw.print(",");
                Object value = decoder.getValue();

                if (value instanceof Number) {
                    pw.print(value);
                } else if (value instanceof Boolean) {
                    pw.print(Boolean.TRUE.equals(value) ? "true" : "false");
                } else {
                    //pw.print(gson.toJson(value.toString()));
                    pw.print(serializer.serialize(value.toString()));
                }
                pw.print("]");
            } while (decoder.next());

            if (timespan < 0) {
                currentCount += decoded;
            }

            //
            // If displayName is still true it means we should have displayed the name but no value matched,
            // so set name to null so we correctly display the name for the next decoder if it has values
            //

            if (displayName) {
                name = null;
            }
        }

    } catch (Throwable t) {
        throw t;
    } finally {
        if (hasValues) {
            pw.print("]}");
        }
        pw.print("]");
    }

    lastMeta.set(lastMetadata);
    lastCount.set(currentCount);
}

From source file:io.warp10.continuum.egress.EgressFetchHandler.java

private void packedDump(PrintWriter pw, GTSDecoderIterator iter, long now, long timespan, boolean dedup,
        boolean signed, AtomicReference<Metadata> lastMeta, AtomicLong lastCount, int maxDecoderLen,
        String classSuffix, long chunksize, boolean sortMeta) throws IOException {

    String name = null;
    Map<String, String> labels = null;

    StringBuilder sb = new StringBuilder();

    Metadata lastMetadata = lastMeta.get();
    long currentCount = lastCount.get();

    List<GTSEncoder> encoders = new ArrayList<GTSEncoder>();

    while (iter.hasNext()) {
        GTSDecoder decoder = iter.next();

        if (dedup) {
            decoder = decoder.dedup();
        }

        if (!decoder.next()) {
            continue;
        }

        long toDecodeCount = Long.MAX_VALUE;

        if (timespan < 0) {
            Metadata meta = decoder.getMetadata();
            if (!meta.equals(lastMetadata)) {
                lastMetadata = meta;
                currentCount = 0;
            }
            toDecodeCount = Math.max(0, -timespan - currentCount);
        }

        GTSEncoder encoder = decoder.getEncoder(true);

        //
        // Only display the class + labels if they have changed since the previous GTS
        //

        Map<String, String> lbls = decoder.getLabels();

        //
        // Compute the name
        //

        name = decoder.getName();
        labels = lbls;
        sb.setLength(0);
        GTSHelper.encodeName(sb, name + classSuffix);
        sb.append("{");
        boolean first = true;

        if (sortMeta) {
            lbls = new TreeMap<String, String>(lbls);
        }

        for (Entry<String, String> entry : lbls.entrySet()) {
            //
            // Skip owner/producer labels and any other 'private' labels
            //
            if (!signed) {
                if (Constants.PRODUCER_LABEL.equals(entry.getKey())) {
                    continue;
                }
                if (Constants.OWNER_LABEL.equals(entry.getKey())) {
                    continue;
                }
            }

            if (!first) {
                sb.append(",");
            }
            GTSHelper.encodeName(sb, entry.getKey());
            sb.append("=");
            GTSHelper.encodeName(sb, entry.getValue());
            first = false;
        }
        sb.append("}");

        // We treat the case where encoder.getCount() is 0 in a special way
        // as this may be because the encoder was generated from a partly
        // consumed decoder and thus its count was reset to 0
        if (0 == encoder.getCount() || encoder.getCount() > toDecodeCount) {
            // We have too much data, shrink the encoder
            GTSEncoder enc = new GTSEncoder();
            enc.safeSetMetadata(decoder.getMetadata());
            while (decoder.next() && toDecodeCount > 0) {
                enc.addValue(decoder.getTimestamp(), decoder.getLocation(), decoder.getElevation(),
                        decoder.getValue());
                toDecodeCount--;
            }
            encoder = enc;
        }

        if (timespan < 0) {
            currentCount += encoder.getCount();
        }

        encoders.clear();

        //
        // Add encoders per chunk
        //

        GTSDecoder chunkdec = encoder.getDecoder(true);

        GTSEncoder chunkenc = null;

        Long lastchunk = null;

        if (Long.MAX_VALUE == chunksize) {
            encoders.add(encoder);
        } else {
            while (chunkdec.next()) {
                long ts = chunkdec.getTimestamp();
                long chunk = ts >= 0 ? ts / chunksize : ((ts + 1) / chunksize) - 1;

                //
                // If it is the first chunk or we changed chunk, create a new encoder
                //

                if (null == chunkenc || (null != lastchunk && chunk != lastchunk)) {
                    chunkenc = new GTSEncoder(0L);
                    chunkenc.setMetadata(encoder.getMetadata());
                    encoders.add(chunkenc);
                }

                lastchunk = chunk;

                chunkenc.addValue(ts, chunkdec.getLocation(), chunkdec.getElevation(), chunkdec.getValue());
            }
        }

        while (!encoders.isEmpty()) {
            encoder = encoders.remove(0);

            if (encoder.size() > 0) {
                //
                // Determine most recent timestamp
                //

                GTSDecoder dec = encoder.getDecoder(true);

                dec.next();

                long timestamp = dec.getTimestamp();

                //
                // Build GTSWrapper
                //

                encoder.setMetadata(new Metadata());
                // Clear labels
                encoder.setName("");
                encoder.setLabels(new HashMap<String, String>());
                encoder.getMetadata().setAttributes(new HashMap<String, String>());

                GTSWrapper wrapper = GTSWrapperHelper.fromGTSEncoderToGTSWrapper(encoder, true);

                TSerializer ser = new TSerializer(new TCompactProtocol.Factory());
                byte[] serialized;

                try {
                    serialized = ser.serialize(wrapper);
                } catch (TException te) {
                    throw new IOException(te);
                }

                //
                // Check the size of the generated wrapper. If it is over 75% of maxDecoderLen,
                // split the original encoder in two
                //

                if (serialized.length >= Math.floor(0.75D * maxDecoderLen) && encoder.getCount() > 2) {
                    GTSEncoder split = new GTSEncoder(0L);
                    split.setMetadata(encoder.getMetadata());

                    List<GTSEncoder> splits = new ArrayList<GTSEncoder>();

                    splits.add(split);

                    int threshold = encoder.size() / 2;

                    GTSDecoder deco = encoder.getDecoder(true);

                    while (deco.next()) {
                        split.addValue(deco.getTimestamp(), deco.getLocation(), deco.getElevation(),
                                deco.getValue());
                        if (split.size() > threshold) {
                            split = new GTSEncoder(0L);
                            splits.add(split);
                        }
                    }

                    //
                    // Now insert the splits at the beginning of 'encoders'
                    //

                    for (int i = splits.size() - 1; i >= 0; i--) {
                        encoders.add(0, splits.get(i));
                    }
                    continue;
                }

                if (serialized.length > Math.ceil(0.75D * maxDecoderLen)) {
                    throw new IOException(
                            "Encountered a value whose length is above the configured threshold of "
                                    + maxDecoderLen);
                }

                pw.print(timestamp);
                pw.print("//");
                pw.print(encoder.getCount());
                pw.print(" ");
                pw.print(sb.toString());
                pw.print(" '");

                OrderPreservingBase64.encodeToWriter(serialized, pw);

                pw.print("'");
                pw.write('\r');
                pw.write('\n');
            }
        }
    }

    lastMeta.set(lastMetadata);
    lastCount.set(currentCount);
}

From source file:io.warp10.continuum.egress.EgressFetchHandler.java

/**
 * Output a tab separated version of fetched data. Deduplication is done on the fly so we don't decode twice.
 */
private static void tsvDump(PrintWriter pw, GTSDecoderIterator iter, long now, long timespan, boolean raw,
        boolean dedup, boolean signed, AtomicReference<Metadata> lastMeta, AtomicLong lastCount,
        boolean sortMeta) throws IOException {

    String name = null;
    Map<String, String> labels = null;

    StringBuilder classSB = new StringBuilder();
    StringBuilder labelsSB = new StringBuilder();
    StringBuilder attributesSB = new StringBuilder();
    StringBuilder valueSB = new StringBuilder();

    Metadata lastMetadata = lastMeta.get();
    long currentCount = lastCount.get();

    while (iter.hasNext()) {
        GTSDecoder decoder = iter.next();

        if (!decoder.next()) {
            continue;
        }

        long toDecodeCount = Long.MAX_VALUE;

        if (timespan < 0) {
            Metadata meta = decoder.getMetadata();
            if (!meta.equals(lastMetadata)) {
                lastMetadata = meta;
                currentCount = 0;
            }
            toDecodeCount = Math.max(0, -timespan - currentCount);
        }

        //
        // Only display the class + labels if they have changed since the previous GTS
        //

        Map<String, String> lbls = decoder.getLabels();

        //
        // Compute the new name
        //

        boolean displayName = false;

        if (null == name || (!name.equals(decoder.getName()) || !labels.equals(lbls))) {
            displayName = true;
            name = decoder.getName();
            labels = lbls;
            classSB.setLength(0);
            GTSHelper.encodeName(classSB, name);
            labelsSB.setLength(0);
            attributesSB.setLength(0);
            boolean first = true;

            if (sortMeta) {
                lbls = new TreeMap<String, String>(lbls);
            }
            for (Entry<String, String> entry : lbls.entrySet()) {
                //
                // Skip owner/producer labels and any other 'private' labels
                //
                if (!signed) {
                    if (Constants.PRODUCER_LABEL.equals(entry.getKey())) {
                        continue;
                    }
                    if (Constants.OWNER_LABEL.equals(entry.getKey())) {
                        continue;
                    }
                }

                if (!first) {
                    labelsSB.append(",");
                }
                GTSHelper.encodeName(labelsSB, entry.getKey());
                labelsSB.append("=");
                GTSHelper.encodeName(labelsSB, entry.getValue());
                first = false;
            }

            first = true;
            if (decoder.getMetadata().getAttributesSize() > 0) {

                if (sortMeta) {
                    decoder.getMetadata()
                            .setAttributes(new TreeMap<String, String>(decoder.getMetadata().getAttributes()));
                }

                for (Entry<String, String> entry : decoder.getMetadata().getAttributes().entrySet()) {
                    if (!first) {
                        attributesSB.append(",");
                    }
                    GTSHelper.encodeName(attributesSB, entry.getKey());
                    attributesSB.append("=");
                    GTSHelper.encodeName(attributesSB, entry.getValue());
                    first = false;
                }
            }

        }

        long timestamp = 0L;
        long location = GeoTimeSerie.NO_LOCATION;
        long elevation = GeoTimeSerie.NO_ELEVATION;
        Object value = null;

        boolean dup = true;

        long decoded = 0;

        do {

            if (toDecodeCount == decoded) {
                break;
            }

            //
            // Filter out any value not in the time range
            //

            long newTimestamp = decoder.getTimestamp();

            if (newTimestamp > now || (timespan >= 0 && newTimestamp <= (now - timespan))) {
                continue;
            }

            //
            // TODO(hbs): filter out values with no location or outside the selected geozone when a geozone was set
            //

            long newLocation = decoder.getLocation();
            long newElevation = decoder.getElevation();
            Object newValue = decoder.getValue();

            dup = true;

            if (dedup) {
                if (location != newLocation || elevation != newElevation) {
                    dup = false;
                } else {
                    if (null == newValue) {
                        // Treat nulls as non-duplicates (shouldn't happen)
                        dup = false;
                    } else if (newValue instanceof Number) {
                        if (!((Number) newValue).equals(value)) {
                            dup = false;
                        }
                    } else if (newValue instanceof String) {
                        if (!((String) newValue).equals(value)) {
                            dup = false;
                        }
                    } else if (newValue instanceof Boolean) {
                        if (!((Boolean) newValue).equals(value)) {
                            dup = false;
                        }
                    }
                }
            }

            decoded++;

            location = newLocation;
            elevation = newElevation;
            timestamp = newTimestamp;
            value = newValue;

            if (raw) {
                if (!dedup || !dup) {
                    pw.print(classSB);
                    pw.print('\t');
                    pw.print(labelsSB);
                    pw.print('\t');
                    pw.print(attributesSB);
                    pw.print('\t');

                    pw.print(timestamp);
                    pw.print('\t');

                    if (GeoTimeSerie.NO_LOCATION != location) {
                        double[] latlon = GeoXPLib.fromGeoXPPoint(location);
                        pw.print(latlon[0]);
                        pw.print('\t');
                        pw.print(latlon[1]);
                    } else {
                        pw.print('\t');
                    }

                    pw.print('\t');

                    if (GeoTimeSerie.NO_ELEVATION != elevation) {
                        pw.print(elevation);
                    }
                    pw.print('\t');

                    valueSB.setLength(0);
                    GTSHelper.encodeValue(valueSB, value);
                    pw.println(valueSB);
                }
            } else {
                // Display the name only if we have at least one value to display
                // We force 'dup' to be false when we must show the name
                if (displayName) {
                    pw.print("# ");
                    pw.print(classSB);
                    pw.print("{");
                    pw.print(labelsSB);
                    pw.print("}");
                    pw.print("{");
                    pw.print(attributesSB);
                    pw.println("}");
                    displayName = false;
                    dup = false;
                }

                if (!dedup || !dup) {
                    pw.print(timestamp);
                    pw.print('\t');
                    if (GeoTimeSerie.NO_LOCATION != location) {
                        double[] latlon = GeoXPLib.fromGeoXPPoint(location);
                        pw.print(latlon[0]);
                        pw.print('\t');
                        pw.print(latlon[1]);
                    } else {
                        pw.print('\t');
                    }

                    pw.print('\t');

                    if (GeoTimeSerie.NO_ELEVATION != elevation) {
                        pw.print(elevation);
                    }
                    pw.print('\t');

                    valueSB.setLength(0);
                    GTSHelper.encodeValue(valueSB, value);
                    pw.println(valueSB);
                }
            }
        } while (decoder.next());

        // Update currentCount
        if (timespan < 0) {
            currentCount += decoded;
        }

        // Print any remaining value
        if (dedup && dup) {
            if (raw) {
                pw.print(classSB);
                pw.print('\t');
                pw.print(labelsSB);
                pw.print('\t');
                pw.print(attributesSB);
                pw.print('\t');

                pw.print(timestamp);
                pw.print('\t');

                if (GeoTimeSerie.NO_LOCATION != location) {
                    double[] latlon = GeoXPLib.fromGeoXPPoint(location);
                    pw.print(latlon[0]);
                    pw.print('\t');
                    pw.print(latlon[1]);
                } else {
                    pw.print('\t');
                }

                pw.print('\t');

                if (GeoTimeSerie.NO_ELEVATION != elevation) {
                    pw.print(elevation);
                }
                pw.print('\t');

                valueSB.setLength(0);
                GTSHelper.encodeValue(valueSB, value);
                pw.println(valueSB);
            } else {
                pw.print(timestamp);
                pw.print('\t');
                if (GeoTimeSerie.NO_LOCATION != location) {
                    double[] latlon = GeoXPLib.fromGeoXPPoint(location);
                    pw.print(latlon[0]);
                    pw.print('\t');
                    pw.print(latlon[1]);
                } else {
                    pw.print('\t');
                }

                pw.print('\t');

                if (GeoTimeSerie.NO_ELEVATION != elevation) {
                    pw.print(elevation);
                }
                pw.print('\t');

                valueSB.setLength(0);
                GTSHelper.encodeValue(valueSB, value);
                pw.println(valueSB);
            }

        }

        //
        // If displayName is still true it means we should have displayed the name but no value matched,
        // so set name to null so we correctly display the name for the next decoder if it has values
        //

        if (displayName) {
            name = null;
        }
    }

    lastMeta.set(lastMetadata);
    lastCount.set(currentCount);
}

From source file:org.apache.solr.cloud.TestStressInPlaceUpdates.java

@Test
@ShardsFixed(num = 3)
public void stressTest() throws Exception {
    waitForRecoveriesToFinish(true);

    this.leaderClient = getClientForLeader();
    assertNotNull("Couldn't obtain client for the leader of the shard", this.leaderClient);

    final int commitPercent = 5 + random().nextInt(20);
    final int softCommitPercent = 30 + random().nextInt(75); // what percent of the commits are soft
    final int deletePercent = 4 + random().nextInt(25);
    final int deleteByQueryPercent = random().nextInt(8);
    final int ndocs = atLeast(5);
    int nWriteThreads = 5 + random().nextInt(25);
    int fullUpdatePercent = 5 + random().nextInt(50);

    // query variables
    final int percentRealtimeQuery = 75;
    // number of cumulative read/write operations by all threads
    final AtomicLong operations = new AtomicLong(25000);
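    // operations doubles as a shared work budget: every reader and writer thread
    // calls operations.decrementAndGet() and exits once the budget reaches zero.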
    int nReadThreads = 5 + random().nextInt(25);

    /** // testing
     final int commitPercent = 5;
     final int softCommitPercent = 100; // what percent of the commits are soft
     final int deletePercent = 0;
     final int deleteByQueryPercent = 50;
     final int ndocs = 10;
     int nWriteThreads = 10;
            
     final int maxConcurrentCommits = nWriteThreads;   // number of committers at a time... it should be <= maxWarmingSearchers
            
     // query variables
     final int percentRealtimeQuery = 101;
     final AtomicLong operations = new AtomicLong(50000);  // number of query operations to perform in total
     int nReadThreads = 10;
            
     int fullUpdatePercent = 20;
     **/

    log.info("{}",
            Arrays.asList("commitPercent", commitPercent, "softCommitPercent", softCommitPercent,
                    "deletePercent", deletePercent, "deleteByQueryPercent", deleteByQueryPercent, "ndocs",
                    ndocs, "nWriteThreads", nWriteThreads, "percentRealtimeQuery", percentRealtimeQuery,
                    "operations", operations, "nReadThreads", nReadThreads));

    initModel(ndocs);

    List<Thread> threads = new ArrayList<>();

    for (int i = 0; i < nWriteThreads; i++) {
        Thread thread = new Thread("WRITER" + i) {
            Random rand = new Random(random().nextInt());

            @Override
            public void run() {
                try {
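                        // "operations" is both the shared work budget and the abort signal: any
                        // thread that fails sets it to -1 (see the catch block below), which makes
                        // this decrementAndGet() check fail in every worker on its next pass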
                    while (operations.decrementAndGet() > 0) {
                        int oper = rand.nextInt(100);

                        if (oper < commitPercent) {
                            Map<Integer, DocInfo> newCommittedModel;
                            long version;

                            synchronized (TestStressInPlaceUpdates.this) {
                                // take a snapshot of the model
                                // this is safe to do w/o synchronizing on the model because it's a ConcurrentHashMap
                                newCommittedModel = new HashMap<>(model);
                                version = snapshotCount++;

                                int chosenClientIndex = rand.nextInt(clients.size());

                                if (rand.nextInt(100) < softCommitPercent) {
                                    log.info("softCommit start");
                                    clients.get(chosenClientIndex).commit(true, true, true);
                                    log.info("softCommit end");
                                } else {
                                    log.info("hardCommit start");
                                    clients.get(chosenClientIndex).commit();
                                    log.info("hardCommit end");
                                }

                                // install this model snapshot only if it's newer than the current one
                                if (version >= committedModelClock) {
                                    if (VERBOSE) {
                                        log.info("installing new committedModel version={}",
                                                committedModelClock);
                                    }
                                    clientIndexUsedForCommit = chosenClientIndex;
                                    committedModel = newCommittedModel;
                                    committedModelClock = version;
                                }
                            }
                            continue;
                        }

                        int id;

                        if (rand.nextBoolean()) {
                            id = rand.nextInt(ndocs);
                        } else {
                            id = lastId; // reuse the last ID half of the time to force more race conditions
                        }

                        // set the lastId before we actually change it sometimes to try and
                        // uncover more race conditions between writing and reading
                        boolean before = rand.nextBoolean();
                        if (before) {
                            lastId = id;
                        }

                        DocInfo info = model.get(id);

                        // yield after getting the next version to increase the odds of updates happening out of order
                        if (rand.nextBoolean())
                            Thread.yield();

                        if (oper < commitPercent + deletePercent + deleteByQueryPercent) {
                            final boolean dbq = (oper >= commitPercent + deletePercent);
                            final String delType = dbq ? "DBQ" : "DBI";
                            log.info("{} id {}: {}", delType, id, info);

                            Long returnedVersion = null;

                            try {
                                returnedVersion = deleteDocAndGetVersion(Integer.toString(id),
                                        params("_version_", Long.toString(info.version)), dbq);
                                log.info("{}: Deleting id={}, version={}. Returned version={}", delType, id,
                                        info.version, returnedVersion);
                            } catch (RuntimeException e) {
                                if (e.getMessage() != null && (e.getMessage().contains("version conflict")
                                        || e.getMessage().contains("Conflict"))) {
                                    // It's okay for a leader to reject a concurrent request
                                    log.warn("Conflict during {}, rejected id={}, {}", delType, id, e);
                                    returnedVersion = null;
                                } else {
                                    throw e;
                                }
                            }

                            // only update model if update had no conflict & the version is newer
                            synchronized (model) {
                                DocInfo currInfo = model.get(id);
                                if (null != returnedVersion && (Math.abs(returnedVersion.longValue()) > Math
                                        .abs(currInfo.version))) {
                                    model.put(id, new DocInfo(returnedVersion.longValue(), 0, 0));
                                }
                            }

                        } else {
                            int val1 = info.intFieldValue;
                            long val2 = info.longFieldValue;
                            int nextVal1 = val1;
                            long nextVal2 = val2;

                            int addOper = rand.nextInt(100);
                            Long returnedVersion;
                            if (addOper < fullUpdatePercent || info.version <= 0) { // if document was never indexed or was deleted
                                // FULL UPDATE
                                nextVal1 = Primes.nextPrime(val1 + 1);
                                nextVal2 = nextVal1 * 1000000000L;
                                try {
                                    returnedVersion = addDocAndGetVersion("id", id, "title_s", "title" + id,
                                            "val1_i_dvo", nextVal1, "val2_l_dvo", nextVal2, "_version_",
                                            info.version);
                                    log.info("FULL: Writing id=" + id + ", val=[" + nextVal1 + "," + nextVal2
                                            + "], version=" + info.version + ", Prev was=[" + val1 + "," + val2
                                            + "].  Returned version=" + returnedVersion);

                                } catch (RuntimeException e) {
                                    if (e.getMessage() != null && (e.getMessage().contains("version conflict")
                                            || e.getMessage().contains("Conflict"))) {
                                        // It's okay for a leader to reject a concurrent request
                                        log.warn("Conflict during full update, rejected id={}, {}", id, e);
                                        returnedVersion = null;
                                    } else {
                                        throw e;
                                    }
                                }
                            } else {
                                // PARTIAL
                                nextVal2 = val2 + val1;
                                try {
                                    returnedVersion = addDocAndGetVersion("id", id, "val2_l_dvo",
                                            map("inc", String.valueOf(val1)), "_version_", info.version);
                                    log.info("PARTIAL: Writing id=" + id + ", val=[" + nextVal1 + "," + nextVal2
                                            + "], version=" + info.version + ", Prev was=[" + val1 + "," + val2
                                            + "].  Returned version=" + returnedVersion);
                                } catch (RuntimeException e) {
                                    if (e.getMessage() != null && (e.getMessage().contains("version conflict")
                                            || e.getMessage().contains("Conflict"))) {
                                        // It's okay for a leader to reject a concurrent request
                                        log.warn("Conflict during partial update, rejected id={}, {}", id, e);
                                    } else if (e.getMessage() != null
                                            && e.getMessage().contains("Document not found for update.")
                                            && e.getMessage().contains("id=" + id)) {
                                        log.warn(
                                                "Attempted a partial update for a recently deleted document, rejected id={}, {}",
                                                id, e);
                                    } else {
                                        throw e;
                                    }
                                    returnedVersion = null;
                                }
                            }

                            // only update model if update had no conflict & the version is newer
                            synchronized (model) {
                                DocInfo currInfo = model.get(id);
                                if (null != returnedVersion && (Math.abs(returnedVersion.longValue()) > Math
                                        .abs(currInfo.version))) {
                                    model.put(id, new DocInfo(returnedVersion.longValue(), nextVal1, nextVal2));
                                }

                            }
                        }

                        if (!before) {
                            lastId = id;
                        }
                    }
                } catch (Throwable e) {
                    operations.set(-1L);
                    log.error("", e);
                    throw new RuntimeException(e);
                }
            }
        };

        threads.add(thread);

    }

    // Read threads
    for (int i = 0; i < nReadThreads; i++) {
        Thread thread = new Thread("READER" + i) {
            Random rand = new Random(random().nextInt());

            @SuppressWarnings("unchecked")
            @Override
            public void run() {
                try {
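                        // readers draw down the same shared budget; a set(-1L) from any failing
                        // thread, reader or writer, stops this loop as well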
                    while (operations.decrementAndGet() >= 0) {
                        // bias toward a recently changed doc
                        int id = rand.nextInt(100) < 25 ? lastId : rand.nextInt(ndocs);

                        // when indexing, we update the index, then the model
                        // so when querying, we should first check the model, and then the index

                        boolean realTime = rand.nextInt(100) < percentRealtimeQuery;
                        DocInfo expected;

                        if (realTime) {
                            expected = model.get(id);
                        } else {
                            synchronized (TestStressInPlaceUpdates.this) {
                                expected = committedModel.get(id);
                            }
                        }

                        if (VERBOSE) {
                            log.info("querying id {}", id);
                        }
                        ModifiableSolrParams params = new ModifiableSolrParams();
                        if (realTime) {
                            params.set("wt", "json");
                            params.set("qt", "/get");
                            params.set("ids", Integer.toString(id));
                        } else {
                            params.set("wt", "json");
                            params.set("q", "id:" + Integer.toString(id));
                            params.set("omitHeader", "true");
                        }

                        int clientId = rand.nextInt(clients.size());
                        if (!realTime)
                            clientId = clientIndexUsedForCommit;

                        QueryResponse response = clients.get(clientId).query(params);
                        if (response.getResults().size() == 0) {
                            // there's no info we can get back with a delete, so not much we can check without further synchronization
                        } else if (response.getResults().size() == 1) {
                            final SolrDocument actual = response.getResults().get(0);
                            final String msg = "Realtime=" + realTime + ", expected=" + expected + ", actual="
                                    + actual;
                            assertNotNull(msg, actual);

                            final Long foundVersion = (Long) actual.getFieldValue("_version_");
                            assertNotNull(msg, foundVersion);
                            assertTrue(msg + "... solr doc has non-positive version???",
                                    0 < foundVersion.longValue());
                            final Integer intVal = (Integer) actual.getFieldValue("val1_i_dvo");
                            assertNotNull(msg, intVal);

                            final Long longVal = (Long) actual.getFieldValue("val2_l_dvo");
                            assertNotNull(msg, longVal);

                            assertTrue(msg + " ...solr returned older version then model. "
                                    + "should not be possible given the order of operations in writer threads",
                                    Math.abs(expected.version) <= foundVersion.longValue());

                            if (foundVersion.longValue() == expected.version) {
                                assertEquals(msg, expected.intFieldValue, intVal.intValue());
                                assertEquals(msg, expected.longFieldValue, longVal.longValue());
                            }

                            // Some things we can assert about any Doc returned from solr,
                            // even if it's newer than our (expected) model information...

                            assertTrue(msg + " ...how did a doc in solr get a non positive intVal?",
                                    0 < intVal);
                            assertTrue(msg + " ...how did a doc in solr get a non positive longVal?",
                                    0 < longVal);
                            assertEquals(msg
                                    + " ...intVal and longVal in solr doc are internally (modulo) inconsistent w/eachother",
                                    0, (longVal % intVal));

                            // NOTE: when foundVersion is greater than the version read from the model,
                            // it's not possible to make any assertions about the field values in solr relative to the
                            // field values in the model -- ie: we can *NOT* assert expected.longFieldVal <= doc.longVal
                            //
                            // it's tempting to think that this would be possible if we changed our model to preserve the
                            // "old" values when doing a delete, but that's still no guarantee because of how opportunistic
                            // concurrency works with negative versions:  when adding a doc, we can assert that it must not
                            // exist with version<0, but we can't assert that the *reason* it doesn't exist was because of
                            // a delete with the specific version of "-42".
                            // So a writer thread might (1) prep to add a doc for the first time with "intValue=1,_version_=-1",
                            // and that add may succeed and (2) return some version X which is put in the model.  But
                            // in between #1 and #2 other threads may have added & deleted the doc repeatedly, updating
                            // the model with intValue=7,_version_=-42, and a reader thread might meanwhile read from the
                            // model before #2 and expect intValue=7, but get intValue=1 from solr (with a greater version)

                        } else {
                            fail(String.format(Locale.ENGLISH, "There was more than one result: %s",
                                    response));
                        }
                    }
                } catch (Throwable e) {
                    operations.set(-1L);
                    log.error("", e);
                    throw new RuntimeException(e);
                }
            }
        };

        threads.add(thread);
    }
    // Start all threads
    for (Thread thread : threads) {
        thread.start();
    }

    for (Thread thread : threads) {
        thread.join();
    }

    { // final pass over uncommitted model with RTG

        for (SolrClient client : clients) {
            for (Map.Entry<Integer, DocInfo> entry : model.entrySet()) {
                final Integer id = entry.getKey();
                final DocInfo expected = entry.getValue();
                final SolrDocument actual = client.getById(id.toString());

                String msg = "RTG: " + id + "=" + expected;
                if (null == actual) {
                    // a deleted or non-existent document;
                    // sanity check that the model agrees...
                    assertTrue(msg + " is deleted/non-existent in Solr, but model has non-neg version",
                            expected.version < 0);
                    assertEquals(msg + " is deleted/non-existent in Solr", expected.intFieldValue, 0);
                    assertEquals(msg + " is deleted/non-existent in Solr", expected.longFieldValue, 0);
                } else {
                    msg = msg + " <==VS==> " + actual;
                    assertEquals(msg, expected.intFieldValue, actual.getFieldValue("val1_i_dvo"));
                    assertEquals(msg, expected.longFieldValue, actual.getFieldValue("val2_l_dvo"));
                    assertEquals(msg, expected.version, actual.getFieldValue("_version_"));
                    assertTrue(msg + " doc exists in solr, but version is negative???", 0 < expected.version);
                }
            }
        }
    }

    { // do a final search and compare every result with the model

        // because commits don't provide any sort of concrete versioning (or optimistic concurrency constraints)
        // there's no way to guarantee that our committedModel matches what was in Solr at the time of the last commit.
        // It's possible other threads made additional writes to solr before the commit was processed, but after
        // the committedModel variable was assigned its new value.
        //
        // what we can do however, is commit all completed updates, and *then* compare solr search results
        // against the (new) committed model....

        waitForThingsToLevelOut(30); // NOTE: this does an automatic commit for us & ensures replicas are up to date
        committedModel = new HashMap<>(model);

        // first, prune the model of any docs that have negative versions
        // ie: were never actually added, or were ultimately deleted.
        for (int i = 0; i < ndocs; i++) {
            DocInfo info = committedModel.get(i);
            if (info.version < 0) {
                // first, a quick sanity check of the model itself...
                assertEquals("Inconsistent int value in model for deleted doc" + i + "=" + info, 0,
                        info.intFieldValue);
                assertEquals("Inconsistent long value in model for deleted doc" + i + "=" + info, 0L,
                        info.longFieldValue);

                committedModel.remove(i);
            }
        }

        for (SolrClient client : clients) {
            QueryResponse rsp = client.query(params("q", "*:*", "sort", "id asc", "rows", ndocs + ""));
            for (SolrDocument actual : rsp.getResults()) {
                final Integer id = Integer.parseInt(actual.getFieldValue("id").toString());
                final DocInfo expected = committedModel.get(id);

                assertNotNull("Doc found but missing/deleted from model: " + actual, expected);

                final String msg = "Search: " + id + "=" + expected + " <==VS==> " + actual;
                assertEquals(msg, expected.intFieldValue, actual.getFieldValue("val1_i_dvo"));
                assertEquals(msg, expected.longFieldValue, actual.getFieldValue("val2_l_dvo"));
                assertEquals(msg, expected.version, actual.getFieldValue("_version_"));
                assertTrue(msg + " doc exists in solr, but version is negative???", 0 < expected.version);

                // also sanity check the model (which we already know matches the doc)
                assertEquals("Inconsistent (modulo) values in model for id " + id + "=" + expected, 0,
                        (expected.longFieldValue % expected.intFieldValue));
            }
            assertEquals(committedModel.size(), rsp.getResults().getNumFound());
        }
    }
}
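
One detail worth calling out in the test above: the shared operations counter doubles as a kill switch. Every worker loops on operations.decrementAndGet(), and a thread that catches an unexpected Throwable calls operations.set(-1L), which drives the next decrement below zero in every other thread and shuts the whole test down. The sketch below distills that shutdown pattern into a standalone example; the class name and the placeholder "work" are hypothetical:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

public class PoisonPillSketch {

    public static void main(String[] args) throws InterruptedException {
        // shared work budget: each worker consumes one unit per iteration
        final AtomicLong operations = new AtomicLong(10000);
        List<Thread> workers = new ArrayList<>();

        for (int i = 0; i < 4; i++) {
            Thread t = new Thread(() -> {
                try {
                    while (operations.decrementAndGet() > 0) {
                        doOneOperation(); // stands in for real work; may throw
                    }
                } catch (RuntimeException e) {
                    // abort every worker, not just this one: the next
                    // decrementAndGet() in each thread now returns a negative value
                    operations.set(-1L);
                    throw e;
                }
            }, "WORKER" + i);
            workers.add(t);
            t.start();
        }

        for (Thread t : workers) {
            t.join();
        }
        System.out.println("remaining budget: " + operations.get());
    }

    private static void doOneOperation() {
        // placeholder: a RuntimeException thrown here stops all workers
    }
}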