Example usage for org.apache.commons.math3.primes Primes nextPrime

List of usage examples for org.apache.commons.math3.primes Primes nextPrime

Introduction

On this page you can find an example usage of org.apache.commons.math3.primes Primes nextPrime.

Prototype

public static int nextPrime(int n) 

Source Link

Document

Return the smallest prime greater than or equal to n.

Usage

From source file:HannonHillSecret.HannonHillSecret.java

/**
 * Checks whether the "Secret" function is additive over all pairs of primes
 * less than or equal to N, i.e. whether Secret(p) + Secret(q) == Secret(p + q)
 * holds for every distinct pair (p, q) of such primes.
 *
 * @param args the command line arguments (unused)
 */
public static void main(String[] args) {
    // This is a placeholder for the provided value of N
    final int N = 55;
    boolean isAdditive = true;
    ArrayList<Integer> primeNumbers = new ArrayList<>();

    // Collect all primes <= N. Primes.nextPrime(n) returns the smallest prime
    // GREATER THAN OR EQUAL TO n, so we must ask for currentNum + 1 each
    // iteration; passing currentNum back in would return the same prime and
    // loop forever.
    int currentNum = Primes.nextPrime(0);
    while (currentNum <= N) {
        primeNumbers.add(currentNum);
        currentNum = Primes.nextPrime(currentNum + 1);
    }

    final int sizePrimes = primeNumbers.size();

    // With fewer than two primes there is no pair to test.
    if (sizePrimes < 2) {
        System.out.println("Cannot test if Secret is additive since there "
                + "are not two or more prime numbers less than N!");
    } else // Testing for additive property is possible
    {
        // Assuming the additive test only requires pair combinations, go through
        // all possible pairs until all pass or one fails.
        outerloop: for (int x = 0; x < sizePrimes && isAdditive; x++) {
            // y starts at x + 1 and stays strictly below sizePrimes so every
            // unordered pair is visited exactly once and indexing stays in range.
            for (int y = x + 1; y < sizePrimes; y++) {
                // Test the pair (x, y), not (x, x).
                isAdditive = isSecretAdditive(primeNumbers.get(x), primeNumbers.get(y));

                // Failed additive test for a combination of prime numbers,
                // so break out of both loops and report false.
                if (!isAdditive) {
                    break outerloop;
                }
            }
        }

        if (isAdditive) {
            System.out.println("Secret is additive!");
        } else {
            System.out.println("Secret is NOT additive!");
        }
    }
}

From source file:iDynoOptimizer.MOEAFramework26.src.org.moeaframework.util.weights.UniformDesignGenerator.java

/**
 * Returns the first k prime numbers (2, 3, 5, 7, ...).
 *
 * @param k the number of prime numbers to return
 * @return an array containing the first k prime numbers; empty if {@code k <= 0}
 */
protected int[] generateFirstKPrimes(int k) {
    int[] primes = new int[Math.max(k, 0)];
    if (k > 0) {
        primes[0] = 2;

        for (int i = 1; i < k; i++) {
            // Primes.nextPrime(n) returns the smallest prime >= n, so we must
            // ask for a value strictly above the previous prime; calling
            // nextPrime(primes[i - 1]) would simply return primes[i - 1] again
            // and fill the whole array with 2s.
            primes[i] = Primes.nextPrime(primes[i - 1] + 1);
        }
    }

    return primes;
}

From source file:com.cloudera.oryx.common.random.RandomUtils.java

/**
 * Finds next-largest "twin primes": numbers p and p+2 such that both are prime. Finds the smallest such p
 * such that the smaller twin, p, is greater than or equal to n. Returns p+2, the larger of the two twins.
 */
public static int nextTwinPrime(int n) {
    if (n > MAX_INT_SMALLER_TWIN_PRIME) {
        throw new IllegalArgumentException();
    }
    // (3, 5) is the first twin pair whose smaller twin is >= any n <= 3.
    if (n <= 3) {
        return 5;
    }
    // Candidate smaller twin: the first prime at or above n.
    int smallerTwin = Primes.nextPrime(n);
    for (;;) {
        if (Primes.isPrime(smallerTwin + 2)) {
            return smallerTwin + 2;
        }
        // smallerTwin is an odd prime and smallerTwin + 2 is composite;
        // smallerTwin + 1 and smallerTwin + 3 are even, so the next possible
        // smaller twin is a prime at or beyond smallerTwin + 4.
        smallerTwin = Primes.nextPrime(smallerTwin + 4);
    }
}

From source file:it.unimi.dsi.sux4j.mph.CHDMinimalPerfectHashFunction.java

/**
 * Creates a new CHD minimal perfect hash function for the given keys.
 *
 * @param keys the keys to hash, or {@code null}.
 * @param transform a transformation strategy for the keys.
 * @param lambda the average bucket size.
 * @param loadFactor the load factor.
 * @param signatureWidth a signature width, or 0 for no signature.
 * @param tempDir a temporary directory for the store files, or {@code null} for the standard temporary directory.
 * @param chunkedHashStore a chunked hash store containing the keys, or {@code null}; the store
 * can be unchecked, but in this case <code>keys</code> and <code>transform</code> must be non-{@code null}. 
 * @throws IOException if an I/O error occurs while reading the keys or writing temporary data.
 */
protected CHDMinimalPerfectHashFunction(final Iterable<? extends T> keys,
        final TransformationStrategy<? super T> transform, final int lambda, double loadFactor,
        final int signatureWidth, final File tempDir, ChunkedHashStore<T> chunkedHashStore) throws IOException {
    this.transform = transform;

    final ProgressLogger pl = new ProgressLogger(LOGGER);
    pl.displayLocalSpeed = true;
    pl.displayFreeMemory = true;
    final RandomGenerator r = new XorShift1024StarRandomGenerator();
    pl.itemsName = "keys";

    // If the caller did not supply a store, build one from the keys now;
    // in that case we are also responsible for closing it at the end.
    final boolean givenChunkedHashStore = chunkedHashStore != null;
    if (!givenChunkedHashStore) {
        chunkedHashStore = new ChunkedHashStore<T>(transform, tempDir, pl);
        chunkedHashStore.reset(r.nextLong());
        chunkedHashStore.addAll(keys.iterator());
    }
    n = chunkedHashStore.size();

    defRetValue = -1; // For the very few cases in which we can decide

    int log2NumChunks = Math.max(0, Fast.mostSignificantBit(n >> LOG2_CHUNK_SIZE));
    chunkShift = chunkedHashStore.log2Chunks(log2NumChunks);
    final int numChunks = 1 << log2NumChunks;

    LOGGER.debug("Number of chunks: " + numChunks);
    LOGGER.debug("Average chunk size: " + (double) n / numChunks);

    // Backing array for the per-chunk metadata read/written through the
    // offset(), numBuckets() and seed() accessors used below.
    offsetNumBucketsSeed = new long[(numChunks + 1) * 3 + 2];

    int duplicates = 0;
    final LongArrayList holes = new LongArrayList();

    /* The per-bucket (c0, c1) coefficient pairs are staged on disk. Each value
     * is serialized with a 7-bits-per-byte variable-length encoding in which a
     * set high bit marks a continuation byte. */
    @SuppressWarnings("resource")
    final OfflineIterable<MutableLong, MutableLong> coefficients = new OfflineIterable<MutableLong, MutableLong>(
            new Serializer<MutableLong, MutableLong>() {

                @Override
                public void write(final MutableLong a, final DataOutput dos) throws IOException {
                    long x = a.longValue();
                    while ((x & ~0x7FL) != 0) {
                        dos.writeByte((int) (x | 0x80));
                        x >>>= 7;
                    }
                    dos.writeByte((int) x);
                }

                @Override
                public void read(final DataInput dis, final MutableLong x) throws IOException {
                    byte b = dis.readByte();
                    long t = b & 0x7F;
                    for (int shift = 7; (b & 0x80) != 0; shift += 7) {
                        b = dis.readByte();
                        t |= (b & 0x7FL) << shift;
                    }
                    x.setValue(t);
                }
            }, new MutableLong());

    // Main generation loop: restarted from scratch if the chunked hash store
    // reports duplicate triples (see the catch clause at the bottom).
    for (;;) {
        LOGGER.debug("Generating minimal perfect hash function...");

        holes.clear();
        coefficients.clear();
        pl.expectedUpdates = numChunks;
        pl.itemsName = "chunks";
        pl.start("Analysing chunks... ");

        try {
            int chunkNumber = 0;

            for (ChunkedHashStore.Chunk chunk : chunkedHashStore) {
                /* We treat a chunk as a single hash function. The number of bins is thus
                 * the first prime larger than the chunk size divided by the load factor. */
                final int p = Primes.nextPrime((int) Math.ceil(chunk.size() / loadFactor) + 1);
                final boolean used[] = new boolean[p];

                // Number of buckets for this chunk: ceil(chunk.size() / lambda).
                final int numBuckets = (chunk.size() + lambda - 1) / lambda;
                numBuckets(chunkNumber + 1, numBuckets(chunkNumber) + numBuckets);
                final int[] cc0 = new int[numBuckets];
                final int[] cc1 = new int[numBuckets];
                @SuppressWarnings("unchecked")
                final ArrayList<long[]>[] bucket = new ArrayList[numBuckets];
                for (int i = bucket.length; i-- != 0;)
                    bucket[i] = new ArrayList<long[]>();

                // Retry the whole chunk with a fresh seed whenever a bucket
                // collision or an unplaceable bucket is found.
                tryChunk: for (;;) {
                    for (ArrayList<long[]> b : bucket)
                        b.clear();
                    Arrays.fill(used, false);

                    /* At each try, the allocation to keys to bucket is randomized differently. */
                    final long seed = r.nextLong();
                    // System.err.println( "Number of keys: " + chunk.size()  + " Number of bins: " + p + " seed: " + seed );
                    /* We distribute the keys in this chunks in the buckets. */
                    for (Iterator<long[]> iterator = chunk.iterator(); iterator.hasNext();) {
                        final long[] triple = iterator.next();
                        final long[] h = new long[3];
                        Hashes.spooky4(triple, seed, h);
                        final ArrayList<long[]> b = bucket[(int) ((h[0] >>> 1) % numBuckets)];
                        h[1] = (int) ((h[1] >>> 1) % p);
                        h[2] = (int) ((h[2] >>> 1) % (p - 1)) + 1;

                        // All elements in a bucket must have either different h[ 1 ] or different h[ 2 ]
                        for (long[] t : b)
                            if (t[1] == h[1] && t[2] == h[2]) {
                                LOGGER.info("Duplicate index" + Arrays.toString(t));
                                continue tryChunk;
                            }
                        b.add(h);
                    }

                    // Visit buckets by decreasing size (the comparator sorts
                    // the identity permutation in descending bucket-size order).
                    final int[] perm = Util.identity(bucket.length);
                    IntArrays.quickSort(perm, new AbstractIntComparator() {
                        private static final long serialVersionUID = 1L;

                        @Override
                        public int compare(int a0, int a1) {
                            return Integer.compare(bucket[a1].size(), bucket[a0].size());
                        }
                    });

                    for (int i = 0; i < perm.length;) {
                        final LinkedList<Integer> bucketsToDo = new LinkedList<Integer>();
                        final int size = bucket[perm[i]].size();
                        //System.err.println( "Bucket size: " + size );
                        int j;
                        // Gather indices of all buckets with the same size
                        for (j = i; j < perm.length && bucket[perm[j]].size() == size; j++)
                            bucketsToDo.add(Integer.valueOf(perm[j]));

                        // Examine for each pair (c0,c1) the buckets still to do
                        ext: for (int c1 = 0; c1 < p; c1++)
                            for (int c0 = 0; c0 < p; c0++) {
                                //System.err.println( "Testing " + c0 + ", " + c1 + " (to do: " + bucketsToDo.size() + ")" );
                                for (Iterator<Integer> iterator = bucketsToDo.iterator(); iterator.hasNext();) {
                                    final int k = iterator.next().intValue();
                                    final ArrayList<long[]> b = bucket[k];
                                    boolean completed = true;
                                    final IntArrayList done = new IntArrayList();
                                    // Try to see whether the necessary entries are not used
                                    for (long[] h : b) {
                                        //assert k == h[ 0 ];

                                        int pos = (int) ((h[1] + c0 * h[2] + c1) % p);
                                        //System.err.println( "Testing pos " + pos + " for " + Arrays.toString( e  ));
                                        if (used[pos]) {
                                            completed = false;
                                            break;
                                        } else {
                                            used[pos] = true;
                                            done.add(pos);
                                        }
                                    }

                                    if (completed) {
                                        // All positions were free
                                        cc0[k] = c0;
                                        cc1[k] = c1;
                                        iterator.remove();
                                    } else
                                        // Roll back the tentative placements of this bucket.
                                        for (int d : done)
                                            used[d] = false;
                                }
                                if (bucketsToDo.isEmpty())
                                    break ext;
                            }
                        // No (c0, c1) pair worked for some bucket of this size:
                        // retry the whole chunk with a new seed.
                        if (!bucketsToDo.isEmpty())
                            continue tryChunk;

                        // Record the seed that succeeded for this chunk.
                        seed(chunkNumber, seed);
                        i = j;
                    }
                    break;
                }

                // System.err.println("DONE!");

                // Sanity check: with the chosen seed and coefficients, every
                // triple in the chunk must map to a distinct position.
                if (ASSERTS) {
                    final IntOpenHashSet pos = new IntOpenHashSet();
                    final long h[] = new long[3];
                    for (Iterator<long[]> iterator = chunk.iterator(); iterator.hasNext();) {
                        final long[] triple = iterator.next();
                        Hashes.spooky4(triple, seed(chunkNumber), h);
                        h[0] = (h[0] >>> 1) % numBuckets;
                        h[1] = (int) ((h[1] >>> 1) % p);
                        h[2] = (int) ((h[2] >>> 1) % (p - 1)) + 1;
                        //System.err.println( Arrays.toString(  e  ) );
                        assert pos.add((int) ((h[1] + cc0[(int) (h[0])] * h[2] + cc1[(int) (h[0])]) % p));
                    }
                }

                // Persist each bucket's coefficient pair as a single value c0 + c1 * p.
                final MutableLong l = new MutableLong();
                for (int i = 0; i < numBuckets; i++) {
                    l.setValue(cc0[i] + cc1[i] * p);
                    coefficients.add(l);
                }

                // Remember the globally-indexed unused positions ("holes").
                for (int i = 0; i < p; i++)
                    if (!used[i])
                        holes.add(offset(chunkNumber) + i);

                offset(chunkNumber + 1, offset(chunkNumber) + p);
                chunkNumber++;
                pl.update();
            }

            pl.done();
            break;
        } catch (ChunkedHashStore.DuplicateException e) {
            if (keys == null)
                throw new IllegalStateException(
                        "You provided no keys, but the chunked hash store was not checked");
            // Give up after a few retries: duplicates that survive reseeding
            // mean the input itself contains duplicate keys.
            if (duplicates++ > 3)
                throw new IllegalArgumentException("The input list contains duplicates");
            LOGGER.warn("Found duplicate. Recomputing triples...");
            chunkedHashStore.reset(r.nextLong());
            chunkedHashStore.addAll(keys.iterator());
        }
    }

    // Rank structure over the unused positions; NOTE(review): presumably used
    // to turn the non-minimal positions into a minimal numbering — confirm
    // against the lookup method.
    rank = new SparseRank(offset(offsetNumBucketsSeed.length / 3 - 1), holes.size(), holes.iterator());

    globalSeed = chunkedHashStore.seed();

    // Compress the staged coefficient list into an Elias-Fano long big list.
    this.coefficients = new EliasFanoLongBigList(new AbstractLongIterator() {
        final OfflineIterator<MutableLong, MutableLong> iterator = coefficients.iterator();

        @Override
        public boolean hasNext() {
            return iterator.hasNext();
        }

        public long nextLong() {
            return iterator.next().longValue();
        }
    }, 0, true);

    coefficients.close();

    LOGGER.info("Completed.");
    LOGGER.info("Actual bit cost per key: " + (double) numBits() / n);

    // Optionally store signatureWidth bits of each key's hash;
    // NOTE(review): presumably used to probabilistically reject keys outside
    // the original set — confirm against the signed lookup method.
    if (signatureWidth != 0) {
        signatureMask = -1L >>> Long.SIZE - signatureWidth;
        (signatures = LongArrayBitVector.getInstance().asLongBigList(signatureWidth)).size(n);
        pl.expectedUpdates = n;
        pl.itemsName = "signatures";
        pl.start("Signing...");
        for (ChunkedHashStore.Chunk chunk : chunkedHashStore) {
            Iterator<long[]> iterator = chunk.iterator();
            for (int i = chunk.size(); i-- != 0;) {
                final long[] triple = iterator.next();
                long t = getLongByTripleNoCheck(triple);
                signatures.set(t, signatureMask & triple[0]);
                pl.lightUpdate();
            }
        }
        pl.done();
    } else {
        signatureMask = 0;
        signatures = null;
    }

    // Close the store only if we created it ourselves.
    if (!givenChunkedHashStore)
        chunkedHashStore.close();
}

From source file:gobblin.compaction.mapreduce.MRCompactorJobRunner.java

/**
 * Sets the reducer count for the given job: roughly one reducer per
 * target-file-size worth of input, capped at the configured maximum, and
 * optionally rounded up to the next prime number.
 *
 * @param job the map-reduce job whose reducer count is being configured
 * @throws IOException if the input size cannot be determined
 */
protected void setNumberOfReducers(Job job) throws IOException {
    final long totalInputBytes = getInputSize();
    final long bytesPerOutputFile = getTargetFileSize();
    int reducerCount = Math.min(Ints.checkedCast(totalInputBytes / bytesPerOutputFile) + 1,
            getMaxNumReducers());
    // When prime reducer counts are requested, round up — except for a
    // count of exactly 1, which is left untouched.
    if (this.usePrimeReducers && reducerCount != 1) {
        reducerCount = Primes.nextPrime(reducerCount);
    }
    job.setNumReduceTasks(reducerCount);
}

From source file:org.apache.gobblin.compaction.mapreduce.CompactionAvroJobConfigurator.java

/**
 * Refer to {@link MRCompactorAvroKeyDedupJobRunner#setNumberOfReducers(Job)}
 */
protected void setNumberOfReducers(Job job) throws IOException {

    // Whether the reducer count should be rounded up to a prime.
    boolean usePrimeReducers = state.getPropAsBoolean(
            MRCompactorAvroKeyDedupJobRunner.COMPACTION_JOB_USE_PRIME_REDUCERS,
            MRCompactorAvroKeyDedupJobRunner.DEFAULT_COMPACTION_JOB_USE_PRIME_REDUCERS);

    // Total size of all configured map-reduce input paths.
    long totalInputBytes = 0;
    for (Path inputPath : this.mapReduceInputPaths) {
        totalInputBytes += this.fs.getContentSummary(inputPath).getLength();
    }

    // Desired size of each output file.
    long targetFileSize = this.state.getPropAsLong(
            MRCompactorAvroKeyDedupJobRunner.COMPACTION_JOB_TARGET_OUTPUT_FILE_SIZE,
            MRCompactorAvroKeyDedupJobRunner.DEFAULT_COMPACTION_JOB_TARGET_OUTPUT_FILE_SIZE);

    // Upper bound on the reducer count.
    int maxNumReducers = state.getPropAsInt(MRCompactorAvroKeyDedupJobRunner.COMPACTION_JOB_MAX_NUM_REDUCERS,
            MRCompactorAvroKeyDedupJobRunner.DEFAULT_COMPACTION_JOB_MAX_NUM_REDUCERS);

    // One reducer per target-file-size worth of input, capped at the maximum.
    int numReducers = Math.min(Ints.checkedCast(totalInputBytes / targetFileSize) + 1, maxNumReducers);

    // A count of exactly 1 is never bumped to a prime.
    if (usePrimeReducers && numReducers != 1) {
        numReducers = Primes.nextPrime(numReducers);
    }
    job.setNumReduceTasks(numReducers);
}

From source file:org.apache.solr.client.solrj.io.eval.PrimesEvaluator.java

/**
 * Produces a list of {@code values[0]} consecutive primes, the first being the
 * smallest prime greater than or equal to {@code values[1]}.
 *
 * @param values exactly two numbers: the count of primes to return and the
 *        value at which to start the search
 * @return the list of primes, in increasing order
 * @throws IOException if the number of arguments is not exactly 2
 */
@Override
public Object doWork(Object... values) throws IOException {
    if (2 != values.length) {
        throw new IOException(
                String.format(Locale.ROOT, "%s(...) only works with 2 values but %d were provided",
                        constructingFactory.getFunctionName(getClass()), values.length));
    }

    int sizeNum = ((Number) values[0]).intValue();
    int startNum = ((Number) values[1]).intValue();
    // Parameterized and pre-sized instead of the raw, default-capacity ArrayList.
    List<Number> primes = new ArrayList<>(Math.max(sizeNum, 0));

    for (int i = 0; i < sizeNum; i++) {
        int prime = Primes.nextPrime(startNum);
        primes.add(prime);
        // nextPrime(n) returns the smallest prime >= n, so advance past the
        // prime just found to get a strictly larger one next iteration.
        startNum = prime + 1;
    }

    return primes;
}

From source file:org.apache.solr.cloud.TestStressInPlaceUpdates.java

@Test
@ShardsFixed(num = 3)//w  w  w  .jav  a 2  s  .c om
public void stressTest() throws Exception {
    waitForRecoveriesToFinish(true);

    this.leaderClient = getClientForLeader();
    assertNotNull("Couldn't obtain client for the leader of the shard", this.leaderClient);

    final int commitPercent = 5 + random().nextInt(20);
    final int softCommitPercent = 30 + random().nextInt(75); // what percent of the commits are soft
    final int deletePercent = 4 + random().nextInt(25);
    final int deleteByQueryPercent = random().nextInt(8);
    final int ndocs = atLeast(5);
    int nWriteThreads = 5 + random().nextInt(25);
    int fullUpdatePercent = 5 + random().nextInt(50);

    // query variables
    final int percentRealtimeQuery = 75;
    // number of cumulative read/write operations by all threads
    final AtomicLong operations = new AtomicLong(25000);
    int nReadThreads = 5 + random().nextInt(25);

    /** // testing
     final int commitPercent = 5;
     final int softCommitPercent = 100; // what percent of the commits are soft
     final int deletePercent = 0;
     final int deleteByQueryPercent = 50;
     final int ndocs = 10;
     int nWriteThreads = 10;
            
     final int maxConcurrentCommits = nWriteThreads;   // number of committers at a time... it should be <= maxWarmingSearchers
            
     // query variables
     final int percentRealtimeQuery = 101;
     final AtomicLong operations = new AtomicLong(50000);  // number of query operations to perform in total
     int nReadThreads = 10;
            
     int fullUpdatePercent = 20;
     **/

    log.info("{}",
            Arrays.asList("commitPercent", commitPercent, "softCommitPercent", softCommitPercent,
                    "deletePercent", deletePercent, "deleteByQueryPercent", deleteByQueryPercent, "ndocs",
                    ndocs, "nWriteThreads", nWriteThreads, "percentRealtimeQuery", percentRealtimeQuery,
                    "operations", operations, "nReadThreads", nReadThreads));

    initModel(ndocs);

    List<Thread> threads = new ArrayList<>();

    for (int i = 0; i < nWriteThreads; i++) {
        Thread thread = new Thread("WRITER" + i) {
            Random rand = new Random(random().nextInt());

            @Override
            public void run() {
                try {
                    while (operations.decrementAndGet() > 0) {
                        int oper = rand.nextInt(100);

                        if (oper < commitPercent) {
                            Map<Integer, DocInfo> newCommittedModel;
                            long version;

                            synchronized (TestStressInPlaceUpdates.this) {
                                // take a snapshot of the model
                                // this is safe to do w/o synchronizing on the model because it's a ConcurrentHashMap
                                newCommittedModel = new HashMap<>(model);
                                version = snapshotCount++;

                                int chosenClientIndex = rand.nextInt(clients.size());

                                if (rand.nextInt(100) < softCommitPercent) {
                                    log.info("softCommit start");
                                    clients.get(chosenClientIndex).commit(true, true, true);
                                    log.info("softCommit end");
                                } else {
                                    log.info("hardCommit start");
                                    clients.get(chosenClientIndex).commit();
                                    log.info("hardCommit end");
                                }

                                // install this model snapshot only if it's newer than the current one
                                if (version >= committedModelClock) {
                                    if (VERBOSE) {
                                        log.info("installing new committedModel version={}",
                                                committedModelClock);
                                    }
                                    clientIndexUsedForCommit = chosenClientIndex;
                                    committedModel = newCommittedModel;
                                    committedModelClock = version;
                                }
                            }
                            continue;
                        }

                        int id;

                        if (rand.nextBoolean()) {
                            id = rand.nextInt(ndocs);
                        } else {
                            id = lastId; // reuse the last ID half of the time to force more race conditions
                        }

                        // set the lastId before we actually change it sometimes to try and
                        // uncover more race conditions between writing and reading
                        boolean before = rand.nextBoolean();
                        if (before) {
                            lastId = id;
                        }

                        DocInfo info = model.get(id);

                        // yield after getting the next version to increase the odds of updates happening out of order
                        if (rand.nextBoolean())
                            Thread.yield();

                        if (oper < commitPercent + deletePercent + deleteByQueryPercent) {
                            final boolean dbq = (oper >= commitPercent + deletePercent);
                            final String delType = dbq ? "DBI" : "DBQ";
                            log.info("{} id {}: {}", delType, id, info);

                            Long returnedVersion = null;

                            try {
                                returnedVersion = deleteDocAndGetVersion(Integer.toString(id),
                                        params("_version_", Long.toString(info.version)), dbq);
                                log.info(delType + ": Deleting id=" + id + ", version=" + info.version
                                        + ".  Returned version=" + returnedVersion);
                            } catch (RuntimeException e) {
                                if (e.getMessage() != null && e.getMessage().contains("version conflict")
                                        || e.getMessage() != null && e.getMessage().contains("Conflict")) {
                                    // Its okay for a leader to reject a concurrent request
                                    log.warn("Conflict during {}, rejected id={}, {}", delType, id, e);
                                    returnedVersion = null;
                                } else {
                                    throw e;
                                }
                            }

                            // only update model if update had no conflict & the version is newer
                            synchronized (model) {
                                DocInfo currInfo = model.get(id);
                                if (null != returnedVersion && (Math.abs(returnedVersion.longValue()) > Math
                                        .abs(currInfo.version))) {
                                    model.put(id, new DocInfo(returnedVersion.longValue(), 0, 0));
                                }
                            }

                        } else {
                            int val1 = info.intFieldValue;
                            long val2 = info.longFieldValue;
                            int nextVal1 = val1;
                            long nextVal2 = val2;

                            int addOper = rand.nextInt(100);
                            Long returnedVersion;
                            if (addOper < fullUpdatePercent || info.version <= 0) { // if document was never indexed or was deleted
                                // FULL UPDATE
                                nextVal1 = Primes.nextPrime(val1 + 1);
                                nextVal2 = nextVal1 * 1000000000l;
                                try {
                                    returnedVersion = addDocAndGetVersion("id", id, "title_s", "title" + id,
                                            "val1_i_dvo", nextVal1, "val2_l_dvo", nextVal2, "_version_",
                                            info.version);
                                    log.info("FULL: Writing id=" + id + ", val=[" + nextVal1 + "," + nextVal2
                                            + "], version=" + info.version + ", Prev was=[" + val1 + "," + val2
                                            + "].  Returned version=" + returnedVersion);

                                } catch (RuntimeException e) {
                                    if (e.getMessage() != null && e.getMessage().contains("version conflict")
                                            || e.getMessage() != null && e.getMessage().contains("Conflict")) {
                                        // Its okay for a leader to reject a concurrent request
                                        log.warn("Conflict during full update, rejected id={}, {}", id, e);
                                        returnedVersion = null;
                                    } else {
                                        throw e;
                                    }
                                }
                            } else {
                                // PARTIAL
                                nextVal2 = val2 + val1;
                                try {
                                    returnedVersion = addDocAndGetVersion("id", id, "val2_l_dvo",
                                            map("inc", String.valueOf(val1)), "_version_", info.version);
                                    log.info("PARTIAL: Writing id=" + id + ", val=[" + nextVal1 + "," + nextVal2
                                            + "], version=" + info.version + ", Prev was=[" + val1 + "," + val2
                                            + "].  Returned version=" + returnedVersion);
                                } catch (RuntimeException e) {
                                    if (e.getMessage() != null && e.getMessage().contains("version conflict")
                                            || e.getMessage() != null && e.getMessage().contains("Conflict")) {
                                        // Its okay for a leader to reject a concurrent request
                                        log.warn("Conflict during partial update, rejected id={}, {}", id, e);
                                    } else if (e.getMessage() != null
                                            && e.getMessage().contains("Document not found for update.")
                                            && e.getMessage().contains("id=" + id)) {
                                        log.warn(
                                                "Attempted a partial update for a recently deleted document, rejected id={}, {}",
                                                id, e);
                                    } else {
                                        throw e;
                                    }
                                    returnedVersion = null;
                                }
                            }

                            // only update model if update had no conflict & the version is newer
                            synchronized (model) {
                                DocInfo currInfo = model.get(id);
                                if (null != returnedVersion && (Math.abs(returnedVersion.longValue()) > Math
                                        .abs(currInfo.version))) {
                                    model.put(id, new DocInfo(returnedVersion.longValue(), nextVal1, nextVal2));
                                }

                            }
                        }

                        if (!before) {
                            lastId = id;
                        }
                    }
                } catch (Throwable e) {
                    operations.set(-1L);
                    log.error("", e);
                    throw new RuntimeException(e);
                }
            }
        };

        threads.add(thread);

    }

    // Read threads
    for (int i = 0; i < nReadThreads; i++) {
        Thread thread = new Thread("READER" + i) {
            Random rand = new Random(random().nextInt());

            @SuppressWarnings("unchecked")
            @Override
            public void run() {
                // Reader loop: repeatedly query a (biased-random) doc id, either via
                // real-time-get against the live model or via search against the
                // committed model, and assert the returned doc is consistent.
                try {
                    while (operations.decrementAndGet() >= 0) {
                        // bias toward a recently changed doc
                        int id = rand.nextInt(100) < 25 ? lastId : rand.nextInt(ndocs);

                        // when indexing, we update the index, then the model
                        // so when querying, we should first check the model, and then the index

                        boolean realTime = rand.nextInt(100) < percentRealtimeQuery;
                        DocInfo expected;

                        if (realTime) {
                            expected = model.get(id);
                        } else {
                            // committedModel is guarded by the enclosing test instance's monitor
                            synchronized (TestStressInPlaceUpdates.this) {
                                expected = committedModel.get(id);
                            }
                        }

                        if (VERBOSE) {
                            log.info("querying id {}", id);
                        }
                        ModifiableSolrParams params = new ModifiableSolrParams();
                        if (realTime) {
                            params.set("wt", "json");
                            params.set("qt", "/get");
                            params.set("ids", Integer.toString(id));
                        } else {
                            params.set("wt", "json");
                            params.set("q", "id:" + Integer.toString(id));
                            params.set("omitHeader", "true");
                        }

                        int clientId = rand.nextInt(clients.size());
                        // non-realtime searches must go through the client used for commits,
                        // so the search sees the committed state the committedModel reflects
                        if (!realTime) {
                            clientId = clientIndexUsedForCommit;
                        }

                        QueryResponse response = clients.get(clientId).query(params);
                        if (response.getResults().size() == 0) {
                            // there's no info we can get back with a delete, so not much we can check without further synchronization
                        } else if (response.getResults().size() == 1) {
                            final SolrDocument actual = response.getResults().get(0);
                            final String msg = "Realtime=" + realTime + ", expected=" + expected + ", actual="
                                    + actual;
                            assertNotNull(msg, actual);

                            final Long foundVersion = (Long) actual.getFieldValue("_version_");
                            assertNotNull(msg, foundVersion);
                            assertTrue(msg + "... solr doc has non-positive version???",
                                    0 < foundVersion.longValue());
                            final Integer intVal = (Integer) actual.getFieldValue("val1_i_dvo");
                            assertNotNull(msg, intVal);

                            final Long longVal = (Long) actual.getFieldValue("val2_l_dvo");
                            assertNotNull(msg, longVal);

                            assertTrue(msg + " ...solr returned older version then model. "
                                    + "should not be possible given the order of operations in writer threads",
                                    Math.abs(expected.version) <= foundVersion.longValue());

                            // field values are only comparable when the versions match exactly;
                            // a newer doc in solr may legitimately carry different values
                            if (foundVersion.longValue() == expected.version) {
                                assertEquals(msg, expected.intFieldValue, intVal.intValue());
                                assertEquals(msg, expected.longFieldValue, longVal.longValue());
                            }

                            // Some things we can assert about any Doc returned from solr,
                            // even if it's newer than our (expected) model information...

                            assertTrue(msg + " ...how did a doc in solr get a non positive intVal?",
                                    0 < intVal);
                            assertTrue(msg + " ...how did a doc in solr get a non positive longVal?",
                                    0 < longVal);
                            assertEquals(msg
                                    + " ...intVal and longVal in solr doc are internally (modulo) inconsistent w/eachother",
                                    0, (longVal % intVal));

                            // NOTE: when foundVersion is greater than the version read from the model,
                            // it's not possible to make any assertions about the field values in solr relative to the
                            // field values in the model -- ie: we can *NOT* assert expected.longFieldVal <= doc.longVal
                            //
                            // it's tempting to think that this would be possible if we changed our model to preserve the
                            // "old" values when doing a delete, but that's still no guarantee because of how opportunistic
                            // concurrency works with negative versions:  When adding a doc, we can assert that it must not
                            // exist with version<0, but we can't assert that the *reason* it doesn't exist was because of
                            // a delete with the specific version of "-42".
                            // So a writer thread might (1) prep to add a doc for the first time with "intValue=1,_version_=-1",
                            // and that add may succeed and (2) return some version X which is put in the model.  but
                            // in between #1 and #2 other threads may have added & deleted the doc repeatedly, updating
                            // the model with intValue=7,_version_=-42, and a reader thread might meanwhile read from the
                            // model before #2 and expect intValue=5, but get intValue=1 from solr (with a greater version)

                        } else {
                            // BUGFIX: the format string previously used "{}" (an SLF4J placeholder),
                            // which java.util.Formatter treats as literal text -- the response was
                            // silently dropped from the failure message.  Use the %s conversion.
                            fail(String.format(Locale.ENGLISH, "There were more than one result: %s",
                                    response));
                        }
                    }
                } catch (Throwable e) {
                    // signal all threads to stop (their loops exit once operations <= 0),
                    // then surface the failure to the test framework
                    operations.set(-1L);
                    log.error("", e);
                    throw new RuntimeException(e);
                }
            }
        };

        threads.add(thread);
    }
    // Start all threads
    for (Thread thread : threads) {
        thread.start();
    }

    for (Thread thread : threads) {
        thread.join();
    }

    { // final pass over uncommitted model with RTG

        for (SolrClient client : clients) {
            for (Map.Entry<Integer, DocInfo> entry : model.entrySet()) {
                final Integer id = entry.getKey();
                final DocInfo expected = entry.getValue();
                final SolrDocument actual = client.getById(id.toString());

                String msg = "RTG: " + id + "=" + expected;
                if (null == actual) {
                    // a deleted or non-existent document
                    // sanity check of the model agrees...
                    assertTrue(msg + " is deleted/non-existent in Solr, but model has non-neg version",
                            expected.version < 0);
                    assertEquals(msg + " is deleted/non-existent in Solr", expected.intFieldValue, 0);
                    assertEquals(msg + " is deleted/non-existent in Solr", expected.longFieldValue, 0);
                } else {
                    msg = msg + " <==VS==> " + actual;
                    assertEquals(msg, expected.intFieldValue, actual.getFieldValue("val1_i_dvo"));
                    assertEquals(msg, expected.longFieldValue, actual.getFieldValue("val2_l_dvo"));
                    assertEquals(msg, expected.version, actual.getFieldValue("_version_"));
                    assertTrue(msg + " doc exists in solr, but version is negative???", 0 < expected.version);
                }
            }
        }
    }

    { // do a final search and compare every result with the model

        // because commits don't provide any sort of concrete versioning (or optimistic concurrency constraints)
        // there's no way to garuntee that our committedModel matches what was in Solr at the time of the last commit.
        // It's possible other threads made additional writes to solr before the commit was processed, but after
        // the committedModel variable was assigned it's new value.
        //
        // what we can do however, is commit all completed updates, and *then* compare solr search results
        // against the (new) committed model....

        waitForThingsToLevelOut(30); // NOTE: this does an automatic commit for us & ensures replicas are up to date
        committedModel = new HashMap<>(model);

        // first, prune the model of any docs that have negative versions
        // ie: were never actually added, or were ultimately deleted.
        for (int i = 0; i < ndocs; i++) {
            DocInfo info = committedModel.get(i);
            if (info.version < 0) {
                // first, a quick sanity check of the model itself...
                assertEquals("Inconsistent int value in model for deleted doc" + i + "=" + info, 0,
                        info.intFieldValue);
                assertEquals("Inconsistent long value in model for deleted doc" + i + "=" + info, 0L,
                        info.longFieldValue);

                committedModel.remove(i);
            }
        }

        for (SolrClient client : clients) {
            QueryResponse rsp = client.query(params("q", "*:*", "sort", "id asc", "rows", ndocs + ""));
            for (SolrDocument actual : rsp.getResults()) {
                final Integer id = Integer.parseInt(actual.getFieldValue("id").toString());
                final DocInfo expected = committedModel.get(id);

                assertNotNull("Doc found but missing/deleted from model: " + actual, expected);

                final String msg = "Search: " + id + "=" + expected + " <==VS==> " + actual;
                assertEquals(msg, expected.intFieldValue, actual.getFieldValue("val1_i_dvo"));
                assertEquals(msg, expected.longFieldValue, actual.getFieldValue("val2_l_dvo"));
                assertEquals(msg, expected.version, actual.getFieldValue("_version_"));
                assertTrue(msg + " doc exists in solr, but version is negative???", 0 < expected.version);

                // also sanity check the model (which we already know matches the doc)
                assertEquals("Inconsistent (modulo) values in model for id " + id + "=" + expected, 0,
                        (expected.longFieldValue % expected.intFieldValue));
            }
            assertEquals(committedModel.size(), rsp.getResults().getNumFound());
        }
    }
}

From source file:org.lightjason.agentspeak.action.buildin.math.CNextPrime.java

/**
 * Replaces every numeric argument with the smallest prime greater than or equal
 * to it and appends each result to the return list as a {@code Long} raw term.
 *
 * @param p_context execution context of the calling agent
 * @param p_parallel parallel-execution flag (unused here)
 * @param p_argument flat or nested list of numeric arguments
 * @param p_return receives one prime term per input argument
 * @param p_annotation annotation terms (unused here)
 * @return always a successful fuzzy-boolean result
 */
@Override
public final IFuzzyValue<Boolean> execute(final IContext p_context, final boolean p_parallel,
        final List<ITerm> p_argument, final List<ITerm> p_return, final List<ITerm> p_annotation) {
    CCommon.flatcollection(p_argument)
           .map(ITerm::<Number>raw)
           // widen to long before boxing so each term carries a Long value
           .map(l_number -> (long) Primes.nextPrime(l_number.intValue()))
           .map(CRawTerm::from)
           .forEach(p_return::add);

    return CFuzzyValue.from(true);
}

From source file:org.lightjason.agentspeak.action.builtin.math.CNextPrime.java

/**
 * Replaces every numeric argument with the smallest prime greater than or equal
 * to it and appends each result to the return list as a {@code Long} raw term.
 *
 * @param p_parallel parallel-execution flag (unused here)
 * @param p_context execution context of the calling agent
 * @param p_argument flat or nested list of numeric arguments
 * @param p_return receives one prime term per input argument
 * @return always a successful fuzzy-boolean result
 */
@Nonnull
@Override
public final IFuzzyValue<Boolean> execute(final boolean p_parallel, @Nonnull final IContext p_context,
        @Nonnull final List<ITerm> p_argument, @Nonnull final List<ITerm> p_return) {
    // BUGFIX: mapToDouble boxed each prime as a Double (e.g. 7.0) even though
    // Primes.nextPrime returns an int; use mapToLong so the returned terms hold
    // integral Long values, consistent with the buildin variant of this action.
    CCommon.flatten(p_argument).map(ITerm::<Number>raw).mapToLong(i -> Primes.nextPrime(i.intValue())).boxed()
            .map(CRawTerm::from).forEach(p_return::add);

    return CFuzzyValue.from(true);
}