Example usage for java.util.concurrent.atomic AtomicInteger getAndIncrement

Introduction

On this page you can find example usage of java.util.concurrent.atomic.AtomicInteger.getAndIncrement().

Prototype

public final int getAndIncrement() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd. Equivalent to getAndAdd(1); returns the previous value.
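
For orientation, here is a minimal, self-contained sketch (the GetAndIncrementDemo class is purely illustrative and not taken from any of the projects listed below) showing that getAndIncrement() returns the value held before the increment and stays consistent under concurrent updates:

import java.util.concurrent.atomic.AtomicInteger;

public class GetAndIncrementDemo {
    public static void main(String[] args) throws InterruptedException {
        AtomicInteger counter = new AtomicInteger(0);

        // getAndIncrement() returns the value *before* the increment.
        int previous = counter.getAndIncrement(); // previous == 0, counter.get() == 1

        // The update is atomic, so concurrent callers never observe the same value twice
        // and no increments are lost.
        Thread[] workers = new Thread[4];
        for (int i = 0; i < workers.length; i++) {
            workers[i] = new Thread(() -> {
                for (int j = 0; j < 1000; j++) {
                    counter.getAndIncrement();
                }
            });
            workers[i].start();
        }
        for (Thread worker : workers) {
            worker.join();
        }

        System.out.println("previous = " + previous);      // 0
        System.out.println("final    = " + counter.get()); // 4001 (1 + 4 * 1000)
    }
}

If the new value is needed instead of the old one, incrementAndGet() performs the same atomic update but returns the incremented value.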

Usage

From source file:org.springframework.cloud.gateway.filter.WeightCalculatorWebFilter.java

void addWeightConfig(WeightConfig weightConfig) {
    String group = weightConfig.getGroup();
    GroupWeightConfig c = groupWeights.get(group);
    if (c == null) {
        c = new GroupWeightConfig(group);
        groupWeights.put(group, c);
    }
    GroupWeightConfig config = c;
    config.weights.put(weightConfig.getRouteId(), weightConfig.getWeight());

    // recalculate

    // normalize weights
    int weightsSum = config.weights.values().stream().mapToInt(Integer::intValue).sum();

    final AtomicInteger index = new AtomicInteger(0);
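    // The forEach lambda below cannot reassign a local int (locals must be effectively final),
    // so an AtomicInteger serves as the mutable index.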
    config.weights.forEach((routeId, weight) -> {
        Double normalizedWeight = weight / (double) weightsSum;
        config.normalizedWeights.put(routeId, normalizedWeight);

        // recalculate rangeIndexes
        config.rangeIndexes.put(index.getAndIncrement(), routeId);
    });

    // TODO: calculate ranges
    config.ranges.clear();

    config.ranges.add(0.0);

    List<Double> values = new ArrayList<>(config.normalizedWeights.values());
    for (int i = 0; i < values.size(); i++) {
        Double currentWeight = values.get(i);
        Double previousRange = config.ranges.get(i);
        Double range = previousRange + currentWeight;
        config.ranges.add(range);
    }

    if (log.isTraceEnabled()) {
        log.trace("Recalculated group weight config " + config);
    }
}

From source file:com.simiacryptus.mindseye.applications.ObjectLocationBase.java

/**
 * Run.
 *
 * @param log the log
 */
public void run(@Nonnull final NotebookOutput log) {
    //    @Nonnull String logName = "cuda_" + log.getName() + ".log";
    //    log.p(log.file((String) null, logName, "GPU Log"));
    //    CudaSystem.addLog(new PrintStream(log.file(logName)));

    ImageClassifierBase classifier = getClassifierNetwork();
    Layer classifyNetwork = classifier.getNetwork();

    ImageClassifierBase locator = getLocatorNetwork();
    Layer locatorNetwork = locator.getNetwork();
    ArtistryUtil.setPrecision((DAGNetwork) classifyNetwork, Precision.Float);
    ArtistryUtil.setPrecision((DAGNetwork) locatorNetwork, Precision.Float);

    Tensor[][] inputData = loadImages_library();
    //    Tensor[][] inputData = loadImage_Caltech101(log);
    double alphaPower = 0.8;

    final AtomicInteger index = new AtomicInteger(0);
    Arrays.stream(inputData).limit(10).forEach(row -> {
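        // getAndIncrement() numbers the image headings 0, 1, 2, ... in stream order.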
        log.h3("Image " + index.getAndIncrement());
        final Tensor img = row[0];
        log.p(log.image(img.toImage(), ""));
        Result classifyResult = classifyNetwork.eval(new MutableResult(row));
        Result locationResult = locatorNetwork.eval(new MutableResult(row));
        Tensor classification = classifyResult.getData().get(0);
        List<CharSequence> categories = classifier.getCategories();
        int[] sortedIndices = IntStream.range(0, categories.size()).mapToObj(x -> x)
                .sorted(Comparator.comparing(i -> -classification.get(i))).mapToInt(x -> x).limit(10).toArray();
        logger.info(Arrays.stream(sortedIndices)
                .mapToObj(
                        i -> String.format("%s: %s = %s%%", i, categories.get(i), classification.get(i) * 100))
                .reduce((a, b) -> a + "\n" + b).orElse(""));
        LinkedHashMap<CharSequence, Tensor> vectors = new LinkedHashMap<>();
        List<CharSequence> predictionList = Arrays.stream(sortedIndices).mapToObj(categories::get)
                .collect(Collectors.toList());
        Arrays.stream(sortedIndices).limit(6).forEach(category -> {
            CharSequence name = categories.get(category);
            log.h3(name);
            Tensor alphaTensor = renderAlpha(alphaPower, img, locationResult, classification, category);
            log.p(log.image(img.toRgbImageAlphaMask(0, 1, 2, alphaTensor), ""));
            vectors.put(name, alphaTensor.unit());
        });

        Tensor avgDetection = vectors.values().stream().reduce((a, b) -> a.add(b)).get()
                .scale(1.0 / vectors.size());
        Array2DRowRealMatrix covarianceMatrix = new Array2DRowRealMatrix(predictionList.size(),
                predictionList.size());
        for (int x = 0; x < predictionList.size(); x++) {
            for (int y = 0; y < predictionList.size(); y++) {
                Tensor l = vectors.get(predictionList.get(x));
                Tensor r = vectors.get(predictionList.get(y));

                covarianceMatrix.setEntry(x, y,
                        null == l || null == r ? 0 : (l.minus(avgDetection)).dot(r.minus(avgDetection)));
            }
        }
        @Nonnull
        final EigenDecomposition decomposition = new EigenDecomposition(covarianceMatrix);

        for (int objectVector = 0; objectVector < 10; objectVector++) {
            log.h3("Eigenobject " + objectVector);
            double eigenvalue = decomposition.getRealEigenvalue(objectVector);
            RealVector eigenvector = decomposition.getEigenvector(objectVector);
            Tensor detectionRegion = IntStream.range(0, eigenvector.getDimension()).mapToObj(i -> {
                Tensor tensor = vectors.get(predictionList.get(i));
                return null == tensor ? null : tensor.scale(eigenvector.getEntry(i));
            }).filter(x -> null != x).reduce((a, b) -> a.add(b)).get();
            detectionRegion = detectionRegion.scale(255.0 / detectionRegion.rms());
            CharSequence categorization = IntStream.range(0, eigenvector.getDimension()).mapToObj(i -> {
                CharSequence category = predictionList.get(i);
                double component = eigenvector.getEntry(i);
                return String.format("<li>%s = %.4f</li>", category, component);
            }).reduce((a, b) -> a + "" + b).get();
            log.p(String.format("Object Detected: <ol>%s</ol>", categorization));
            log.p("Object Eigenvalue: " + eigenvalue);
            log.p("Object Region: " + log.image(img.toRgbImageAlphaMask(0, 1, 2, detectionRegion), ""));
            log.p("Object Region Compliment: "
                    + log.image(img.toRgbImageAlphaMask(0, 1, 2, detectionRegion.scale(-1)), ""));
        }

        //      final int[] orderedVectors = IntStream.range(0, 10).mapToObj(x -> x)
        //        .sorted(Comparator.comparing(x -> -decomposition.getRealEigenvalue(x))).mapToInt(x -> x).toArray();
        //      IntStream.range(0, orderedVectors.length)
        //        .mapToObj(i -> {
        //            //double realEigenvalue = decomposition.getRealEigenvalue(orderedVectors[i]);
        //            return decomposition.getEigenvector(orderedVectors[i]).toArray();
        //          }
        //        ).toArray(i -> new double[i][]);

        log.p(String.format(
                "<table><tr><th>Cosine Distance</th>%s</tr>%s</table>", Arrays.stream(sortedIndices).limit(10)
                        .mapToObj(col -> "<th>" + categories.get(col) + "</th>").reduce((a, b) -> a + b).get(),
                Arrays.stream(sortedIndices).limit(10).mapToObj(r -> {
                    return String.format("<tr><td>%s</td>%s</tr>", categories.get(r),
                            Arrays.stream(sortedIndices).limit(10).mapToObj(col -> {
                                Tensor l = vectors.get(categories.get(r));
                                Tensor r2 = vectors.get(categories.get(col));
                                return String.format("<td>%.4f</td>",
                                        (null == l || null == r2) ? 0 : Math.acos(l.dot(r2)));
                            }).reduce((a, b) -> a + b).get());
                }).reduce((a, b) -> a + b).orElse("")));
    });

    log.setFrontMatterProperty("status", "OK");
}

From source file:org.languagetool.server.PipelinePool.java

Pipeline getPipeline(PipelineSettings settings) throws Exception {
    if (pool != null) {
        // expire old pipelines in queues (where settings may be used, but some of the created pipelines are unused)
        long expireCheckDelta = System.currentTimeMillis() - pipelineExpireCheckTimestamp;
        if (expireCheckDelta > PIPELINE_EXPIRE_TIME) {
            AtomicInteger removed = new AtomicInteger();
            pipelineExpireCheckTimestamp = System.currentTimeMillis();
            //pool.asMap().forEach((s, queue) -> queue.removeIf(Pipeline::isExpired));
            pool.asMap().forEach((s, queue) -> queue.removeIf(pipeline -> {
                if (pipeline.isExpired()) {
                    removed.getAndIncrement();
                    return true;
                } else {
                    return false;
                }
            }));
            ServerTools.print("Removing " + removed.get() + " expired pipelines");
        }

        requests++;
        ConcurrentLinkedQueue<Pipeline> pipelines = pool.get(settings);
        if (requests % 1000 == 0) {
            ServerTools.print(
                    String.format("Pipeline cache stats: %f hit rate", (double) pipelinesUsed / requests));
        }
        Pipeline pipeline = pipelines.poll();
        if (pipeline == null) {
            //ServerTools.print(String.format("No prepared pipeline found for %s; creating one.", settings));
            pipeline = createPipeline(settings.lang, settings.motherTongue, settings.query,
                    settings.globalConfig, settings.user);
        } else {
            pipelinesUsed++;
            //ServerTools.print(String.format("Prepared pipeline found for %s; using it.", settings));
        }
        return pipeline;
    } else {
        return createPipeline(settings.lang, settings.motherTongue, settings.query, settings.globalConfig,
                settings.user);
    }
}

From source file:org.fcrepo.client.ConnectionManagementTest.java

/**
 * Demonstrates that connections are NOT released if the user of the FcrepoClient does not handle the response
 * body at all.
 */
@Test
public void connectionNotReleasedWhenEntityBodyIgnored() {
    final int expectedCount = (int) Stream.of(HttpMethods.values()).filter(m -> m.entity).count();
    final AtomicInteger actualCount = new AtomicInteger(0);
    final MockHttpExpectations.Uris uri = uris.uri200;

    Stream.of(HttpMethods.values()).filter(method -> method.entity).forEach(method -> {
        connect(client, uri, method, null);
        actualCount.getAndIncrement();
    });

    assertEquals("Expected to make " + expectedCount + " connections; made " + actualCount.get(), expectedCount,
            actualCount.get());
    verifyConnectionRequestedButNotClosed(actualCount.get(), connectionManager);
}

From source file:com.linkedin.pinot.perf.QueryRunner.java

/**
 * Use multiple threads to run query at an increasing target QPS.
 *
 * Use a concurrent linked queue to buffer the queries to be sent. Use the main thread to insert queries into the
 * queue at the target QPS, and start {numThreads} worker threads to fetch queries from the queue and send them.
 * We start with the start QPS, and keep adding delta QPS to the start QPS during the test. The main thread is
 * responsible for collecting the statistic information and log them periodically.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numThreads number of threads sending queries.
 * @param startQPS start QPS
 * @param deltaQPS delta QPS
 * @throws Exception
 */
@SuppressWarnings("InfiniteLoopStatement")
public static void targetQPSQueryRunner(PerfBenchmarkDriverConf conf, String queryFile, int numThreads,
        double startQPS, double deltaQPS) throws Exception {
    final long randomSeed = 123456789L;
    final Random random = new Random(randomSeed);
    final int timePerTargetQPSMillis = 60000;
    final int queueLengthThreshold = Math.max(20, (int) startQPS);

    final List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }
    final int numQueries = queries.size();

    final PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    final AtomicInteger counter = new AtomicInteger(0);
    final AtomicLong totalResponseTime = new AtomicLong(0L);
    final ExecutorService executorService = Executors.newFixedThreadPool(numThreads);

    final ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
    double currentQPS = startQPS;
    int intervalMillis = (int) (MILLIS_PER_SECOND / currentQPS);

    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Runnable() {
            @Override
            public void run() {
                while (true) {
                    String query = queryQueue.poll();
                    if (query == null) {
                        try {
                            Thread.sleep(1);
                            continue;
                        } catch (InterruptedException e) {
                            LOGGER.error("Interrupted.", e);
                            return;
                        }
                    }
                    long startTime = System.currentTimeMillis();
                    try {
                        driver.postQuery(query);
                        counter.getAndIncrement();
                        totalResponseTime.getAndAdd(System.currentTimeMillis() - startTime);
                    } catch (Exception e) {
                        LOGGER.error("Caught exception while running query: {}", query, e);
                        return;
                    }
                }
            }
        });
    }

    LOGGER.info("Start with QPS: {}, delta QPS: {}", startQPS, deltaQPS);
    while (true) {
        long startTime = System.currentTimeMillis();
        while (System.currentTimeMillis() - startTime <= timePerTargetQPSMillis) {
            if (queryQueue.size() > queueLengthThreshold) {
                executorService.shutdownNow();
                throw new RuntimeException("Cannot achieve target QPS of: " + currentQPS);
            }
            queryQueue.add(queries.get(random.nextInt(numQueries)));
            Thread.sleep(intervalMillis);
        }
        double timePassedSeconds = ((double) (System.currentTimeMillis() - startTime)) / MILLIS_PER_SECOND;
        int count = counter.getAndSet(0);
        double avgResponseTime = ((double) totalResponseTime.getAndSet(0)) / count;
        LOGGER.info("Target QPS: {}, Interval: {}ms, Actual QPS: {}, Avg Response Time: {}ms", currentQPS,
                intervalMillis, count / timePassedSeconds, avgResponseTime);

        // Find a new interval
        int newIntervalMillis;
        do {
            currentQPS += deltaQPS;
            newIntervalMillis = (int) (MILLIS_PER_SECOND / currentQPS);
        } while (newIntervalMillis == intervalMillis);
        intervalMillis = newIntervalMillis;
    }
}

From source file:org.apache.hadoop.hive.ql.exec.tez.TestHostAffinitySplitLocationProvider.java

private double testHashDistribution(int locs, final int missCount, FileSplit[] splits,
        AtomicInteger errorCount) {
    // This relies heavily on what method determineSplits ... calls and doesn't.
    // We could do a wrapper with only size() and get() methods instead of List, to be sure.
    @SuppressWarnings("unchecked")
    List<String> partLocs = (List<String>) Mockito.mock(List.class);
    Mockito.when(partLocs.size()).thenReturn(locs);
    final AtomicInteger state = new AtomicInteger(0);
    Mockito.when(partLocs.get(Mockito.anyInt())).thenAnswer(new Answer<String>() {
        @Override
        public String answer(InvocationOnMock invocation) throws Throwable {
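            // Return a non-null location only on the missCount-th call; every earlier call simulates a miss.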
            return (state.getAndIncrement() == missCount) ? "not-null" : null;
        }
    });
    int[] hitCounts = new int[locs];
    for (int splitIx = 0; splitIx < splits.length; ++splitIx) {
        state.set(0);
        int index = HostAffinitySplitLocationProvider.determineLocation(partLocs,
                splits[splitIx].getPath().toString(), splits[splitIx].getStart(), null);
        ++hitCounts[index];
    }
    SummaryStatistics ss = new SummaryStatistics();
    for (int hitCount : hitCounts) {
        ss.addValue(hitCount);
    }
    // All of this is completely bogus and mostly captures the following function:
    // f(output) = I-eyeballed-the(output) == they-look-ok.
    // It's pretty much a golden file... 
    // The fact that stdev doesn't increase with increasing missCount is captured outside.
    double avg = ss.getSum() / ss.getN(), stdev = ss.getStandardDeviation(), cv = stdev / avg;
    double allowedMin = avg - 2.5 * stdev, allowedMax = avg + 2.5 * stdev;
    if (allowedMin > ss.getMin() || allowedMax < ss.getMax() || cv > 0.22) {
        LOG.info("The distribution for " + locs + " locations, " + missCount + " misses isn't to "
                + "our liking: avg " + avg + ", stdev " + stdev + ", cv " + cv + ", min " + ss.getMin()
                + ", max " + ss.getMax());
        errorCount.incrementAndGet();
    }
    return cv;
}

From source file:com.metamx.rdiclient.RdiClientImplTest.java

@Test
public void testExceptionOnPostRecoverable() throws Exception {

    final ObjectMapper objectMapper = new ObjectMapper().registerModule(new JodaModule());
    final Serializer<MmxAuctionSummary> serializer = new JacksonSerializer<>(objectMapper);
    final RdiClientImpl<MmxAuctionSummary> rdiClient = makeRdiClient(serializer, 1);

    final AtomicInteger failures = new AtomicInteger(0);
    mockClient.setGoHandler(new GoHandler() {
        @Override
        protected <Intermediate, Final> ListenableFuture<Final> go(Request<Intermediate, Final> request)
                throws Exception {
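            // Fail the first request (getAndIncrement() returns 0 only on the first call), then succeed on the retry.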
            if (failures.getAndIncrement() == 0) {
                return Futures.immediateFailedFuture(new IOException("Something Crazy Happened!"));
            } else {
                return Futures.immediateFuture((Final) okResponse());
            }
        }
    }.times(2));
    rdiClient.start();

    final ListenableFuture<RdiResponse> result = rdiClient.send(sampleEventBasic);
    Assert.assertEquals(RdiResponse.create(), result.get());
    rdiClient.close();
}

From source file:org.nuxeo.ecm.platform.ui.web.application.NuxeoServerSideStateHelper.java

/**
 * @param ctx the <code>FacesContext</code> for the current request
 * @return a unique ID for building the keys used to store views within a session
 */
private String createIncrementalRequestId(FacesContext ctx) {
    Map<String, Object> sm = ctx.getExternalContext().getSessionMap();
    AtomicInteger idgen = (AtomicInteger) sm.get(STATEMANAGED_SERIAL_ID_KEY);
    if (idgen == null) {
        idgen = new AtomicInteger(1);
    }
    // always call put/setAttribute as we may be in a clustered environment.
    sm.put(STATEMANAGED_SERIAL_ID_KEY, idgen);
    return (UIViewRoot.UNIQUE_ID_PREFIX + idgen.getAndIncrement());
}

From source file:org.fcrepo.client.ConnectionManagementTest.java

/**
 * Demonstrates that connections are released when the user of the FcrepoClient reads the HTTP entity body.
 */
@Test
public void connectionReleasedOnEntityBodyRead() {
    final int expectedCount = (int) Stream.of(HttpMethods.values()).filter(m -> m.entity).count();
    final AtomicInteger actualCount = new AtomicInteger(0);
    final MockHttpExpectations.Uris uri = uris.uri200;

    Stream.of(HttpMethods.values()).filter(method -> method.entity).forEach(method -> {
        connect(client, uri, method, FcrepoResponseHandler.readEntityBody);
        actualCount.getAndIncrement();
    });

    assertEquals("Expected to make " + expectedCount + " connections; made " + actualCount.get(), expectedCount,
            actualCount.get());
    verifyConnectionRequestedAndClosed(actualCount.get(), connectionManager);
}

From source file:org.fcrepo.client.ConnectionManagementTest.java

/**
 * Demonstrates that HTTP connections are released when the user of the FcrepoClient closes the HTTP entity body.
 * Each method of the FcrepoClient (get, put, post, etc.) is tested.
 */
@Test
public void connectionReleasedOnEntityBodyClose() {
    final int expectedCount = (int) Stream.of(HttpMethods.values()).filter(m -> m.entity).count();
    final AtomicInteger actualCount = new AtomicInteger(0);
    final MockHttpExpectations.Uris uri = uris.uri200;

    Stream.of(HttpMethods.values()).filter(method -> method.entity).forEach(method -> {
        connect(client, uri, method, FcrepoResponseHandler.closeEntityBody);
        actualCount.getAndIncrement();
    });

    assertEquals("Expected to make " + expectedCount + " connections; made " + actualCount.get(), expectedCount,
            actualCount.get());
    verifyConnectionRequestedAndClosed(actualCount.get(), connectionManager);
}