Example usage for java.util.concurrent.atomic.AtomicInteger.incrementAndGet

Introduction

This page collects example usages of java.util.concurrent.atomic.AtomicInteger#incrementAndGet() from open-source projects.

Prototype

public final int incrementAndGet() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd, and returns the updated value.
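
As a minimal, self-contained sketch of that contract (the class name IncrementAndGetDemo is invented for illustration): each call atomically adds one and returns the updated value, so concurrent increments are never lost the way a plain count++ would be.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class IncrementAndGetDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger counter = new AtomicInteger(0);

        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int t = 0; t < 4; t++) {
            pool.execute(() -> {
                for (int i = 0; i < 1000; i++) {
                    counter.incrementAndGet(); // atomic read-modify-write; no lost updates
                }
            });
        }
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);

        System.out.println(counter.get()); // always 4000; a plain int++ could print less
    }
}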

Usage

From source file:org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrServerTest.java

@Test
public void testConcurrentUpdate() throws Exception {
    TestServlet.clear();

    String serverUrl = jetty.getBaseUrl().toString() + "/cuss/foo";

    int cussThreadCount = 2;
    int cussQueueSize = 100;

    // for tracking callbacks from CUSS
    final AtomicInteger successCounter = new AtomicInteger(0);
    final AtomicInteger errorCounter = new AtomicInteger(0);
    final StringBuilder errors = new StringBuilder();

    @SuppressWarnings("serial")
    ConcurrentUpdateSolrServer cuss = new ConcurrentUpdateSolrServer(serverUrl, cussQueueSize,
            cussThreadCount) {
        @Override
        public void handleError(Throwable ex) {
            errorCounter.incrementAndGet();
            errors.append(" " + ex);
        }

        @Override
        public void onSuccess(HttpResponse resp) {
            successCounter.incrementAndGet();
        }
    };

    cuss.setParser(new BinaryResponseParser());
    cuss.setRequestWriter(new BinaryRequestWriter());
    cuss.setPollQueueTime(0);

    // ensure it doesn't block where there's nothing to do yet
    cuss.blockUntilFinished();

    int poolSize = 5;
    ExecutorService threadPool = Executors.newFixedThreadPool(poolSize,
            new SolrjNamedThreadFactory("testCUSS"));

    int numDocs = 100;
    int numRunnables = 5;
    for (int r = 0; r < numRunnables; r++)
        threadPool.execute(new SendDocsRunnable(String.valueOf(r), numDocs, cuss));

    // ensure all docs are sent
    threadPool.shutdown();
    threadPool.awaitTermination(5, TimeUnit.SECONDS);

    // wait until all requests are processed by CUSS 
    cuss.blockUntilFinished();
    cuss.shutdownNow();

    assertEquals("post", TestServlet.lastMethod);

    // expect all requests to be successful
    int expectedSuccesses = TestServlet.numReqsRcvd.get();
    assertTrue(expectedSuccesses > 0); // at least one request must have been sent

    assertTrue("Expected no errors but got " + errorCounter.get() + ", due to: " + errors.toString(),
            errorCounter.get() == 0);
    assertTrue("Expected " + expectedSuccesses + " successes, but got " + successCounter.get(),
            successCounter.get() == expectedSuccesses);

    int expectedDocs = numDocs * numRunnables;
    assertTrue("Expected CUSS to send " + expectedDocs + " but got " + TestServlet.numDocsRcvd.get(),
            TestServlet.numDocsRcvd.get() == expectedDocs);
}

From source file:org.apache.hadoop.lib.service.instrumentation.TestInstrumentationService.java

@Test
@TestDir
@SuppressWarnings("unchecked")
public void sampling() throws Exception {
    String dir = TestDirHelper.getTestDir().getAbsolutePath();
    String services = StringUtils.join(",",
            Arrays.asList(InstrumentationService.class.getName(), SchedulerService.class.getName()));
    Configuration conf = new Configuration(false);
    conf.set("server.services", services);
    Server server = new Server("server", dir, dir, dir, dir, conf);
    server.init();
    Instrumentation instrumentation = server.get(Instrumentation.class);

    final AtomicInteger count = new AtomicInteger();

    Instrumentation.Variable<Long> varToSample = new Instrumentation.Variable<Long>() {
        @Override
        public Long getValue() {
            return (long) count.incrementAndGet();
        }
    };
    instrumentation.addSampler("g", "s", 10, varToSample);

    sleep(2000);
    int i = count.get();
    assertTrue(i > 0);

    Map<String, Map<String, ?>> snapshot = instrumentation.getSnapshot();
    Map<String, Map<String, Object>> samplers = (Map<String, Map<String, Object>>) snapshot.get("samplers");
    InstrumentationService.Sampler sampler = (InstrumentationService.Sampler) samplers.get("g").get("s");
    assertTrue(sampler.getRate() > 0);

    server.destroy();
}

From source file:com.netflix.curator.framework.recipes.leader.TestLeaderSelector.java

@Test
public void testKillSession() throws Exception {
    final Timing timing = new Timing();

    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(),
            timing.connection(), new RetryOneTime(1));
    client.start();
    try {
        final Semaphore semaphore = new Semaphore(0);
        final CountDownLatch interruptedLatch = new CountDownLatch(1);
        final AtomicInteger leaderCount = new AtomicInteger(0);
        LeaderSelectorListener listener = new LeaderSelectorListener() {
            private volatile Thread ourThread;

            @Override
            public void takeLeadership(CuratorFramework client) throws Exception {
                leaderCount.incrementAndGet();
                try {
                    ourThread = Thread.currentThread();
                    semaphore.release();
                    try {
                        Thread.sleep(1000000);
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                        interruptedLatch.countDown();
                    }
                } finally {
                    leaderCount.decrementAndGet();
                }
            }

            @Override
            public void stateChanged(CuratorFramework client, ConnectionState newState) {
                if ((newState == ConnectionState.LOST) && (ourThread != null)) {
                    ourThread.interrupt();
                }
            }
        };
        LeaderSelector leaderSelector1 = new LeaderSelector(client, PATH_NAME, listener);
        LeaderSelector leaderSelector2 = new LeaderSelector(client, PATH_NAME, listener);

        leaderSelector1.start();
        leaderSelector2.start();

        Assert.assertTrue(timing.acquireSemaphore(semaphore, 1));

        KillSession.kill(client.getZookeeperClient().getZooKeeper(), server.getConnectString());

        Assert.assertTrue(timing.awaitLatch(interruptedLatch));
        timing.sleepABit();

        leaderSelector1.requeue();
        leaderSelector2.requeue();

        Assert.assertTrue(timing.acquireSemaphore(semaphore, 1));
        Assert.assertEquals(leaderCount.get(), 1);

        leaderSelector1.close();
        leaderSelector2.close();
    } finally {
        client.close();
    }
}

From source file:com.spectralogic.ds3client.metadata.MetadataAccessImpl_Test.java

@Test
public void testMetadataAccessFailureHandlerWithEventHandler() throws IOException, InterruptedException {
    Assume.assumeFalse(Platform.isWindows());

    final String tempPathPrefix = null;
    final Path tempDirectory = Files.createTempDirectory(Paths.get("."), tempPathPrefix);

    final String fileName = "Gracie.txt";

    final Path filePath = Files.createFile(Paths.get(tempDirectory.toString(), fileName));

    try {
        tempDirectory.toFile().setExecutable(false);

        final ImmutableMap.Builder<String, Path> fileMapper = ImmutableMap.builder();

        fileMapper.put(filePath.toString(), filePath);

        final AtomicInteger numTimesFailureHandlerCalled = new AtomicInteger(0);

        try (final InputStream inputStream = Runtime.getRuntime().exec("ls -lR").getInputStream()) {
            LOG.info(IOUtils.toString(inputStream));
        }
        new MetadataAccessImpl(fileMapper.build(), new FailureEventListener() {
            @Override
            public void onFailure(final FailureEvent failureEvent) {
                numTimesFailureHandlerCalled.incrementAndGet();
                assertEquals(FailureEvent.FailureActivity.RecordingMetadata, failureEvent.doingWhat());
            }
        }, "localhost").getMetadataValue("forceAFailureByUsingANonExistentFileBecauseTheDockerImageRunsAsRoot");

        assertEquals(1, numTimesFailureHandlerCalled.get());
    } finally {
        tempDirectory.toFile().setExecutable(true);
        FileUtils.deleteDirectory(tempDirectory.toFile());
    }
}

From source file:org.apache.cassandra.db.commitlog.CommitLogReader.java

/**
 * Deserializes and passes a Mutation to the ICommitLogReadHandler requested
 *
 * @param handler Handler that will take action based on deserialized Mutations
 * @param inputBuffer raw byte array w/Mutation data
 * @param size deserialized size of mutation
 * @param minPosition We need to suppress replay of mutations that are before the required minPosition
 * @param entryLocation filePointer offset of mutation within CommitLogSegment
 * @param desc CommitLogDescriptor being worked on
 */
@VisibleForTesting
protected void readMutation(CommitLogReadHandler handler, byte[] inputBuffer, int size,
        CommitLogPosition minPosition, final int entryLocation, final CommitLogDescriptor desc)
        throws IOException {
    // For now, we need to go through the motions of deserializing the mutation to determine its size and move
    // the file pointer forward accordingly, even if we're behind the requested minPosition within this SyncSegment.
    boolean shouldReplay = entryLocation > minPosition.position;

    final Mutation mutation;
    try (RebufferingInputStream bufIn = new DataInputBuffer(inputBuffer, 0, size)) {
        mutation = Mutation.serializer.deserialize(bufIn, desc.getMessagingVersion(),
                SerializationHelper.Flag.LOCAL);
        // double-check that what we read is still valid for the current schema
        for (PartitionUpdate upd : mutation.getPartitionUpdates())
            upd.validate();
    } catch (UnknownColumnFamilyException ex) {
        if (ex.cfId == null)
            return;
        AtomicInteger i = invalidMutations.get(ex.cfId);
        if (i == null) {
            i = new AtomicInteger(1);
            invalidMutations.put(ex.cfId, i);
        } else
            i.incrementAndGet();
        return;
    } catch (Throwable t) {
        JVMStabilityInspector.inspectThrowable(t);
        File f = File.createTempFile("mutation", "dat");

        try (DataOutputStream out = new DataOutputStream(new FileOutputStream(f))) {
            out.write(inputBuffer, 0, size);
        }

        // Checksum passed so this error can't be permissible.
        handler.handleUnrecoverableError(new CommitLogReadException(
                String.format("Unexpected error deserializing mutation; saved to %s.  "
                        + "This may be caused by replaying a mutation against a table with the same name but incompatible schema.  "
                        + "Exception follows: %s", f.getAbsolutePath(), t),
                CommitLogReadErrorReason.MUTATION_ERROR, false));
        return;
    }

    if (logger.isTraceEnabled())
        logger.trace("Read mutation for {}.{}: {}", mutation.getKeyspaceName(), mutation.key(),
                "{" + StringUtils.join(mutation.getPartitionUpdates().iterator(), ", ") + "}");

    if (shouldReplay)
        handler.handleMutation(mutation, size, entryLocation, desc);
}
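
Note that the get-then-put bookkeeping on invalidMutations above is only safe because commit log replay runs single-threaded. If several threads shared that map, the idiomatic variant would collapse the check-create-increment into one atomic step; a sketch, assuming a ConcurrentHashMap keyed by the same cfId UUID and a hypothetical countInvalidMutation helper:

import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

final ConcurrentMap<UUID, AtomicInteger> invalidMutations = new ConcurrentHashMap<>();

void countInvalidMutation(UUID cfId) {
    // computeIfAbsent creates the counter at most once per key, even under races,
    // and incrementAndGet then bumps it atomically.
    invalidMutations.computeIfAbsent(cfId, id -> new AtomicInteger(0)).incrementAndGet();
}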

From source file:org.apache.blur.shell.QueryCommand.java

private void count(Row row, AtomicInteger rowCount, AtomicInteger recordCount, AtomicInteger columnCount,
        AtomicInteger columnSize) {
    rowCount.incrementAndGet();
    List<Record> records = row.getRecords();
    if (records != null) {
        for (Record r : records) {
            count(r, recordCount, columnCount, columnSize);
        }
    }
}

From source file:org.apache.hadoop.hdfs.server.namenode.TestSuppressTransientExceptions.java

void runWithErrors(final int failures, final IOException e, HDFSOperationType type) throws IOException {
    final AtomicInteger count = new AtomicInteger(0);
    HopsTransactionalRequestHandler completeFileHandler = new HopsTransactionalRequestHandler(type) {
        @Override
        public void acquireLock(TransactionLocks locks) throws IOException {
        }

        @Override
        public Object performTask() throws IOException {
            if (count.get() >= failures) {
                return null;
            } else {
                count.incrementAndGet();
                throw e;
            }
        }
    };

    completeFileHandler.handle(this);
}

From source file:de.codesourcery.eve.skills.ui.utils.ParallelUITasksRunner.java

public static boolean submitParallelTasks(final ApplicationThreadManager threadManager, final UITask parent,
        UITask... children) {
    final AtomicInteger childSuccesses = new AtomicInteger(0);
    final AtomicInteger childFailures = new AtomicInteger(0);

    if (parent == null) {
        throw new IllegalArgumentException("parent task cannot be NULL");
    }

    if (ArrayUtils.isEmpty(children)) {
        throw new IllegalArgumentException("Need to provide at least one child task");
    }
    final CyclicBarrier startBarrier = new CyclicBarrier(children.length + 1) {
        @Override
        public void reset() {
            System.out.println("========== resetting start barrier =========== ");
            super.reset();
        }
    };

    final CountDownLatch childrenTerminated = new CountDownLatch(children.length);

    int submittedChildren = 0;
    for (final UITask child : children) {

        final UITask wrapped = new UITask() {

            @Override
            public void successHook() throws Exception {
                boolean success = false;
                try {
                    child.successHook();
                    success = true;
                } finally {
                    if (success) {
                        childSuccesses.incrementAndGet();
                    } else {
                        childFailures.incrementAndGet();
                    }
                    childrenTerminated.countDown();
                }
            }

            @Override
            public void beforeExecution() {
                child.beforeExecution();
                // note: when this method throws an exception, #failure() is invoked
            }

            @Override
            public void setEnabledHook(boolean yesNo) {
                child.setEnabledHook(yesNo);

            }

            @Override
            public void failureHook(Throwable t) throws Exception {
                try {
                    child.failureHook(t);
                } finally {
                    childFailures.incrementAndGet();
                    childrenTerminated.countDown();
                }
            }

            @Override
            public void cancellationHook() {
                try {
                    child.cancellationHook();
                } finally {
                    childFailures.incrementAndGet();
                    childrenTerminated.countDown();
                }
            }

            @Override
            public String getId() {
                return child.getId();
            }

            @Override
            public void run() throws Exception {
                try {
                    if (log.isTraceEnabled()) {
                        log.trace("run(): Child task " + getId() + " is now waiting.");
                    }
                    startBarrier.await(); // throws BrokenBarrierException if any of the child tasks could not be started
                } catch (InterruptedException e) {
                    log.error("run(): Child task " + getId() + " was interrupted");
                    Thread.currentThread().interrupt();
                } catch (BrokenBarrierException e) {
                    log.error("run(): Child task" + getId() + " aborted, barrier broken.");
                    throw new RuntimeException(
                            "Child task not started because another child task failed submitTask()");
                }

                if (log.isTraceEnabled()) {
                    log.trace("run(): Child task " + getId() + " is now running.");
                }
                child.run();
            }
        };

        if (null == threadManager.submitTask(wrapped, false)) {
            log.error("submitParallelTasks(): Failed to submit child " + child);

            // note: I wait for (submittedChildren-childFailures) because some 
            // child task may have already failed before reaching their run() method
            while (startBarrier.getNumberWaiting() != (submittedChildren - childFailures.get())) {
                log.info("submitParallelTasks(): Waiting for all child tasks to reach barrier ( "
                        + startBarrier.getNumberWaiting() + " waiting)");
                try {
                    Thread.sleep(500);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }

            startBarrier.reset(); // will cause all child threads waiting on this barrier to terminate
            return false;
        }
        submittedChildren++;
    }

    /*
     * All children are submitted and waiting at the barrier.
     */
    final boolean parentSubmitted = null != threadManager.submitTask("Control thread of " + parent.getId(),
            new Runnable() {

                @Override
                public void run() {
                    try {

                        while (true) {
                            try {
                                log.debug("run(): Parent task " + parent.getId()
                                        + " is waiting for it's children to start...");
                                startBarrier.await(5, TimeUnit.SECONDS);
                                break;
                            } catch (TimeoutException e) {
                                if (childFailures.get() != 0) {
                                    runFailureHookOnEDT(parent,
                                            childFailures.get() + " child tasks of parent task "
                                                    + parent.getId() + " failed to start.");
                                    return;
                                }
                            }
                        }
                    } catch (InterruptedException e) {
                        runFailureHookOnEDT(parent, "Parent task " + parent.getId()
                                + " was interrupted while waiting" + " for it's children to start.");
                        startBarrier.reset(); // let children fail.
                        Thread.currentThread().interrupt();
                        return;
                    } catch (BrokenBarrierException e) {
                        runFailureHookOnEDT(parent,
                                "Parent task " + parent.getId() + " failed to wait for it's children");
                        throw new RuntimeException("Internal error - task " + parent.getId()
                                + " failed to wait for it's children?");
                    }

                    log.debug("run(): Task " + parent.getId() + " is waiting for it's children to finish");
                    try {
                        childrenTerminated.await();
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                        runFailureHookOnEDT(parent, "Parent task " + parent.getId()
                                + " was interrupted while waiting for it's children");
                        return;
                    }

                    log.info("run(): All children of parent task " + parent.getId()
                            + " have finished ( success: " + childSuccesses.get() + " / failure: "
                            + childFailures.get() + ")");

                    if (childFailures.get() > 0) {
                        runFailureHookOnEDT(parent, childFailures.get() + " child tasks of parent "
                                + parent.getId() + " have FAILED.");
                        return;
                    }

                    if (null == threadManager.submitTask(parent, false)) {
                        runFailureHookOnEDT(parent, "Failed to submit parent task " + parent.getId());
                        return;
                    }

                }
            }, false);

    if (!parentSubmitted) {
        log.debug("submitParallelTasks(): Failed to submit parent task " + parent.getId()
                + " , terminating child tasks.");
        startBarrier.reset(); // aborts all child tasks with a BrokenBarrierException
    }

    return parentSubmitted;
}
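
Stripped of the UI plumbing, the bookkeeping this method relies on reduces to one AtomicInteger per outcome plus a CountDownLatch sized to the number of children; a sketch of that core pattern, with a hypothetical runChildren helper:

import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;

static boolean runChildren(List<Runnable> children) throws InterruptedException {
    final AtomicInteger successes = new AtomicInteger(0);
    final AtomicInteger failures = new AtomicInteger(0);
    final CountDownLatch done = new CountDownLatch(children.size());

    ExecutorService pool = Executors.newCachedThreadPool();
    for (Runnable child : children) {
        pool.execute(() -> {
            try {
                child.run();
                successes.incrementAndGet(); // each child reports exactly one outcome
            } catch (RuntimeException e) {
                failures.incrementAndGet();
            } finally {
                done.countDown(); // the caller unblocks only after every child reports
            }
        });
    }
    done.await();
    pool.shutdown();
    return failures.get() == 0; // proceed with the parent task only if all children succeeded
}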

From source file:com.alibaba.dubbo.demo.consumer.DemoAction.java

public void start() throws Exception {
    int threads = 100;

    final DescriptiveStatistics stats = new SynchronizedDescriptiveStatistics();

    DubboBenchmark.BenchmarkMessage msg = prepareArgs();
    final byte[] msgBytes = msg.toByteArray();

    int n = 1000000;
    final CountDownLatch latch = new CountDownLatch(n);

    ExecutorService es = Executors.newFixedThreadPool(threads);

    final AtomicInteger trans = new AtomicInteger(0);
    final AtomicInteger transOK = new AtomicInteger(0);

    long start = System.currentTimeMillis();
    for (int i = 0; i < n; i++) {
        es.submit(() -> {
            try {

                long t = System.currentTimeMillis();
                DubboBenchmark.BenchmarkMessage m = testSay(msgBytes);
                t = System.currentTimeMillis() - t;
                stats.addValue(t);

                trans.incrementAndGet();

                if (m != null && m.getField1().equals("OK")) {
                    transOK.incrementAndGet();
                }

            } catch (InterruptedException e) {
                e.printStackTrace();
            } finally {
                latch.countDown();
            }
        });
    }

    latch.await();

    start = System.currentTimeMillis() - start;

    System.out.printf("sent     requests    : %d\n", n);
    System.out.printf("received requests    : %d\n", trans.get());
    System.out.printf("received requests_OK : %d\n", transOK.get());
    System.out.printf("throughput  (TPS)    : %d\n", n * 1000 / start);

    System.out.printf("mean: %f\n", stats.getMean());
    System.out.printf("median: %f\n", stats.getPercentile(50));
    System.out.printf("max: %f\n", stats.getMax());
    System.out.printf("min: %f\n", stats.getMin());

    System.out.printf("99P: %f\n", stats.getPercentile(90));
}

From source file:com.streamsets.pipeline.stage.cloudstorage.destination.GoogleCloudStorageTarget.java

@Override
public void write(Batch batch) throws StageException {
    String pathExpression = GcsUtil.normalizePrefix(gcsTargetConfig.commonPrefix)
            + gcsTargetConfig.partitionTemplate;
    if (gcsTargetConfig.dataFormat == DataFormat.WHOLE_FILE) {
        handleWholeFileFormat(batch, elVars);
    } else {
        Multimap<String, Record> pathToRecordMap = ELUtils.partitionBatchByExpression(partitionEval, elVars,
                pathExpression, timeDriverElEval, elVars, gcsTargetConfig.timeDriverTemplate,
                Calendar.getInstance(TimeZone.getTimeZone(ZoneId.of(gcsTargetConfig.timeZoneID))), batch);

        pathToRecordMap.keySet().forEach(path -> {
            Collection<Record> records = pathToRecordMap.get(path);
            String fileName = GcsUtil.normalizePrefix(path) + gcsTargetConfig.fileNamePrefix + '_'
                    + UUID.randomUUID();
            if (StringUtils.isNotEmpty(gcsTargetConfig.fileNameSuffix)) {
                fileName = fileName + "." + gcsTargetConfig.fileNameSuffix;
            }
            try {
                ByteArrayOutputStream bOut = new ByteArrayOutputStream();
                OutputStream os = bOut;
                if (gcsTargetConfig.compress) {
                    fileName = fileName + ".gz";
                    os = new GZIPOutputStream(bOut);
                }
                BlobId blobId = BlobId.of(gcsTargetConfig.bucketTemplate, fileName);
                BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType(getContentType()).build();
                final AtomicInteger recordsWithoutErrors = new AtomicInteger(0);
                try (DataGenerator dg = gcsTargetConfig.dataGeneratorFormatConfig.getDataGeneratorFactory()
                        .getGenerator(os)) {
                    records.forEach(record -> {
                        try {
                            dg.write(record);
                            recordsWithoutErrors.incrementAndGet();
                        } catch (DataGeneratorException | IOException e) {
                            LOG.error("Error writing record {}. Reason {}", record.getHeader().getSourceId(),
                                    e);
                            getContext().toError(record, Errors.GCS_02, record.getHeader().getSourceId(), e);
                        }
                    });
                } catch (IOException e) {
                    LOG.error("Error happened when creating Output stream. Reason {}", e);
                    records.forEach(record -> getContext().toError(record, e));
                }

                try {
                    if (recordsWithoutErrors.get() > 0) {
                        Blob blob = storage.create(blobInfo, bOut.toByteArray());
                        GCSEvents.GCS_OBJECT_WRITTEN.create(getContext())
                                .with(GCSEvents.BUCKET, blob.getBucket())
                                .with(GCSEvents.OBJECT_KEY, blob.getName())
                                .with(GCSEvents.RECORD_COUNT, recordsWithoutErrors.longValue()).createAndSend();
                    }
                } catch (StorageException e) {
                    LOG.error("Error happened when writing to Output stream. Reason {}", e);
                    records.forEach(record -> getContext().toError(record, e));
                }
            } catch (IOException e) {
                LOG.error("Error happened when creating Output stream. Reason {}", e);
                records.forEach(record -> getContext().toError(record, e));
            }
        });
    }
}