Example usage for the java.util.concurrent.atomic.AtomicInteger(int initialValue) constructor

Introduction

This page collects real-world usage examples for the java.util.concurrent.atomic.AtomicInteger(int initialValue) constructor.

Prototype

public AtomicInteger(int initialValue) 

Document

Creates a new AtomicInteger with the given initial value.
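Before the real-world examples below, here is a minimal, self-contained sketch (hypothetical class name) showing the constructor together with a few common atomic operations:

import java.util.concurrent.atomic.AtomicInteger;

public class AtomicIntegerDemo {
    public static void main(String[] args) {
        // Create a counter whose initial value is 5.
        AtomicInteger counter = new AtomicInteger(5);

        counter.incrementAndGet();           // atomically adds 1 -> 6
        counter.addAndGet(10);               // atomically adds 10 -> 16
        int previous = counter.getAndSet(0); // returns 16; counter is now 0

        System.out.println(previous);        // prints 16
        System.out.println(counter.get());   // prints 0
    }
}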

Usage

From source file:com.streamsets.pipeline.stage.cloudstorage.destination.GoogleCloudStorageTarget.java
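In this example, an AtomicInteger created with an initial value of 0 counts the records written without errors. Because it is mutated inside a forEach lambda, a plain local int would not compile; the final count then decides whether a GCS_OBJECT_WRITTEN event is emitted.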

@Override
public void write(Batch batch) throws StageException {
    String pathExpression = GcsUtil.normalizePrefix(gcsTargetConfig.commonPrefix)
            + gcsTargetConfig.partitionTemplate;
    if (gcsTargetConfig.dataFormat == DataFormat.WHOLE_FILE) {
        handleWholeFileFormat(batch, elVars);
    } else {
        Multimap<String, Record> pathToRecordMap = ELUtils.partitionBatchByExpression(partitionEval, elVars,
                pathExpression, timeDriverElEval, elVars, gcsTargetConfig.timeDriverTemplate,
                Calendar.getInstance(TimeZone.getTimeZone(ZoneId.of(gcsTargetConfig.timeZoneID))), batch);

        pathToRecordMap.keySet().forEach(path -> {
            Collection<Record> records = pathToRecordMap.get(path);
            String fileName = GcsUtil.normalizePrefix(path) + gcsTargetConfig.fileNamePrefix + '_'
                    + UUID.randomUUID();
            if (StringUtils.isNotEmpty(gcsTargetConfig.fileNameSuffix)) {
                fileName = fileName + "." + gcsTargetConfig.fileNameSuffix;
            }
            try {
                ByteArrayOutputStream bOut = new ByteArrayOutputStream();
                OutputStream os = bOut;
                if (gcsTargetConfig.compress) {
                    fileName = fileName + ".gz";
                    os = new GZIPOutputStream(bOut);
                }
                BlobId blobId = BlobId.of(gcsTargetConfig.bucketTemplate, fileName);
                BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType(getContentType()).build();
                final AtomicInteger recordsWithoutErrors = new AtomicInteger(0);
                try (DataGenerator dg = gcsTargetConfig.dataGeneratorFormatConfig.getDataGeneratorFactory()
                        .getGenerator(os)) {
                    records.forEach(record -> {
                        try {
                            dg.write(record);
                            recordsWithoutErrors.incrementAndGet();
                        } catch (DataGeneratorException | IOException e) {
                            LOG.error("Error writing record {}. Reason {}", record.getHeader().getSourceId(),
                                    e);
                            getContext().toError(record, Errors.GCS_02, record.getHeader().getSourceId(), e);
                        }
                    });
                } catch (IOException e) {
                    LOG.error("Error happened when creating Output stream. Reason {}", e);
                    records.forEach(record -> getContext().toError(record, e));
                }

                try {
                    if (recordsWithoutErrors.get() > 0) {
                        Blob blob = storage.create(blobInfo, bOut.toByteArray());
                        GCSEvents.GCS_OBJECT_WRITTEN.create(getContext())
                                .with(GCSEvents.BUCKET, blob.getBucket())
                                .with(GCSEvents.OBJECT_KEY, blob.getName())
                                .with(GCSEvents.RECORD_COUNT, recordsWithoutErrors.longValue()).createAndSend();
                    }
                } catch (StorageException e) {
                    LOG.error("Error happened when writing to Output stream. Reason {}", e);
                    records.forEach(record -> getContext().toError(record, e));
                }
            } catch (IOException e) {
                LOG.error("Error happened when creating Output stream. Reason {}", e);
                records.forEach(record -> getContext().toError(record, e));
            }
        });
    }
}

From source file:com.twitter.distributedlog.config.TestConfigurationSubscription.java
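Here an AtomicInteger counts configuration-change callbacks so the polling loops below can detect when the listener has fired, even though the listener deliberately throws after each update.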

@Test(timeout = 60000)
public void testExceptionInConfigLoad() throws Exception {
    PropertiesWriter writer = new PropertiesWriter();
    writer.setProperty("prop1", "1");
    writer.save();

    DeterministicScheduler mockScheduler = new DeterministicScheduler();
    FileConfigurationBuilder builder = new PropertiesConfigurationBuilder(writer.getFile().toURI().toURL());
    ConcurrentConstConfiguration conf = new ConcurrentConstConfiguration(new DistributedLogConfiguration());
    List<FileConfigurationBuilder> fileConfigBuilders = Lists.newArrayList(builder);
    ConfigurationSubscription confSub = new ConfigurationSubscription(conf, fileConfigBuilders, mockScheduler,
            100, TimeUnit.MILLISECONDS);

    final AtomicInteger count = new AtomicInteger(1);
    conf.addConfigurationListener(new ConfigurationListener() {
        @Override
        public void configurationChanged(ConfigurationEvent event) {
            LOG.info("config changed {}", event);
            // Throw after so we actually see the update anyway.
            if (!event.isBeforeUpdate()) {
                count.getAndIncrement();
                throw new RuntimeException("config listener threw an exception");
            }
        }
    });

    int i = 0;
    int initial = 0;
    while (count.get() == initial) {
        writer.setProperty("prop1", Integer.toString(i++));
        writer.save();
        mockScheduler.tick(100, TimeUnit.MILLISECONDS);
    }

    initial = count.get();
    while (count.get() == initial) {
        writer.setProperty("prop1", Integer.toString(i++));
        writer.save();
        mockScheduler.tick(100, TimeUnit.MILLISECONDS);
    }
}

From source file:fi.luontola.cqrshotel.framework.EventStoreContract.java
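In this test, an AtomicInteger acts as a thread-safe task-ID sequence: each parallel writer calls incrementAndGet() to obtain a unique id for its batch.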

@Test
public void concurrent_writers_to_same_stream() throws Exception {
    final int BATCH_SIZE = 10;
    final int ITERATIONS = 100;

    UUID streamId = UUID.randomUUID();
    long initialPosition = eventStore.getCurrentPosition();
    AtomicInteger taskIdSeq = new AtomicInteger(0);

    repeatInParallel(ITERATIONS, () -> {
        int taskId = taskIdSeq.incrementAndGet();
        List<Event> batch = createBatch(BATCH_SIZE, taskId);

        while (true) {
            try {
                int version1 = eventStore.getCurrentVersion(streamId);
                eventStore.saveEvents(streamId, batch, version1);
                return;
            } catch (OptimisticLockingException e) {
                // retry
            }
        }
    }, createRuntimeInvariantChecker(BATCH_SIZE));

    List<Event> streamEvents = eventStore.getEventsForStream(streamId);
    assertThat("number of saved events", streamEvents.size(), is(BATCH_SIZE * ITERATIONS));
    assertAtomicBatches(BATCH_SIZE, streamEvents);
    List<Event> allEvents = eventStore.getAllEvents(initialPosition);
    assertThat("global order should equal stream order", allEvents, contains(streamEvents.toArray()));
}

From source file:jurls.core.becca.DefaultZiptie.java
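Here an AtomicInteger (paired with an AtomicDouble) accumulates the element count from inside a matrix-visitor callback, where captured local variables must be effectively final.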

public double getRowGeneralizedMean(RealMatrix c, Function<Integer, Double> rowEntryMultiplier, double exponent,
        int rowStart, int rowEnd, int column) {
    AtomicDouble s = new AtomicDouble(0);
    AtomicInteger n = new AtomicInteger(0);
    c.walkInOptimizedOrder(new DefaultRealMatrixPreservingVisitor() {
        @Override
        public void visit(int row, int column, double value) {
            double a = Math.pow(value, exponent);
            double b = rowEntryMultiplier.apply(row);
            s.addAndGet(a * b);
            n.incrementAndGet();
        }
    }, rowStart, rowEnd, column, column);

    return (1.0 / n.doubleValue()) * Math.pow(s.doubleValue(), 1.0 / exponent);
}

From source file:com.nextdoor.bender.operation.conditional.ConditionalOperation.java
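In this operation, the AtomicInteger is initialized to the number of condition output streams and shared by the consumer threads as a countdown, so the last thread to finish can close the output Queue.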

public Stream<InternalEvent> getOutputStream(Stream<InternalEvent> input) {
    /*
     * outputStreams keeps track of the output Stream of each Condition.
     */
    List<Stream<InternalEvent>> outputStreams = new ArrayList<Stream<InternalEvent>>(
            this.conditionsAndProcs.size());

    /*
     * From a list of operation configurations in each condition construct queues and streams.
     */
    this.filtersAndQueues = new ArrayList<Pair<FilterOperation, Queue<InternalEvent>>>(
            this.conditionsAndProcs.size());
    for (Pair<FilterOperation, List<OperationProcessor>> filterAndProcs : this.conditionsAndProcs) {

        FilterOperation filter = filterAndProcs.getLeft();
        List<OperationProcessor> procs = filterAndProcs.getRight();

        /*
         * Construct a Queue for each conditional. This is the input to each Condition.
         */
        Queue<InternalEvent> queue = new Queue<InternalEvent>(
                new LinkedBlockingQueue<InternalEvent>(procs.size()));

        this.filtersAndQueues.add(new ImmutablePair<FilterOperation, Queue<InternalEvent>>(filter, queue));

        /*
         * Connect the condition's input Queue with operations. Each operation returns a stream with its
         * operation concatenated on.
         */
        Stream<InternalEvent> conditionInput = queue.jdkStream();
        for (OperationProcessor proc : procs) {
            conditionInput = proc.perform(conditionInput);
        }

        /*
         * Last input is the output.
         */
        outputStreams.add(conditionInput);
    }

    /*
     * Condition Consumer Threads
     * 
     * Combine each condition's output stream and write to the output Queue. When all data is consumed
     * the last condition closes the output Queue.
     */
    Queue<InternalEvent> outputQueue = new Queue<InternalEvent>(
            new LinkedBlockingQueue<InternalEvent>(this.conditionsAndProcs.size()));
    AtomicInteger lock = new AtomicInteger(outputStreams.size());

    outputStreams.forEach(stream -> {
        this.es.execute(new StreamToQueue(stream, outputQueue, lock));
    });

    /*
     * Consume input Stream in a thread and publish to each condition's Queue.
     */
    new Thread(new Runnable() {
        @Override
        public void run() {
            input.forEach(ievent -> {
                boolean matches = false;

                for (Pair<FilterOperation, Queue<InternalEvent>> filterAndQueue : filtersAndQueues) {
                    FilterOperation filter = filterAndQueue.getLeft();

                    /*
                     * If event passes the filter offer event to queue.
                     */
                    if (filter.test(ievent)) {
                        filterAndQueue.getRight().offer(ievent);
                        matches = true;
                        break;
                    }
                }

                /*
                 * Send to output queue if no case matches
                 */
                if (!matches && !filterNonMatch) {
                    outputQueue.offer(ievent);
                }
            });

            /*
             * Close queues when source queue is consumed.
             */
            for (Pair<FilterOperation, Queue<InternalEvent>> filterAndQueue : filtersAndQueues) {
                filterAndQueue.getRight().close();
            }
        }
    }).start();

    return outputQueue.jdkStream();
}

From source file:com.ibm.crail.tools.CrailFsck.java
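A classic per-key counter pattern: the map lazily creates one AtomicInteger per host, starting at 0, and increments it on every call.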

private void incStats(HashMap<String, AtomicInteger> stats, String host) {
    if (!stats.containsKey(host)) {
        stats.put(host, new AtomicInteger(0));
    }
    stats.get(host).incrementAndGet();
}

From source file:com.neoteric.starter.metrics.report.elastic.ElasticsearchReporter.java
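Here an AtomicInteger tracks how many bulk entries have been written; passing it into the connection helper lets the count survive across metric types and connection re-creation.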

@Override
public void report(SortedMap<String, Gauge> gauges, SortedMap<String, Counter> counters,
        SortedMap<String, Histogram> histograms, SortedMap<String, Meter> meters,
        SortedMap<String, Timer> timers) {

    // nothing to do if we don't have any metrics to report
    if (gauges.isEmpty() && counters.isEmpty() && histograms.isEmpty() && meters.isEmpty()
            && timers.isEmpty()) {
        LOGGER.info("All metrics empty, nothing to report");
        return;
    }

    if (!checkedForIndexTemplate) {
        checkForIndexTemplate();
    }
    final long timestamp = clock.getTime() / 1000;

    currentIndexName = index;
    if (indexDateFormat != null) {
        currentIndexName += "-" + indexDateFormat.format(new Date(timestamp * 1000));
    }

    try {
        HttpURLConnection connection;
        try {
            connection = openConnection("/_bulk", "POST");
        } catch (ElasticsearchConnectionException e) {
            LOGGER.error("Could not connect to any configured elasticsearch instances: {}",
                    Arrays.asList(hosts), e);
            return;
        }

        List<JsonMetric> percolationMetrics = new ArrayList<>();
        AtomicInteger entriesWritten = new AtomicInteger(0);

        for (Map.Entry<String, Gauge> entry : gauges.entrySet()) {
            if (entry.getValue().getValue() != null) {
                JsonMetric jsonMetric = new JsonGauge(name(prefix, entry.getKey()), timestamp,
                        entry.getValue());
                connection = writeJsonMetricAndRecreateConnectionIfNeeded(jsonMetric, connection,
                        entriesWritten);
                addJsonMetricToPercolationIfMatching(jsonMetric, percolationMetrics);
            }
        }

        for (Map.Entry<String, Counter> entry : counters.entrySet()) {
            JsonCounter jsonMetric = new JsonCounter(name(prefix, entry.getKey()), timestamp, entry.getValue());
            connection = writeJsonMetricAndRecreateConnectionIfNeeded(jsonMetric, connection, entriesWritten);
            addJsonMetricToPercolationIfMatching(jsonMetric, percolationMetrics);
        }

        for (Map.Entry<String, Histogram> entry : histograms.entrySet()) {
            JsonHistogram jsonMetric = new JsonHistogram(name(prefix, entry.getKey()), timestamp,
                    entry.getValue());
            connection = writeJsonMetricAndRecreateConnectionIfNeeded(jsonMetric, connection, entriesWritten);
            addJsonMetricToPercolationIfMatching(jsonMetric, percolationMetrics);
        }

        for (Map.Entry<String, Meter> entry : meters.entrySet()) {
            JsonMeter jsonMetric = new JsonMeter(name(prefix, entry.getKey()), timestamp, entry.getValue());
            connection = writeJsonMetricAndRecreateConnectionIfNeeded(jsonMetric, connection, entriesWritten);
            addJsonMetricToPercolationIfMatching(jsonMetric, percolationMetrics);
        }

        for (Map.Entry<String, Timer> entry : timers.entrySet()) {
            JsonTimer jsonMetric = new JsonTimer(name(prefix, entry.getKey()), timestamp, entry.getValue());
            connection = writeJsonMetricAndRecreateConnectionIfNeeded(jsonMetric, connection, entriesWritten);
            addJsonMetricToPercolationIfMatching(jsonMetric, percolationMetrics);
        }

        closeConnection(connection);

        // execute the notifier impl, in case percolation found matches
        if (percolationMetrics.size() > 0 && notifier != null) {
            for (JsonMetric jsonMetric : percolationMetrics) {
                List<String> matches = getPercolationMatches(jsonMetric);
                for (String match : matches) {
                    notifier.notify(jsonMetric, match);
                }
            }
        }
        // catch the exception to make sure we do not interrupt the live application
    } catch (ElasticsearchConnectionException e) {
        LOGGER.error("Couldn't report to elasticsearch server", e);
    } catch (IOException e) {
        LOGGER.error("Couldn't report to elasticsearch server", e);
    }
}

From source file:com.netflix.curator.framework.recipes.locks.TestInterProcessSemaphoreCluster.java
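In this cluster test, an AtomicInteger counts semaphore-protected operations across client threads; the polling loops read it to detect when work has stalled and when it resumes.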

@Test
public void testCluster() throws Exception {
    final int QTY = 20;
    final int OPERATION_TIME_MS = 1000;
    final String PATH = "/foo/bar/lock";

    ExecutorService executorService = Executors.newFixedThreadPool(QTY);
    ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<Void>(executorService);
    final Timing timing = new Timing();
    TestingCluster cluster = new TestingCluster(3);
    List<SemaphoreClient> semaphoreClients = Lists.newArrayList();
    try {
        cluster.start();

        final AtomicInteger opCount = new AtomicInteger(0);
        for (int i = 0; i < QTY; ++i) {
            SemaphoreClient semaphoreClient = new SemaphoreClient(cluster.getConnectString(), PATH,
                    new Callable<Void>() {
                        @Override
                        public Void call() throws Exception {
                            opCount.incrementAndGet();
                            Thread.sleep(OPERATION_TIME_MS);
                            return null;
                        }
                    });
            completionService.submit(semaphoreClient);
            semaphoreClients.add(semaphoreClient);
        }

        timing.forWaiting().sleepABit();

        Assert.assertNotNull(SemaphoreClient.getActiveClient());

        final CountDownLatch latch = new CountDownLatch(1);
        CuratorFramework client = CuratorFrameworkFactory.newClient(cluster.getConnectString(),
                timing.session(), timing.connection(), new ExponentialBackoffRetry(100, 3));
        ConnectionStateListener listener = new ConnectionStateListener() {
            @Override
            public void stateChanged(CuratorFramework client, ConnectionState newState) {
                if (newState == ConnectionState.LOST) {
                    latch.countDown();
                }
            }
        };
        client.getConnectionStateListenable().addListener(listener);
        client.start();
        try {
            client.getZookeeperClient().blockUntilConnectedOrTimedOut();

            cluster.stop();

            latch.await();
        } finally {
            IOUtils.closeQuietly(client);
        }

        long startTicks = System.currentTimeMillis();
        for (;;) {
            int thisOpCount = opCount.get();
            Thread.sleep(2 * OPERATION_TIME_MS);
            if (thisOpCount == opCount.get()) {
                break; // checking that the op count isn't increasing
            }
            Assert.assertTrue((System.currentTimeMillis() - startTicks) < timing.forWaiting().milliseconds());
        }

        int thisOpCount = opCount.get();

        Iterator<InstanceSpec> iterator = cluster.getInstances().iterator();
        cluster = new TestingCluster(iterator.next(), iterator.next());
        cluster.start();
        timing.forWaiting().sleepABit();

        startTicks = System.currentTimeMillis();
        for (;;) {
            Thread.sleep(2 * OPERATION_TIME_MS);
            if (opCount.get() > thisOpCount) {
                break; // checking that semaphore has started working again
            }
            Assert.assertTrue((System.currentTimeMillis() - startTicks) < timing.forWaiting().milliseconds());
        }
    } finally {
        for (SemaphoreClient semaphoreClient : semaphoreClients) {
            IOUtils.closeQuietly(semaphoreClient);
        }
        IOUtils.closeQuietly(cluster);
        executorService.shutdownNow();
    }
}

From source file:org.dasein.cloud.azure.tests.network.AzureVlanSupportTest.java
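Here an AtomicInteger records how many PUT requests the mocked HTTP client receives, letting the test assert that the network configuration is written exactly once.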

@Test
public void removeVlanShouldPostCorrectRequest() throws CloudException, InternalException {
    final AtomicInteger putCount = new AtomicInteger(0);
    new MockUp<CloseableHttpClient>() {
        @Mock
        public CloseableHttpResponse execute(Invocation inv, HttpUriRequest request) throws IOException {
            if (request.getMethod().equals("GET")
                    && VIRTUAL_NETWORK_SITES_URL.equals(request.getURI().toString())) {
                DaseinObjectToXmlEntity<VirtualNetworkSitesModel> daseinEntity = new DaseinObjectToXmlEntity<VirtualNetworkSitesModel>(
                        createVirtualNetworkSitesModel(ID, NAME, REGION, CIDR, "Updating"));
                return getHttpResponseMock(getStatusLineMock(HttpServletResponse.SC_OK), daseinEntity,
                        new Header[] { new BasicHeader("x-ms-request-id", UUID.randomUUID().toString()) });
            } else if ("GET".equals(request.getMethod())
                    && NETWORK_CONFIG_URL.equals(request.getURI().toString())) {
                DaseinObjectToXmlEntity<NetworkConfigurationModel> daseinEntity = new DaseinObjectToXmlEntity<NetworkConfigurationModel>(
                        createNetworkConfigurationModel(NAME, REGION, CIDR));
                return getHttpResponseMock(getStatusLineMock(HttpServletResponse.SC_OK), daseinEntity,
                        new Header[] { new BasicHeader("x-ms-request-id", UUID.randomUUID().toString()) });
            } else if ("PUT".equals(request.getMethod())) {
                putCount.incrementAndGet();
                NetworkConfigurationModel networkConfigurationModel = createNetworkConfigurationModel(null,
                        null, null);
                assertPut(request, NETWORK_CONFIG_URL,
                        new Header[] { new BasicHeader("x-ms-version", "2012-03-01") },
                        networkConfigurationModel);
                return getHttpResponseMock(getStatusLineMock(HttpServletResponse.SC_OK), null,
                        new Header[] { new BasicHeader("x-ms-request-id", UUID.randomUUID().toString()) });
            } else {
                throw new IOException("Request is not mocked");
            }

        }
    };
    vlanSupport.removeVlan(ID);
    assertEquals("removeVlan PUT network config should perform only 1 times", 1, putCount.get());
}

From source file:de.fosd.jdime.artifact.file.FileArtifact.java
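This constructor shows a different use: new AtomicInteger(0)::getAndIncrement is passed as a supplier of sequential numbers (a method reference bound to the counter), which the delegated constructor presumably uses to number the artifacts.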

/**
 * Constructs a new <code>FileArtifact</code> representing the given <code>File</code>.
 *
 * @param revision
 *         the <code>Revision</code> the artifact belongs to
 * @param file
 *         the <code>File</code> in which the artifact is stored
 * @param recursive
 *         If <code>file</code> is a directory then <code>FileArtifact</code>s representing its contents will be
 *         added as children to this <code>FileArtifact</code>.
 * @throws IllegalArgumentException
 *         if {@code file} does not exist
 */
public FileArtifact(Revision revision, File file, boolean recursive) {
    this(revision, new AtomicInteger(0)::getAndIncrement, file, recursive);
}