Example usage for java.util.concurrent.atomic AtomicInteger incrementAndGet

Introduction

On this page you can find example usage of java.util.concurrent.atomic AtomicInteger incrementAndGet.

Prototype

public final int incrementAndGet() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd.
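
As a quick, self-contained sketch (not taken from any of the projects listed below), incrementAndGet() is typically used as a thread-safe counter shared between concurrent tasks; the class name, thread count, and task count here are illustrative only:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class IncrementAndGetExample {
    public static void main(String[] args) throws InterruptedException {
        // Shared counter; incrementAndGet() atomically adds one and returns the updated value.
        final AtomicInteger counter = new AtomicInteger(0);

        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 1000; i++) {
            pool.submit(() -> {
                // Each task observes a distinct value, even under concurrent access.
                int ticket = counter.incrementAndGet();
                if (ticket == 1000) {
                    System.out.println("Last increment observed");
                }
            });
        }
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);

        System.out.println("Final count: " + counter.get()); // 1000
    }
}

Because incrementAndGet() returns the new value in the same atomic step, it avoids the check-then-act race that incrementing a plain int from multiple threads would introduce.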

Usage

From source file:io.microprofile.showcase.speaker.persistence.SpeakerDAO.java

@PostConstruct
private void initStore() {
    Logger.getLogger(SpeakerDAO.class.getName()).log(Level.INFO, "Initialise speaker DAO from bootstrap data");

    final Set<Speaker> featured = new HashSet<>(0);

    for (final Venue venue : this.venues) {
        featured.addAll(venue.getSpeakers());
    }

    final AtomicInteger idc = new AtomicInteger(0);

    this.bootstrapData.getSpeaker().forEach(bootstrap -> {

        final int intId = Integer.valueOf(bootstrap.getId());

        if (intId > idc.get()) {
            idc.set(intId);
        }

        final String id = String.valueOf(intId);
        final String[] names = bootstrap.getFullName().split(" ");
        final Speaker sp = new Speaker();
        sp.setId(id);
        sp.setNameFirst(names[0].trim());
        sp.setNameLast(names[1].trim());
        sp.setOrganization(bootstrap.getCompany());
        sp.setBiography(bootstrap.getJobTitle());

        sp.setPicture("assets/images/unknown.jpg");

        appendFeatured(featured, sp);

        this.speakers.put(id, sp);
    });

    for (final Speaker fs : featured) {

        boolean found = false;

        for (final Speaker sp : this.speakers.values()) {
            if (fs.getNameFirst().toLowerCase().equals(sp.getNameFirst().toLowerCase())
                    && fs.getNameLast().toLowerCase().equals(sp.getNameLast().toLowerCase())) {
                found = true;
                break;
            }
        }

        if (!found) {
            fs.setId(String.valueOf(idc.incrementAndGet()));
            this.speakers.put(fs.getId(), fs);
        }
    }

    //TODO - Merge back to source json
}

From source file:org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.java

/**
 * Test writing edits into an HRegion, closing it, splitting logs, opening
 * Region again.  Verify seqids.
 * @throws IOException
 * @throws IllegalAccessException
 * @throws NoSuchFieldException
 * @throws IllegalArgumentException
 * @throws SecurityException
 */
@Test
public void testReplayEditsWrittenViaHRegion() throws IOException, SecurityException, IllegalArgumentException,
        NoSuchFieldException, IllegalAccessException, InterruptedException {
    final TableName tableName = TableName.valueOf("testReplayEditsWrittenViaHRegion");
    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = FSUtils.getTableDir(this.hbaseRootDir, tableName);
    deleteDir(basedir);
    final byte[] rowName = tableName.getName();
    final int countPerFamily = 10;
    final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region3 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
    HBaseTestingUtility.closeRegionAndWAL(region3);
    // Write countPerFamily edits into the three families.  Do a flush on one
    // of the families during the load of edits so its seqid is not same as
    // others to test we do right thing when different seqids.
    WAL wal = createWAL(this.conf, hbaseRootDir, logName);
    HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal);
    long seqid = region.getOpenSeqNum();
    boolean first = true;
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x");
        if (first) {
            // If first, so we have at least one family w/ different seqid to rest.
            region.flush(true);
            first = false;
        }
    }
    // Now assert edits made it in.
    final Get g = new Get(rowName);
    Result result = region.get(g);
    assertEquals(countPerFamily * htd.getFamilies().size(), result.size());
    // Now close the region (without flush), split the log, reopen the region and assert that
    // replay of log has the correct effect, that our seqids are calculated correctly so
    // all edits in logs are seen as 'stale'/old.
    region.close(true);
    wal.shutdown();
    runWALSplit(this.conf);
    WAL wal2 = createWAL(this.conf, hbaseRootDir, logName);
    HRegion region2 = HRegion.openHRegion(conf, this.fs, hbaseRootDir, hri, htd, wal2);
    long seqid2 = region2.getOpenSeqNum();
    assertTrue(seqid + result.size() < seqid2);
    final Result result1b = region2.get(g);
    assertEquals(result.size(), result1b.size());

    // Next test.  Add more edits, then 'crash' this region by stealing its wal
    // out from under it and assert that replay of the log adds the edits back
    // correctly when region is opened again.
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region2, "y");
    }
    // Get count of edits.
    final Result result2 = region2.get(g);
    assertEquals(2 * result.size(), result2.size());
    wal2.sync();
    final Configuration newConf = HBaseConfiguration.create(this.conf);
    User user = HBaseTestingUtility.getDifferentUser(newConf, tableName.getNameAsString());
    user.runAs(new PrivilegedExceptionAction() {
        @Override
        public Object run() throws Exception {
            runWALSplit(newConf);
            FileSystem newFS = FileSystem.get(newConf);
            // Make a new wal for new region open.
            WAL wal3 = createWAL(newConf, hbaseRootDir, logName);
            final AtomicInteger countOfRestoredEdits = new AtomicInteger(0);
            HRegion region3 = new HRegion(basedir, wal3, newFS, newConf, hri, htd, null) {
                @Override
                protected boolean restoreEdit(Store s, Cell cell) {
                    boolean b = super.restoreEdit(s, cell);
                    countOfRestoredEdits.incrementAndGet();
                    return b;
                }
            };
            long seqid3 = region3.initialize();
            Result result3 = region3.get(g);
            // Assert that count of cells is same as before crash.
            assertEquals(result2.size(), result3.size());
            assertEquals(htd.getFamilies().size() * countPerFamily, countOfRestoredEdits.get());

            // I can't close wal1.  Its been appropriated when we split.
            region3.close();
            wal3.close();
            return null;
        }
    });
}

From source file:com.couchbase.client.core.endpoint.query.QueryHandlerTest.java

@Test
public void shouldFireKeepAlive() throws Exception {
    final AtomicInteger keepAliveEventCounter = new AtomicInteger();
    final AtomicReference<ChannelHandlerContext> ctxRef = new AtomicReference<>();

    QueryHandler testHandler = new QueryHandler(endpoint, responseRingBuffer, queue, false) {
        @Override
        public void channelRegistered(ChannelHandlerContext ctx) throws Exception {
            super.channelRegistered(ctx);
            ctxRef.compareAndSet(null, ctx);
        }

        @Override
        protected void onKeepAliveFired(ChannelHandlerContext ctx, CouchbaseRequest keepAliveRequest) {
            assertEquals(1, keepAliveEventCounter.incrementAndGet());
        }

        @Override
        protected void onKeepAliveResponse(ChannelHandlerContext ctx, CouchbaseResponse keepAliveResponse) {
            assertEquals(2, keepAliveEventCounter.incrementAndGet());
        }
    };
    EmbeddedChannel channel = new EmbeddedChannel(testHandler);

    //test idle event triggers a query keepAlive request and hook is called
    testHandler.userEventTriggered(ctxRef.get(), IdleStateEvent.FIRST_ALL_IDLE_STATE_EVENT);

    assertEquals(1, keepAliveEventCounter.get());
    assertTrue(queue.peek() instanceof QueryHandler.KeepAliveRequest);
    QueryHandler.KeepAliveRequest keepAliveRequest = (QueryHandler.KeepAliveRequest) queue.peek();

    //test responding to the request with http response is interpreted into a KeepAliveResponse and hook is called
    HttpResponse response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.NOT_FOUND);
    LastHttpContent responseEnd = new DefaultLastHttpContent();
    channel.writeInbound(response, responseEnd);
    QueryHandler.KeepAliveResponse keepAliveResponse = keepAliveRequest.observable()
            .cast(QueryHandler.KeepAliveResponse.class).timeout(1, TimeUnit.SECONDS).toBlocking().single();

    ReferenceCountUtil.releaseLater(response);
    ReferenceCountUtil.releaseLater(responseEnd);

    assertEquals(2, keepAliveEventCounter.get());
    assertEquals(ResponseStatus.NOT_EXISTS, keepAliveResponse.status());
}

From source file:org.apache.hadoop.hbase.regionserver.wal.TestWALReplay.java

/**
 * Create an HRegion with the result of a HLog split and test we only see the
 * good edits
 * @throws Exception
 */
@Test
public void testReplayEditsWrittenIntoWAL() throws Exception {
    final TableName tableName = TableName.valueOf("testReplayEditsWrittenIntoWAL");
    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = FSUtils.getTableDir(hbaseRootDir, tableName);
    deleteDir(basedir);

    final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region2 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
    HRegion.closeHRegion(region2);
    final HLog wal = createWAL(this.conf);
    final byte[] rowName = tableName.getName();
    final byte[] regionName = hri.getEncodedNameAsBytes();
    final AtomicLong sequenceId = new AtomicLong(1);

    // Add 1k to each family.
    final int countPerFamily = 1000;
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee, wal, htd, sequenceId);
    }

    // Add a cache flush, shouldn't have any effect
    wal.startCacheFlush(regionName);
    wal.completeCacheFlush(regionName);

    // Add an edit to another family, should be skipped.
    WALEdit edit = new WALEdit();
    long now = ee.currentTimeMillis();
    edit.add(new KeyValue(rowName, Bytes.toBytes("another family"), rowName, now, rowName));
    wal.append(hri, tableName, edit, now, htd, sequenceId);

    // Delete the c family to verify deletes make it over.
    edit = new WALEdit();
    now = ee.currentTimeMillis();
    edit.add(new KeyValue(rowName, Bytes.toBytes("c"), null, now, KeyValue.Type.DeleteFamily));
    wal.append(hri, tableName, edit, now, htd, sequenceId);

    // Sync.
    wal.sync();
    // Set down maximum recovery so we dfsclient doesn't linger retrying something
    // long gone.
    HBaseTestingUtility.setMaxRecoveryErrorCount(((FSHLog) wal).getOutputStream(), 1);
    // Make a new conf and a new fs for the splitter to run on so we can take
    // over old wal.
    final Configuration newConf = HBaseConfiguration.create(this.conf);
    User user = HBaseTestingUtility.getDifferentUser(newConf, ".replay.wal.secondtime");
    user.runAs(new PrivilegedExceptionAction() {
        public Object run() throws Exception {
            runWALSplit(newConf);
            FileSystem newFS = FileSystem.get(newConf);
            // 100k seems to make for about 4 flushes during HRegion#initialize.
            newConf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 100);
            // Make a new wal for new region.
            HLog newWal = createWAL(newConf);
            final AtomicInteger flushcount = new AtomicInteger(0);
            try {
                final HRegion region = new HRegion(basedir, newWal, newFS, newConf, hri, htd, null) {
                    protected FlushResult internalFlushcache(final HLog wal, final long myseqid,
                            MonitoredTask status) throws IOException {
                        LOG.info("InternalFlushCache Invoked");
                        FlushResult fs = super.internalFlushcache(wal, myseqid,
                                Mockito.mock(MonitoredTask.class));
                        flushcount.incrementAndGet();
                        return fs;
                    };
                };
                long seqid = region.initialize();
                // We flushed during init.
                assertTrue("Flushcount=" + flushcount.get(), flushcount.get() > 0);
                assertTrue(seqid - 1 == sequenceId.get());

                Get get = new Get(rowName);
                Result result = region.get(get);
                // Make sure we only see the good edits
                assertEquals(countPerFamily * (htd.getFamilies().size() - 1), result.size());
                region.close();
            } finally {
                newWal.closeAndDelete();
            }
            return null;
        }
    });
}

From source file:ome.server.utests.MTPixelDataTest.java

public void testBasic() throws Exception {

    final AtomicInteger pixelsId = new AtomicInteger();
    final int numThreads = 4;

    // MT items
    ExecutorService threads = Executors.newFixedThreadPool(numThreads);

    // nio mocks
    Mock boMock = mock(BackOff.class);
    BackOff backOff = (BackOff) boMock.proxy();
    Mock fprMock = mock(FilePathResolver.class);
    FilePathResolver resolver = (FilePathResolver) fprMock.proxy();
    fprMock.expects(atLeastOnce()).method("getOriginalFilePath").will(returnValue(tiny()));
    fprMock.expects(atLeastOnce()).method("getPixelsParams").will(returnValue(new HashMap<String, String>()));

    // nio settings
    String path = dir(uuid);
    TileSizes tileSizes = new ConfiguredTileSizes(5, 5, 10, 10);
    PixelsService service = new PixelsService(path, resolver, backOff, tileSizes);

    // session mocks
    Mock mgrMock = mock(SessionManager.class);
    Mock sqlMock = mock(SqlAction.class);
    SessionManager mgr = (SessionManager) mgrMock.proxy();
    Executor ex = new DummyExecutor(null, null, threads);
    SqlAction sql = (SqlAction) sqlMock.proxy();
    sqlMock.expects(atLeastOnce()).method("setStatsInfo").will(returnValue(1L));

    // pixeldata
    PersistentEventLogLoader loader = new PersistentEventLogLoader("REPO", numThreads) {
        @Override
        protected EventLog query() {
            long id = (long) pixelsId.incrementAndGet();
            EventLog log = new EventLog();
            log.setEntityId(id);
            return log;
        }
    };

    PixelDataHandler handler = new PixelDataHandler(loader, service) {
        @Override
        protected Pixels getPixels(Long id, ServiceFactory sf) {
            Pixels pix = new Pixels(id, true);
            pix.setSizeX(20);
            pix.setSizeY(20);
            pix.setSizeZ(5);
            pix.setSizeC(1);
            pix.setSizeT(6);
            pix.setDimensionOrder(new DimensionOrder("XYZCT"));
            pix.setPixelsType(new PixelsType("int16"));
            pix.addChannel(new Channel());
            return pix;
        }
    };
    handler.setSqlAction(sql);

    PixelDataThread thread = new PixelDataThread(true, mgr, ex, handler, new Principal("test"), uuid,
            numThreads) {
        @Override
        protected void onExecutionException(ExecutionException ee) {
            Throwable t = ee.getCause();
            if (t instanceof RuntimeException) {
                throw (RuntimeException) t;
            } else {
                throw new RuntimeException(t);
            }
        }
    };

    // test
    thread.doRun();
}

From source file:org.apache.nifi.processors.standard.ForkRecord.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {

    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final List<RecordPath> recordPaths = new ArrayList<>();
    Map<PropertyDescriptor, String> processorProperties = context.getProperties();
    for (final Map.Entry<PropertyDescriptor, String> entry : processorProperties.entrySet()) {
        PropertyDescriptor property = entry.getKey();
        if (property.isDynamic() && property.isExpressionLanguageSupported()) {
            String path = context.getProperty(property).evaluateAttributeExpressions(flowFile).getValue();
            if (StringUtils.isNotBlank(path)) {
                recordPaths.add(recordPathCache.getCompiled(
                        context.getProperty(property).evaluateAttributeExpressions(flowFile).getValue()));
            }
        }
    }

    final RecordReaderFactory readerFactory = context.getProperty(RECORD_READER)
            .asControllerService(RecordReaderFactory.class);
    final RecordSetWriterFactory writerFactory = context.getProperty(RECORD_WRITER)
            .asControllerService(RecordSetWriterFactory.class);
    final boolean addParentFields = context.getProperty(INCLUDE_PARENT_FIELDS).asBoolean();
    final boolean isSplitMode = context.getProperty(MODE).getValue().equals(MODE_SPLIT.getValue());

    final FlowFile original = flowFile;
    final Map<String, String> originalAttributes = original.getAttributes();

    final FlowFile outFlowFile = session.create(original);
    final AtomicInteger readCount = new AtomicInteger(0);
    final AtomicInteger writeCount = new AtomicInteger(0);

    try {

        session.read(flowFile, new InputStreamCallback() {
            @Override
            public void process(final InputStream in) throws IOException {
                try (final RecordReader reader = readerFactory.createRecordReader(originalAttributes, in,
                        getLogger())) {

                    final RecordSchema writeSchema = writerFactory.getSchema(originalAttributes,
                            reader.getSchema());
                    final OutputStream out = session.write(outFlowFile);

                    try (final RecordSetWriter recordSetWriter = writerFactory.createWriter(getLogger(),
                            writeSchema, out)) {

                        recordSetWriter.beginRecordSet();

                        // we read each record of the input flow file
                        Record record;
                        while ((record = reader.nextRecord()) != null) {

                            readCount.incrementAndGet();

                            for (RecordPath recordPath : recordPaths) {

                                // evaluate record path in each record of the flow file
                                Iterator<FieldValue> it = recordPath.evaluate(record).getSelectedFields()
                                        .iterator();

                                while (it.hasNext()) {
                                    FieldValue fieldValue = it.next();
                                    RecordFieldType fieldType = fieldValue.getField().getDataType()
                                            .getFieldType();

                                    // we want to have an array here, nothing else allowed
                                    if (fieldType != RecordFieldType.ARRAY) {
                                        getLogger().debug("The record path " + recordPath.getPath()
                                                + " is matching a field " + "of type " + fieldType
                                                + " when the type ARRAY is expected.");
                                        continue;
                                    }

                                    if (isSplitMode) {

                                        Object[] items = (Object[]) fieldValue.getValue();
                                        for (Object item : items) {
                                            fieldValue.updateValue(new Object[] { item });
                                            recordSetWriter.write(record);
                                        }

                                    } else {

                                        // we get the type of the elements of the array
                                        final ArrayDataType arrayDataType = (ArrayDataType) fieldValue
                                                .getField().getDataType();
                                        final DataType elementType = arrayDataType.getElementType();

                                        // we want to have records in the array
                                        if (elementType.getFieldType() != RecordFieldType.RECORD) {
                                            getLogger().debug("The record path " + recordPath.getPath()
                                                    + " is matching an array field with " + "values of type "
                                                    + elementType.getFieldType()
                                                    + " when the type RECORD is expected.");
                                            continue;
                                        }

                                        Object[] records = (Object[]) fieldValue.getValue();
                                        for (Object elementRecord : records) {

                                            if (elementRecord == null) {
                                                continue;
                                            }

                                            Record recordToWrite = (Record) elementRecord;

                                            if (addParentFields) {
                                                // in this case we want to recursively add the parent fields into the record to write
                                                // but we need to ensure that the Record has the appropriate schema for that
                                                recordToWrite.incorporateSchema(writeSchema);
                                                recursivelyAddParentFields(recordToWrite, fieldValue);
                                            }

                                            recordSetWriter.write(recordToWrite);
                                        }

                                    }

                                }

                            }
                        }

                        final WriteResult writeResult = recordSetWriter.finishRecordSet();

                        try {
                            recordSetWriter.close();
                        } catch (final IOException ioe) {
                            getLogger().warn("Failed to close Writer for {}", new Object[] { outFlowFile });
                        }

                        final Map<String, String> attributes = new HashMap<>();
                        writeCount.set(writeResult.getRecordCount());
                        attributes.put("record.count", String.valueOf(writeResult.getRecordCount()));
                        attributes.put(CoreAttributes.MIME_TYPE.key(), recordSetWriter.getMimeType());
                        attributes.putAll(writeResult.getAttributes());
                        session.transfer(session.putAllAttributes(outFlowFile, attributes), REL_FORK);
                    }

                } catch (final SchemaNotFoundException | MalformedRecordException e) {
                    throw new ProcessException("Could not parse incoming data: " + e.getLocalizedMessage(), e);
                }
            }

            private void recursivelyAddParentFields(Record recordToWrite, FieldValue fieldValue) {
                try {
                    // we get the parent data
                    FieldValue parentField = fieldValue.getParent().get();
                    Record parentRecord = fieldValue.getParentRecord().get();

                    // for each field of the parent
                    for (String field : parentRecord.getSchema().getFieldNames()) {
                        // if and only if there is not an already existing field with this name
                        // (we want to give priority to the deeper existing fields)
                        if (recordToWrite.getValue(field) == null) {
                            // Updates the value of the field with the given name to the given value.
                            // If the field specified is not present in the schema, will do nothing.
                            recordToWrite.setValue(field, parentRecord.getValue(field));
                        }
                    }

                    // recursive call
                    recursivelyAddParentFields(recordToWrite, parentField);
                } catch (NoSuchElementException e) {
                    return;
                }
            }
        });

    } catch (Exception e) {
        getLogger().error("Failed to fork {}", new Object[] { flowFile, e });
        session.remove(outFlowFile);
        session.transfer(original, REL_FAILURE);
        return;
    }

    session.adjustCounter("Records Processed", readCount.get(), false);
    session.adjustCounter("Records Generated", writeCount.get(), false);
    getLogger().debug("Successfully forked {} records into {} records in {}",
            new Object[] { readCount.get(), writeCount.get(), flowFile });
    session.transfer(original, REL_ORIGINAL);
}

From source file:com.sixt.service.framework.kafka.messaging.KafkaIntegrationTest.java

@Ignore("long running test")
@Test
public void partitionAssignmentChange() throws InterruptedException {
    ServiceProperties serviceProperties = new ServiceProperties();
    serviceProperties.initialize(new String[] {}); // Reads environment variables set by DockerComposeHelper

    // Topics are created with 3 partitions - see docker-compose-integrationtest.yml
    Topic ping = new Topic("ping");
    Topic pong = new Topic("pong");

    Producer producer = new ProducerFactory(serviceProperties).createProducer();

    final AtomicBoolean produceMessages = new AtomicBoolean(true);
    final AtomicInteger sentMessages = new AtomicInteger(0);

    final AtomicInteger receivedMessagesConsumer1 = new AtomicInteger(0);
    final CountDownLatch firstMessageProcessedConsumer1 = new CountDownLatch(1);

    final AtomicInteger receivedMessagesConsumer2 = new AtomicInteger(0);
    final CountDownLatch firstMessageProcessedConsumer2 = new CountDownLatch(1);

    final AtomicInteger receivedMessagesConsumer3 = new AtomicInteger(0);
    final CountDownLatch firstMessageProcessedConsumer3 = new CountDownLatch(1);

    // Produce messages until test tells producer to stop.
    ExecutorService producerExecutor = Executors.newSingleThreadExecutor();
    producerExecutor.submit(new Runnable() {
        @Override
        public void run() {
            OrangeContext context = new OrangeContext();
            Sleeper sleeper = new Sleeper();

            try {
                while (produceMessages.get()) {
                    String key = RandomStringUtils.randomAscii(5);
                    SayHelloToCmd payload = SayHelloToCmd.newBuilder().setName(key).build();

                    Message request = Messages.requestFor(ping, pong, key, payload, context);

                    producer.send(request);
                    sentMessages.incrementAndGet();

                    sleeper.sleepNoException(250);
                }
            } catch (Throwable t) {
                logger.error("Exception in producer loop", t);
            }
        }
    });

    // Start the first consumer. It should get all 3 partitions assigned.
    Consumer consumer1 = consumerFactoryWithHandler(serviceProperties, SayHelloToCmd.class,
            new MessageHandler<SayHelloToCmd>() {
                @Override
                public void onMessage(Message<SayHelloToCmd> message, OrangeContext context) {
                    receivedMessagesConsumer1.incrementAndGet();
                    firstMessageProcessedConsumer1.countDown();
                }
            }).consumerForTopic(ping, new DiscardFailedMessages());

    // wait until consumer 1 is up.
    firstMessageProcessedConsumer1.await();
    Thread.sleep(5000); // consume some messages

    // Now, start second processor. It should get at least one partition assigned.
    Consumer consumer2 = consumerFactoryWithHandler(serviceProperties, SayHelloToCmd.class,
            new MessageHandler<SayHelloToCmd>() {
                @Override
                public void onMessage(Message<SayHelloToCmd> message, OrangeContext context) {
                    receivedMessagesConsumer2.incrementAndGet();
                    firstMessageProcessedConsumer2.countDown();
                }
            }).consumerForTopic(ping, new DiscardFailedMessages());

    // wait until the second consumer is up.
    firstMessageProcessedConsumer2.await();
    Thread.sleep(5000); // let both consumers run a bit

    brutallyKillConsumer("pool-14-thread-1"); // consumer2 thread, HACKY: if this is too brittle, change the test to shutdown()

    //Need to wait a bit longer while Kafka "restabilizes the group" after consumer 2 was killed.
    // -> Consumer 1 should now get all three partitions back again.
    Thread.sleep(30000); // must be > than max.poll.interval.ms

    // Now, start third processor. It should get at least one partition assigned.
    Consumer consumer3 = consumerFactoryWithHandler(serviceProperties, SayHelloToCmd.class,
            new MessageHandler<SayHelloToCmd>() {
                @Override
                public void onMessage(Message<SayHelloToCmd> message, OrangeContext context) {
                    receivedMessagesConsumer3.incrementAndGet();
                    firstMessageProcessedConsumer3.countDown();
                }
            }).consumerForTopic(ping, new DiscardFailedMessages());
    firstMessageProcessedConsumer3.await();
    Thread.sleep(5000);

    // Now shut down the first consumer.
    consumer1.shutdown();
    Thread.sleep(10000);

    // Stop the producer.
    produceMessages.set(false);
    producer.shutdown();
    producerExecutor.shutdown();

    Thread.sleep(3000); // give the remaining consumer the chance to consume all messages
    consumer3.shutdown(); // no assignment any longer

    // Finally, the assertions:
    int receivedMessagesTotal = receivedMessagesConsumer1.get() + receivedMessagesConsumer2.get()
            + receivedMessagesConsumer3.get();
    assertEquals(sentMessages.get(), receivedMessagesTotal);

    assertTrue(receivedMessagesConsumer1.get() > 0);
    assertTrue(receivedMessagesConsumer2.get() > 0);
    assertTrue(receivedMessagesConsumer3.get() > 0);
}

From source file:org.eclipse.hono.service.credentials.impl.FileBasedCredentialsService.java

protected void loadCredentialsData() {
    if (filename != null) {
        final FileSystem fs = vertx.fileSystem();
        log.debug("trying to load credentials information from file {}", filename);
        if (fs.existsBlocking(filename)) {
            final AtomicInteger credentialsCount = new AtomicInteger();
            fs.readFile(filename, readAttempt -> {
                if (readAttempt.succeeded()) {
                    JsonArray allObjects = new JsonArray(new String(readAttempt.result().getBytes()));
                    for (Object obj : allObjects) {
                        JsonObject tenant = (JsonObject) obj;
                        String tenantId = tenant.getString(FIELD_TENANT);
                        Map<String, JsonArray> credentialsMap = new HashMap<>();
                        for (Object credentialsObj : tenant.getJsonArray(ARRAY_CREDENTIALS)) {
                            JsonObject credentials = (JsonObject) credentialsObj;
                            JsonArray authIdCredentials;
                            if (credentialsMap.containsKey(credentials.getString(FIELD_AUTH_ID))) {
                                authIdCredentials = credentialsMap.get(credentials.getString(FIELD_AUTH_ID));
                            } else {
                                authIdCredentials = new JsonArray();
                            }
                            authIdCredentials.add(credentials);
                            credentialsMap.put(credentials.getString(FIELD_AUTH_ID), authIdCredentials);
                            credentialsCount.incrementAndGet();
                        }
                        credentials.put(tenantId, credentialsMap);
                    }
                    log.info("successfully loaded {} credentials from file [{}]", credentialsCount.get(),
                            filename);
                } else {
                    log.warn("could not load credentials from file [{}]", filename, readAttempt.cause());
                }
            });
        } else {
            log.debug("credentials file {} does not exist (yet)", filename);
        }
    }
}

From source file:org.apache.tez.dag.app.TestMockDAGAppMaster.java

@Test(timeout = 100000)
public void testConcurrencyLimit() throws Exception {
    // the test relies on local mode behavior of launching a new container per task.
    // so task concurrency == container concurrency
    TezConfiguration tezconf = new TezConfiguration(defaultConf);

    final int concurrencyLimit = 5;
    MockTezClient tezClient = new MockTezClient("testMockAM", tezconf, true, null, null, null, null, false,
            false, concurrencyLimit * 4, 1000);

    tezClient.start();

    MockDAGAppMaster mockApp = tezClient.getLocalClient().getMockApp();
    MockContainerLauncher mockLauncher = mockApp.getContainerLauncher();
    mockLauncher.startScheduling(false);

    final AtomicInteger concurrency = new AtomicInteger(0);
    final AtomicBoolean exceededConcurrency = new AtomicBoolean(false);
    mockApp.containerDelegate = new ContainerDelegate() {
        @Override
        public void stop(ContainerStopRequest event) {
            concurrency.decrementAndGet();
        }

        @Override
        public void launch(ContainerLaunchRequest event) {
            int maxConc = concurrency.incrementAndGet();
            if (maxConc > concurrencyLimit) {
                exceededConcurrency.set(true);
            }
            System.out.println("Launched: " + maxConc);
        }
    };
    DAG dag = DAG.create("testConcurrencyLimit");
    Vertex vA = Vertex.create("A", ProcessorDescriptor.create("Proc.class"), 20)
            .setConf(TezConfiguration.TEZ_AM_VERTEX_MAX_TASK_CONCURRENCY, String.valueOf(concurrencyLimit));
    dag.addVertex(vA);

    mockLauncher.startScheduling(true);
    DAGClient dagClient = tezClient.submitDAG(dag);
    dagClient.waitForCompletion();
    Assert.assertEquals(DAGStatus.State.SUCCEEDED, dagClient.getDAGStatus(null).getState());
    Assert.assertFalse(exceededConcurrency.get());
    tezClient.stop();
}

From source file:org.apache.nifi.cluster.coordination.http.replication.TestThreadPoolRequestReplicator.java

@Test(timeout = 15000)
public void testOneNodeRejectsTwoPhaseCommit() {
    final Set<NodeIdentifier> nodeIds = new HashSet<>();
    nodeIds.add(new NodeIdentifier("1", "localhost", 8100, "localhost", 8101, "localhost", 8102, 8103, false));
    nodeIds.add(new NodeIdentifier("2", "localhost", 8200, "localhost", 8201, "localhost", 8202, 8203, false));

    final ClusterCoordinator coordinator = createClusterCoordinator();
    final AtomicInteger requestCount = new AtomicInteger(0);
    final ThreadPoolRequestReplicator replicator = new ThreadPoolRequestReplicator(2, new Client(), coordinator,
            "1 sec", "1 sec", null, null, NiFiProperties.createBasicNiFiProperties(null, null)) {
        @Override
        protected NodeResponse replicateRequest(final WebResource.Builder resourceBuilder,
                final NodeIdentifier nodeId, final String method, final URI uri, final String requestId,
                Map<String, String> givenHeaders) {
            // the resource builder will not expose its headers to us, so we are using Mockito's Whitebox class to extract them.
            final OutBoundHeaders headers = (OutBoundHeaders) Whitebox.getInternalState(resourceBuilder,
                    "metadata");
            final Object expectsHeader = headers
                    .getFirst(ThreadPoolRequestReplicator.REQUEST_VALIDATION_HTTP_HEADER);

            final int requestIndex = requestCount.incrementAndGet();
            assertEquals(ThreadPoolRequestReplicator.NODE_CONTINUE, expectsHeader);

            if (requestIndex == 1) {
                final ClientResponse clientResponse = new ClientResponse(150, new InBoundHeaders(),
                        new ByteArrayInputStream(new byte[0]), null);
                return new NodeResponse(nodeId, method, uri, clientResponse, -1L, requestId);
            } else {
                final IllegalClusterStateException explanation = new IllegalClusterStateException(
                        "Intentional Exception for Unit Testing");
                return new NodeResponse(nodeId, method, uri, explanation);
            }
        }
    };

    try {
        // set the user
        final Authentication authentication = new NiFiAuthenticationToken(
                new NiFiUserDetails(StandardNiFiUser.ANONYMOUS));
        SecurityContextHolder.getContext().setAuthentication(authentication);

        final AsyncClusterResponse clusterResponse = replicator.replicate(nodeIds, HttpMethod.POST,
                new URI("http://localhost:80/processors/1"), new ProcessorEntity(), new HashMap<>(), true,
                true);
        clusterResponse.awaitMergedResponse();

        Assert.fail("Expected to get an IllegalClusterStateException but did not");
    } catch (final IllegalClusterStateException e) {
        // Expected
    } catch (final Exception e) {
        Assert.fail(e.toString());
    } finally {
        replicator.shutdown();
    }
}