Example usage for java.util.concurrent.atomic AtomicInteger incrementAndGet

Introduction

This page collects usage examples for java.util.concurrent.atomic.AtomicInteger#incrementAndGet(), drawn from the open-source projects listed below.

Prototype

public final int incrementAndGet() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd. Equivalent to addAndGet(1).
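
Before the project examples, a minimal standalone sketch (not taken from any of the sources below; the class and variable names are illustrative) of what the atomicity buys you: two threads increment the same counter and no update is lost, which a plain int++ could not guarantee.

import java.util.concurrent.atomic.AtomicInteger;

public class IncrementAndGetDemo {
    public static void main(String[] args) throws InterruptedException {
        AtomicInteger counter = new AtomicInteger(0);

        Runnable task = () -> {
            for (int i = 0; i < 10_000; i++) {
                counter.incrementAndGet(); // atomic read-modify-write; returns the updated value
            }
        };

        Thread t1 = new Thread(task);
        Thread t2 = new Thread(task);
        t1.start();
        t2.start();
        t1.join();
        t2.join();

        // Always prints 20000; with a plain int and counter++, updates would be lost.
        System.out.println(counter.get());
    }
}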

Usage

From source file:com.couchbase.client.core.endpoint.query.QueryHandlerTest.java

private void shouldDecodeChunked(String... chunks) throws Exception {
    HttpResponse responseHeader = new DefaultHttpResponse(HttpVersion.HTTP_1_1,
            new HttpResponseStatus(200, "OK"));
    Object[] httpChunks = new Object[chunks.length + 1];
    httpChunks[0] = responseHeader;
    for (int i = 1; i <= chunks.length; i++) {
        String chunk = chunks[i - 1];
        if (i == chunks.length) {
            httpChunks[i] = new DefaultLastHttpContent(Unpooled.copiedBuffer(chunk, CharsetUtil.UTF_8));
        } else {
            httpChunks[i] = new DefaultHttpContent(Unpooled.copiedBuffer(chunk, CharsetUtil.UTF_8));
        }
    }

    Subject<CouchbaseResponse, CouchbaseResponse> obs = AsyncSubject.create();
    GenericQueryRequest requestMock = mock(GenericQueryRequest.class);
    when(requestMock.observable()).thenReturn(obs);
    queue.add(requestMock);
    channel.writeInbound(httpChunks);
    GenericQueryResponse inbound = (GenericQueryResponse) obs.timeout(1, TimeUnit.SECONDS).toBlocking().last();

    final AtomicInteger found = new AtomicInteger(0);
    final AtomicInteger errors = new AtomicInteger(0);
    assertResponse(inbound, true, ResponseStatus.SUCCESS, FAKE_REQUESTID, "123456\\\"78901234567890", "success",
            "{\"horseName\":\"json\"}", new Action1<ByteBuf>() {
                @Override
                public void call(ByteBuf byteBuf) {
                    found.incrementAndGet();
                    String content = byteBuf.toString(CharsetUtil.UTF_8);
                    byteBuf.release();
                    assertNotNull(content);
                    assertTrue(!content.isEmpty());
                    try {
                        Map decoded = mapper.readValue(content, Map.class);
                        assertTrue(decoded.size() > 0);
                        assertTrue(decoded.containsKey("horseName"));
                    } catch (Exception e) {
                        assertTrue(false);
                    }
                }
            }, new Action1<ByteBuf>() {
                @Override
                public void call(ByteBuf buf) {
                    buf.release();
                    errors.incrementAndGet();
                }
            }, expectedMetricsCounts(5678, 1234) // these are the numbers parsed from the metrics object, not real counts
    );
    assertEquals(5, found.get());
    assertEquals(4, errors.get());
}

From source file:org.dasein.cloud.azure.tests.network.AzureIpAddressSupportTest.java

@Test
public void stopForwardToServerShouldPostCorrectRequestIfNoMatchEndpointFound()
        throws CloudException, InternalException {
    final AtomicInteger putCount = new AtomicInteger(0);
    new MockUp<CloseableHttpClient>() {
        @Mock(invocations = 2)
        public CloseableHttpResponse execute(Invocation inv, HttpUriRequest request) throws IOException {
            if (request.getMethod().equals("GET")) {
                DaseinObjectToXmlEntity<PersistentVMRoleModel> daseinEntity = new DaseinObjectToXmlEntity<PersistentVMRoleModel>(
                        createPersistentVMRoleModelWithEndpoint());
                assertGet(request, EXPECTED_URL,
                        new Header[] { new BasicHeader("x-ms-version", "2012-03-01") });
                return getHttpResponseMock(getStatusLineMock(HttpServletResponse.SC_OK), daseinEntity,
                        new Header[] { new BasicHeader("x-ms-request-id", UUID.randomUUID().toString()) });
            } else if (request.getMethod().equals("PUT")) {
                putCount.incrementAndGet();
                PersistentVMRoleModel persistentVMRoleModel = createPersistentVMRoleModelWithEndpoint();
                assertPut(request, EXPECTED_URL, new Header[] { new BasicHeader("x-ms-version", "2012-03-01") },
                        persistentVMRoleModel);
                return getHttpResponseMock(getStatusLineMock(HttpServletResponse.SC_ACCEPTED), null,
                        new Header[] { new BasicHeader("x-ms-request-id", UUID.randomUUID().toString()) });
            } else {
                throw new IOException("Request is not mocked");
            }
        }
    };
    String ruleId = new AzureRuleIdParts(VM_ID, Protocol.TCP.toString(), String.valueOf(PRIVATE_PORT + 1))
            .toProviderId();
    ipAddressSupport.stopForwardToServer(ruleId, VM_ID);
    assertEquals("PUT count doesn't match", 1, putCount.get());
}

From source file:org.dasein.cloud.azure.tests.network.AzureIpAddressSupportTest.java

@Test
public void stopForwardToServerShouldPostCorrectRequest() throws CloudException, InternalException {
    final AtomicInteger putCount = new AtomicInteger(0);
    new MockUp<CloseableHttpClient>() {
        @Mock(invocations = 2)
        public CloseableHttpResponse execute(Invocation inv, HttpUriRequest request) throws IOException {
            if (request.getMethod().equals("GET")) {
                DaseinObjectToXmlEntity<PersistentVMRoleModel> daseinEntity = new DaseinObjectToXmlEntity<PersistentVMRoleModel>(
                        createPersistentVMRoleModelWithEndpoint());
                assertGet(request, EXPECTED_URL,
                        new Header[] { new BasicHeader("x-ms-version", "2012-03-01") });
                return getHttpResponseMock(getStatusLineMock(HttpServletResponse.SC_OK), daseinEntity,
                        new Header[] { new BasicHeader("x-ms-request-id", UUID.randomUUID().toString()) });
            } else if (request.getMethod().equals("PUT")) {
                putCount.incrementAndGet();
                PersistentVMRoleModel persistentVMRoleModel = createPersistentVMRoleModelWithoutEndpoint();
                // set an empty list, otherwise the unitils assertion fails because one side is null while the other is an empty list
                persistentVMRoleModel.getConfigurationSets().get(0)
                        .setInputEndpoints(new ArrayList<PersistentVMRoleModel.InputEndpoint>());
                assertPut(request, EXPECTED_URL, new Header[] { new BasicHeader("x-ms-version", "2012-03-01") },
                        persistentVMRoleModel);
                return getHttpResponseMock(getStatusLineMock(HttpServletResponse.SC_ACCEPTED), null,
                        new Header[] { new BasicHeader("x-ms-request-id", UUID.randomUUID().toString()) });
            } else {
                throw new IOException("Request is not mocked");
            }
        }
    };
    String ruleId = new AzureRuleIdParts(VM_ID, Protocol.TCP.toString(), String.valueOf(PRIVATE_PORT))
            .toProviderId();
    ipAddressSupport.stopForwardToServer(ruleId, VM_ID);
    assertEquals("PUT count doesn't match", 1, putCount.get());
}

From source file:org.dasein.cloud.azure.tests.network.AzureIpAddressSupportTest.java

@Test
public void forwardShouldPostCorrectRequest() throws CloudException, InternalException {
    final AtomicInteger putCount = new AtomicInteger(0);
    new MockUp<CloseableHttpClient>() {
        @Mock(invocations = 2)
        public CloseableHttpResponse execute(Invocation inv, HttpUriRequest request) throws IOException {
            if (request.getMethod().equals("GET")) {
                DaseinObjectToXmlEntity<PersistentVMRoleModel> daseinEntity = new DaseinObjectToXmlEntity<PersistentVMRoleModel>(
                        createPersistentVMRoleModelWithoutEndpoint());
                assertGet(request, EXPECTED_URL,
                        new Header[] { new BasicHeader("x-ms-version", "2012-03-01") });
                return getHttpResponseMock(getStatusLineMock(HttpServletResponse.SC_OK), daseinEntity,
                        new Header[] { new BasicHeader("x-ms-request-id", UUID.randomUUID().toString()) });
            } else if (request.getMethod().equals("PUT")) {
                putCount.incrementAndGet();
                PersistentVMRoleModel persistentVMRoleModel = createPersistentVMRoleModelWithEndpoint();
                assertPut(request, EXPECTED_URL, new Header[] { new BasicHeader("x-ms-version", "2012-03-01") },
                        persistentVMRoleModel);
                return getHttpResponseMock(getStatusLineMock(HttpServletResponse.SC_ACCEPTED), null,
                        new Header[] { new BasicHeader("x-ms-request-id", UUID.randomUUID().toString()) });
            } else {
                throw new IOException("Request is not mocked");
            }
        }
    };

    String result = ipAddressSupport.forward("127.0.0.1", PUBLIC_PORT, PROTOCOL, PRIVATE_PORT, VM_ID);
    assertEquals("IpAddressSupport.forward() doesn't return correct result",
            new AzureRuleIdParts(VM_ID, Protocol.TCP.toString(), String.valueOf(PRIVATE_PORT)).toProviderId(),
            result);
    assertEquals("PUT count doesn't match", 1, putCount.get());
}

From source file:edu.brown.workload.Workload.java

/**
 * Creates a QueryTrace record for a query starting inside the given transaction.
 * @param xact_handle the TransactionTrace handle returned when the transaction was started
 * @param catalog_statement the Statement being invoked
 * @param args the query's parameters
 * @param batch_id the batch that this query belongs to
 * @return the new QueryTrace handle, or null if the transaction is ignored or the handle is invalid
 */
@Override
public Object startQuery(Object xact_handle, Statement catalog_statement, Object args[], int batch_id) {
    QueryTrace query_handle = null;
    if (xact_handle instanceof TransactionTrace) {
        TransactionTrace xact = (TransactionTrace) xact_handle;
        long txn_id = xact.getTransactionId();

        if (this.ignored_xact_ids.contains(txn_id))
            return (null);

        Map<Integer, AtomicInteger> open_queries = this.xact_open_queries.get(txn_id);
        // HACK
        if (open_queries == null) {
            open_queries = new HashMap<Integer, AtomicInteger>();
            this.xact_open_queries.put(txn_id, open_queries);
        }

        assert (open_queries != null) : "Starting a query before starting the txn?? [" + txn_id + "]";

        query_handle = new QueryTrace(catalog_statement, args, batch_id);
        xact.addQuery(query_handle);
        this.query_txn_xref.put(query_handle, txn_id);

        // Make sure that there aren't still running queries in the previous batch
        if (batch_id > 0) {
            AtomicInteger last_batch_ctr = open_queries.get(batch_id - 1);
            if (last_batch_ctr != null && last_batch_ctr.intValue() != 0) {
                String msg = "Txn #" + txn_id + " is trying to start a new query in batch #" + batch_id
                        + " but there are still " + last_batch_ctr.intValue() + " queries running in batch #"
                        + (batch_id - 1);
                throw new IllegalStateException(msg);
            }
        }

        AtomicInteger batch_ctr = open_queries.get(batch_id);
        if (batch_ctr == null) {
            synchronized (open_queries) {
                batch_ctr = new AtomicInteger(0);
                open_queries.put(batch_id, batch_ctr);
            } // SYNCHRONIZED
        }
        batch_ctr.incrementAndGet();

        if (debug.val)
            LOG.debug("Created '" + catalog_statement.getName() + "' query trace record for xact '" + txn_id
                    + "'");
    } else {
        LOG.fatal("Unable to create new query trace: Invalid transaction handle");
    }
    return (query_handle);
}
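
A note on the counter-creation block above (the one the author marks HACK): the unsynchronized get followed by a synchronized put is a check-then-act, so two threads could both observe a missing counter and both create one. A hedged sketch of the more common idiom, assuming a ConcurrentHashMap so each batch's AtomicInteger is created exactly once (the class and method names here are illustrative, not from the Workload source):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

class BatchQueryCounters {
    private final Map<Integer, AtomicInteger> openQueries = new ConcurrentHashMap<>();

    /** Records a started query and returns how many are now open in the batch. */
    int startQuery(int batchId) {
        // computeIfAbsent performs the check-and-create atomically,
        // so no explicit lock and no duplicate counters.
        AtomicInteger ctr = openQueries.computeIfAbsent(batchId, id -> new AtomicInteger(0));
        return ctr.incrementAndGet();
    }

    /** Records a finished query and returns how many remain open. */
    int finishQuery(int batchId) {
        AtomicInteger ctr = openQueries.get(batchId);
        return (ctr == null) ? 0 : ctr.decrementAndGet();
    }
}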

From source file:com.streamsets.pipeline.stage.origin.spooldir.TestSpoolDirSource.java

@Test
public void testNoMoreFilesEmptyBatch() throws Exception {
    TSpoolDirSource source = createSource(null);
    PushSourceRunner runner = new PushSourceRunner.Builder(TSpoolDirSource.class, source).addOutputLane("lane")
            .build();
    File file = new File(source.spoolDir, "file-0.log").getAbsoluteFile();
    Files.createFile(file.toPath());
    AtomicInteger batchCount = new AtomicInteger(0);
    runner.runInit();

    try {
        source.file = file;
        source.offset = 0;
        source.maxBatchSize = 10;
        source.offsetIncrement = -1;

        runner.runProduce(new HashMap<>(), 1000, output -> {
            TSpoolDirRunnable runnable = source.getTSpoolDirRunnable();
            batchCount.incrementAndGet();

            if (batchCount.get() == 1) {
                Assert.assertEquals("file-0.log", output.getOffsetEntity());
                Assert.assertEquals("{\"POS\":\"-1\"}", output.getNewOffset());
                Assert.assertTrue(runnable.produceCalled);
                runnable.produceCalled = false;
            } else if (batchCount.get() == 2) {
                Assert.assertEquals("file-0.log", output.getOffsetEntity());
                Assert.assertEquals("{\"POS\":\"-1\"}", output.getNewOffset());
                //Produce will not be called as this file-0.log will not be eligible for produce
                Assert.assertFalse(runnable.produceCalled);
            } else if (batchCount.get() > 2) {
                runner.setStop();
            }
        });

        runner.waitOnProduce();

        Assert.assertEquals(3, batchCount.get());
    } finally {
        runner.runDestroy();
    }
}

From source file:com.twitter.distributedlog.lock.TestZKSessionLock.java

/**
 * Tests locking after the session has already expired: the lock should end up
 * closed, since every ZooKeeper operation will fail.
 *
 * @param timeout
 *          timeout to wait for the lock
 * @throws Exception
 */
private void testSessionExpiredBeforeLock(long timeout) throws Exception {
    String lockPath = "/test-session-expired-before-lock-" + timeout + "-" + System.currentTimeMillis();
    String clientId = "test-session-expired-before-lock-" + System.currentTimeMillis();

    createLockPath(zkc.get(), lockPath);
    final AtomicInteger expireCounter = new AtomicInteger(0);
    final CountDownLatch expiredLatch = new CountDownLatch(1);
    LockListener listener = new LockListener() {
        @Override
        public void onExpired() {
            expireCounter.incrementAndGet();
        }
    };
    final ZKSessionLock lock = new ZKSessionLock(zkc, lockPath, clientId, lockStateExecutor)
            .setLockListener(listener);
    // expire session
    ZooKeeperClientUtils.expireSession(zkc, zkServers, sessionTimeoutMs);
    // submit a runnable to lock state executor to ensure any state changes happened when session expired
    lockStateExecutor.submit(lockPath, new SafeRunnable() {
        @Override
        public void safeRun() {
            expiredLatch.countDown();
        }
    });
    expiredLatch.await();
    // no watcher was registered if never acquired lock successfully
    assertEquals(State.INIT, lock.getLockState());
    try {
        lock.tryLock(timeout, TimeUnit.MILLISECONDS);
        fail("Should fail locking using an expired lock");
    } catch (LockingException le) {
        assertTrue(le.getCause() instanceof KeeperException.SessionExpiredException);
    }
    assertEquals(State.CLOSED, lock.getLockState());
    List<String> children = getLockWaiters(zkc, lockPath);
    assertEquals(0, children.size());
}

From source file:com.facebook.BatchRequestTests.java

@LargeTest
public void testBatchLastOnProgressCallbackIsCalledOnce() {
    final AtomicInteger count = new AtomicInteger();

    final AccessToken accessToken = getAccessTokenForSharedUser();

    String appId = getApplicationId();
    GraphRequest.setDefaultBatchApplicationId(appId);

    GraphRequest request1 = GraphRequest.newGraphPathRequest(accessToken, "4", null);
    assertNotNull(request1);
    GraphRequest request2 = GraphRequest.newGraphPathRequest(accessToken, "4", null);
    assertNotNull(request2);

    GraphRequestBatch batch = new GraphRequestBatch(request1, request2);
    batch.addCallback(new GraphRequestBatch.OnProgressCallback() {
        @Override
        public void onBatchCompleted(GraphRequestBatch batch) {
        }

        @Override
        public void onBatchProgress(GraphRequestBatch batch, long current, long max) {
            if (current == max) {
                count.incrementAndGet();
            } else if (current > max) {
                count.set(0);
            }
        }
    });

    batch.executeAndWait();
    assertEquals(1, count.get());
}

From source file:com.oneops.sensor.Sensor.java

/**
 * Loads all valid threshold statements into the Esper engine and emits a fake event for each heartbeat threshold.
 *
 * @return a stream of fake events
 */
private Observable<FakeEvent> loadThresholds() {

    AtomicInteger ldStmts = new AtomicInteger(0);

    return tsDao.getAllThreshold(READ_ROWCOUNT).filter(this::validateThreshold).map(tr -> {
        ThresholdStatements stmts = stmtBuilder.getThresholdStatements(tr.getManifestId(), tr.getSource(),
                tr.getCrc(), tr.getThresholdJson(), tr.isHeartbeat(), tr.getHbDuration());

        for (String stmtName : stmts.getStatements().keySet()) {
            SensorStatement stmt = stmts.getStatements().get(stmtName);
            addStatementToEngine(stmt.getStmtName(), stmt.getStmtText(), stmt.getListenerName());
            ldStmts.incrementAndGet();
            if (ldStmts.get() % READ_ROWCOUNT == 0) {
                logger.info("Loaded " + ldStmts.get() + " threshold statements.");
            }
        }

        if (!loadedThresholds.containsKey(tr.getManifestId())) {
            loadedThresholds.put(tr.getManifestId(), new HashMap<String, ThresholdStatements>());
        }
        loadedThresholds.get(tr.getManifestId()).put(tr.getSource(), stmts);
        return tr;

    }).filter(tr -> tr.isHeartbeat()).flatMap(tr -> {

        // Fake events for missing heartbeat
        List<Long> mIds = tsDao.getManifestCiIds(tr.getManifestId());
        List<FakeEvent> fes = new ArrayList<>(mIds.size());
        for (long ciId : mIds) {
            FakeEvent fe = new FakeEvent();
            fe.ciId = ciId;
            fe.manifestId = tr.getManifestId();
            fe.source = tr.getSource();
            fes.add(fe);
        }
        return Observable.from(fes);

    }).doOnCompleted(() -> logger.info(">>> Loaded total " + ldStmts.get() + " threshold statements."));
}
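
A detail worth calling out in this example: the lambdas passed to map() and doOnCompleted() may only capture effectively-final locals, so a plain int counter could not be incremented inside them. The AtomicInteger reference stays final while its value mutates, which is the standard workaround. A minimal sketch of the idiom (names are illustrative):

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

class StatementLoaderDemo {
    static void load(List<String> statements) {
        AtomicInteger loaded = new AtomicInteger(0); // final reference, mutable value
        statements.forEach(stmt -> {
            // ... install stmt into the engine ...
            if (loaded.incrementAndGet() % 100 == 0) {
                System.out.println("Loaded " + loaded.get() + " statements.");
            }
        });
        System.out.println("Loaded total " + loaded.get() + " statements.");
    }
}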

From source file:com.ikanow.aleph2.analytics.services.TestGraphBuilderEnrichmentService.java

@Test
public void test_empty() {

    final AtomicInteger counter = new AtomicInteger(0);

    final Streamable<Tuple2<Long, IBatchRecord>> test_stream = Streamable
            .of(Arrays.asList(_mapper.createObjectNode()))
            .<Tuple2<Long, IBatchRecord>>map(j -> Tuples._2T(0L, new BatchRecordUtils.JsonBatchRecord(j)));

    final IGraphService throwing_graph_service = Mockito.mock(IGraphService.class);
    Mockito.when(throwing_graph_service.getUnderlyingPlatformDriver(Mockito.any(), Mockito.any()))
            .thenThrow(new RuntimeException("getUnderlyingPlatformDriver"));
    final MockServiceContext mock_service_context = new MockServiceContext();
    final IEnrichmentModuleContext enrich_context = Mockito.mock(IEnrichmentModuleContext.class);
    Mockito.when(enrich_context.getServiceContext()).thenReturn(mock_service_context);
    Mockito.when(enrich_context.emitImmutableObject(Mockito.anyLong(), Mockito.any(), Mockito.any(),
            Mockito.any(), Mockito.any())).thenAnswer(invocation -> {
                counter.incrementAndGet();
                return null;
            });

    // Bucket enabled but no graph service
    {
        final GraphBuilderEnrichmentService under_test = new GraphBuilderEnrichmentService();

        final GraphSchemaBean graph_schema = BeanTemplateUtils.build(GraphSchemaBean.class).done().get();
        final DataBucketBean bucket = BeanTemplateUtils.build(DataBucketBean.class)
                .with(DataBucketBean::data_schema, BeanTemplateUtils.build(DataSchemaBean.class)
                        .with(DataSchemaBean::graph_schema, graph_schema).done().get())
                .done().get();
        final EnrichmentControlMetadataBean control = BeanTemplateUtils
                .build(EnrichmentControlMetadataBean.class).done().get();

        under_test.onStageInitialize(enrich_context, bucket, control, Tuples._2T(null, null), Optional.empty());
        under_test.onObjectBatch(test_stream.stream(), Optional.empty(), Optional.empty());
        under_test.onStageComplete(true);
        assertEquals(under_test, under_test.cloneForNewGrouping());
        assertEquals(Collections.emptyList(), under_test.validateModule(enrich_context, bucket, control));
        assertEquals(1, counter.getAndSet(0));
    }
    // Use override
    {
        final GraphBuilderEnrichmentService under_test = new GraphBuilderEnrichmentService();

        final GraphSchemaBean graph_schema = BeanTemplateUtils.build(GraphSchemaBean.class).done().get();
        final GraphConfigBean graph_config = BeanTemplateUtils.build(GraphConfigBean.class)
                .with(GraphConfigBean::graph_schema_override, graph_schema).done().get();
        final DataBucketBean bucket = BeanTemplateUtils.build(DataBucketBean.class)
                .with(DataBucketBean::data_schema, BeanTemplateUtils.build(DataSchemaBean.class)
                        .with(DataSchemaBean::graph_schema, graph_schema).done().get())
                .done().get();
        final EnrichmentControlMetadataBean control = BeanTemplateUtils
                .build(EnrichmentControlMetadataBean.class).with(EnrichmentControlMetadataBean::config,
                        new LinkedHashMap<String, Object>(BeanTemplateUtils.toMap(graph_config)))
                .done().get();

        under_test.onStageInitialize(enrich_context, bucket, control, Tuples._2T(null, null), Optional.empty());
        under_test.onObjectBatch(test_stream.stream(), Optional.empty(), Optional.empty());
        under_test.onStageComplete(true);
        assertEquals(under_test, under_test.cloneForNewGrouping());
        assertEquals(Collections.emptyList(), under_test.validateModule(enrich_context, bucket, control));
        assertEquals(1, counter.getAndSet(0));
    }

    mock_service_context.addService(IGraphService.class, Optional.empty(), throwing_graph_service);

    // Add graph service, check it starts failing (bucket enabled)
    {
        final GraphBuilderEnrichmentService under_test = new GraphBuilderEnrichmentService();

        final GraphSchemaBean graph_schema = BeanTemplateUtils.build(GraphSchemaBean.class).done().get();
        final DataBucketBean bucket = BeanTemplateUtils.build(DataBucketBean.class)
                .with(DataBucketBean::data_schema, BeanTemplateUtils.build(DataSchemaBean.class)
                        .with(DataSchemaBean::graph_schema, graph_schema).done().get())
                .done().get();
        final EnrichmentControlMetadataBean control = BeanTemplateUtils
                .build(EnrichmentControlMetadataBean.class).done().get();

        try {
            under_test.onStageInitialize(enrich_context, bucket, control, Tuples._2T(null, null),
                    Optional.empty());
            fail("Should have thrown");
        } catch (Exception e) {
        }
    }
    // Add graph service, check it starts failing (override)
    {
        final GraphBuilderEnrichmentService under_test = new GraphBuilderEnrichmentService();

        final GraphSchemaBean graph_schema = BeanTemplateUtils.build(GraphSchemaBean.class).done().get();
        final GraphConfigBean graph_config = BeanTemplateUtils.build(GraphConfigBean.class)
                .with(GraphConfigBean::graph_schema_override, graph_schema).done().get();
        final DataBucketBean bucket = BeanTemplateUtils.build(DataBucketBean.class)
                .with(DataBucketBean::data_schema, BeanTemplateUtils.build(DataSchemaBean.class)
                        .with(DataSchemaBean::graph_schema, graph_schema).done().get())
                .done().get();
        final EnrichmentControlMetadataBean control = BeanTemplateUtils
                .build(EnrichmentControlMetadataBean.class).with(EnrichmentControlMetadataBean::config,
                        new LinkedHashMap<String, Object>(BeanTemplateUtils.toMap(graph_config)))
                .done().get();

        try {
            under_test.onStageInitialize(enrich_context, bucket, control, Tuples._2T(null, null),
                    Optional.empty());
            fail("Should have thrown");
        } catch (Exception e) {
        }
    }
    // From bucket, graph service disabled, won't fail
    {
        final GraphBuilderEnrichmentService under_test = new GraphBuilderEnrichmentService();

        final DataBucketBean bucket = BeanTemplateUtils.build(DataBucketBean.class).done().get(); // (no data_schema.graph_schema)
        final EnrichmentControlMetadataBean control = BeanTemplateUtils
                .build(EnrichmentControlMetadataBean.class).done().get();

        under_test.onStageInitialize(enrich_context, bucket, control, Tuples._2T(null, null), Optional.empty());
        under_test.onObjectBatch(test_stream.stream(), Optional.empty(), Optional.empty());
        under_test.onStageComplete(true);
        assertEquals(under_test, under_test.cloneForNewGrouping());
        assertEquals(Collections.emptyList(), under_test.validateModule(enrich_context, bucket, control));
        assertEquals(1, counter.getAndSet(0));
    }
    // From override, graph service disabled, won't fail
    {
        final GraphBuilderEnrichmentService under_test = new GraphBuilderEnrichmentService();

        final GraphSchemaBean graph_schema = BeanTemplateUtils.build(GraphSchemaBean.class)
                .with(GraphSchemaBean::enabled, false).done().get();
        final GraphConfigBean graph_config = BeanTemplateUtils.build(GraphConfigBean.class)
                .with(GraphConfigBean::graph_schema_override, graph_schema).done().get();
        final DataBucketBean bucket = BeanTemplateUtils.build(DataBucketBean.class)
                .with(DataBucketBean::data_schema, BeanTemplateUtils.build(DataSchemaBean.class)
                        .with(DataSchemaBean::graph_schema, graph_schema).done().get())
                .done().get();
        final EnrichmentControlMetadataBean control = BeanTemplateUtils
                .build(EnrichmentControlMetadataBean.class).with(EnrichmentControlMetadataBean::config,
                        new LinkedHashMap<String, Object>(BeanTemplateUtils.toMap(graph_config)))
                .done().get();

        under_test.onStageInitialize(enrich_context, bucket, control, Tuples._2T(null, null), Optional.empty());
        under_test.onObjectBatch(test_stream.stream(), Optional.empty(), Optional.empty());
        under_test.onStageComplete(true);
        assertEquals(under_test, under_test.cloneForNewGrouping());
        assertEquals(Collections.emptyList(), under_test.validateModule(enrich_context, bucket, control));
        assertEquals(1, counter.getAndSet(0));
    }

}