Example usage for java.util.concurrent.atomic AtomicInteger incrementAndGet

Introduction

This page lists example usages of java.util.concurrent.atomic.AtomicInteger#incrementAndGet, collected from open-source projects.

Prototype

public final int incrementAndGet() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd.
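
As a quick illustration before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the typical use of incrementAndGet: several threads bump a shared counter without external locking, and no increment is lost to a race.

import java.util.concurrent.atomic.AtomicInteger;

public class IncrementAndGetDemo {

    public static void main(String[] args) throws InterruptedException {
        // Shared counter; incrementAndGet() is an atomic read-modify-write.
        final AtomicInteger counter = new AtomicInteger(0);

        Thread[] workers = new Thread[4];
        for (int i = 0; i < workers.length; i++) {
            workers[i] = new Thread(() -> {
                for (int j = 0; j < 1_000; j++) {
                    counter.incrementAndGet(); // returns the updated value, e.g. 1 on the very first call
                }
            });
            workers[i].start();
        }
        for (Thread worker : workers) {
            worker.join();
        }

        // Always prints 4000; a plain "count++" on an int field could lose updates under contention.
        System.out.println("final count = " + counter.get());
    }
}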

Usage

From source file: org.eclipse.hono.deviceregistry.FileBasedRegistrationService.java

Future<Void> saveToFile() {

    if (!getConfig().isSaveToFile()) {
        return Future.succeededFuture();
    } else if (dirty) {
        return checkFileExists(true).compose(s -> {
            final AtomicInteger idCount = new AtomicInteger();
            final JsonArray tenants = new JsonArray();
            for (final Entry<String, Map<String, JsonObject>> entry : identities.entrySet()) {
                final JsonArray devices = new JsonArray();
                for (final Entry<String, JsonObject> deviceEntry : entry.getValue().entrySet()) {
                    devices.add(new JsonObject().put(FIELD_PAYLOAD_DEVICE_ID, deviceEntry.getKey())
                            .put(FIELD_DATA, deviceEntry.getValue()));
                    idCount.incrementAndGet();
                }
                tenants.add(new JsonObject().put(FIELD_TENANT, entry.getKey()).put(ARRAY_DEVICES, devices));
            }

            final Future<Void> writeHandler = Future.future();
            vertx.fileSystem().writeFile(getConfig().getFilename(),
                    Buffer.factory.buffer(tenants.encodePrettily()), writeHandler.completer());
            return writeHandler.map(ok -> {
                dirty = false;
                log.trace("successfully wrote {} device identities to file {}", idCount.get(),
                        getConfig().getFilename());
                return (Void) null;
            }).otherwise(t -> {
                log.warn("could not write device identities to file {}", getConfig().getFilename(), t);
                return (Void) null;
            });
        });
    } else {
        log.trace("registry does not need to be persisted");
        return Future.succeededFuture();
    }
}

From source file: org.apache.hadoop.hdfs.TestBlockReaderFactory.java

/**
 * When an InterruptedException is sent to a thread calling
 * FileChannel#read, the FileChannel is immediately closed and the
 * thread gets an exception.  This effectively means that we might have
 * someone asynchronously calling close() on the file descriptors we use
 * in BlockReaderLocal.  So when unreferencing a ShortCircuitReplica in
 * ShortCircuitCache#unref, we should check if the FileChannel objects
 * are still open.  If not, we should purge the replica to avoid giving
 * it out to any future readers.
 *
 * This is a regression test for HDFS-6227: Short circuit read failed
 * due to ClosedChannelException.
 *
 * Note that you may still get ClosedChannelException errors if two threads
 * are reading from the same replica and an InterruptedException is delivered
 * to one of them.
 */
@Test(timeout = 120000)
public void testPurgingClosedReplicas() throws Exception {
    BlockReaderTestUtil.enableBlockReaderFactoryTracing();
    final AtomicInteger replicasCreated = new AtomicInteger(0);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    BlockReaderFactory.createShortCircuitReplicaInfoCallback = new ShortCircuitCache.ShortCircuitReplicaCreator() {
        @Override
        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
            replicasCreated.incrementAndGet();
            return null;
        }
    };
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testPurgingClosedReplicas", sockDir);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4095;
    final int SEED = 0xFADE0;
    final DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(cluster.getURI(0), conf);
    DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    final Semaphore sem = new Semaphore(0);
    final List<LocatedBlock> locatedBlocks = cluster.getNameNode().getRpcServer()
            .getBlockLocations(TEST_FILE, 0, TEST_FILE_LEN).getLocatedBlocks();
    final LocatedBlock lblock = locatedBlocks.get(0); // first block
    final byte[] buf = new byte[TEST_FILE_LEN];
    Runnable readerRunnable = new Runnable() {
        @Override
        public void run() {
            try {
                while (true) {
                    BlockReader blockReader = null;
                    try {
                        blockReader = BlockReaderTestUtil.getBlockReader(cluster, lblock, 0, TEST_FILE_LEN);
                        sem.release();
                        try {
                            blockReader.readAll(buf, 0, TEST_FILE_LEN);
                        } finally {
                            sem.acquireUninterruptibly();
                        }
                    } catch (ClosedByInterruptException e) {
                        LOG.info("got the expected ClosedByInterruptException", e);
                        sem.release();
                        break;
                    } finally {
                        if (blockReader != null)
                            blockReader.close();
                    }
                    LOG.info("read another " + TEST_FILE_LEN + " bytes.");
                }
            } catch (Throwable t) {
                LOG.error("getBlockReader failure", t);
                testFailed.set(true);
                sem.release();
            }
        }
    };
    Thread thread = new Thread(readerRunnable);
    thread.start();
    // While the thread is reading, send it interrupts.
    // These should trigger a ClosedChannelException.
    while (thread.isAlive()) {
        sem.acquireUninterruptibly();
        thread.interrupt();
        sem.release();
    }
    Assert.assertFalse(testFailed.get());
    // We should be able to read from the file without
    // getting a ClosedChannelException.
    BlockReader blockReader = null;
    try {
        blockReader = BlockReaderTestUtil.getBlockReader(cluster, lblock, 0, TEST_FILE_LEN);
        blockReader.readFully(buf, 0, TEST_FILE_LEN);
    } finally {
        if (blockReader != null)
            blockReader.close();
    }
    byte expected[] = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(buf, expected));
    // Another ShortCircuitReplica object should have been created.
    Assert.assertEquals(2, replicasCreated.get());
    dfs.close();
    cluster.shutdown();
    sockDir.close();
}

From source file: au.org.ala.bhl.service.IndexingService.java

/**
 * Indexes an item that exists in the document cache
 *
 * @param item
 */
public void indexItem(final ItemDescriptor item) {

    String itemPathStr = _docCache.getItemDirectoryPath(item.getInternetArchiveId());

    final SolrServer server = createSolrServer();

    log("Indexing pages %s for item %s", itemPathStr, item.getItemId());

    try {
        final AtomicInteger pageCount = new AtomicInteger(0);
        File itemPath = new File(itemPathStr);
        if (itemPath.exists() && itemPath.isDirectory()) {
            File f = _docCache.getPageArchiveFile(item);
            if (f.exists()) {
                _docCache.forEachItemPage(item, new CachedItemPageHandler() {

                    public void startItem(String itemId) {
                    }

                    public void onPage(String iaId, String pageId, String text) {
                        indexPage(item, pageId, text, server);
                        pageCount.incrementAndGet();

                        if (pageCount.get() % 100 == 0) {
                            try {
                                server.commit();
                            } catch (Exception ex) {
                                throw new RuntimeException(ex);
                            }
                        }
                    }

                    public void endItem(String itemId) {
                    }
                });

                if (pageCount.get() > 0) {
                    server.commit();
                    getItemsService().setItemStatus(item.getItemId(), ItemStatus.INDEXED, pageCount.get());
                    log("%s pages indexed for item: %s", pageCount, item.getItemId());
                } else {
                    log("Ignoring empty item (no pages): %s", item.getItemId());
                }
            } else {
                log("Ignoring partial or empty item (no archive file found): %s", item.getInternetArchiveId());
            }
        }

    } catch (Exception ex) {
        ex.printStackTrace();
    }
}
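
The modulo check on pageCount above (commit every 100 pages) is a common batching idiom built on incrementAndGet. A simplified, stand-alone sketch of the same idea follows; BATCH_SIZE, index() and flush() are placeholders for illustration, not part of the Solr or BHL APIs.

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public class BatchFlushDemo {

    private static final int BATCH_SIZE = 100;
    private final AtomicInteger processed = new AtomicInteger(0);

    public void indexAll(List<String> pages) {
        for (String page : pages) {
            index(page);
            // incrementAndGet() returns the new total, so every BATCH_SIZE-th
            // item triggers a flush of the accumulated work.
            if (processed.incrementAndGet() % BATCH_SIZE == 0) {
                flush();
            }
        }
        if (processed.get() % BATCH_SIZE != 0) {
            flush(); // flush the final partial batch
        }
    }

    private void index(String page) {
        // placeholder: hand the page to the real indexer here
    }

    private void flush() {
        System.out.println("flushed after " + processed.get() + " pages");
    }
}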

From source file: com.redhat.red.build.koji.KojiClient.java

protected Map<String, KojiClientException> uploadForImport(KojiImport buildInfo,
        Supplier<Iterable<ImportFile>> outputSupplier, String dirname, KojiSessionInfo session)
        throws KojiClientException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    try {
        objectMapper.writeValue(baos, buildInfo);
    } catch (IOException e) {
        throw new KojiClientException("Failed to serialize import info to JSON. Reason: %s", e, e.getMessage());
    }

    AtomicInteger count = new AtomicInteger(0);
    uploadService.submit(
            newUploader(new ImportFile(METADATA_JSON_FILE, new ByteArrayInputStream(baos.toByteArray())),
                    dirname, session));

    count.incrementAndGet();

    outputSupplier.get().forEach((importFile) -> {
        uploadService.submit(newUploader(importFile, dirname, session));
        count.incrementAndGet();
    });

    Logger logger = LoggerFactory.getLogger(getClass());
    Map<String, KojiClientException> uploadErrors = new HashMap<>();
    Set<UploadResponse> responses = new HashSet<>();
    int total = count.get();
    do {
        logger.debug("Waiting for %d uploads.", count.get());

        try {
            Future<KojiUploaderResult> future = uploadService.take();
            KojiUploaderResult result = future.get();
            KojiClientException error = result.getError();
            if (error != null) {
                uploadErrors.put(result.getImportFile().getFilePath(), error);
            } else {
                responses.add(result.getResponse());
            }
        } catch (InterruptedException e) {
            logger.debug("Interrupted while uploading. Aborting upload.");
            break;
        } catch (ExecutionException e) {
            throw new KojiClientException("Failed to execute %d uploads for: %s. Reason: %s", e, total,
                    buildInfo, e.getMessage());
        }
    } while (count.decrementAndGet() > 0);

    return uploadErrors;
}
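
In the method above, incrementAndGet() counts each submitted upload and the do/while condition drains the same number of results with decrementAndGet(). Below is a minimal sketch of that submit-and-drain pattern using only java.util.concurrent; the class and task names are invented for illustration and are not Koji client APIs.

import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;

public class SubmitAndDrainDemo {

    public static void main(String[] args) throws InterruptedException, ExecutionException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        CompletionService<String> uploads = new ExecutorCompletionService<>(pool);

        // Count every submitted task so we know how many results to wait for.
        AtomicInteger pending = new AtomicInteger(0);
        for (int i = 0; i < 10; i++) {
            final int id = i;
            uploads.submit(() -> "upload-" + id + " done");
            pending.incrementAndGet();
        }

        // Drain exactly as many results as were submitted, mirroring the
        // incrementAndGet()/decrementAndGet() bookkeeping in the example above.
        do {
            System.out.println(uploads.take().get());
        } while (pending.decrementAndGet() > 0);

        pool.shutdown();
    }
}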

From source file: me.smoe.adar.utils.cam.o.CAMApplication.java

private void go(int cases, boolean buildScore) throws Exception {
    if (buildScore) {
        statistics.run();
    }

    Map<Long, String> cams = new HashMap<>();
    for (CAM cam : camRepository.findAll()) {
        cams.put(cam.getId(), cam.getName());
    }

    AtomicInteger succ = new AtomicInteger();
    AtomicInteger fail = new AtomicInteger();
    AtomicInteger wrong = new AtomicInteger();
    AtomicInteger index = new AtomicInteger();
    Page<CAMProduct> products = camProductRepository
            .findAll(new PageRequest(0, cases, new Sort(Direction.DESC, "id")));
    int total = products.getSize();
    for (CAMProduct product : products) {
        THREADPOOLEXECUTOR.execute(() -> {
            try {
                Long ca = caMatcher.matcher(product.getName(), product.getBrand(), product.getCategory(),
                        product.getDesc(), 10000);
                //               Long ca = me.smoe.adar.utils.cam.n.matcher.CAMatcher.matcher(product.getCategory(), 20);

                if (product.getCao().equals(ca)) {
                    System.err.println(
                            String.format("[CAM] Index: %s Id: %s Cao: %s Can: %s", index.incrementAndGet(),
                                    product.getId(), cams.get(product.getCao()), cams.get(ca)));
                } else if (ca != null && cams.get(product.getCao()).equals(cams.get(ca))) {
                    System.err.println(
                            String.format("[CAM] Index: %s Id: %s Cao: %s Can: %s", index.incrementAndGet(),
                                    product.getId(), cams.get(product.getCao()), cams.get(ca)));
                } else if (ca != null) {
                    System.out.println(
                            String.format("[CAM] Index: %s Id: %s Cao: %s Can: %s", index.incrementAndGet(),
                                    product.getId(), cams.get(product.getCao()), cams.get(ca)));
                } else {
                    //                  System.out.println(String.format("[CAM] Index: %s Id: %s Cao: %s Can: %s", index.incrementAndGet(), product.getId(), cams.get(product.getCao()), cams.get(ca)));
                }

                if (ca == null) {
                    fail.incrementAndGet();
                    return;
                }
                if (product.getCao().equals(ca)) {
                    succ.incrementAndGet();
                } else if (ca != null && cams.get(product.getCao()).equals(cams.get(ca))) {
                    succ.incrementAndGet();
                } else {
                    wrong.incrementAndGet();
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
        });
    }

    while (!THREADPOOLEXECUTOR.getQueue().isEmpty()) {
        TimeUnit.SECONDS.sleep(1);
    }
    TimeUnit.SECONDS.sleep(2);

    System.out.println();
    System.out.println("[CAM] : " + total);
    System.out.println("[CAM] ?: "
            + new BigDecimal(succ.get()).divide(new BigDecimal(total), 4, RoundingMode.HALF_UP)
                    .multiply(new BigDecimal(100)).setScale(2, RoundingMode.HALF_UP)
            + "%");
    System.out.println("[CAM] : "
            + new BigDecimal(fail.get()).divide(new BigDecimal(total), 4, RoundingMode.HALF_UP)
                    .multiply(new BigDecimal(100)).setScale(2, RoundingMode.HALF_UP)
            + "%");
    System.out.println("[CAM] : "
            + new BigDecimal(wrong.get()).divide(new BigDecimal(total), 4, RoundingMode.HALF_UP)
                    .multiply(new BigDecimal(100)).setScale(2, RoundingMode.HALF_UP)
            + "%");

    System.exit(0);
}

From source file: com.adobe.acs.commons.workflow.process.impl.SyntheticWrapperWorkflowProcess.java

@Override
public void execute(WorkItem workItem, WorkflowSession workflowSession, MetaDataMap metaDataMap)
        throws WorkflowException {
    ResourceResolver resourceResolver = null;
    final SyntheticWorkflowRunner syntheticWorkflowRunner = syntheticWorkflowRunnerAccessor
            .getSyntheticWorkflowRunner();

    final String payload = (String) workItem.getWorkflowData().getPayload();
    final ProcessArgs processArgs = new ProcessArgs(metaDataMap);

    try {
        resourceResolver = workflowHelper.getResourceResolver(workflowSession);
        final SyntheticWorkflowModel syntheticWorkflowModel = syntheticWorkflowRunner
                .getSyntheticWorkflowModel(resourceResolver, processArgs.getWorkflowModelId(), true);

        final AtomicInteger count = new AtomicInteger(0);

        // Anonymous inner class to facilitate counting of processed payloads
        final ResourceRunnable syntheticRunnable = new ResourceRunnable() {
            @Override
            public void run(final Resource resource) throws java.lang.Exception {
                if (processArgs.isThrottle()) {
                    throttledTaskRunner.waitForLowCpuAndLowMemory();
                }

                syntheticWorkflowRunner.execute(resource.getResourceResolver(), resource.getPath(),
                        syntheticWorkflowModel, false, false);

                // Commit as needed
                if (processArgs.getSaveInterval() > 0
                        && count.incrementAndGet() % processArgs.getSaveInterval() == 0
                        && resource.getResourceResolver().hasChanges()) {
                    resource.getResourceResolver().commit();
                }
            }
        };

        final ContentVisitor visitor = new ContentVisitor(syntheticRunnable);
        final Resource resource = resourceResolver.getResource(payload);

        if (processArgs.isTraverseTree()) {
            visitor.accept(resource);
        } else {
            syntheticRunnable.run(resource);
        }

        if (processArgs.getSaveInterval() > 0 && resourceResolver.hasChanges()) {
            // Commit any stragglers
            resourceResolver.commit();
        }

        log.info("Synthetic Workflow Wrapper processed [ {} ] total payloads", count.get());
    } catch (Exception e) {
        throw new WorkflowException(e);
    }
}

From source file: org.apache.hadoop.hdfs.client.impl.TestBlockReaderFactory.java

/**
 * When an InterruptedException is sent to a thread calling
 * FileChannel#read, the FileChannel is immediately closed and the
 * thread gets an exception.  This effectively means that we might have
 * someone asynchronously calling close() on the file descriptors we use
 * in BlockReaderLocal.  So when unreferencing a ShortCircuitReplica in
 * ShortCircuitCache#unref, we should check if the FileChannel objects
 * are still open.  If not, we should purge the replica to avoid giving
 * it out to any future readers.
 *
 * This is a regression test for HDFS-6227: Short circuit read failed
 * due to ClosedChannelException.
 *
 * Note that you may still get ClosedChannelException errors if two threads
 * are reading from the same replica and an InterruptedException is delivered
 * to one of them.
 */
@Test(timeout = 120000)
public void testPurgingClosedReplicas() throws Exception {
    BlockReaderTestUtil.enableBlockReaderFactoryTracing();
    final AtomicInteger replicasCreated = new AtomicInteger(0);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    BlockReaderFactory.createShortCircuitReplicaInfoCallback = new ShortCircuitCache.ShortCircuitReplicaCreator() {
        @Override
        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
            replicasCreated.incrementAndGet();
            return null;
        }
    };
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testPurgingClosedReplicas", sockDir);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4095;
    final int SEED = 0xFADE0;
    final DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(cluster.getURI(0), conf);
    DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);

    final Semaphore sem = new Semaphore(0);
    final List<LocatedBlock> locatedBlocks = cluster.getNameNode().getRpcServer()
            .getBlockLocations(TEST_FILE, 0, TEST_FILE_LEN).getLocatedBlocks();
    final LocatedBlock lblock = locatedBlocks.get(0); // first block
    final byte[] buf = new byte[TEST_FILE_LEN];
    Runnable readerRunnable = new Runnable() {
        @Override
        public void run() {
            try {
                while (true) {
                    BlockReader blockReader = null;
                    try {
                        blockReader = BlockReaderTestUtil.getBlockReader(cluster.getFileSystem(), lblock, 0,
                                TEST_FILE_LEN);
                        sem.release();
                        try {
                            blockReader.readAll(buf, 0, TEST_FILE_LEN);
                        } finally {
                            sem.acquireUninterruptibly();
                        }
                    } catch (ClosedByInterruptException e) {
                        LOG.info("got the expected ClosedByInterruptException", e);
                        sem.release();
                        break;
                    } finally {
                        if (blockReader != null)
                            blockReader.close();
                    }
                    LOG.info("read another " + TEST_FILE_LEN + " bytes.");
                }
            } catch (Throwable t) {
                LOG.error("getBlockReader failure", t);
                testFailed.set(true);
                sem.release();
            }
        }
    };
    Thread thread = new Thread(readerRunnable);
    thread.start();

    // While the thread is reading, send it interrupts.
    // These should trigger a ClosedChannelException.
    while (thread.isAlive()) {
        sem.acquireUninterruptibly();
        thread.interrupt();
        sem.release();
    }
    Assert.assertFalse(testFailed.get());

    // We should be able to read from the file without
    // getting a ClosedChannelException.
    BlockReader blockReader = null;
    try {
        blockReader = BlockReaderTestUtil.getBlockReader(cluster.getFileSystem(), lblock, 0, TEST_FILE_LEN);
        blockReader.readFully(buf, 0, TEST_FILE_LEN);
    } finally {
        if (blockReader != null)
            blockReader.close();
    }
    byte expected[] = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(buf, expected));

    // Another ShortCircuitReplica object should have been created.
    Assert.assertEquals(2, replicasCreated.get());

    dfs.close();
    cluster.shutdown();
    sockDir.close();
}

From source file: com.comphenix.protocol.error.DetailedErrorReporter.java

/**
 * Report a problem with a given method and plugin, ensuring that we don't exceed the maximum number of error reports.
 * @param sender - the component that observed this exception.
 * @param methodName - the method name.
 * @param error - the error itself.
 * @return TRUE if the error was printed, FALSE if it was suppressed.
 */
public boolean reportMinimalNoSpam(Plugin sender, String methodName, Throwable error) {
    String pluginName = PacketAdapter.getPluginName(sender);
    AtomicInteger counter = warningCount.get(pluginName);

    // Thread safe pattern
    if (counter == null) {
        AtomicInteger created = new AtomicInteger();
        counter = warningCount.putIfAbsent(pluginName, created);

        if (counter == null) {
            counter = created;
        }
    }

    final int errorCount = counter.incrementAndGet();

    // See if we should print the full error
    if (errorCount < getMaxErrorCount()) {
        logger.log(Level.SEVERE,
                "[" + pluginName + "] Unhandled exception occured in " + methodName + " for " + pluginName,
                error);
        return true;

    } else {
        // Nope - only print the error count occasionally
        if (isPowerOfTwo(errorCount)) {
            logger.log(Level.SEVERE, "[" + pluginName + "] Unhandled exception number " + errorCount
                    + " occured in " + methodName + " for " + pluginName, error);
        }
        return false;
    }
}
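
The "thread safe pattern" above uses putIfAbsent with a null check to create at most one counter per plugin. On Java 8 and later the same per-key counter can be obtained in a single call with ConcurrentHashMap.computeIfAbsent; the sketch below is an illustration of that variant, not ProtocolLib's actual code, and assumes the map is a ConcurrentMap<String, AtomicInteger>.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

public class PerKeyCounterDemo {

    private final ConcurrentMap<String, AtomicInteger> warningCount = new ConcurrentHashMap<>();

    /** Returns the updated warning count for the given plugin name. */
    public int countWarning(String pluginName) {
        // computeIfAbsent creates the counter at most once per key,
        // replacing the manual putIfAbsent null-check shown above.
        return warningCount.computeIfAbsent(pluginName, name -> new AtomicInteger()).incrementAndGet();
    }

    public static void main(String[] args) {
        PerKeyCounterDemo demo = new PerKeyCounterDemo();
        System.out.println(demo.countWarning("SomePlugin"));  // 1
        System.out.println(demo.countWarning("SomePlugin"));  // 2
        System.out.println(demo.countWarning("OtherPlugin")); // 1
    }
}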

From source file: com.yahoo.pulsar.broker.service.PersistentQueueE2ETest.java

@Test
public void testConsumersWithDifferentPermits() throws Exception {
    final String topicName = "persistent://prop/use/ns-abc/shared-topic4";
    final String subName = "sub4";
    final int numMsgs = 10000;

    final AtomicInteger msgCountConsumer1 = new AtomicInteger(0);
    final AtomicInteger msgCountConsumer2 = new AtomicInteger(0);
    final CountDownLatch latch = new CountDownLatch(numMsgs);

    int recvQ1 = 10;
    ConsumerConfiguration conf1 = new ConsumerConfiguration();
    conf1.setSubscriptionType(SubscriptionType.Shared);
    conf1.setReceiverQueueSize(recvQ1);
    conf1.setMessageListener((consumer, msg) -> {
        msgCountConsumer1.incrementAndGet();
        try {
            consumer.acknowledge(msg);
            latch.countDown();
        } catch (PulsarClientException e) {
            fail("Should not fail");
        }
    });

    int recvQ2 = 1;
    ConsumerConfiguration conf2 = new ConsumerConfiguration();
    conf2.setSubscriptionType(SubscriptionType.Shared);
    conf2.setReceiverQueueSize(recvQ2);
    conf2.setMessageListener((consumer, msg) -> {
        msgCountConsumer2.incrementAndGet();
        try {
            consumer.acknowledge(msg);
            latch.countDown();
        } catch (PulsarClientException e) {
            fail("Should not fail");
        }
    });

    Consumer consumer1 = pulsarClient.subscribe(topicName, subName, conf1);
    Consumer consumer2 = pulsarClient.subscribe(topicName, subName, conf2);

    List<CompletableFuture<MessageId>> futures = Lists.newArrayListWithCapacity(numMsgs);
    Producer producer = pulsarClient.createProducer(topicName);
    for (int i = 0; i < numMsgs; i++) {
        String message = "msg-" + i;
        futures.add(producer.sendAsync(message.getBytes()));
    }
    FutureUtil.waitForAll(futures).get();
    producer.close();

    latch.await(5, TimeUnit.SECONDS);

    assertEquals(msgCountConsumer1.get(), numMsgs - numMsgs / (recvQ1 + recvQ2), numMsgs * 0.1);
    assertEquals(msgCountConsumer2.get(), numMsgs / (recvQ1 + recvQ2), numMsgs * 0.1);

    consumer1.close();
    consumer2.close();
    admin.persistentTopics().delete(topicName);
}

From source file: org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexAugmentTest.java

@Test
public void indexHookCallbackFrequency() throws Exception {
    //setup repo and index
    NodeTypeRegistry.register(root, IOUtils.toInputStream(TestUtil.TEST_NODE_TYPE), "test nodeType");
    Tree props = createIndex(TestUtil.NT_TEST);
    TestUtil.enablePropertyIndex(props, "foo1", false);
    TestUtil.enablePropertyIndex(props, "foo2", false);
    TestUtil.enablePropertyIndex(props, "subChild/foo3", false);
    root.commit();

    //setup index augmentor
    final AtomicInteger counter = new AtomicInteger(0);
    factory.indexFieldProvider = new IndexFieldProvider() {
        @Nonnull
        @Override
        public Iterable<Field> getAugmentedFields(String path, NodeState document, NodeState indexDefinition) {
            counter.incrementAndGet();
            return IndexFieldProvider.DEFAULT.getAugmentedFields(path, document, indexDefinition);
        }

        @Nonnull
        @Override
        public Set<String> getSupportedTypes() {
            return Collections.singleton(TestUtil.NT_TEST);
        }
    };

    //add content
    counter.set(0);
    Tree test = root.getTree("/").addChild("test");
    Tree node = createNodeWithType(test, "item", TestUtil.NT_TEST);
    node.setProperty("foo1", "bar1");
    node.setProperty("foo2", "bar2");
    Tree subChild = node.addChild("subChild");
    subChild.setProperty("foo3", "bar3");
    root.commit();
    assertEquals("Number of callbacks should be same as number of changed properties", 1, counter.get());

    //change sub-property
    counter.set(0);
    subChild = root.getTree("/test/item/subChild");
    subChild.setProperty("foo3", "bar4");
    root.commit();
    assertEquals("Sub child property change should make call backs for all indexed properties", 1,
            counter.get());
}