Example usage for java.util.concurrent.atomic AtomicInteger getAndIncrement

Introduction

On this page you can find example usage for java.util.concurrent.atomic.AtomicInteger#getAndIncrement().

Prototype

public final int getAndIncrement() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd.
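
In other words, getAndIncrement() is the atomic equivalent of i++: it returns the value as it was before the increment. A minimal sketch of the semantics (class name is illustrative):

import java.util.concurrent.atomic.AtomicInteger;

public class GetAndIncrementDemo {
    public static void main(String[] args) {
        AtomicInteger counter = new AtomicInteger(5);

        // getAndIncrement returns the old value, then bumps it ("i++")
        System.out.println(counter.getAndIncrement()); // prints 5
        System.out.println(counter.get());             // prints 6

        // incrementAndGet is the "++i" counterpart
        System.out.println(counter.incrementAndGet()); // prints 7
    }
}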

Usage

From source file:net.sf.jasperreports.components.table.fill.FillTable.java

protected void setTableInstanceCounter() {
    JRFillContext fillerContext = fillContext.getFiller().getFillContext();
    AtomicInteger counter = (AtomicInteger) fillerContext.getFillCache(FILL_CACHE_KEY_TABLE_INSTANCE_COUNTER);
    if (counter == null) {
        // we just need a mutable integer, there's no actual concurrency here
        counter = new AtomicInteger();
        fillerContext.setFillCache(FILL_CACHE_KEY_TABLE_INSTANCE_COUNTER, counter);
    }

    int instanceIndex = counter.getAndIncrement();
    if (log.isDebugEnabled()) {
        log.debug("table instance index is " + instanceIndex);
    }

    fillSubreport.getTableReport().getBaseReport().setTableInstanceIndex(instanceIndex);
}
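
Note that FillTable uses the AtomicInteger purely as a mutable int box cached between calls, not for thread safety, as its inline comment says. A standalone sketch of the same idiom, with a plain HashMap standing in for the fill cache (names are illustrative, not from JasperReports):

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;

public class MutableCounterCacheSketch {
    private final Map<String, AtomicInteger> cache = new HashMap<>();

    // Hands out 0, 1, 2, ... per key; single-threaded use only.
    public int nextIndex(String key) {
        AtomicInteger counter = cache.get(key);
        if (counter == null) {
            // just a mutable integer holder, like in the FillTable example
            counter = new AtomicInteger();
            cache.put(key, counter);
        }
        return counter.getAndIncrement();
    }

    public static void main(String[] args) {
        MutableCounterCacheSketch sketch = new MutableCounterCacheSketch();
        System.out.println(sketch.nextIndex("table")); // 0
        System.out.println(sketch.nextIndex("table")); // 1
    }
}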

From source file:org.apache.activemq.bugs.AMQ6131Test.java

@Test(timeout = 300000)
public void testDurableWithNoMessageAfterRestartAndIndexRecovery() throws Exception {
    final File persistentDir = getPersistentDir();

    broker.getBroker().addDestination(broker.getAdminConnectionContext(), new ActiveMQTopic("durable.sub"),
            false);

    ActiveMQConnectionFactory connectionFactory = new ActiveMQConnectionFactory(this.brokerConnectURI);
    ActiveMQConnection connection = (ActiveMQConnection) connectionFactory.createConnection();
    connection.setClientID("myId");
    connection.start();
    final Session jmsSession = connection.createSession(false, javax.jms.Session.AUTO_ACKNOWLEDGE);

    TopicSubscriber durable = jmsSession.createDurableSubscriber(new ActiveMQTopic("durable.sub"), "sub");
    final MessageProducer producer = jmsSession.createProducer(new ActiveMQTopic("durable.sub"));

    final int original = new ArrayList<File>(
            FileUtils.listFiles(persistentDir, new WildcardFileFilter("*.log"), TrueFileFilter.INSTANCE))
                    .size();

    // 100 KB message payload
    final byte[] data = new byte[100000];
    final Random random = new Random();
    random.nextBytes(data);

    // run test with enough messages to create a second journal file
    final AtomicInteger messageCount = new AtomicInteger();
    assertTrue("Should have added a journal file", Wait.waitFor(new Condition() {

        @Override
        public boolean isSatisified() throws Exception {
            final ActiveMQBytesMessage message = new ActiveMQBytesMessage();
            message.setContent(new ByteSequence(data));

            for (int i = 0; i < 100; i++) {
                producer.send(message);
                messageCount.getAndIncrement();
            }

            return new ArrayList<File>(FileUtils.listFiles(persistentDir, new WildcardFileFilter("*.log"),
                    TrueFileFilter.INSTANCE)).size() > original;
        }
    }));

    // Consume all messages
    for (int i = 0; i < messageCount.get(); i++) {
        durable.receive();
    }

    durable.close();

    assertTrue("Subscription should go inactive", Wait.waitFor(new Condition() {
        @Override
        public boolean isSatisified() throws Exception {
            return broker.getAdminView().getInactiveDurableTopicSubscribers().length == 1;
        }
    }));

    // force a GC of unneeded journal files
    getBroker().getPersistenceAdapter().checkpoint(true);

    // wait until a journal file has been GC'd after receiving messages
    assertTrue("Should have garbage collected", Wait.waitFor(new Wait.Condition() {

        @Override
        public boolean isSatisified() throws Exception {
            return new ArrayList<File>(FileUtils.listFiles(persistentDir, new WildcardFileFilter("*.log"),
                    TrueFileFilter.INSTANCE)).size() == original;
        }
    }));

    // stop the broker so we can blow away the index
    getBroker().stop();
    getBroker().waitUntilStopped();

    // delete the index so that the durables are gone from the index
    // The test passes if you take out this delete section
    for (File index : FileUtils.listFiles(persistentDir, new WildcardFileFilter("db.*"),
            TrueFileFilter.INSTANCE)) {
        FileUtils.deleteQuietly(index);
    }

    stopBroker();
    setUpBroker(false);

    assertEquals(1, broker.getAdminView().getInactiveDurableTopicSubscribers().length);
    assertEquals(0, broker.getAdminView().getDurableTopicSubscribers().length);

    ActiveMQConnectionFactory connectionFactory2 = new ActiveMQConnectionFactory(this.brokerConnectURI);
    ActiveMQConnection connection2 = (ActiveMQConnection) connectionFactory2.createConnection();
    connection2.setClientID("myId");
    connection2.start();
    final Session jmsSession2 = connection2.createSession(false, javax.jms.Session.AUTO_ACKNOWLEDGE);

    TopicSubscriber durable2 = jmsSession2.createDurableSubscriber(new ActiveMQTopic("durable.sub"), "sub");

    assertEquals(0, broker.getAdminView().getInactiveDurableTopicSubscribers().length);
    assertEquals(1, broker.getAdminView().getDurableTopicSubscribers().length);

    assertNull(durable2.receive(500));
}
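
A detail worth noting in this test: messageCount is incremented inside an anonymous Wait.Condition, which can only capture (effectively) final locals, so a final AtomicInteger serves as the mutable counter, and it keeps accumulating no matter how many times the condition is polled. A self-contained sketch of that capture pattern (waitFor here is a simplified stand-in for ActiveMQ's Wait.waitFor):

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BooleanSupplier;

public class RetriedConditionSketch {
    // Polls the condition until it returns true, like Wait.waitFor does.
    static void waitFor(BooleanSupplier condition) throws InterruptedException {
        while (!condition.getAsBoolean()) {
            Thread.sleep(100);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger attempts = new AtomicInteger(); // final, yet mutable
        waitFor(() -> attempts.getAndIncrement() >= 3);     // captured by the lambda
        System.out.println("condition polled " + attempts.get() + " times"); // 4 times
    }
}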

From source file:org.apache.activemq.bugs.AMQ6131Test.java

@Test(timeout = 300000)
public void testDurableWithOnePendingAfterRestartAndIndexRecovery() throws Exception {
    final File persistentDir = getPersistentDir();

    broker.getBroker().addDestination(broker.getAdminConnectionContext(), new ActiveMQTopic("durable.sub"),
            false);

    ActiveMQConnectionFactory connectionFactory = new ActiveMQConnectionFactory(this.brokerConnectURI);
    ActiveMQConnection connection = (ActiveMQConnection) connectionFactory.createConnection();
    connection.setClientID("myId");
    connection.start();
    final Session jmsSession = connection.createSession(false, javax.jms.Session.AUTO_ACKNOWLEDGE);

    TopicSubscriber durable = jmsSession.createDurableSubscriber(new ActiveMQTopic("durable.sub"), "sub");
    final MessageProducer producer = jmsSession.createProducer(new ActiveMQTopic("durable.sub"));

    final int original = new ArrayList<File>(
            FileUtils.listFiles(persistentDir, new WildcardFileFilter("*.log"), TrueFileFilter.INSTANCE))
                    .size();

    // 100 KB message payload
    final byte[] data = new byte[100000];
    final Random random = new Random();
    random.nextBytes(data);

    // run test with enough messages to create a second journal file
    final AtomicInteger messageCount = new AtomicInteger();
    assertTrue("Should have added a journal file", Wait.waitFor(new Condition() {

        @Override
        public boolean isSatisified() throws Exception {
            final ActiveMQBytesMessage message = new ActiveMQBytesMessage();
            message.setContent(new ByteSequence(data));

            for (int i = 0; i < 100; i++) {
                producer.send(message);
                messageCount.getAndIncrement();
            }

            return new ArrayList<File>(FileUtils.listFiles(persistentDir, new WildcardFileFilter("*.log"),
                    TrueFileFilter.INSTANCE)).size() > original;
        }
    }));

    // Consume all but 1 message
    for (int i = 0; i < messageCount.get() - 1; i++) {
        durable.receive();
    }

    durable.close();

    // wait for the subscription to go inactive
    assertTrue("Subscription should go inactive", Wait.waitFor(new Condition() {
        @Override
        public boolean isSatisified() throws Exception {
            return broker.getAdminView().getInactiveDurableTopicSubscribers().length == 1;
        }
    }));

    // force a GC of unneeded journal files
    getBroker().getPersistenceAdapter().checkpoint(true);

    // verify that no journal file has been GC'd while a message is still pending
    assertFalse("Should not have garbage collected", Wait.waitFor(new Wait.Condition() {

        @Override
        public boolean isSatisified() throws Exception {
            return new ArrayList<File>(FileUtils.listFiles(persistentDir, new WildcardFileFilter("*.log"),
                    TrueFileFilter.INSTANCE)).size() == original;
        }
    }, 5000, 500));

    // stop the broker so we can blow away the index
    getBroker().stop();
    getBroker().waitUntilStopped();

    // delete the index so that the durables are gone from the index
    // The test passes if you take out this delete section
    for (File index : FileUtils.listFiles(persistentDir, new WildcardFileFilter("db.*"),
            TrueFileFilter.INSTANCE)) {
        FileUtils.deleteQuietly(index);
    }

    stopBroker();
    setUpBroker(false);

    assertEquals(1, broker.getAdminView().getInactiveDurableTopicSubscribers().length);
    assertEquals(0, broker.getAdminView().getDurableTopicSubscribers().length);

    ActiveMQConnectionFactory connectionFactory2 = new ActiveMQConnectionFactory(this.brokerConnectURI);
    ActiveMQConnection connection2 = (ActiveMQConnection) connectionFactory2.createConnection();
    connection2.setClientID("myId");
    connection2.start();
    final Session jmsSession2 = connection2.createSession(false, javax.jms.Session.AUTO_ACKNOWLEDGE);

    TopicSubscriber durable2 = jmsSession2.createDurableSubscriber(new ActiveMQTopic("durable.sub"), "sub");

    assertEquals(0, broker.getAdminView().getInactiveDurableTopicSubscribers().length);
    assertEquals(1, broker.getAdminView().getDurableTopicSubscribers().length);

    assertNotNull(durable2.receive(5000));
}

From source file:org.apache.hadoop.hdfs.server.datanode.TestBPOfferService.java

/**
 * Test datanode block pool initialization error handling.
 * Failure in initializing a block pool should not cause NPE.
 */
@Test
public void testBPInitErrorHandling() throws Exception {
    final DataNode mockDn = Mockito.mock(DataNode.class);
    Mockito.doReturn(true).when(mockDn).shouldRun();
    Configuration conf = new Configuration();
    File dnDataDir = new File(new File(TEST_BUILD_DATA, "testBPInitErrorHandling"), "data");
    conf.set(DFS_DATANODE_DATA_DIR_KEY, dnDataDir.toURI().toString());
    Mockito.doReturn(conf).when(mockDn).getConf();
    Mockito.doReturn(new DNConf(conf)).when(mockDn).getDnConf();
    Mockito.doReturn(DataNodeMetrics.create(conf, "fake dn")).when(mockDn).getMetrics();
    final AtomicInteger count = new AtomicInteger();
    Mockito.doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            if (count.getAndIncrement() == 0) {
                throw new IOException("faked initBlockPool exception");
            }
            // The initBlockPool is called again. Now mock init is done.
            Mockito.doReturn(mockFSDataset).when(mockDn).getFSDataset();
            return null;
        }
    }).when(mockDn).initBlockPool(Mockito.any(BPOfferService.class));
    BPOfferService bpos = setupBPOSForNNs(mockDn, mockNN1, mockNN2);
    List<BPServiceActor> actors = bpos.getBPServiceActors();
    assertEquals(2, actors.size());
    bpos.start();
    try {
        waitForInitialization(bpos);
        // even if one actor's initialization fails, the other one will still
        // finish the block report.
        waitForBlockReport(mockNN1, mockNN2);
    } finally {
        bpos.stop();
    }
}
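
The count.getAndIncrement() == 0 check is a compact way to make a mock fail only on its first invocation and behave normally afterwards. A minimal version of the idiom (the Service interface is illustrative; assumes Mockito 2+ on the classpath):

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import org.mockito.Mockito;

public class FailFirstCallSketch {
    interface Service {
        String fetch() throws IOException;
    }

    public static void main(String[] args) throws Exception {
        Service service = Mockito.mock(Service.class);
        final AtomicInteger calls = new AtomicInteger();
        Mockito.when(service.fetch()).thenAnswer(invocation -> {
            if (calls.getAndIncrement() == 0) {
                // simulate a transient error on the very first call
                throw new IOException("first call fails");
            }
            return "ok";
        });

        try {
            service.fetch(); // throws
        } catch (IOException expected) {
            System.out.println(service.fetch()); // retry succeeds: prints "ok"
        }
    }
}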

From source file:org.apache.nifi.processors.standard.SplitXml.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    final FlowFile original = session.get();
    if (original == null) {
        return;
    }

    final int depth = context.getProperty(SPLIT_DEPTH).asInteger();
    final ComponentLog logger = getLogger();

    final List<FlowFile> splits = new ArrayList<>();
    final String fragmentIdentifier = UUID.randomUUID().toString();
    final AtomicInteger numberOfRecords = new AtomicInteger(0);
    final XmlSplitterSaxParser parser = new XmlSplitterSaxParser(xmlTree -> {
        FlowFile split = session.create(original);
        split = session.write(split, out -> out.write(xmlTree.getBytes("UTF-8")));
        split = session.putAttribute(split, FRAGMENT_ID.key(), fragmentIdentifier);
        split = session.putAttribute(split, FRAGMENT_INDEX.key(),
                Integer.toString(numberOfRecords.getAndIncrement()));
        split = session.putAttribute(split, SEGMENT_ORIGINAL_FILENAME.key(),
                split.getAttribute(CoreAttributes.FILENAME.key()));
        splits.add(split);
    }, depth);

    final AtomicBoolean failed = new AtomicBoolean(false);
    session.read(original, rawIn -> {
        try (final InputStream in = new BufferedInputStream(rawIn)) {
            SAXParser saxParser = null;
            try {
                saxParser = saxParserFactory.newSAXParser();
                final XMLReader reader = saxParser.getXMLReader();
                reader.setContentHandler(parser);
                reader.parse(new InputSource(in));
            } catch (final ParserConfigurationException | SAXException e) {
                logger.error("Unable to parse {} due to {}", new Object[] { original, e });
                failed.set(true);
            }
        }
    });

    if (failed.get()) {
        session.transfer(original, REL_FAILURE);
        session.remove(splits);
    } else {
        splits.forEach((split) -> {
            split = session.putAttribute(split, FRAGMENT_COUNT.key(), Integer.toString(numberOfRecords.get()));
            session.transfer(split, REL_SPLIT);
        });

        final FlowFile originalToTransfer = copyAttributesToOriginal(session, original, fragmentIdentifier,
                numberOfRecords.get());
        session.transfer(originalToTransfer, REL_ORIGINAL);
        logger.info("Split {} into {} FlowFiles", new Object[] { originalToTransfer, splits.size() });
    }
}
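
Two things are happening with numberOfRecords above: each split gets a zero-based FRAGMENT_INDEX from getAndIncrement() inside the lambda (which, like any lambda, could not mutate a plain local int), and the final get() afterwards supplies FRAGMENT_COUNT. The same pattern stripped of the NiFi plumbing (names are illustrative):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

public class FragmentIndexSketch {
    public static void main(String[] args) {
        final List<String> fragments = new ArrayList<>();
        final AtomicInteger index = new AtomicInteger(0);

        // a local int could not be mutated from inside the lambda,
        // so the counter lives in an AtomicInteger instead
        Consumer<String> emit = piece -> fragments.add(piece + "#" + index.getAndIncrement());

        emit.accept("a");
        emit.accept("b");
        emit.accept("c");

        // get() after the callbacks yields the total fragment count
        System.out.println(fragments + " count=" + index.get()); // [a#0, b#1, c#2] count=3
    }
}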

From source file:org.springframework.cloud.stream.app.pose.estimation.processor.PoseEstimationTensorflowOutputConverter.java

/**
 * Grows the body out of its parts.
 * @param limbsMap Limb candidates to be connected into bodies
 * @return Final list of body postures
 */
private List<Body> assembleBodies(Map<Model.LimbType, List<Limb>> limbsMap) {

    AtomicInteger bodyId = new AtomicInteger();

    Map<Part, Body> partToBodyIndex = new ConcurrentHashMap<>();

    for (Model.LimbType limbType : limbsMap.keySet()) {
        for (Limb limb : limbsMap.get(limbType)) {

            Body fromBody = partToBodyIndex.get(limb.getFromPart());
            Body toBody = partToBodyIndex.get(limb.getToPart());

            Body bodyCandidate;

            if (fromBody == null && toBody == null) {
                bodyCandidate = new Body(bodyId.getAndIncrement());
            } else if (fromBody != null && toBody != null) {
                bodyCandidate = fromBody;
                if (!fromBody.equals(toBody)) {
                    bodyCandidate.getLimbs().addAll(toBody.getLimbs());
                    bodyCandidate.getParts().addAll(toBody.getParts());
                    toBody.getParts().forEach(p -> partToBodyIndex.put(p, bodyCandidate));
                }
            } else {
                bodyCandidate = (fromBody != null) ? fromBody : toBody;
            }

            bodyCandidate.addLimb(limb);
            partToBodyIndex.put(limb.getFromPart(), bodyCandidate);
            partToBodyIndex.put(limb.getToPart(), bodyCandidate);
        }
    }

    // Filter out the body duplicates and bodies with too few parts
    List<Body> bodies = partToBodyIndex.values().stream().distinct()
            .filter(body -> body.getParts().size() > poseProperties.getMinBodyPartCount())
            .collect(Collectors.toList());

    return bodies;
}
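
Here getAndIncrement() simply hands out unique, sequential body IDs on demand. Reduced to its essence (names are illustrative):

import java.util.concurrent.atomic.AtomicInteger;

public class IdGeneratorSketch {
    private final AtomicInteger nextId = new AtomicInteger();

    // Thread-safe: two callers can never receive the same ID.
    public int newId() {
        return nextId.getAndIncrement();
    }

    public static void main(String[] args) {
        IdGeneratorSketch ids = new IdGeneratorSketch();
        System.out.println(ids.newId()); // 0
        System.out.println(ids.newId()); // 1
    }
}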

From source file:org.apache.hadoop.hbase.procedure.TestZKProcedure.java

private void runCommit(String... members) throws Exception {
    // make sure we just have an empty list
    if (members == null) {
        members = new String[0];
    }
    List<String> expected = Arrays.asList(members);

    // setup the constants
    ZooKeeperWatcher coordZkw = newZooKeeperWatcher();
    String opDescription = "coordination test - " + members.length + " cohort members";

    // start running the controller
    ZKProcedureCoordinatorRpcs coordinatorComms = new ZKProcedureCoordinatorRpcs(coordZkw, opDescription,
            COORDINATOR_NODE_NAME);
    ThreadPoolExecutor pool = ProcedureCoordinator.defaultPool(COORDINATOR_NODE_NAME, POOL_SIZE, KEEP_ALIVE);
    ProcedureCoordinator coordinator = new ProcedureCoordinator(coordinatorComms, pool) {
        @Override
        public Procedure createProcedure(ForeignExceptionDispatcher fed, String procName, byte[] procArgs,
                List<String> expectedMembers) {
            return Mockito.spy(super.createProcedure(fed, procName, procArgs, expectedMembers));
        }
    };

    // build and start members
    // NOTE: There is a single subprocedure builder for all members here.
    SubprocedureFactory subprocFactory = Mockito.mock(SubprocedureFactory.class);
    List<Pair<ProcedureMember, ZKProcedureMemberRpcs>> procMembers = new ArrayList<Pair<ProcedureMember, ZKProcedureMemberRpcs>>(
            members.length);
    // start each member
    for (String member : members) {
        ZooKeeperWatcher watcher = newZooKeeperWatcher();
        ZKProcedureMemberRpcs comms = new ZKProcedureMemberRpcs(watcher, opDescription);
        ThreadPoolExecutor pool2 = ProcedureMember.defaultPool(member, 1, KEEP_ALIVE);
        ProcedureMember procMember = new ProcedureMember(comms, pool2, subprocFactory);
        procMembers.add(new Pair<ProcedureMember, ZKProcedureMemberRpcs>(procMember, comms));
        comms.start(member, procMember);
    }

    // setup mock member subprocedures
    final List<Subprocedure> subprocs = new ArrayList<Subprocedure>();
    for (int i = 0; i < procMembers.size(); i++) {
        ForeignExceptionDispatcher cohortMonitor = new ForeignExceptionDispatcher();
        Subprocedure commit = Mockito.spy(new SubprocedureImpl(procMembers.get(i).getFirst(), opName,
                cohortMonitor, WAKE_FREQUENCY, TIMEOUT));
        subprocs.add(commit);
    }

    // link subprocedure to buildNewOperation invocation.
    final AtomicInteger i = new AtomicInteger(0); // NOTE: would be racy if not an AtomicInteger
    Mockito.when(subprocFactory.buildSubprocedure(Mockito.eq(opName),
            (byte[]) Mockito.argThat(new ArrayEquals(data)))).thenAnswer(new Answer<Subprocedure>() {
                @Override
                public Subprocedure answer(InvocationOnMock invocation) throws Throwable {
                    int index = i.getAndIncrement();
                    LOG.debug("Task size:" + subprocs.size() + ", getting:" + index);
                    Subprocedure commit = subprocs.get(index);
                    return commit;
                }
            });

    // setup spying on the coordinator
    //    Procedure proc = Mockito.spy(procBuilder.createProcedure(coordinator, opName, data, expected));
    //    Mockito.when(procBuilder.build(coordinator, opName, data, expected)).thenReturn(proc);

    // start running the operation
    Procedure task = coordinator.startProcedure(new ForeignExceptionDispatcher(), opName, data, expected);
    //    assertEquals("Didn't mock coordinator task", proc, task);

    // verify all things ran as expected
    //    waitAndVerifyProc(proc, once, once, never(), once, false);
    waitAndVerifyProc(task, once, once, never(), once, false);
    verifyCohortSuccessful(expected, subprocFactory, subprocs, once, once, never(), once, false);

    // close all the things
    closeAll(coordinator, coordinatorComms, procMembers);
}
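
As the inline NOTE says, handing out list slots with a plain int index would be racy once callers run concurrently; getAndIncrement() guarantees each caller claims a distinct index. The idiom in isolation (names are illustrative):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public class DistinctSlotSketch {
    public static void main(String[] args) throws InterruptedException {
        final List<String> tasks = Arrays.asList("t0", "t1", "t2", "t3");
        final AtomicInteger next = new AtomicInteger(0);

        Runnable worker = () -> {
            // each thread atomically claims the next unclaimed task
            String task = tasks.get(next.getAndIncrement());
            System.out.println(Thread.currentThread().getName() + " got " + task);
        };

        Thread a = new Thread(worker);
        Thread b = new Thread(worker);
        a.start();
        b.start();
        a.join();
        b.join();
    }
}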

From source file:org.nanoframework.concurrent.scheduler.SchedulerFactory.java

public void rebalance(final String groupName) {
    Assert.hasLength(groupName);
    final Set<BaseScheduler> groupScheduler = group.get(groupName);
    if (!CollectionUtils.isEmpty(groupScheduler)) {
        final AtomicInteger idx = new AtomicInteger(0);
        groupScheduler.forEach(scheduler -> {
            scheduler.getConfig().setNum(idx.getAndIncrement());
            scheduler.getConfig().setTotal(groupScheduler.size());
        });
    }
}

From source file:org.neo4j.io.pagecache.impl.SingleFilePageSwapperTest.java

@Test
public void mustCloseFilesIfTakingFileLockThrows() throws Exception {
    assumeFalse("No file locking on Windows", SystemUtils.IS_OS_WINDOWS);

    final AtomicInteger openFilesCounter = new AtomicInteger();
    PageSwapperFactory factory = swapperFactory();
    DefaultFileSystemAbstraction fs = new DefaultFileSystemAbstraction();
    factory.setFileSystemAbstraction(new DelegatingFileSystemAbstraction(fs) {
        @Override
        public StoreChannel open(File fileName, String mode) throws IOException {
            openFilesCounter.getAndIncrement();
            return new DelegatingStoreChannel(super.open(fileName, mode)) {
                @Override
                public void close() throws IOException {
                    openFilesCounter.getAndDecrement();
                    super.close();
                }
            };
        }
    });
    File file = testDir.file("file");
    try (StoreChannel ch = fs.create(file); FileLock lock = ch.tryLock()) {
        createSwapper(factory, file, 4, NO_CALLBACK, false).close();
        fail("Creating a page swapper for a locked channel should have thrown");
    } catch (FileLockException e) {
        // As expected.
    }
    assertThat(openFilesCounter.get(), is(0));
}
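
The test pairs getAndIncrement() on open with getAndDecrement() on close, so a final value of 0 proves every opened channel was also closed. The same leak-check idiom in isolation (a sketch; openResource is illustrative):

import java.util.concurrent.atomic.AtomicInteger;

public class LeakCheckSketch {
    static final AtomicInteger openCount = new AtomicInteger();

    static AutoCloseable openResource() {
        openCount.getAndIncrement();               // one more resource live
        return () -> openCount.getAndDecrement();  // balanced on close
    }

    public static void main(String[] args) throws Exception {
        try (AutoCloseable r = openResource()) {
            // use the resource
        }
        // 0 means every open was matched by a close
        System.out.println("leaked: " + openCount.get());
    }
}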

From source file:org.jasig.ssp.util.importer.job.staging.SqlServerStagingTableWriter.java

@Override
public void write(final List<? extends RawItem> items) {

    NamedParameterJdbcTemplate jdbcTemplate = new NamedParameterJdbcTemplate(dataSource);
    String fileName = items.get(0).getResource().getFilename();
    final String[] tableName = fileName.split("\\.");

    Integer batchStart = (Integer) (stepExecution.getExecutionContext().get("batchStart") == null ? null
            : stepExecution.getExecutionContext().get("batchStart"));
    Integer batchStop = (Integer) (stepExecution.getExecutionContext().get("batchStop") == null ? null
            : stepExecution.getExecutionContext().get("batchStop"));
    Object currentEntity = stepExecution.getExecutionContext().get("currentEntity");

    if (currentEntity == null || !currentEntity.equals(tableName[0])) {
        batchStart = 0;
        batchStop = items.size() - 1;
        currentEntity = tableName[0];
        stepExecution.getExecutionContext().put("currentEntity", currentEntity);
        stepExecution.getExecutionContext().put("batchStart", batchStart);
        stepExecution.getExecutionContext().put("batchStop", batchStop);
    } else {
        batchStart = batchStop + 1;
        batchStop = (Integer) batchStart + items.size() - 1;
        stepExecution.getExecutionContext().put("batchStart", batchStart);
        stepExecution.getExecutionContext().put("batchStop", batchStop);
    }

    RawItem firstItem = items.get(0);
    Resource firstItemResource = firstItem.getResource();

    if (currentResource == null || !(this.currentResource.equals(firstItemResource))) {
        this.orderedHeaders = writeHeader(firstItem);
        this.currentResource = firstItemResource;
    }

    StringBuilder insertSql = new StringBuilder();
    insertSql.append("INSERT INTO stg_" + tableName[0] + " (batch_id,");
    StringBuilder valuesSqlBuilder = new StringBuilder();
    valuesSqlBuilder.append(" VALUES (?,");
    for (String header : this.orderedHeaders) {
        insertSql.append(header).append(",");
        valuesSqlBuilder.append("?").append(",");
    }
    insertSql.setLength(insertSql.length() - 1); // trim comma
    valuesSqlBuilder.setLength(valuesSqlBuilder.length() - 1); // trim comma
    insertSql.append(")");
    valuesSqlBuilder.append(");");
    insertSql.append(valuesSqlBuilder);

    final AtomicInteger batchStartRef = new AtomicInteger(batchStart);
    final String sql = insertSql.toString();
    jdbcTemplate.getJdbcOperations().execute(sql, new PreparedStatementCallback() {
        @Override
        public Object doInPreparedStatement(PreparedStatement ps) throws SQLException, DataAccessException {
            for (RawItem item : items) {
                final List<Object> paramsForLog = new ArrayList<>(orderedHeaders.length);
                int counter = 1;
                paramsForLog.add(batchStartRef.get());
                StatementCreatorUtils.setParameterValue(ps, counter, SqlTypeValue.TYPE_UNKNOWN,
                        batchStartRef.getAndIncrement());
                counter++;
                for (String header : orderedHeaders) {
                    final Map<String, String> record = item.getRecord();
                    String value = record.get(header);
                    final Integer sqlType = metadataRepository.getRepository().getColumnMetadataRepository()
                            .getColumnMetadata(new ColumnReference(tableName[0], header)).getJavaSqlType();
                    paramsForLog.add(value);
                    StatementCreatorUtils.setParameterValue(ps, counter, sqlType, value);
                    counter++;
                }
                sayQuery(sql, paramsForLog);
                ps.addBatch();
            }
            return ps.executeBatch();
        }
    });
    batchStart = batchStartRef.get();
    say("******CHUNK SQLSERVER******");
}