Example usage for java.util.concurrent.atomic AtomicInteger get

Introduction

On this page you can find example usages of java.util.concurrent.atomic.AtomicInteger.get(), collected from open-source projects.

Prototype

public final int get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
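
As a minimal, self-contained sketch of these semantics (the class name AtomicGetDemo and the loop bound are our own), the snippet below shows one thread publishing increments and another reading them back with get():

import java.util.concurrent.atomic.AtomicInteger;

public class AtomicGetDemo {
    public static void main(String[] args) throws InterruptedException {
        AtomicInteger counter = new AtomicInteger(0);

        // Another thread publishes increments; get() is a volatile read, so
        // those updates are visible here without any additional locking.
        Thread writer = new Thread(() -> {
            for (int i = 0; i < 5; i++) {
                counter.incrementAndGet();
            }
        });
        writer.start();
        writer.join(); // join() also establishes a happens-before edge

        System.out.println("current value: " + counter.get()); // prints 5
    }
}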

Usage

From source file:org.jasig.ssp.util.importer.job.staging.SqlServerStagingTableWriter.java

@Override
public void write(final List<? extends RawItem> items) {

    NamedParameterJdbcTemplate jdbcTemplate = new NamedParameterJdbcTemplate(dataSource);
    String fileName = items.get(0).getResource().getFilename();
    final String[] tableName = fileName.split("\\.");

    Integer batchStart = (Integer) stepExecution.getExecutionContext().get("batchStart");
    Integer batchStop = (Integer) stepExecution.getExecutionContext().get("batchStop");
    Object currentEntity = stepExecution.getExecutionContext().get("currentEntity");

    if (currentEntity == null || !currentEntity.equals(tableName[0])) {
        batchStart = 0;
        batchStop = items.size() - 1;
        currentEntity = tableName[0];
        stepExecution.getExecutionContext().put("currentEntity", currentEntity);
        stepExecution.getExecutionContext().put("batchStart", batchStart);
        stepExecution.getExecutionContext().put("batchStop", batchStop);
    } else {
        batchStart = batchStop + 1;
        batchStop = batchStart + items.size() - 1;
        stepExecution.getExecutionContext().put("batchStart", batchStart);
        stepExecution.getExecutionContext().put("batchStop", batchStop);
    }

    RawItem firstItem = items.get(0);
    Resource firstItemResource = firstItem.getResource();

    if (currentResource == null || !(this.currentResource.equals(firstItemResource))) {
        this.orderedHeaders = writeHeader(firstItem);
        this.currentResource = firstItemResource;
    }

    StringBuilder insertSql = new StringBuilder();
    insertSql.append("INSERT INTO stg_" + tableName[0] + " (batch_id,");
    StringBuilder valuesSqlBuilder = new StringBuilder();
    valuesSqlBuilder.append(" VALUES (?,");
    for (String header : this.orderedHeaders) {
        insertSql.append(header).append(",");
        valuesSqlBuilder.append("?").append(",");
    }
    insertSql.setLength(insertSql.length() - 1); // trim comma
    valuesSqlBuilder.setLength(valuesSqlBuilder.length() - 1); // trim comma
    insertSql.append(")");
    valuesSqlBuilder.append(");");
    insertSql.append(valuesSqlBuilder);

    final AtomicInteger batchStartRef = new AtomicInteger(batchStart);
    final String sql = insertSql.toString();
    jdbcTemplate.getJdbcOperations().execute(sql, new PreparedStatementCallback() {
        @Override
        public Object doInPreparedStatement(PreparedStatement ps) throws SQLException, DataAccessException {
            for (RawItem item : items) {
                final List<Object> paramsForLog = new ArrayList<Object>(orderedHeaders.length);
                int counter = 1;
                paramsForLog.add(batchStartRef.get());
                StatementCreatorUtils.setParameterValue(ps, counter, SqlTypeValue.TYPE_UNKNOWN,
                        batchStartRef.getAndIncrement());
                counter++;
                for (String header : orderedHeaders) {
                    final Map<String, String> record = item.getRecord();
                    String value = record.get(header);
                    final Integer sqlType = metadataRepository.getRepository().getColumnMetadataRepository()
                            .getColumnMetadata(new ColumnReference(tableName[0], header)).getJavaSqlType();
                    paramsForLog.add(value);
                    StatementCreatorUtils.setParameterValue(ps, counter, sqlType, value);
                    counter++;
                }
                sayQuery(sql, paramsForLog);
                ps.addBatch();
            }
            return ps.executeBatch();
        }
    });
    batchStart = batchStartRef.get();
    say("******CHUNK SQLSERVER******");
}
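
In the example above, get() and getAndIncrement() are called on batchStartRef, a final AtomicInteger that wraps the mutable batch counter so the anonymous PreparedStatementCallback can both read and advance it; captured locals must be effectively final, so a plain int would not work. Below is a simplified sketch of that idiom with the JDBC machinery replaced by a plain IntSupplier (class and variable names are ours):

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.IntSupplier;

public class CapturedCounterSketch {
    public static void main(String[] args) {
        int batchStart = 42;

        // A plain int cannot be mutated from inside the callback, because
        // captured locals must be effectively final; a final AtomicInteger
        // is a mutable holder that sidesteps that restriction.
        final AtomicInteger batchStartRef = new AtomicInteger(batchStart);

        IntSupplier rowCallback = new IntSupplier() {
            @Override
            public int getAsInt() {
                return batchStartRef.getAndIncrement(); // one id per row
            }
        };

        rowCallback.getAsInt(); // 42
        rowCallback.getAsInt(); // 43

        // After the callback has run, get() reads the advanced value back out.
        System.out.println("next batch id: " + batchStartRef.get()); // prints 44
    }
}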

From source file:com.opengamma.financial.currency.AbstractCurrencyMatrix.java

protected void addConversion(final Currency source, final Currency target, final CurrencyMatrixValue rate) {
    ArgumentChecker.notNull(source, "source");
    ArgumentChecker.notNull(target, "target");
    ArgumentChecker.notNull(rate, "rate");
    ConcurrentHashMap<Currency, CurrencyMatrixValue> conversions = _values.get(source);
    if (conversions == null) {
        conversions = new ConcurrentHashMap<Currency, CurrencyMatrixValue>();
        final ConcurrentHashMap<Currency, CurrencyMatrixValue> newConversions = _values.putIfAbsent(source,
                conversions);
        if (newConversions != null) {
            conversions = newConversions;
        }
    }
    if (conversions.put(target, rate) == null) {
        // Added something to the map, so increase the target's reference count
        AtomicInteger targetCount = _targets.get(target);
        if (targetCount == null) {
            targetCount = new AtomicInteger(1);
            targetCount = _targets.putIfAbsent(target, targetCount);
            if (targetCount != null) {
                // Another thread already inserted the reference count
                if (targetCount.incrementAndGet() == 1) {
                    // Another thread may have removed the last reference, confirm and re-insert atomically against "remove"
                    synchronized (targetCount) {
                        if (targetCount.get() > 0) {
                            _targets.putIfAbsent(target, targetCount);
                        }
                    }
                }
            }
        } else {
            if (targetCount.incrementAndGet() == 1) {
                // Another thread may have removed the last reference, confirm and re-insert atomically against "remove"
                synchronized (targetCount) {
                    if (targetCount.get() > 0) {
                        _targets.putIfAbsent(target, targetCount);
                    }
                }
            }
        }
    }
}
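
Here get() backs a non-blocking reference count: incrementAndGet() detects when the count was concurrently driven to zero, and the synchronized get() recheck guards the re-insert against a racing removal. A condensed sketch of the same idiom, with String keys standing in for Currency and all names our own, might look like this:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

public class RefCountSketch {

    private final ConcurrentHashMap<String, AtomicInteger> targets = new ConcurrentHashMap<>();

    public void addRef(String key) {
        AtomicInteger count = targets.get(key);
        if (count == null) {
            AtomicInteger fresh = new AtomicInteger(1);
            count = targets.putIfAbsent(key, fresh);
            if (count == null) {
                return; // we installed the first reference
            }
        }
        if (count.incrementAndGet() == 1) {
            // A racing release() may have dropped the count to zero and removed
            // the entry; confirm with get() and re-insert while holding the lock.
            synchronized (count) {
                if (count.get() > 0) {
                    targets.putIfAbsent(key, count);
                }
            }
        }
    }

    public void release(String key) {
        AtomicInteger count = targets.get(key);
        if (count != null && count.decrementAndGet() <= 0) {
            synchronized (count) {
                if (count.get() <= 0) {
                    targets.remove(key, count);
                }
            }
        }
    }
}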

From source file:com.twitter.distributedlog.config.TestConfigurationSubscription.java

@Test(timeout = 60000)
public void testExceptionInConfigLoad() throws Exception {
    PropertiesWriter writer = new PropertiesWriter();
    writer.setProperty("prop1", "1");
    writer.save();

    DeterministicScheduler mockScheduler = new DeterministicScheduler();
    FileConfigurationBuilder builder = new PropertiesConfigurationBuilder(writer.getFile().toURI().toURL());
    ConcurrentConstConfiguration conf = new ConcurrentConstConfiguration(new DistributedLogConfiguration());
    List<FileConfigurationBuilder> fileConfigBuilders = Lists.newArrayList(builder);
    ConfigurationSubscription confSub = new ConfigurationSubscription(conf, fileConfigBuilders, mockScheduler,
            100, TimeUnit.MILLISECONDS);

    final AtomicInteger count = new AtomicInteger(1);
    conf.addConfigurationListener(new ConfigurationListener() {
        @Override
        public void configurationChanged(ConfigurationEvent event) {
            LOG.info("config changed {}", event);
            // Throw after so we actually see the update anyway.
            if (!event.isBeforeUpdate()) {
                count.getAndIncrement();
                throw new RuntimeException("config listener threw an exception");
            }
        }
    });

    int i = 0;
    int initial = 0;
    while (count.get() == initial) {
        writer.setProperty("prop1", Integer.toString(i++));
        writer.save();
        mockScheduler.tick(100, TimeUnit.MILLISECONDS);
    }

    initial = count.get();
    while (count.get() == initial) {
        writer.setProperty("prop1", Integer.toString(i++));
        writer.save();
        mockScheduler.tick(100, TimeUnit.MILLISECONDS);
    }
}
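
The test above uses count.get() in a spin loop to wait until the configuration listener has fired at least once. A stripped-down sketch of that polling pattern (the listener is simulated by a plain thread; names are ours) follows:

import java.util.concurrent.atomic.AtomicInteger;

public class PollUntilChangedSketch {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger count = new AtomicInteger(0);

        // Stand-in for the configuration listener: another thread bumps the
        // counter once it has observed an update.
        Thread listener = new Thread(count::incrementAndGet);

        int initial = count.get();
        listener.start();

        // Spin until get() reports a change, mirroring the test's wait loop.
        while (count.get() == initial) {
            Thread.sleep(10);
        }
        System.out.println("listener fired, count=" + count.get());
    }
}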

From source file:org.apache.distributedlog.common.config.TestConfigurationSubscription.java

@Test(timeout = 60000)
public void testExceptionInConfigLoad() throws Exception {
    PropertiesWriter writer = new PropertiesWriter();
    writer.setProperty("prop1", "1");
    writer.save();

    DeterministicScheduler mockScheduler = new DeterministicScheduler();
    FileConfigurationBuilder builder = new PropertiesConfigurationBuilder(writer.getFile().toURI().toURL());
    ConcurrentConstConfiguration conf = new ConcurrentConstConfiguration(new CompositeConfiguration());
    List<FileConfigurationBuilder> fileConfigBuilders = Lists.newArrayList(builder);
    ConfigurationSubscription confSub = new ConfigurationSubscription(conf, fileConfigBuilders, mockScheduler,
            100, TimeUnit.MILLISECONDS);

    final AtomicInteger count = new AtomicInteger(1);
    conf.addConfigurationListener(new ConfigurationListener() {
        @Override
        public void configurationChanged(ConfigurationEvent event) {
            LOG.info("config changed {}", event);
            // Throw after so we actually see the update anyway.
            if (!event.isBeforeUpdate()) {
                count.getAndIncrement();
                throw new RuntimeException("config listener threw an exception");
            }
        }
    });

    int i = 0;
    int initial = 0;
    while (count.get() == initial) {
        writer.setProperty("prop1", Integer.toString(i++));
        writer.save();
        mockScheduler.tick(100, TimeUnit.MILLISECONDS);
    }

    initial = count.get();
    while (count.get() == initial) {
        writer.setProperty("prop1", Integer.toString(i++));
        writer.save();
        mockScheduler.tick(100, TimeUnit.MILLISECONDS);
    }
}

From source file:com.flipkart.bifrost.CommunicationTest.java

@Test
public void testSendReceive() throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL);
    mapper.setSerializationInclusion(JsonInclude.Include.NON_EMPTY);

    Connection connection = new Connection(Lists.newArrayList("localhost"), "guest", "guest");
    connection.start();

    BifrostExecutor<Void> executor = BifrostExecutor.<Void>builder(TestAction.class).connection(connection)
            .objectMapper(mapper).requestQueue("bifrost-send").responseQueue("bifrost-recv").concurrency(10)
            .executorService(Executors.newFixedThreadPool(10)).build();

    BifrostRemoteCallExecutionServer<Void> executionServer = BifrostRemoteCallExecutionServer
            .<Void>builder(TestAction.class).objectMapper(mapper).connection(connection).concurrency(10)
            .requestQueue("bifrost-send").build();
    executionServer.start();

    long startTime = System.currentTimeMillis();
    AtomicInteger counter = new AtomicInteger(0);
    int requestCount = 100;
    CompletionService<Void> ecs = new ExecutorCompletionService<>(Executors.newFixedThreadPool(50));
    List<Future<Void>> futures = Lists.newArrayListWithCapacity(requestCount);
    for (int i = 0; i < requestCount; i++) {
        futures.add(ecs.submit(new ServiceCaller(executor, counter)));
    }
    for (int i = 0; i < requestCount; i++) {
        try {
            ecs.take().get();
        } catch (ExecutionException e) {
            e.printStackTrace();
        }
    }
    System.out.println(
            String.format("Completed: %d in %d ms", counter.get(), (System.currentTimeMillis() - startTime)));
    executor.shutdown();
    executionServer.stop();
    connection.stop();

    Assert.assertEquals(requestCount, counter.get());
}

From source file:org.apache.solr.client.solrj.impl.HttpClientUtilTest.java

@Test
public void testReplaceConfigurer() throws IOException {

    try {
        final AtomicInteger counter = new AtomicInteger();
        HttpClientConfigurer custom = new HttpClientConfigurer() {
            @Override
            public void configure(DefaultHttpClient httpClient, SolrParams config) {
                super.configure(httpClient, config);
                counter.set(config.getInt("custom-param", -1));
            }

        };

        HttpClientUtil.setConfigurer(custom);

        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set("custom-param", 5);
        HttpClientUtil.createClient(params).close();
        assertEquals(5, counter.get());
    } finally {
        //restore default configurer
        HttpClientUtil.setConfigurer(new HttpClientConfigurer());
    }

}
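
Here the AtomicInteger acts as a mutable result slot: the configurer callback cannot assign to a captured local, so it writes through set() and the test reads the value back with get(). A minimal sketch of the pattern, with the Solr machinery replaced by a java.util.function.Consumer (names are ours):

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

public class CallbackResultSketch {
    public static void main(String[] args) {
        final AtomicInteger observed = new AtomicInteger(-1);

        // The callback cannot assign to a captured local, so it writes into
        // the AtomicInteger instead; get() retrieves the value afterwards.
        Consumer<Integer> configurer = observed::set;

        configurer.accept(5);

        if (observed.get() != 5) {
            throw new AssertionError("expected 5 but got " + observed.get());
        }
        System.out.println("configured value: " + observed.get());
    }
}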

From source file:org.apache.hadoop.hbase.master.procedure.TestMasterProcedureSchedulerConcurrency.java

/**
 * Verify that "write" operations for a single table are serialized,
 * but different tables can be executed in parallel.
 */
@Test(timeout = 60000)
public void testConcurrentWriteOps() throws Exception {
    final TestTableProcSet procSet = new TestTableProcSet(queue);

    final int NUM_ITEMS = 10;
    final int NUM_TABLES = 4;
    final AtomicInteger opsCount = new AtomicInteger(0);
    for (int i = 0; i < NUM_TABLES; ++i) {
        TableName tableName = TableName.valueOf(String.format("testtb-%04d", i));
        for (int j = 1; j < NUM_ITEMS; ++j) {
            procSet.addBack(new TestTableProcedure(i * 100 + j, tableName,
                    TableProcedureInterface.TableOperationType.EDIT));
            opsCount.incrementAndGet();
        }
    }
    assertEquals(opsCount.get(), queue.size());

    final Thread[] threads = new Thread[NUM_TABLES * 2];
    final HashSet<TableName> concurrentTables = new HashSet<TableName>();
    final ArrayList<String> failures = new ArrayList<String>();
    final AtomicInteger concurrentCount = new AtomicInteger(0);
    for (int i = 0; i < threads.length; ++i) {
        threads[i] = new Thread() {
            @Override
            public void run() {
                while (opsCount.get() > 0) {
                    try {
                        Procedure proc = procSet.acquire();
                        if (proc == null) {
                            queue.signalAll();
                            if (opsCount.get() > 0) {
                                continue;
                            }
                            break;
                        }

                        TableName tableId = procSet.getTableName(proc);
                        synchronized (concurrentTables) {
                            assertTrue("unexpected concurrency on " + tableId, concurrentTables.add(tableId));
                        }
                        assertTrue(opsCount.decrementAndGet() >= 0);
                        try {
                            long procId = proc.getProcId();
                            int concurrent = concurrentCount.incrementAndGet();
                            assertTrue("inc-concurrent=" + concurrent + " 1 <= concurrent <= " + NUM_TABLES,
                                    concurrent >= 1 && concurrent <= NUM_TABLES);
                            LOG.debug("[S] tableId=" + tableId + " procId=" + procId + " concurrent="
                                    + concurrent);
                            Thread.sleep(2000);
                            concurrent = concurrentCount.decrementAndGet();
                            LOG.debug("[E] tableId=" + tableId + " procId=" + procId + " concurrent="
                                    + concurrent);
                            assertTrue("dec-concurrent=" + concurrent, concurrent < NUM_TABLES);
                        } finally {
                            synchronized (concurrentTables) {
                                assertTrue(concurrentTables.remove(tableId));
                            }
                            procSet.release(proc);
                        }
                    } catch (Throwable e) {
                        LOG.error("Failed " + e.getMessage(), e);
                        synchronized (failures) {
                            failures.add(e.getMessage());
                        }
                    } finally {
                        queue.signalAll();
                    }
                }
            }
        };
        threads[i].start();
    }
    for (int i = 0; i < threads.length; ++i) {
        threads[i].join();
    }
    assertTrue(failures.toString(), failures.isEmpty());
    assertEquals(0, opsCount.get());
    assertEquals(0, queue.size());

    for (int i = 0; i < NUM_TABLES; ++i) {
        final TableName table = TableName.valueOf(String.format("testtb-%04d", i));
        final TestTableProcedure dummyProc = new TestTableProcedure(100, table,
                TableProcedureInterface.TableOperationType.DELETE);
        assertTrue("queue should be deleted, table=" + table, queue.markTableAsDeleted(table, dummyProc));
    }
}
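
This test uses incrementAndGet()/decrementAndGet() as a concurrency gauge and get() to assert invariants on it. As a much smaller sketch of the same gauge idiom (the constants, class name, and bound check are ours and purely illustrative):

import java.util.concurrent.atomic.AtomicInteger;

public class ConcurrencyGaugeSketch {
    private static final int MAX_CONCURRENT = 4;
    private static final AtomicInteger inFlight = new AtomicInteger(0);

    static void doWork() throws InterruptedException {
        int concurrent = inFlight.incrementAndGet();
        try {
            if (concurrent > MAX_CONCURRENT) {
                throw new AssertionError("too many concurrent workers: " + concurrent);
            }
            Thread.sleep(50); // simulated work
        } finally {
            inFlight.decrementAndGet();
        }
    }

    public static void main(String[] args) throws InterruptedException {
        Thread[] threads = new Thread[MAX_CONCURRENT];
        for (int i = 0; i < threads.length; i++) {
            threads[i] = new Thread(() -> {
                try {
                    doWork();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            });
            threads[i].start();
        }
        for (Thread t : threads) {
            t.join();
        }
        // Once every worker has finished, the gauge reads zero again.
        System.out.println("in flight: " + inFlight.get());
    }
}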

From source file:org.jasig.ssp.util.importer.job.staging.PostgresStagingTableWriter.java

@Override
public void write(final List<? extends RawItem> items) {

    NamedParameterJdbcTemplate jdbcTemplate = new NamedParameterJdbcTemplate(dataSource);
    String fileName = items.get(0).getResource().getFilename();
    final String[] tableName = fileName.split("\\.");

    Integer batchStart = (Integer) stepExecution.getExecutionContext().get("batchStart");
    Integer batchStop = (Integer) stepExecution.getExecutionContext().get("batchStop");
    Object currentEntity = stepExecution.getExecutionContext().get("currentEntity");

    if (currentEntity == null || !currentEntity.equals(tableName[0])) {
        batchStart = 0;
        batchStop = items.size() - 1;
        currentEntity = tableName[0];
        stepExecution.getExecutionContext().put("currentEntity", currentEntity);
        stepExecution.getExecutionContext().put("batchStart", batchStart);
        stepExecution.getExecutionContext().put("batchStop", batchStop);
    } else {
        batchStart = batchStop + 1;
        batchStop = batchStart + items.size() - 1;
        stepExecution.getExecutionContext().put("batchStart", batchStart);
        stepExecution.getExecutionContext().put("batchStop", batchStop);
    }

    RawItem firstItem = items.get(0);
    Resource firstItemResource = firstItem.getResource();

    if (currentResource == null || !(this.currentResource.equals(firstItemResource))) {
        this.orderedHeaders = writeHeader(firstItem);
        this.currentResource = firstItemResource;
    }

    StringBuilder insertSql = new StringBuilder();
    insertSql.append("INSERT INTO stg_" + tableName[0] + " (batch_id,");
    StringBuilder valuesSqlBuilder = new StringBuilder();
    valuesSqlBuilder.append(" VALUES (?,");
    for (String header : this.orderedHeaders) {
        insertSql.append(header).append(",");
        valuesSqlBuilder.append("?").append(",");
    }
    insertSql.setLength(insertSql.length() - 1); // trim comma
    valuesSqlBuilder.setLength(valuesSqlBuilder.length() - 1); // trim comma
    insertSql.append(")");
    valuesSqlBuilder.append(");");
    insertSql.append(valuesSqlBuilder);

    final AtomicInteger batchStartRef = new AtomicInteger(batchStart);
    final String sql = insertSql.toString();

    jdbcTemplate.getJdbcOperations().execute(sql, new PreparedStatementCallback() {
        @Override
        public Object doInPreparedStatement(PreparedStatement ps) throws SQLException, DataAccessException {
            for (RawItem item : items) {
                final List<Object> paramsForLog = new ArrayList<Object>(orderedHeaders.length);
                int counter = 1;
                paramsForLog.add(batchStartRef.get());
                StatementCreatorUtils.setParameterValue(ps, counter, SqlTypeValue.TYPE_UNKNOWN,
                        batchStartRef.getAndIncrement());
                counter++;
                for (String header : orderedHeaders) {
                    final Map<String, String> record = item.getRecord();
                    String value = record.get(header);
                    final Integer sqlType = metadataRepository.getRepository().getColumnMetadataRepository()
                            .getColumnMetadata(new ColumnReference(tableName[0], header)).getJavaSqlType();
                    paramsForLog.add(value);
                    StatementCreatorUtils.setParameterValue(ps, counter, sqlType, value);
                    counter++;
                }
                sayQuery(sql, paramsForLog);
                ps.addBatch();
            }
            return ps.executeBatch();
        }
    });

    batchStart = batchStartRef.get();

    say("******CHUNK POSTGRES******");
}

From source file:dk.statsbiblioteket.util.JobControllerTest.java

public void testRemoveCallback() throws Exception {
    final int JOBS = 10;
    final AtomicInteger counter = new AtomicInteger(0);
    JobController<Long> controller = new JobController<Long>(10) {
        @Override
        protected void afterExecute(Future<Long> finished) {
            counter.incrementAndGet();
        }
    };
    for (int i = 0; i < JOBS; i++) {
        controller.submit(new Shout(10));
    }
    synchronized (Thread.currentThread()) {
        Thread.currentThread().wait(100);
    }
    assertEquals("The number of pops should match", JOBS, controller.popAll().size());
    assertEquals("The number of callbacks should match", JOBS, counter.get());
}

From source file:dk.statsbiblioteket.util.JobControllerTest.java

public void testAutoEmptyMultiPoll() throws InterruptedException {
    final int JOBS = 10;
    final AtomicInteger counter = new AtomicInteger(0);
    JobController<Long> controller = new JobController<Long>(10, true) {
        @Override
        protected void afterExecute(Future<Long> finished) {
            counter.incrementAndGet();
        }
    };
    for (int i = 0; i < JOBS; i++) {
        controller.submit(new Shout(JOBS / 4));
        synchronized (Thread.currentThread()) {
            Thread.currentThread().wait(JOBS / 10);
        }
    }
    int popped = controller.popAll().size();
    assertEquals("The auto removed count should be all the jobs", JOBS, counter.get());
    assertEquals("The JobController should be empty", 0, controller.getTaskCount());
    assertTrue("The number of explicit popped jobs should be > 0 and < " + JOBS + " but was " + popped,
            popped > 0 && popped < JOBS);
}