List of usage examples for java.util.concurrent.atomic.AtomicInteger.get()
public final int get()
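get() returns the current value of the AtomicInteger with volatile-read memory semantics. A minimal sketch of typical usage (the counter name and values are illustrative only):

import java.util.concurrent.atomic.AtomicInteger;

public class AtomicIntegerGetExample {
    public static void main(String[] args) {
        AtomicInteger counter = new AtomicInteger(0); // illustrative counter
        counter.incrementAndGet();                    // counter == 1
        counter.addAndGet(4);                         // counter == 5
        int snapshot = counter.get();                 // read the current value
        System.out.println(snapshot);                 // prints 5
    }
}

Note that get() only observes the value at the moment of the call; when a read-modify-write must be atomic, use compareAndSet() or the atomic update methods instead of a separate get() followed by set().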
From source file:nl.knaw.huygens.alexandria.exporter.LaTeXExporter.java
private void connectTextNodes(StringBuilder latexBuilder, AtomicInteger textNodeCounter) {
    latexBuilder.append("\n % connect TextNodes\n \\graph{").append("(doc)");
    for (int i = 0; i < textNodeCounter.get(); i++) {
        latexBuilder.append(" -> (tn").append(i).append(")");
    }
    latexBuilder.append("};\n");
}
From source file:org.apache.flink.api.java.record.ReduceWrappingFunctionTest.java
@SuppressWarnings("unchecked")
@Test
public void testWrappedReduceObject() {
    try {
        AtomicInteger methodCounter = new AtomicInteger();
        ReduceOperator reduceOp = ReduceOperator.builder(new TestReduceFunction(methodCounter)).build();
        RichFunction reducer = (RichFunction) reduceOp.getUserCodeWrapper().getUserCodeObject();

        // test the method invocations
        reducer.close();
        reducer.open(new Configuration());
        assertEquals(2, methodCounter.get());

        // prepare the reduce / combine tests
        final List<Record> target = new ArrayList<Record>();
        Collector<Record> collector = new Collector<Record>() {
            @Override
            public void collect(Record record) {
                target.add(record);
            }

            @Override
            public void close() {}
        };

        List<Record> source = new ArrayList<Record>();
        source.add(new Record(new IntValue(42), new LongValue(11)));
        source.add(new Record(new IntValue(13), new LongValue(17)));

        // test reduce
        ((GroupReduceFunction<Record, Record>) reducer).reduce(source, collector);
        assertEquals(2, target.size());
        assertEquals(new IntValue(42), target.get(0).getField(0, IntValue.class));
        assertEquals(new LongValue(11), target.get(0).getField(1, LongValue.class));
        assertEquals(new IntValue(13), target.get(1).getField(0, IntValue.class));
        assertEquals(new LongValue(17), target.get(1).getField(1, LongValue.class));
        target.clear();

        // test combine
        ((GroupCombineFunction<Record, Record>) reducer).combine(source, collector);
        assertEquals(2, target.size());
        assertEquals(new IntValue(42), target.get(0).getField(0, IntValue.class));
        assertEquals(new LongValue(11), target.get(0).getField(1, LongValue.class));
        assertEquals(new IntValue(13), target.get(1).getField(0, IntValue.class));
        assertEquals(new LongValue(17), target.get(1).getField(1, LongValue.class));
        target.clear();

        // test the serialization
        SerializationUtils.clone((java.io.Serializable) reducer);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
From source file:dk.statsbiblioteket.util.JobControllerTest.java
public void testEmptyPop() throws Exception {
    final AtomicInteger counter = new AtomicInteger(0);
    JobController<Long> controller = new JobController<Long>(10) {
        @Override
        protected void afterExecute(Future<Long> finished) {
            counter.incrementAndGet();
        }
    };
    assertTrue("Popping on empty should return empty list",
            controller.popAll(10, TimeUnit.MILLISECONDS).isEmpty());
    assertEquals("The callback count should be zero on an empty controller", 0, counter.get());
}
From source file:com.googlecode.msidor.springframework.integration.channel.ConcurentOrderedMultiQueueChannel.java
/**
 * Adds message to the queue.
 * This method blocks if there is no space.
 * @param message to be added
 * @param timeout after which the method awakes from waiting for free space (0 for no timeout)
 * @return true if message was successfully added to queue
 */
@Override
protected boolean doSend(Message<?> message, long timeout) {
    Assert.notNull(message, "'message' must not be null");
    log.trace("Sending message " + message);

    long nanos = TimeUnit.MILLISECONDS.toNanos(timeout);
    int c = -1;
    final ReentrantLock lock = this.objectLock;
    final AtomicInteger count = this.count;
    try {
        // lock the object exclusively
        lock.lockInterruptibly();

        while (count.get() == totalCapacity) {
            // if timeout was set and has elapsed
            if (nanos <= 0 && timeout > 0)
                return false;

            // wait for notification when any message has been handled
            if (timeout > 0) {
                nanos = notFull.awaitNanos(nanos);
            } else {
                notFull.await();
            }
        }

        // add message to the queue
        addMessage(message);
        c = count.getAndIncrement();

        // if there is still some space, notify any other potentially dormant producer thread
        if (c + 1 < totalCapacity)
            notFull.signal();
    } catch (InterruptedException e) {
        log.trace("Lock interrupted by other thread");
    } finally {
        // notify potentially dormant consumer thread that there is a message to handle
        newMessagesToCheck.signal();
        lock.unlock();
    }
    return true;
}
From source file:com.nearinfinity.blur.thrift.AsyncClientPool.java
private TAsyncClient newClient(Class<?> c, Connection connection) throws InterruptedException {
    BlockingQueue<TAsyncClient> blockingQueue = getQueue(connection);
    TAsyncClient client = blockingQueue.poll();
    if (client != null) {
        return client;
    }

    AtomicInteger counter;
    synchronized (_numberOfConnections) {
        counter = _numberOfConnections.get(connection.getHost());
        if (counter == null) {
            counter = new AtomicInteger();
            _numberOfConnections.put(connection.getHost(), counter);
        }
    }

    synchronized (counter) {
        int numOfConnections = counter.get();
        while (numOfConnections >= _maxConnectionsPerHost) {
            client = blockingQueue.poll(_pollTime, TimeUnit.MILLISECONDS);
            if (client != null) {
                return client;
            }
            LOG.debug("Waiting for client number of connection [" + numOfConnections
                    + "], max connection per host [" + _maxConnectionsPerHost + "]");
            numOfConnections = counter.get();
        }

        LOG.info("Creating a new client for [" + connection + "]");
        String name = c.getName();
        Constructor<?> constructor = _constructorCache.get(name);
        if (constructor == null) {
            String clientClassName = name.replace("$AsyncIface", "$AsyncClient");
            try {
                Class<?> clazz = Class.forName(clientClassName);
                constructor = clazz.getConstructor(new Class[] { TProtocolFactory.class,
                        TAsyncClientManager.class, TNonblockingTransport.class });
                _constructorCache.put(name, constructor);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
        try {
            TNonblockingSocket transport = newTransport(connection);
            client = (TAsyncClient) constructor
                    .newInstance(new Object[] { _protocolFactory, _clientManager, transport });
            client.setTimeout(_timeout);
            counter.incrementAndGet();
            return client;
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
}
From source file:org.apache.hadoop.hbase.regionserver.TestRegionReplicaFailover.java
/**
 * Tests the case where there are 3 region replicas and the primary is continuously accepting
 * new writes while one of the secondaries is killed. Verification is done for both of the
 * secondary replicas.
 */
@Test(timeout = 120000)
public void testSecondaryRegionKillWhilePrimaryIsAcceptingWrites() throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
            Table table = connection.getTable(htd.getTableName());
            Admin admin = connection.getAdmin()) {
        // start a thread to do the loading of primary
        HTU.loadNumericRows(table, fam, 0, 1000); // start with some base
        admin.flush(table.getName());
        HTU.loadNumericRows(table, fam, 1000, 2000);

        final AtomicReference<Throwable> ex = new AtomicReference<Throwable>(null);
        final AtomicBoolean done = new AtomicBoolean(false);
        final AtomicInteger key = new AtomicInteger(2000);

        Thread loader = new Thread() {
            @Override
            public void run() {
                while (!done.get()) {
                    try {
                        HTU.loadNumericRows(table, fam, key.get(), key.get() + 1000);
                        key.addAndGet(1000);
                    } catch (Throwable e) {
                        ex.compareAndSet(null, e);
                    }
                }
            }
        };
        loader.start();

        Thread aborter = new Thread() {
            @Override
            public void run() {
                try {
                    boolean aborted = false;
                    for (RegionServerThread rs : HTU.getMiniHBaseCluster().getRegionServerThreads()) {
                        for (Region r : rs.getRegionServer().getOnlineRegions(htd.getTableName())) {
                            if (r.getRegionInfo().getReplicaId() == 1) {
                                LOG.info("Aborting region server hosting secondary region replica");
                                rs.getRegionServer().abort("for test");
                                aborted = true;
                            }
                        }
                    }
                    assertTrue(aborted);
                } catch (Throwable e) {
                    ex.compareAndSet(null, e);
                }
            };
        };
        aborter.start();
        aborter.join();

        done.set(true);
        loader.join();

        assertNull(ex.get());
        assertTrue(key.get() > 1000); // assert that the test is working as designed
        LOG.info("Loaded up to key :" + key.get());
        verifyNumericRowsWithTimeout(table, fam, 0, key.get(), 0, 30000);
        verifyNumericRowsWithTimeout(table, fam, 0, key.get(), 1, 30000);
        verifyNumericRowsWithTimeout(table, fam, 0, key.get(), 2, 30000);
    }

    // restart the region server
    HTU.getMiniHBaseCluster().startRegionServer();
}
From source file:org.apache.flink.api.java.record.CoGroupWrappingFunctionTest.java
@SuppressWarnings("unchecked")
@Test
public void testWrappedCoGroupObject() {
    try {
        AtomicInteger methodCounter = new AtomicInteger();
        CoGroupOperator coGroupOp = CoGroupOperator
                .builder(new TestCoGroupFunction(methodCounter), LongValue.class, 1, 2).build();
        RichFunction cogrouper = (RichFunction) coGroupOp.getUserCodeWrapper().getUserCodeObject();

        // test the method invocations
        cogrouper.close();
        cogrouper.open(new Configuration());
        assertEquals(2, methodCounter.get());

        // prepare the coGroup
        final List<Record> target = new ArrayList<Record>();
        Collector<Record> collector = new Collector<Record>() {
            @Override
            public void collect(Record record) {
                target.add(record);
            }

            @Override
            public void close() {}
        };

        List<Record> source1 = new ArrayList<Record>();
        source1.add(new Record(new IntValue(42)));
        source1.add(new Record(new IntValue(13)));

        List<Record> source2 = new ArrayList<Record>();
        source2.add(new Record(new LongValue(11)));
        source2.add(new Record(new LongValue(17)));

        // test coGroup
        ((org.apache.flink.api.common.functions.CoGroupFunction<Record, Record, Record>) cogrouper)
                .coGroup(source1, source2, collector);
        assertEquals(4, target.size());
        assertEquals(new IntValue(42), target.get(0).getField(0, IntValue.class));
        assertEquals(new IntValue(13), target.get(1).getField(0, IntValue.class));
        assertEquals(new LongValue(11), target.get(2).getField(0, LongValue.class));
        assertEquals(new LongValue(17), target.get(3).getField(0, LongValue.class));
        target.clear();

        // test the serialization
        SerializationUtils.clone((java.io.Serializable) cogrouper);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
From source file:lh.api.showcase.server.util.HttpQueryUtils.java
public static String executeQuery(URI uri, ApiAuth apiAuth, HasProxySettings proxySetting,
        HttpClientFactory httpClientFactory, final int maxRetries) throws HttpErrorResponseException {
    //logger.info("uri: " + uri.toString());

    AtomicInteger tryCounter = new AtomicInteger(0);
    while (true) {
        CloseableHttpClient httpclient = httpClientFactory.getHttpClient(proxySetting);
        HttpGet httpGet = new HttpGet(uri);
        httpGet.addHeader("Authorization", apiAuth.getAuthHeader());
        httpGet.addHeader("Accept", "application/json");

        //logger.info("auth: " + apiAuth.getAuthHeader());
        //logger.info("query: " + httpGet.toString());

        CloseableHttpResponse response = null;
        try {
            response = httpclient.execute(httpGet);
            StatusLine status = response.getStatusLine();
            BufferedHttpEntity entity = new BufferedHttpEntity(response.getEntity());
            String json = IOUtils.toString(entity.getContent(), "UTF8");
            EntityUtils.consume(entity);

            //logger.info("response: " + json);

            // check for errors
            if (status != null && status.getStatusCode() > 299) {
                if (status.getStatusCode() == 401) {
                    // token has probably expired
                    logger.info("Authentication Error. Token will be refreshed");
                    if (tryCounter.getAndIncrement() < maxRetries) {
                        if (apiAuth.updateAccessToken()) {
                            logger.info("Token successfully refreshed");
                            // we retry with the new token
                            logger.info("Retry number " + tryCounter.get());
                            continue;
                        }
                    }
                }
                throw new HttpErrorResponseException(status.getStatusCode(), status.getReasonPhrase(), json);
            }
            return json;
        } catch (IOException e) {
            logger.severe(e.getMessage());
            break;
        } finally {
            try {
                if (response != null) {
                    response.close();
                }
            } catch (IOException e) {
                logger.log(Level.SEVERE, e.getMessage());
            }
        }
    }
    return null;
}
From source file:com.taobao.pushit.server.listener.ConnectionNumberListener.java
/**
 * Called when a new connection is created: tracks the number of connections per remote IP
 * and closes the connection when the server is overloaded or the per-IP threshold is exceeded.
 */
public void onConnectionCreated(Connection conn) {
    // resolve the remote IP of the new connection
    String remoteIp = this.getRemoteIp(conn);

    try {
        // look up (or atomically create) the connection counter for this IP
        AtomicInteger connNum = this.connectionIpNumMap.get(remoteIp);
        if (connNum == null) {
            AtomicInteger newConnNum = new AtomicInteger(0);
            AtomicInteger oldConnNum = this.connectionIpNumMap.putIfAbsent(remoteIp, newConnNum);
            if (oldConnNum != null) {
                connNum = oldConnNum;
            } else {
                connNum = newConnNum;
            }
        }

        connNum.incrementAndGet();

        // close the connection if the server is overflowing or this IP exceeds the threshold
        if (isOverflow || connNum.get() > this.connThreshold) {
            log.warn("pushit-server, , :" + connNum.get() + ",:" + this.connThreshold);
            conn.close(false);
        }
    } catch (NotifyRemotingException e) {
        log.error(", remoteIp=" + remoteIp, e);
    } catch (Exception e) {
        log.error(", remoteIp=" + remoteIp, e);
    }
}
From source file:com.github.brandtg.switchboard.LogRegionResource.java
private void handleData(String target, List<LogRegion> logRegions, LogRegionResponse response)
        throws Exception {
    if (target != null) {
        final AtomicInteger contentLength = new AtomicInteger();
        for (LogRegion logRegion : logRegions) {
            contentLength.addAndGet((int) (logRegion.getNextFileOffset() - logRegion.getFileOffset()));
        }

        String[] hostPort = target.split(":");
        InetSocketAddress socketAddress = new InetSocketAddress(hostPort[0], Integer.valueOf(hostPort[1]));
        bootstrap.connect(socketAddress).addListener(new LogFileSender(logRegions, target));
        response.setDataSize(contentLength.get());
    } else {
        Map<Long, String> data = new HashMap<Long, String>(logRegions.size());
        for (LogRegion logRegion : logRegions) {
            data.put(logRegion.getIndex(), Base64.encodeBase64String(logReader.read(logRegion)));
        }
        response.setData(data);
    }
}