List of usage examples for java.util.concurrent.atomic.AtomicInteger
public AtomicInteger()
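The no-argument constructor creates an AtomicInteger whose initial value is 0, which is why the examples below can begin counting with incrementAndGet() or addAndGet() immediately. A minimal, self-contained sketch of the operations these examples rely on (the class name is illustrative):

import java.util.concurrent.atomic.AtomicInteger;

public class AtomicIntegerBasics {
    public static void main(String[] args) {
        AtomicInteger counter = new AtomicInteger(); // initial value is 0
        counter.incrementAndGet();                   // atomically add 1 -> 1
        counter.addAndGet(5);                        // atomically add 5 -> 6
        int previous = counter.getAndIncrement();    // returns 6, value becomes 7
        counter.addAndGet(-2);                       // atomically subtract 2 -> 5
        System.out.println(previous + " " + counter.get()); // prints "6 5"
    }
}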
From source file: org.attribyte.api.pubsub.impl.server.BroadcastServlet.java
/**
 * Creates a servlet with a specified maximum body size.
 * @param endpoint The hub endpoint.
 * @param maxBodyBytes The maximum size accepted for a notification body.
 * @param autocreateTopics If <code>true</code>, topics will be automatically created if they do not exist.
 * @param logger The logger.
 * @param filters A list of filters to be applied.
 * @param topicCache A topic cache.
 * @param replicationTopic A system topic to which all notifications are replicated. May be <code>null</code>.
 * @param maxSavedNotifications The maximum number of notifications saved in-memory for debugging purposes.
 * @param jsonEnabled If <code>true</code>, a JSON body will be sent with the notification response.
 */
public BroadcastServlet(final HubEndpoint endpoint, final int maxBodyBytes, final boolean autocreateTopics,
        final Logger logger, final List<BasicAuthFilter> filters, final Cache<String, Topic> topicCache,
        final Topic replicationTopic, final int maxSavedNotifications, final boolean jsonEnabled) {
    this.endpoint = endpoint;
    this.datastore = endpoint.getDatastore();
    this.maxBodyBytes = maxBodyBytes;
    this.autocreateTopics = autocreateTopics;
    this.logger = logger;
    this.filters = filters != null ? ImmutableList.copyOf(filters) : ImmutableList.<BasicAuthFilter>of();
    this.topicCache = topicCache;
    this.replicationTopic = replicationTopic;
    this.maxSavedNotifications = maxSavedNotifications;
    this.jsonEnabled = jsonEnabled;
    final int queueLimit = maxSavedNotifications * 2;
    final int drainTriggerLimit = queueLimit - maxSavedNotifications / 2;
    this.recentNotifications = maxSavedNotifications > 0 ? new ArrayBlockingQueue<>(queueLimit) : null;
    this.recentNotificationsSize = new AtomicInteger();
    if (recentNotifications != null) {
        this.recentNotificationsMonitor = new Thread(new Runnable() {
            @Override
            public void run() {
                while (true) {
                    try {
                        int currSize = recentNotificationsSize.get();
                        if (currSize >= drainTriggerLimit) {
                            int maxDrained = currSize - maxSavedNotifications;
                            List<NotificationRecord> drain = Lists.newArrayListWithCapacity(maxDrained);
                            int numDrained = recentNotifications.drainTo(drain, maxSavedNotifications);
                            recentNotificationsSize.addAndGet(-1 * numDrained);
                        } else {
                            Thread.sleep(100L);
                        }
                    } catch (InterruptedException ie) {
                        return;
                    }
                }
            }
        });
        this.recentNotificationsMonitor.setName("recent-notifications-monitor");
        this.recentNotificationsMonitor.setDaemon(true);
        this.recentNotificationsMonitor.start();
    } else {
        this.recentNotificationsMonitor = null;
    }
}
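In the servlet above, the AtomicInteger mirrors the size of a bounded queue so a background monitor thread can decide when to drain it. A distilled sketch of that pattern, with hypothetical names (not the servlet's actual fields):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.atomic.AtomicInteger;

public class RecentItemsBuffer {
    private final ArrayBlockingQueue<String> queue = new ArrayBlockingQueue<>(200);
    private final AtomicInteger size = new AtomicInteger(); // tracked alongside the queue

    void record(String item) {
        if (queue.offer(item)) {
            size.incrementAndGet();
        }
    }

    // Called periodically by a monitor thread: trim the buffer back to 'keep' items.
    void drainIfNeeded(int keep) {
        int currSize = size.get();
        if (currSize > keep) {
            List<String> drained = new ArrayList<>(currSize - keep);
            int numDrained = queue.drainTo(drained, currSize - keep);
            size.addAndGet(-numDrained); // subtract what was actually drained
        }
    }
}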
From source file: io.wcm.caravan.pipeline.impl.JsonPipelineMultipleSubscriptionsTest.java
@SuppressWarnings("unchecked") @Test//from w w w.j ava 2 s . c o m public void subscribeToThreeRelatedPipelineOutputs() { final AtomicInteger subscribeCount = new AtomicInteger(); initPipelines(subscribeCount); Observer<JsonPipelineOutput> firstObserver = Mockito.mock(Observer.class); Observer<JsonPipelineOutput> secondObserver = Mockito.mock(Observer.class); Observer<JsonPipelineOutput> thirdObserver = Mockito.mock(Observer.class); firstStep.getOutput().subscribe(firstObserver); secondStep.getOutput().subscribe(secondObserver); thirdStep.getOutput().subscribe(thirdObserver); assertEquals(1, subscribeCount.get()); }
From source file: com.github.brandtg.switchboard.LogRegionResource.java
private void handleData(String target, List<LogRegion> logRegions, LogRegionResponse response)
        throws Exception {
    if (target != null) {
        final AtomicInteger contentLength = new AtomicInteger();
        for (LogRegion logRegion : logRegions) {
            contentLength.addAndGet((int) (logRegion.getNextFileOffset() - logRegion.getFileOffset()));
        }
        String[] hostPort = target.split(":");
        InetSocketAddress socketAddress = new InetSocketAddress(hostPort[0], Integer.valueOf(hostPort[1]));
        bootstrap.connect(socketAddress).addListener(new LogFileSender(logRegions, target));
        response.setDataSize(contentLength.get());
    } else {
        Map<Long, String> data = new HashMap<Long, String>(logRegions.size());
        for (LogRegion logRegion : logRegions) {
            data.put(logRegion.getIndex(), Base64.encodeBase64String(logReader.read(logRegion)));
        }
        response.setData(data);
    }
}
From source file: net.sf.jasperreports.components.table.fill.FillTable.java
protected void setTableInstanceCounter() {
    JRFillContext fillerContext = fillContext.getFiller().getFillContext();
    AtomicInteger counter = (AtomicInteger) fillerContext.getFillCache(FILL_CACHE_KEY_TABLE_INSTANCE_COUNTER);
    if (counter == null) {
        // we just need a mutable integer, there's no actual concurrency here
        counter = new AtomicInteger();
        fillerContext.setFillCache(FILL_CACHE_KEY_TABLE_INSTANCE_COUNTER, counter);
    }
    int instanceIndex = counter.getAndIncrement();
    if (log.isDebugEnabled()) {
        log.debug("table instance index is " + instanceIndex);
    }
    fillSubreport.getTableReport().getBaseReport().setTableInstanceIndex(instanceIndex);
}
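As the in-code comment notes, the counter here is not about concurrency at all: AtomicInteger simply serves as a mutable boxed int that can live in a cache and be incremented in place. A minimal illustration of that idiom (hypothetical names):

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;

public class InstanceCounters {
    private final Map<String, AtomicInteger> cache = new HashMap<>();

    // Returns the next 0-based instance index for the given key.
    int nextIndex(String key) {
        AtomicInteger counter = cache.get(key);
        if (counter == null) {
            counter = new AtomicInteger(); // just a mutable integer holder
            cache.put(key, counter);
        }
        return counter.getAndIncrement();
    }
}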
From source file: org.onehippo.cms7.brokenlinks.LinkChecker.java
private void runCheckerThreads(final Iterable<Link> links) {
    ConcurrentLinkedQueue<Link> queue = new ConcurrentLinkedQueue<Link>();
    for (Link link : links) {
        queue.add(link);
    }
    final int threadCount = Math.min(queue.size(), nrOfThreads);
    final AtomicInteger internalLinksChecked = new AtomicInteger();
    Thread[] threads = new Thread[threadCount];
    for (int i = 0; i < threadCount; i++) {
        threads[i] = new LinkCheckerRunner(queue, internalLinksChecked);
        threads[i].setUncaughtExceptionHandler(new LogUncaughtExceptionHandler(log));
    }
    for (int i = 0; i < threadCount; i++) {
        threads[i].start();
    }
    try {
        for (int i = 0; i < threadCount; i++) {
            threads[i].join();
        }
    } catch (InterruptedException ex) {
        // aborted
    }
    try {
        session.refresh(false);
    } catch (RepositoryException e) {
        log.warn("Failed to clear the session.", e);
    }
}
From source file: com.alibaba.druid.benckmark.pool.Case3.java
private void p0(final DataSource dataSource, String name, int threadCount) throws Exception {
    final AtomicInteger count = new AtomicInteger();
    final AtomicInteger errorCount = new AtomicInteger();
    final CountDownLatch startLatch = new CountDownLatch(1);
    final CountDownLatch endLatch = new CountDownLatch(threadCount);
    for (int i = 0; i < threadCount; ++i) {
        Thread thread = new Thread() {
            public void run() {
                try {
                    startLatch.await();
                    for (int i = 0; i < LOOP_COUNT; ++i) {
                        Connection conn = dataSource.getConnection();
                        Statement stmt = conn.createStatement();
                        ResultSet rs = stmt.executeQuery(sql);
                        while (rs.next()) {
                            rs.getInt(1);
                        }
                        rs.close();
                        stmt.close();
                        conn.close();
                        count.incrementAndGet();
                    }
                } catch (Throwable ex) {
                    errorCount.incrementAndGet();
                    ex.printStackTrace();
                } finally {
                    endLatch.countDown();
                }
            }
        };
        thread.start();
    }
    long startMillis = System.currentTimeMillis();
    long startYGC = TestUtil.getYoungGC();
    long startFullGC = TestUtil.getFullGC();
    startLatch.countDown();
    endLatch.await();
    long millis = System.currentTimeMillis() - startMillis;
    long ygc = TestUtil.getYoungGC() - startYGC;
    long fullGC = TestUtil.getFullGC() - startFullGC;
    Assert.assertEquals(LOOP_COUNT * threadCount, count.get());
    Thread.sleep(1);
    System.out.println("thread " + threadCount + " " + name + " millis : "
            + NumberFormat.getInstance().format(millis) + ", YGC " + ygc + " FGC " + fullGC);
}
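The two counters above are the standard way to tally successes and failures across benchmark threads, with latches aligning the start and end of the run. A stripped-down skeleton of the same coordination (a hypothetical no-op stands in for the JDBC work):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;

public class ParallelTally {
    public static void main(String[] args) throws InterruptedException {
        final int threadCount = 4;
        final AtomicInteger count = new AtomicInteger();
        final AtomicInteger errorCount = new AtomicInteger();
        final CountDownLatch startLatch = new CountDownLatch(1);
        final CountDownLatch endLatch = new CountDownLatch(threadCount);
        for (int i = 0; i < threadCount; ++i) {
            new Thread(() -> {
                try {
                    startLatch.await();      // all threads begin together
                    count.incrementAndGet(); // the "work" being tallied
                } catch (Throwable ex) {
                    errorCount.incrementAndGet();
                } finally {
                    endLatch.countDown();
                }
            }).start();
        }
        startLatch.countDown(); // release the threads
        endLatch.await();       // wait for all to finish
        System.out.println(count.get() + " ok, " + errorCount.get() + " errors");
    }
}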
From source file: org.eclipse.hono.service.registration.impl.FileBasedRegistrationService.java
private void loadRegistrationData() {
    if (filename != null) {
        final FileSystem fs = vertx.fileSystem();
        log.debug("trying to load device registration information from file {}", filename);
        if (fs.existsBlocking(filename)) {
            final AtomicInteger deviceCount = new AtomicInteger();
            fs.readFile(filename, readAttempt -> {
                if (readAttempt.succeeded()) {
                    JsonArray allObjects = new JsonArray(new String(readAttempt.result().getBytes()));
                    for (Object obj : allObjects) {
                        JsonObject tenant = (JsonObject) obj;
                        String tenantId = tenant.getString(FIELD_TENANT);
                        Map<String, JsonObject> deviceMap = new HashMap<>();
                        for (Object deviceObj : tenant.getJsonArray(ARRAY_DEVICES)) {
                            JsonObject device = (JsonObject) deviceObj;
                            deviceMap.put(device.getString(FIELD_HONO_ID), device.getJsonObject(FIELD_DATA));
                            deviceCount.incrementAndGet();
                        }
                        identities.put(tenantId, deviceMap);
                    }
                    log.info("successfully loaded {} device identities from file [{}]", deviceCount.get(),
                            filename);
                } else {
                    log.warn("could not load device identities from file [{}]", filename, readAttempt.cause());
                }
            });
        } else {
            log.debug("device identity file {} does not exist (yet)", filename);
        }
    }
}
From source file: com.marklogic.contentpump.LocalJobRunner.java
/**
 * Run the job. Get the input splits, create map tasks and submit them to
 * the thread pool if there is one; otherwise, run the tasks one by one.
 *
 * @param <INKEY>
 * @param <INVALUE>
 * @param <OUTKEY>
 * @param <OUTVALUE>
 * @throws Exception
 */
@SuppressWarnings("unchecked")
public <INKEY, INVALUE, OUTKEY, OUTVALUE, T extends org.apache.hadoop.mapreduce.InputSplit> void run()
        throws Exception {
    Configuration conf = job.getConfiguration();
    InputFormat<INKEY, INVALUE> inputFormat = (InputFormat<INKEY, INVALUE>) ReflectionUtils
            .newInstance(job.getInputFormatClass(), conf);
    List<InputSplit> splits = inputFormat.getSplits(job);
    T[] array = (T[]) splits.toArray(new org.apache.hadoop.mapreduce.InputSplit[splits.size()]);

    // sort the splits into order based on size, so that the biggest goes first
    Arrays.sort(array, new SplitLengthComparator());
    OutputFormat<OUTKEY, OUTVALUE> outputFormat = (OutputFormat<OUTKEY, OUTVALUE>) ReflectionUtils
            .newInstance(job.getOutputFormatClass(), conf);
    Class<? extends Mapper<?, ?, ?, ?>> mapperClass = job.getMapperClass();
    Mapper<INKEY, INVALUE, OUTKEY, OUTVALUE> mapper = (Mapper<INKEY, INVALUE, OUTKEY, OUTVALUE>) ReflectionUtils
            .newInstance(mapperClass, conf);
    try {
        outputFormat.checkOutputSpecs(job);
    } catch (Exception ex) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Error checking output specification: ", ex);
        } else {
            LOG.error("Error checking output specification: ");
            LOG.error(ex.getMessage());
        }
        return;
    }
    conf = job.getConfiguration();
    progress = new AtomicInteger[splits.size()];
    for (int i = 0; i < splits.size(); i++) {
        progress[i] = new AtomicInteger();
    }
    Monitor monitor = new Monitor();
    monitor.start();
    reporter = new ContentPumpReporter();
    List<Future<Object>> taskList = new ArrayList<Future<Object>>();
    for (int i = 0; i < array.length; i++) {
        InputSplit split = array[i];
        if (pool != null) {
            LocalMapTask<INKEY, INVALUE, OUTKEY, OUTVALUE> task = new LocalMapTask<INKEY, INVALUE, OUTKEY, OUTVALUE>(
                    inputFormat, outputFormat, conf, i, split, reporter, progress[i]);
            availableThreads = assignThreads(i, array.length);
            Class<? extends Mapper<?, ?, ?, ?>> runtimeMapperClass = job.getMapperClass();
            if (availableThreads > 1 && availableThreads != threadsPerSplit) {
                // possible runtime adjustment
                if (runtimeMapperClass != (Class) MultithreadedMapper.class) {
                    runtimeMapperClass = (Class<? extends Mapper<INKEY, INVALUE, OUTKEY, OUTVALUE>>) cmd
                            .getRuntimeMapperClass(job, mapperClass, threadsPerSplit, availableThreads);
                }
                if (runtimeMapperClass != mapperClass) {
                    task.setMapperClass(runtimeMapperClass);
                }
                if (runtimeMapperClass == (Class) MultithreadedMapper.class) {
                    task.setThreadCount(availableThreads);
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Thread Count for Split#" + i + " : " + availableThreads);
                    }
                }
            }
            if (runtimeMapperClass == (Class) MultithreadedMapper.class) {
                synchronized (pool) {
                    taskList.add(pool.submit(task));
                    pool.wait();
                }
            } else {
                pool.submit(task);
            }
        } else { // single-threaded
            JobID jid = new JobID();
            TaskID taskId = new TaskID(jid.getJtIdentifier(), jid.getId(), TaskType.MAP, i);
            TaskAttemptID taskAttemptId = new TaskAttemptID(taskId, 0);
            TaskAttemptContext context = ReflectionUtil.createTaskAttemptContext(conf, taskAttemptId);
            RecordReader<INKEY, INVALUE> reader = inputFormat.createRecordReader(split, context);
            RecordWriter<OUTKEY, OUTVALUE> writer = outputFormat.getRecordWriter(context);
            OutputCommitter committer = outputFormat.getOutputCommitter(context);
            TrackingRecordReader trackingReader = new TrackingRecordReader(reader, progress[i]);
            Mapper.Context mapperContext = ReflectionUtil.createMapperContext(mapper, conf, taskAttemptId,
                    trackingReader, writer, committer, reporter, split);
            trackingReader.initialize(split, mapperContext);

            // no thread pool (only 1 thread specified)
            Class<? extends Mapper<?, ?, ?, ?>> mapClass = job.getMapperClass();
            mapperContext.getConfiguration().setClass(CONF_MAPREDUCE_JOB_MAP_CLASS, mapClass, Mapper.class);
            mapper = (Mapper<INKEY, INVALUE, OUTKEY, OUTVALUE>) ReflectionUtils.newInstance(mapClass,
                    mapperContext.getConfiguration());
            mapper.run(mapperContext);
            trackingReader.close();
            writer.close(mapperContext);
            committer.commitTask(context);
        }
    }
    // wait till all tasks are done
    if (pool != null) {
        for (Future<Object> f : taskList) {
            f.get();
        }
        pool.shutdown();
        while (!pool.awaitTermination(1, TimeUnit.DAYS));
        jobComplete.set(true);
    }
    monitor.interrupt();
    monitor.join(1000);

    // report counters
    Iterator<CounterGroup> groupIt = reporter.counters.iterator();
    while (groupIt.hasNext()) {
        CounterGroup group = groupIt.next();
        LOG.info(group.getDisplayName() + ": ");
        Iterator<Counter> counterIt = group.iterator();
        while (counterIt.hasNext()) {
            Counter counter = counterIt.next();
            LOG.info(counter.getDisplayName() + ": " + counter.getValue());
        }
    }
    LOG.info("Total execution time: " + (System.currentTimeMillis() - startTime) / 1000 + " sec");
}
From source file: com.inmobi.grill.driver.hive.TestRemoteHiveDriver.java
@Test
public void testMultiThreadClient() throws Exception {
    LOG.info("@@ Starting multi thread test");
    createTestTable("test_multithreads");
    HiveConf thConf = new HiveConf(conf, TestRemoteHiveDriver.class);
    thConf.setLong(HiveDriver.GRILL_CONNECTION_EXPIRY_DELAY, 10000);
    final HiveDriver thrDriver = new HiveDriver();
    thrDriver.configure(thConf);
    QueryContext ctx = new QueryContext("USE " + TestRemoteHiveDriver.class.getSimpleName(), null, conf);
    thrDriver.execute(ctx);

    // Launch a select query
    final int QUERIES = 5;
    int launchedQueries = 0;
    final int THREADS = 5;
    final long POLL_DELAY = 500;
    List<Thread> thrs = new ArrayList<Thread>();
    final AtomicInteger errCount = new AtomicInteger();

    for (int q = 0; q < QUERIES; q++) {
        final QueryContext qctx;
        try {
            qctx = new QueryContext("SELECT * FROM test_multithreads", null, conf);
            thrDriver.executeAsync(qctx);
        } catch (GrillException e) {
            errCount.incrementAndGet();
            LOG.info(q + " executeAsync error: " + e.getCause());
            continue;
        }
        LOG.info("@@ Launched query: " + q + " " + qctx.getQueryHandle());
        launchedQueries++;

        // Launch many threads to poll for status
        final QueryHandle handle = qctx.getQueryHandle();
        for (int i = 0; i < THREADS; i++) {
            int thid = q * THREADS + i;
            Thread th = new Thread(new Runnable() {
                @Override
                public void run() {
                    for (int i = 0; i < 1000; i++) {
                        try {
                            thrDriver.updateStatus(qctx);
                            if (qctx.getDriverStatus().isFinished()) {
                                LOG.info("@@ " + handle.getHandleId() + " >> "
                                        + qctx.getDriverStatus().getState());
                                thrDriver.closeQuery(handle);
                                break;
                            }
                            Thread.sleep(POLL_DELAY);
                        } catch (GrillException e) {
                            LOG.error("Got Exception", e.getCause());
                            e.printStackTrace();
                            errCount.incrementAndGet();
                            break;
                        } catch (InterruptedException e) {
                            e.printStackTrace();
                            break;
                        }
                    }
                }
            });
            thrs.add(th);
            th.setName("Poller#" + thid);
            th.start();
        }
    }
    for (Thread th : thrs) {
        try {
            th.join(10000);
        } catch (InterruptedException e) {
            LOG.warn("Not ended yet: " + th.getName());
        }
    }
    Assert.assertEquals(0, thrDriver.getHiveHandleSize());
    LOG.info("@@ Completed all pollers. Total thrift errors: " + errCount.get());
    assertEquals(launchedQueries, QUERIES);
    assertEquals(thrs.size(), QUERIES * THREADS);
    assertEquals(errCount.get(), 0);
}
From source file: com.heliosdecompiler.helios.gui.controller.FileTreeController.java
@FXML
public void initialize() {
    this.rootItem = new TreeItem<>(new TreeNode("[root]"));
    this.root.setRoot(this.rootItem);
    this.root.setCellFactory(new TreeCellFactory<>(node -> {
        if (node.getParent() == null) {
            ContextMenu export = new ContextMenu();
            MenuItem exportItem = new MenuItem("Export");
            export.setOnAction(e -> {
                File file = messageHandler.chooseFile().withInitialDirectory(new File("."))
                        .withTitle(Message.GENERIC_CHOOSE_EXPORT_LOCATION_JAR.format())
                        .withExtensionFilter(new FileFilter(Message.FILETYPE_JAVA_ARCHIVE.format(), "*.jar"),
                                true)
                        .promptSave();
                OpenedFile openedFile = (OpenedFile) node.getMetadata().get(OpenedFile.OPENED_FILE);
                Map<String, byte[]> clone = new HashMap<>(openedFile.getContents());
                backgroundTaskHelper.submit(
                        new BackgroundTask(Message.TASK_SAVING_FILE.format(node.getDisplayName()), true, () -> {
                            try {
                                if (!file.exists()) {
                                    if (!file.createNewFile()) {
                                        throw new IOException("Could not create export file");
                                    }
                                }
                                try (ZipOutputStream zipOutputStream = new ZipOutputStream(
                                        new FileOutputStream(file))) {
                                    for (Map.Entry<String, byte[]> ent : clone.entrySet()) {
                                        ZipEntry zipEntry = new ZipEntry(ent.getKey());
                                        zipOutputStream.putNextEntry(zipEntry);
                                        zipOutputStream.write(ent.getValue());
                                        zipOutputStream.closeEntry();
                                    }
                                }
                                messageHandler.handleMessage(Message.GENERIC_EXPORTED.format());
                            } catch (IOException ex) {
                                messageHandler.handleException(Message.ERROR_IOEXCEPTION_OCCURRED.format(), ex);
                            }
                        }));
            });
            export.getItems().add(exportItem);
            return export;
        }
        return null;
    }));
    root.addEventHandler(KeyEvent.KEY_RELEASED, event -> {
        if (event.getCode() == KeyCode.ENTER) {
            TreeItem<TreeNode> selected = this.root.getSelectionModel().getSelectedItem();
            if (selected != null) {
                if (selected.getChildren().size() != 0) {
                    selected.setExpanded(!selected.isExpanded());
                } else {
                    getParentController().getAllFilesViewerController().handleClick(selected.getValue());
                }
            }
        }
    });

    Tooltip tooltip = new Tooltip();
    StringBuilder search = new StringBuilder();
    List<TreeItem<TreeNode>> searchContext = new ArrayList<>();
    AtomicInteger searchIndex = new AtomicInteger();

    root.focusedProperty().addListener((observable, oldValue, newValue) -> {
        if (!newValue) {
            tooltip.hide();
            search.setLength(0);
        }
    });
    root.boundsInLocalProperty().addListener((observable, oldValue, newValue) -> {
        Bounds bounds = root.localToScreen(newValue);
        tooltip.setAnchorX(bounds.getMinX());
        tooltip.setAnchorY(bounds.getMinY());
    });
    root.addEventHandler(KeyEvent.KEY_PRESSED, event -> {
        if (tooltip.isShowing() && event.getCode() == KeyCode.UP) {
            if (searchIndex.decrementAndGet() < 0) {
                searchIndex.set(searchContext.size() - 1);
            }
        } else if (tooltip.isShowing() && event.getCode() == KeyCode.DOWN) {
            if (searchIndex.incrementAndGet() >= searchContext.size()) {
                searchIndex.set(0);
            }
        } else {
            return;
        }
        event.consume();
        root.scrollTo(root.getRow(searchContext.get(searchIndex.get())));
        root.getSelectionModel().select(searchContext.get(searchIndex.get()));
    });
    root.addEventHandler(KeyEvent.KEY_TYPED, event -> {
        if (event.getCharacter().charAt(0) == '\b') {
            if (search.length() > 0) {
                search.setLength(search.length() - 1);
            }
        } else if (event.getCharacter().charAt(0) == '\u001B') { // esc
            tooltip.hide();
            search.setLength(0);
            return;
        } else if (search.length() > 0
                || (search.length() == 0 && StringUtils.isAlphanumeric(event.getCharacter()))) {
            search.append(event.getCharacter());
            if (!tooltip.isShowing()) {
                tooltip.show(root.getScene().getWindow());
            }
        }
        if (!tooltip.isShowing())
            return;
        String str = search.toString();
        tooltip.setText("Search for: " + str);
        searchContext.clear();
        ArrayDeque<TreeItem<TreeNode>> deque = new ArrayDeque<>();
        deque.addAll(rootItem.getChildren());
        while (!deque.isEmpty()) {
            TreeItem<TreeNode> item = deque.poll();
            if (item.getValue().getDisplayName().contains(str)) {
                searchContext.add(item);
            }
            if (item.isExpanded() && item.getChildren().size() > 0)
                deque.addAll(item.getChildren());
        }
        searchIndex.set(0);
        if (searchContext.size() > 0) {
            root.scrollTo(root.getRow(searchContext.get(0)));
            root.getSelectionModel().select(searchContext.get(0));
        }
    });
    openedFileController.loadedFiles().addListener((MapChangeListener<String, OpenedFile>) change -> {
        if (change.getValueAdded() != null) {
            updateTree(change.getValueAdded());
        }
        if (change.getValueRemoved() != null) {
            this.rootItem.getChildren()
                    .removeIf(ti -> ti.getValue().equals(change.getValueRemoved().getRoot()));
        }
    });
}
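The searchIndex above implements a wrap-around cursor over the current search results. Pulled out on its own, the pattern looks like this (hypothetical class; like the JavaFX code above, it assumes single-threaded use and a non-empty result list):

import java.util.concurrent.atomic.AtomicInteger;

public class WrapAroundCursor {
    private final AtomicInteger index = new AtomicInteger(); // starts at result 0

    // Move to the previous result; assumes resultCount > 0.
    int previous(int resultCount) {
        if (index.decrementAndGet() < 0) {
            index.set(resultCount - 1); // wrap from the first result to the last
        }
        return index.get();
    }

    // Move to the next result; assumes resultCount > 0.
    int next(int resultCount) {
        if (index.incrementAndGet() >= resultCount) {
            index.set(0); // wrap from the last result back to the first
        }
        return index.get();
    }
}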