List of usage examples for java.util.concurrent.atomic.AtomicInteger.get()
public final int get()
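get() returns the current value; the read has volatile memory semantics, so a value written by one thread before the read is visible to the thread calling get(). Before the real-world excerpts below, a minimal, self-contained sketch of that behavior:

import java.util.concurrent.atomic.AtomicInteger;

public class AtomicIntegerGetDemo {
    public static void main(String[] args) {
        AtomicInteger counter = new AtomicInteger(5);
        int snapshot = counter.get();       // volatile read of the current value
        counter.incrementAndGet();
        System.out.println(snapshot);       // prints 5 -- the earlier snapshot is unchanged
        System.out.println(counter.get());  // prints 6
    }
}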
From source file:org.eclipse.hono.service.credentials.impl.FileBasedCredentialsService.java
protected void saveToFile(final Future<Void> writeResult) {
    if (!dirty) {
        log.trace("credentials registry does not need to be persisted");
        return;
    }
    final FileSystem fs = vertx.fileSystem();
    if (!fs.existsBlocking(filename)) {
        fs.createFileBlocking(filename);
    }
    final AtomicInteger idCount = new AtomicInteger();
    JsonArray tenants = new JsonArray();
    for (Entry<String, Map<String, JsonArray>> entry : credentials.entrySet()) {
        JsonArray credentialsArray = new JsonArray();
        for (Entry<String, JsonArray> credentialEntry : entry.getValue().entrySet()) { // authId -> full json attributes object
            JsonArray singleAuthIdCredentials = credentialEntry.getValue(); // from one authId
            credentialsArray.add(singleAuthIdCredentials);
            idCount.incrementAndGet();
        }
        tenants.add(new JsonObject()
                .put(FIELD_TENANT, entry.getKey())
                .put(ARRAY_CREDENTIALS, credentialsArray));
    }
    fs.writeFile(filename, Buffer.factory.buffer(tenants.encodePrettily()), writeAttempt -> {
        if (writeAttempt.succeeded()) {
            dirty = false;
            log.trace("successfully wrote {} credentials to file {}", idCount.get(), filename);
            writeResult.complete();
        } else {
            log.warn("could not write credentials to file {}", filename, writeAttempt.cause());
            writeResult.fail(writeAttempt.cause());
        }
    });
}
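The pattern above, incrementing a counter while assembling output and then reading it with get() inside an asynchronous completion callback, can be distilled into a plain-Java sketch; the writeAsync helper is hypothetical, standing in for Vert.x's FileSystem.writeFile:

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicInteger;

class SaveSketch {
    public static void main(String[] args) throws Exception {
        new SaveSketch().save(List.of("a", "b", "c"));
        Thread.sleep(100); // give the async callback time to run
    }

    void save(List<String> entries) {
        AtomicInteger count = new AtomicInteger();
        StringBuilder payload = new StringBuilder();
        for (String entry : entries) {
            payload.append(entry).append('\n');
            count.incrementAndGet();
        }
        // The callback may run on another thread; AtomicInteger makes the
        // counter safely visible there.
        writeAsync(payload.toString()).whenComplete((v, err) -> {
            if (err == null) {
                System.out.println("wrote " + count.get() + " entries");
            }
        });
    }

    // Hypothetical async write, standing in for vertx.fileSystem().writeFile(...).
    CompletableFuture<Void> writeAsync(String data) {
        return CompletableFuture.runAsync(() -> { /* write data somewhere */ });
    }
}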
From source file:org.jspresso.hrsample.backend.JspressoUnitOfWorkTest.java
/**
 * Tests in-transaction collection element update with optimistic locking.
 */
@Test
public void testInTXCollectionElementUpdate() {
    final HibernateBackendController hbc = (HibernateBackendController) getBackendController();
    final AtomicInteger countDown = new AtomicInteger(10);
    ExecutorService es = Executors.newFixedThreadPool(countDown.get());
    List<Future<Set<String>>> futures = new ArrayList<Future<Set<String>>>();
    for (int t = countDown.intValue(); t > 0; t--) {
        futures.add(es.submit(new Callable<Set<String>>() {
            @Override
            public Set<String> call() throws Exception {
                final HibernateBackendController threadHbc = getApplicationContext()
                        .getBean("applicationBackController", HibernateBackendController.class);
                final TransactionTemplate threadTT = threadHbc.getTransactionTemplate();
                threadHbc.start(hbc.getLocale(), hbc.getClientTimeZone());
                threadHbc.setApplicationSession(hbc.getApplicationSession());
                BackendControllerHolder.setThreadBackendController(threadHbc);
                return threadTT.execute(new TransactionCallback<Set<String>>() {
                    /**
                     * {@inheritDoc}
                     */
                    @Override
                    public Set<String> doInTransaction(TransactionStatus status) {
                        DetachedCriteria compCrit = DetachedCriteria.forClass(Company.class);
                        Set<String> names = new HashSet<String>();
                        Company c = (Company) compCrit.getExecutableCriteria(threadHbc.getHibernateSession())
                                .list().iterator().next();
                        synchronized (countDown) {
                            countDown.decrementAndGet();
                            // Wait for all threads to arrive here so that we are sure they
                            // have all read the same data.
                            try {
                                countDown.wait();
                            } catch (InterruptedException ex) {
                                throw new BackendException("Test has been interrupted");
                            }
                        }
                        if (c.getName().startsWith("TX_")) {
                            throw new BackendException("Wrong data read from DB");
                        }
                        c.setName("TX_" + Long.toHexString(System.currentTimeMillis()));
                        names.add(c.getName());
                        for (Department d : c.getDepartments()) {
                            d.setName(Long.toHexString(System.currentTimeMillis()));
                            names.add(d.getName());
                        }
                        return names;
                    }
                });
            }
        }));
    }
    while (countDown.get() > 0) {
        try {
            Thread.sleep(200);
        } catch (InterruptedException ex) {
            throw new BackendException("Test has been interrupted");
        }
    }
    synchronized (countDown) {
        countDown.notifyAll();
    }
    int successfulTxCount = 0;
    Set<String> names = new HashSet<String>();
    for (Future<Set<String>> f : futures) {
        try {
            names = f.get();
            successfulTxCount++;
        } catch (Exception ex) {
            if (ex.getCause() instanceof OptimisticLockingFailureException) {
                // Safely ignore since this is what we are testing.
            } else {
                throw new BackendException(ex);
            }
        }
    }
    es.shutdown();
    assertTrue("Only 1 TX succeeded", successfulTxCount == 1);
    DetachedCriteria compCrit = DetachedCriteria.forClass(Company.class);
    Company c = hbc.findFirstByCriteria(compCrit, EMergeMode.MERGE_LAZY, Company.class);
    assertTrue("the company name is the one of the successful TX", names.contains(c.getName()));
    for (Department d : c.getDepartments()) {
        assertTrue("the department name is the one of the successful TX", names.contains(d.getName()));
    }
}
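Stripped of the Hibernate machinery, the coordination idiom above is: worker threads decrement a shared AtomicInteger and park, the main thread polls get() until it reaches zero, then releases everyone with notifyAll(). A sketch of just that idiom; in new code a CountDownLatch or CyclicBarrier would usually be preferred:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;

class BarrierSketch {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger countDown = new AtomicInteger(4);
        ExecutorService es = Executors.newFixedThreadPool(countDown.get());
        for (int t = 0; t < 4; t++) {
            es.submit(() -> {
                synchronized (countDown) {
                    countDown.decrementAndGet();
                    try {
                        countDown.wait(); // park until the main thread calls notifyAll()
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                        return;
                    }
                }
                System.out.println("released: " + Thread.currentThread().getName());
            });
        }
        while (countDown.get() > 0) { // poll until every worker has arrived
            Thread.sleep(50);
        }
        synchronized (countDown) {
            countDown.notifyAll(); // release all parked workers at once
        }
        es.shutdown();
    }
}

Note that each worker decrements and calls wait() while holding the monitor, so the main thread cannot sneak its notifyAll() in between the two; this is the same reasoning the original test depends on.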
From source file:com.heliosdecompiler.bootstrapper.Bootstrapper.java
private static HeliosData loadHelios() throws IOException {
    System.out.println("Finding Helios implementation");
    HeliosData data = new HeliosData();
    boolean needsToDownload = !IMPL_FILE.exists();
    if (!needsToDownload) {
        try (JarFile jarFile = new JarFile(IMPL_FILE)) {
            ZipEntry entry = jarFile.getEntry("META-INF/MANIFEST.MF");
            if (entry == null) {
                needsToDownload = true;
            } else {
                Manifest manifest = new Manifest(jarFile.getInputStream(entry));
                String ver = manifest.getMainAttributes().getValue("Implementation-Version");
                try {
                    data.buildNumber = Integer.parseInt(ver);
                    data.version = manifest.getMainAttributes().getValue("Version");
                    data.mainClass = manifest.getMainAttributes().getValue("Main-Class");
                } catch (NumberFormatException e) {
                    needsToDownload = true;
                }
            }
        } catch (IOException e) {
            needsToDownload = true;
        }
    }
    if (needsToDownload) {
        URL latestJar = new URL(LATEST_JAR);
        System.out.println("Downloading latest Helios implementation");
        FileOutputStream out = new FileOutputStream(IMPL_FILE);
        HttpURLConnection connection = (HttpURLConnection) latestJar.openConnection();
        if (connection.getResponseCode() == 200) {
            int contentLength = connection.getContentLength();
            if (contentLength > 0) {
                InputStream stream = connection.getInputStream();
                byte[] buffer = new byte[1024];
                int amnt;
                AtomicInteger total = new AtomicInteger();
                AtomicBoolean stop = new AtomicBoolean(false);
                Thread progressBar = new Thread() {
                    public void run() {
                        JPanel panel = new JPanel();
                        panel.setBorder(BorderFactory.createEmptyBorder(20, 20, 20, 20));
                        JLabel label = new JLabel();
                        label.setText("Downloading latest Helios build");
                        panel.add(label);
                        GridLayout layout = new GridLayout();
                        layout.setColumns(1);
                        layout.setRows(3);
                        panel.setLayout(layout);
                        JProgressBar pbar = new JProgressBar();
                        pbar.setMinimum(0);
                        pbar.setMaximum(100);
                        panel.add(pbar);
                        JTextArea textArea = new JTextArea(1, 3);
                        textArea.setOpaque(false);
                        textArea.setEditable(false);
                        textArea.setText("Downloaded 00.00MB/00.00MB");
                        panel.add(textArea);
                        JFrame frame = new JFrame();
                        frame.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);
                        frame.setContentPane(panel);
                        frame.pack();
                        frame.setLocationRelativeTo(null);
                        frame.setVisible(true);
                        while (!stop.get()) {
                            SwingUtilities.invokeLater(
                                    () -> pbar.setValue((int) (100.0 * total.get() / contentLength)));
                            textArea.setText("Downloaded " + bytesToMeg(total.get()) + "MB/"
                                    + bytesToMeg(contentLength) + "MB");
                            try {
                                Thread.sleep(100);
                            } catch (InterruptedException ignored) {
                            }
                        }
                        frame.dispose();
                    }
                };
                progressBar.start();
                while ((amnt = stream.read(buffer)) != -1) {
                    out.write(buffer, 0, amnt);
                    total.addAndGet(amnt);
                }
                stop.set(true);
                return loadHelios();
            } else {
                throw new IOException("Content-Length set to " + connection.getContentLength());
            }
        } else if (connection.getResponseCode() == 404) {
            // Most likely the bootstrapper is out of date.
            throw new RuntimeException("Bootstrapper out of date!");
        } else {
            throw new IOException(connection.getResponseCode() + ": " + connection.getResponseMessage());
        }
    }
    return data;
}
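The core of the progress-bar trick above is one thread accumulating byte counts with addAndGet() while another thread periodically samples get(). A minimal console version of that shape, with a simulated download standing in for the HTTP stream:

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

class ProgressSketch {
    public static void main(String[] args) throws Exception {
        final int contentLength = 1_000_000; // would come from Content-Length
        final AtomicInteger total = new AtomicInteger();
        final AtomicBoolean stop = new AtomicBoolean(false);

        Thread reporter = new Thread(() -> {
            while (!stop.get()) {
                System.out.printf("downloaded %d%%%n", 100 * total.get() / contentLength);
                try { Thread.sleep(100); } catch (InterruptedException e) { return; }
            }
        });
        reporter.start();

        while (total.get() < contentLength) { // simulated download loop
            total.addAndGet(64 * 1024);       // pretend we read a 64 KiB chunk
            Thread.sleep(20);
        }
        stop.set(true);
        reporter.join();
    }
}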
From source file:org.apache.blur.shell.QueryCommand.java
private void renderRowMultiFamily(TableDisplay tableDisplay, BlurResults blurResults) {
    AtomicInteger line = new AtomicInteger();
    tableDisplay.setHeader(0, highlight(getTruncatedVersion("result#")));
    tableDisplay.setHeader(1, highlight(getTruncatedVersion("rowid")));
    tableDisplay.setHeader(2, highlight(getTruncatedVersion("recordid")));
    Map<String, List<String>> columnOrder = new HashMap<String, List<String>>();
    int result = 0;
    for (BlurResult blurResult : blurResults.getResults()) {
        FetchResult fetchResult = blurResult.getFetchResult();
        FetchRowResult rowResult = fetchResult.getRowResult();
        if (rowResult != null) {
            Row row = rowResult.getRow();
            String id = row.getId();
            tableDisplay.set(1, line.get(), white(getTruncatedVersion(toStringBinary(id))));
            List<Record> records = order(row.getRecords());
            String currentFamily = "#";
            for (Record record : records) {
                currentFamily = displayRecordInRowMultiFamilyView(result, tableDisplay, line, columnOrder,
                        currentFamily, record);
            }
        } else {
            String currentFamily = "#";
            FetchRecordResult recordResult = fetchResult.getRecordResult();
            Record record = recordResult.getRecord();
            currentFamily = displayRecordInRowMultiFamilyView(result, tableDisplay, line, columnOrder,
                    currentFamily, record);
        }
        result++;
    }
}
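Here get() is not about concurrency at all: the AtomicInteger named line is a mutable line cursor that helper methods can both read and advance, since Java has no pass-by-reference for int. A sketch of that single-threaded idiom:

import java.util.concurrent.atomic.AtomicInteger;

class LineCursorSketch {
    public static void main(String[] args) {
        AtomicInteger line = new AtomicInteger();
        renderRow(line, "row-1");
        renderRow(line, "row-2");
        System.out.println("rows rendered up to line " + line.get());
    }

    // The helper reads the current position and advances it; the caller
    // sees the updated position through the shared AtomicInteger.
    static void renderRow(AtomicInteger line, String row) {
        System.out.println("line " + line.getAndIncrement() + ": " + row);
    }
}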
From source file:dk.statsbiblioteket.util.JobControllerTest.java
public void testPopFinished() throws Exception {
    final int JOBS = 10;
    final AtomicInteger counter = new AtomicInteger(0);
    JobController<Long> controller = new JobController<Long>(10) {
        @Override
        protected void afterExecute(Future<Long> finished) {
            counter.incrementAndGet();
        }
    };
    for (int i = 0; i < JOBS; i++) {
        synchronized (Thread.currentThread()) {
            Thread.currentThread().wait(10);
        }
        controller.submit(new Shout(50));
    }
    int first = controller.popFinished().size();
    synchronized (Thread.currentThread()) {
        Thread.currentThread().wait(60);
    }
    int second = controller.popFinished().size();
    assertTrue("The first pop should yield some results. Got " + first, first > 0);
    assertTrue("The second pop should yield some results. Got " + second, second > 0);
    log.info("first popFinished: " + first + ", second popFinished: " + second);
    assertEquals("The total pops should be correct", 10, first + second);
    assertEquals("The callback count should be correct", 10, counter.get());
}
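The test relies on a common unit-test idiom: bump an AtomicInteger from a callback that may fire on worker threads, then assert on get() once the work has drained. Reduced to its essentials, with a plain executor standing in for JobController:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

class CallbackCountSketch {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger counter = new AtomicInteger(0);
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 10; i++) {
            pool.execute(counter::incrementAndGet); // the "afterExecute" callback
        }
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
        // All tasks have finished, and AtomicInteger's volatile semantics
        // make their increments visible to this read.
        if (counter.get() != 10) {
            throw new AssertionError("callback count should be 10, was " + counter.get());
        }
        System.out.println("callback count: " + counter.get());
    }
}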
From source file:com.streamsets.pipeline.stage.origin.spooldir.TestSpoolDirSource.java
@Test
public void testAllowLateDirectory() throws Exception {
    File f = new File("target", UUID.randomUUID().toString());

    SpoolDirConfigBean conf = new SpoolDirConfigBean();
    conf.dataFormat = DataFormat.TEXT;
    conf.spoolDir = f.getAbsolutePath();
    conf.batchSize = 10;
    conf.overrunLimit = 100;
    conf.poolingTimeoutSecs = 1;
    conf.filePattern = "file-[0-9].log";
    conf.pathMatcherMode = PathMatcherMode.GLOB;
    conf.maxSpoolFiles = 10;
    conf.initialFileToProcess = null;
    conf.dataFormatConfig.compression = Compression.NONE;
    conf.dataFormatConfig.filePatternInArchive = "*";
    conf.errorArchiveDir = null;
    conf.postProcessing = PostProcessingOptions.ARCHIVE;
    conf.archiveDir = createTestDir();
    conf.retentionTimeMins = 10;
    conf.dataFormatConfig.textMaxLineLen = 10;
    conf.dataFormatConfig.onParseError = OnParseError.ERROR;
    conf.dataFormatConfig.maxStackTraceLines = 0;

    TSpoolDirSource source = new TSpoolDirSource(conf);
    PushSourceRunner runner = new PushSourceRunner.Builder(TSpoolDirSource.class, source)
            .addOutputLane("lane").build();

    // Late directories not allowed; init should fail.
    conf.allowLateDirectory = false;
    try {
        runner.runInit();
        Assert.fail("Should throw an exception if the directory does not exist");
    } catch (StageException e) {
        // Expected.
    }

    // Late directories allowed; wait and we should be able to detect the file and read.
    conf.allowLateDirectory = true;
    TSpoolDirSource sourceWithLateDirectory = new TSpoolDirSource(conf);
    PushSourceRunner runner2 = new PushSourceRunner.Builder(TSpoolDirSource.class, sourceWithLateDirectory)
            .addOutputLane("lane").build();
    AtomicInteger batchCount = new AtomicInteger(0);
    runner2.runInit();

    try {
        runner2.runProduce(new HashMap<>(), 10, output -> {
            batchCount.incrementAndGet();
            if (batchCount.get() == 1) {
                runner2.setStop();
            }
        });
        runner2.waitOnProduce();
        TestOffsetUtil.compare(NULL_FILE_OFFSET, runner2.getOffsets());
        Assert.assertEquals(1, runner2.getEventRecords().size());
        Assert.assertEquals("no-more-data", runner2.getEventRecords().get(0).getEventType());

        Assert.assertTrue(f.mkdirs());

        File file = new File(source.spoolDir, "file-0.log").getAbsoluteFile();
        Files.createFile(file.toPath());
        source.file = file;
        source.offset = 1;
        source.maxBatchSize = 10;

        Thread.sleep(5000L);

        PushSourceRunner runner3 = new PushSourceRunner.Builder(TSpoolDirSource.class, source)
                .addOutputLane("lane").build();
        runner3.runInit();
        runner3.runProduce(ImmutableMap.of(Source.POLL_SOURCE_OFFSET_KEY, "file-0.log::1"), 10, output -> {
            batchCount.incrementAndGet();
            if (batchCount.get() > 1) {
                runner3.setStop();
            }
        });
        runner3.waitOnProduce();
        TestOffsetUtil.compare("file-0.log::1", runner3.getOffsets());
        Assert.assertEquals(1, runner3.getEventRecords().size());
        Assert.assertEquals("new-file", runner3.getEventRecords().get(0).getEventType());
        runner3.runDestroy();
    } finally {
        runner2.runDestroy();
    }
}
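The batchCount counter above is an effectively-final workaround: a lambda cannot reassign a local int, so the test captures an AtomicInteger, increments it per batch, and checks get() to decide when to stop. A minimal sketch of that shape; runBatches is hypothetical, standing in for PushSourceRunner.runProduce:

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

class BatchStopSketch {
    private volatile boolean stopped;

    public static void main(String[] args) {
        new BatchStopSketch().run();
    }

    void run() {
        AtomicInteger batchCount = new AtomicInteger(0);
        // A captured local int could not be mutated here; the AtomicInteger can.
        runBatches(output -> {
            batchCount.incrementAndGet();
            if (batchCount.get() == 1) {
                stopped = true; // stop after the first batch
            }
        });
        System.out.println("batches seen: " + batchCount.get());
    }

    // Hypothetical driver that delivers batches to the callback until stopped.
    void runBatches(Consumer<String> onBatch) {
        while (!stopped) {
            onBatch.accept("batch");
        }
    }
}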
From source file:com.linkedin.pinot.tools.perf.QueryRunner.java
/**
 * Use multiple threads to run queries as fast as possible.
 * <p>Use a concurrent linked queue to buffer the queries to be sent. Use the main thread to insert queries into the
 * queue whenever the queue length is low, and start <code>numThreads</code> worker threads to fetch queries from the
 * queue and send them.
 * <p>The main thread is responsible for collecting and logging the statistic information periodically.
 * <p>Queries are picked sequentially from the query file.
 * <p>The query runner will stop once all queries in the query file have been executed the configured number of times.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numTimesToRunQueries number of times to run all queries in the query file, 0 means infinite times.
 * @param numThreads number of threads sending queries.
 * @param reportIntervalMs report interval in milliseconds.
 * @param numIntervalsToReportAndClearStatistics number of report intervals to report detailed statistics and clear
 *          them, 0 means never.
 * @throws Exception
 */
public static void multiThreadedQueryRunner(PerfBenchmarkDriverConf conf, String queryFile,
        int numTimesToRunQueries, int numThreads, int reportIntervalMs,
        int numIntervalsToReportAndClearStatistics) throws Exception {
    List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }

    PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
    AtomicInteger numQueriesExecuted = new AtomicInteger(0);
    AtomicLong totalBrokerTime = new AtomicLong(0L);
    AtomicLong totalClientTime = new AtomicLong(0L);
    List<Statistics> statisticsList = Collections.singletonList(new Statistics(CLIENT_TIME_STATISTICS));

    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Worker(driver, queryQueue, numQueriesExecuted, totalBrokerTime,
                totalClientTime, statisticsList));
    }
    executorService.shutdown();

    long startTime = System.currentTimeMillis();
    long reportStartTime = startTime;
    int numReportIntervals = 0;
    int numTimesExecuted = 0;
    while (numTimesToRunQueries == 0 || numTimesExecuted < numTimesToRunQueries) {
        if (executorService.isTerminated()) {
            LOGGER.error("All threads got exception and already dead.");
            return;
        }
        for (String query : queries) {
            queryQueue.add(query);

            // Keep 20 queries inside the query queue.
            while (queryQueue.size() == 20) {
                Thread.sleep(1);

                long currentTime = System.currentTimeMillis();
                if (currentTime - reportStartTime >= reportIntervalMs) {
                    long timePassed = currentTime - startTime;
                    int numQueriesExecutedInt = numQueriesExecuted.get();
                    LOGGER.info("Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, Average Broker Time: {}ms, "
                            + "Average Client Time: {}ms.", timePassed, numQueriesExecutedInt,
                            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                            totalBrokerTime.get() / (double) numQueriesExecutedInt,
                            totalClientTime.get() / (double) numQueriesExecutedInt);
                    reportStartTime = currentTime;
                    numReportIntervals++;

                    if ((numIntervalsToReportAndClearStatistics != 0)
                            && (numReportIntervals == numIntervalsToReportAndClearStatistics)) {
                        numReportIntervals = 0;
                        startTime = currentTime;
                        reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                                statisticsList);
                    }
                }
            }
        }
        numTimesExecuted++;
    }

    // Wait for all queries to be executed.
    while (queryQueue.size() != 0) {
        Thread.sleep(1);
    }
    executorService.shutdownNow();
    while (!executorService.isTerminated()) {
        Thread.sleep(1);
    }

    long timePassed = System.currentTimeMillis() - startTime;
    int numQueriesExecutedInt = numQueriesExecuted.get();
    LOGGER.info("--------------------------------------------------------------------------------");
    LOGGER.info("FINAL REPORT:");
    LOGGER.info("Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, Average Broker Time: {}ms, "
            + "Average Client Time: {}ms.", timePassed, numQueriesExecutedInt,
            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
            totalBrokerTime.get() / (double) numQueriesExecutedInt,
            totalClientTime.get() / (double) numQueriesExecutedInt);
    for (Statistics statistics : statisticsList) {
        statistics.report();
    }
}
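The benchmark's reporting loop boils down to worker threads bumping shared atomic counters per query while the main thread periodically samples them with get() to compute throughput. A compact sketch of that structure, with a sleep standing in for query execution; note how get() is snapshotted once per report and reused, as in the original:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;

class ThroughputSketch {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger numQueriesExecuted = new AtomicInteger(0);
        final AtomicLong totalClientTime = new AtomicLong(0L);
        ExecutorService workers = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 4; i++) {
            workers.submit(() -> {
                try {
                    while (true) {
                        long start = System.currentTimeMillis();
                        Thread.sleep(5); // stand-in for executing one query
                        totalClientTime.addAndGet(System.currentTimeMillis() - start);
                        numQueriesExecuted.incrementAndGet();
                    }
                } catch (InterruptedException e) {
                    // shutdownNow() interrupts us; exit quietly
                }
            });
        }
        long startTime = System.currentTimeMillis();
        for (int report = 0; report < 3; report++) {
            Thread.sleep(1000);
            int executed = numQueriesExecuted.get(); // snapshot once, reuse below
            double seconds = (System.currentTimeMillis() - startTime) / 1000.0;
            System.out.printf("executed=%d, qps=%.1f, avg client time=%.3fms%n",
                    executed, executed / seconds, totalClientTime.get() / (double) executed);
        }
        workers.shutdownNow();
    }
}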
From source file:oz.hadoop.yarn.api.core.LocalApplicationLaunchTests.java
@Test //(timeout=10000)
public void validateWithReplyListener() throws Exception {
    final AtomicInteger repliesCounter = new AtomicInteger();
    YarnApplication<DataProcessor> yarnApplication = YarnAssembly
            .forApplicationContainer(SimpleEchoContainer.class).containerCount(2).memory(512)
            .withApplicationMaster().maxAttempts(2).priority(2).build("sample-yarn-application");
    yarnApplication.registerReplyListener(new ContainerReplyListener() {
        @Override
        public void onReply(ByteBuffer replyData) {
            repliesCounter.incrementAndGet();
        }
    });
    DataProcessor dataProcessor = yarnApplication.launch();
    assertEquals(2, dataProcessor.containers());
    for (int i = 0; i < 2; i++) {
        for (int j = 0; j < dataProcessor.containers(); j++) {
            dataProcessor.process(ByteBuffer.wrap(("Hello Yarn!-" + i).getBytes()));
        }
    }
    assertTrue(yarnApplication.isRunning());
    yarnApplication.shutDown();
    assertEquals(repliesCounter.get(), dataProcessor.completedSinceStart());
    assertFalse(yarnApplication.isRunning());
}
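Same idiom as in the JobController test above, but through a listener interface: each asynchronous reply bumps the counter, and get() is compared against the framework's own completion count after shutdown. A tiny sketch of the listener half; ReplyListener here is a hypothetical stand-in for ContainerReplyListener:

import java.nio.ByteBuffer;
import java.util.concurrent.atomic.AtomicInteger;

class ReplyCountSketch {
    interface ReplyListener { void onReply(ByteBuffer replyData); }

    public static void main(String[] args) {
        AtomicInteger repliesCounter = new AtomicInteger();
        ReplyListener listener = replyData -> repliesCounter.incrementAndGet();
        // Simulate the framework delivering four replies (possibly from other threads).
        for (int i = 0; i < 4; i++) {
            listener.onReply(ByteBuffer.wrap(("reply-" + i).getBytes()));
        }
        System.out.println("replies: " + repliesCounter.get()); // 4
    }
}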
From source file:de.blizzy.rust.lootconfig.LootConfigDump.java
private void run() throws IOException {
    LootConfig config = loadConfig(configFile);
    Table<LootContainer, Category, Multiset<Float>> dropChances = HashBasedTable.create();
    Collection<LootContainer> lootContainers = config.LootContainers.values();
    config.Categories.values().stream().filter(Category::hasItemsOrBlueprints).forEach(category -> {
        lootContainers.forEach(lootContainer -> {
            Multiset<Float> categoryInContainerDropChances = getItemCategoryDropChances(category,
                    lootContainer);
            if (!categoryInContainerDropChances.isEmpty()) {
                dropChances.put(lootContainer, category, categoryInContainerDropChances);
            }
        });
    });
    dropChances.rowKeySet().stream()
            .filter(lootContainer -> SHOW_DMLOOT || !lootContainer.name.contains("dmloot"))
            .sorted((lootContainer1, lootContainer2) -> Collator.getInstance().compare(lootContainer1.name,
                    lootContainer2.name))
            .forEach(lootContainer -> {
                System.out.printf("%s (blueprint fragments: %s)", lootContainer,
                        lootContainer.DistributeFragments ? "yes" : "no").println();
                Map<Category, Multiset<Float>> lootContainerDropChances = dropChances.row(lootContainer);
                AtomicDouble lootContainerDropChancesSum = new AtomicDouble();
                AtomicInteger categoriesCount = new AtomicInteger();
                lootContainerDropChances.entrySet().stream().sorted(this::compareByChances).limit(7)
                        .forEach(categoryDropChancesEntry -> {
                            Category category = categoryDropChancesEntry.getKey();
                            Multiset<Float> categoryDropChances = categoryDropChancesEntry.getValue();
                            float categoryDropChancesSum = sum(categoryDropChances);
                            lootContainerDropChancesSum.addAndGet(categoryDropChancesSum);
                            System.out.printf("  %s %s%s%s", formatPercent(categoryDropChancesSum), category,
                                    (category.Items.size() > 0) ? " (" + formatItems(category) + ")" : "",
                                    (category.Blueprints.size() > 0) ? " [" + formatBlueprints(category) + "]" : "")
                                    .println();
                            categoriesCount.incrementAndGet();
                        });
                if (categoriesCount.get() < lootContainerDropChances.size()) {
                    System.out.printf("  %s other (%d)",
                            formatPercent(1f - (float) lootContainerDropChancesSum.get()),
                            lootContainerDropChances.size() - categoriesCount.get()).println();
                }
            });
}
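The AtomicInteger and Guava AtomicDouble here exist only because the forEach lambdas need mutable accumulators over effectively-final locals; get() then reads the totals back out after the stream finishes. A plain-JDK sketch of the same accumulate-then-read shape, assuming a sequential stream so a simple array cell can replace AtomicDouble:

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

class StreamAccumulatorSketch {
    public static void main(String[] args) {
        List<Double> chances = List.of(0.40, 0.25, 0.10, 0.05);
        AtomicInteger categoriesCount = new AtomicInteger();
        double[] sum = {0.0}; // single-element array: another mutable-capture workaround
        chances.stream().limit(3).forEach(chance -> {
            sum[0] += chance;
            categoriesCount.incrementAndGet();
        });
        if (categoriesCount.get() < chances.size()) {
            System.out.printf("other: %.0f%% (%d more categories)%n",
                    (1.0 - sum[0]) * 100, chances.size() - categoriesCount.get());
        }
    }
}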
From source file:org.apache.hadoop.hbase.master.procedure.TestMasterProcedureScheduler.java
/**
 * Verify that "write" operations for a single table are serialized,
 * but different tables can be executed in parallel.
 */
@Test(timeout = 90000)
public void testConcurrentWriteOps() throws Exception {
    final TestTableProcSet procSet = new TestTableProcSet(queue);

    final int NUM_ITEMS = 10;
    final int NUM_TABLES = 4;
    final AtomicInteger opsCount = new AtomicInteger(0);
    for (int i = 0; i < NUM_TABLES; ++i) {
        TableName tableName = TableName.valueOf(String.format("testtb-%04d", i));
        for (int j = 1; j < NUM_ITEMS; ++j) {
            procSet.addBack(new TestTableProcedure(i * 100 + j, tableName,
                    TableProcedureInterface.TableOperationType.EDIT));
            opsCount.incrementAndGet();
        }
    }
    assertEquals(opsCount.get(), queue.size());

    final Thread[] threads = new Thread[NUM_TABLES * 2];
    final HashSet<TableName> concurrentTables = new HashSet<TableName>();
    final ArrayList<String> failures = new ArrayList<String>();
    final AtomicInteger concurrentCount = new AtomicInteger(0);
    for (int i = 0; i < threads.length; ++i) {
        threads[i] = new Thread() {
            @Override
            public void run() {
                while (opsCount.get() > 0) {
                    try {
                        Procedure proc = procSet.acquire();
                        if (proc == null) {
                            queue.signalAll();
                            if (opsCount.get() > 0) {
                                continue;
                            }
                            break;
                        }
                        TableName tableId = procSet.getTableName(proc);
                        synchronized (concurrentTables) {
                            assertTrue("unexpected concurrency on " + tableId, concurrentTables.add(tableId));
                        }
                        assertTrue(opsCount.decrementAndGet() >= 0);
                        try {
                            long procId = proc.getProcId();
                            int concurrent = concurrentCount.incrementAndGet();
                            assertTrue("inc-concurrent=" + concurrent + " 1 <= concurrent <= " + NUM_TABLES,
                                    concurrent >= 1 && concurrent <= NUM_TABLES);
                            LOG.debug("[S] tableId=" + tableId + " procId=" + procId + " concurrent=" + concurrent);
                            Thread.sleep(2000);
                            concurrent = concurrentCount.decrementAndGet();
                            LOG.debug("[E] tableId=" + tableId + " procId=" + procId + " concurrent=" + concurrent);
                            assertTrue("dec-concurrent=" + concurrent, concurrent < NUM_TABLES);
                        } finally {
                            synchronized (concurrentTables) {
                                assertTrue(concurrentTables.remove(tableId));
                            }
                            procSet.release(proc);
                        }
                    } catch (Throwable e) {
                        LOG.error("Failed " + e.getMessage(), e);
                        synchronized (failures) {
                            failures.add(e.getMessage());
                        }
                    } finally {
                        queue.signalAll();
                    }
                }
            }
        };
        threads[i].start();
    }
    for (int i = 0; i < threads.length; ++i) {
        threads[i].join();
    }
    assertTrue(failures.toString(), failures.isEmpty());
    assertEquals(0, opsCount.get());
    assertEquals(0, queue.size());
    for (int i = 0; i < NUM_TABLES; ++i) {
        TableName table = TableName.valueOf(String.format("testtb-%04d", i));
        assertTrue("queue should be deleted, table=" + table, queue.markTableAsDeleted(table));
    }
}
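The concurrency assertion in that test uses an AtomicInteger as a live gauge: incrementAndGet() on entry to the critical region, decrementAndGet() on exit, with each returned snapshot checked against the expected parallelism bound. A distilled sketch, using a Semaphore to enforce the limit the gauge observes:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

class ConcurrencyGaugeSketch {
    public static void main(String[] args) throws InterruptedException {
        final int MAX_PARALLEL = 4;
        final Semaphore permits = new Semaphore(MAX_PARALLEL);      // enforces the limit
        final AtomicInteger concurrentCount = new AtomicInteger(0); // observes it
        ExecutorService pool = Executors.newFixedThreadPool(8);
        for (int i = 0; i < 32; i++) {
            pool.execute(() -> {
                try {
                    permits.acquire();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    return;
                }
                int concurrent = concurrentCount.incrementAndGet();
                try {
                    if (concurrent < 1 || concurrent > MAX_PARALLEL) {
                        throw new AssertionError("concurrent=" + concurrent);
                    }
                    Thread.sleep(10); // simulated critical section
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                } finally {
                    concurrentCount.decrementAndGet();
                    permits.release();
                }
            });
        }
        pool.shutdown();
        pool.awaitTermination(30, TimeUnit.SECONDS);
        System.out.println("final gauge: " + concurrentCount.get()); // 0
    }
}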