List of usage examples for java.util.concurrent.atomic.AtomicInteger.incrementAndGet()
public final int incrementAndGet()
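incrementAndGet() atomically adds one to the current value and returns the updated value; it is the atomic equivalent of ++i for a shared counter. Before the project examples below, here is a minimal self-contained sketch of the typical shared-counter use (the class name and the thread/iteration counts are illustrative, not taken from any of the quoted projects):

import java.util.concurrent.atomic.AtomicInteger;

public class IncrementAndGetDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger counter = new AtomicInteger(0);

        // Ten threads each increment the shared counter 1000 times.
        // incrementAndGet() performs the read-modify-write as a single
        // atomic operation, so no increments are lost and no external
        // synchronization is required.
        Thread[] threads = new Thread[10];
        for (int i = 0; i < threads.length; i++) {
            threads[i] = new Thread(new Runnable() {
                @Override
                public void run() {
                    for (int j = 0; j < 1000; j++) {
                        int updated = counter.incrementAndGet(); // returns the new value
                    }
                }
            });
            threads[i].start();
        }
        for (Thread t : threads) {
            t.join();
        }

        // Always prints 10000; an unsynchronized "counter++" on a plain int could print less.
        System.out.println("Final count: " + counter.get());
    }
}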
From source file: org.dasein.cloud.azure.tests.network.AzureLoadBalancerSupportWithMockHttpClientTest.java
@Test
public void removeServersShouldPostCorrectRequest() throws CloudException, InternalException {
    final String ROLE_NAME_2 = "TESTROLENAME2";
    final String VM_ID_2 = String.format("%s:%s:%s", SERVICE_NAME, DEPLOYMENT_NAME, ROLE_NAME_2);
    final AtomicInteger postCount = new AtomicInteger(0);
    new MockUp<CloseableHttpClient>() {
        @Mock
        public CloseableHttpResponse execute(Invocation inv, HttpUriRequest request) throws IOException {
            if ("GET".equals(request.getMethod()) && DEFINITION_URL.equals(request.getURI().toString())) {
                assertGet(request, DEFINITION_URL,
                        new Header[] { new BasicHeader("x-ms-version", "2012-03-01") });
                // Respond to the GET with a definition that still contains ROLE_NAME_2,
                // so that removeServers() has a server to remove.
                DefinitionModel definitionModel = createDefinitionModelWithAnotherServer("Failover", "Enabled",
                        ROLE_NAME_2);
                DaseinObjectToXmlEntity<DefinitionModel> daseinEntity = new DaseinObjectToXmlEntity<DefinitionModel>(
                        definitionModel);
                return getHttpResponseMock(getStatusLineMock(HttpServletResponse.SC_OK), daseinEntity,
                        new Header[] { new BasicHeader("x-ms-request-id", UUID.randomUUID().toString()) });
            } else if ("POST".equals(request.getMethod())
                    && DEFINITIONS_URL.equals(request.getURI().toString())) {
                // Count each POST of an updated definition; the test asserts exactly one.
                postCount.incrementAndGet();
                assertPost(request, DEFINITIONS_URL,
                        new Header[] { new BasicHeader("x-ms-version", "2012-03-01") },
                        createDefinitionModel("Failover", "Enabled", HC_PORT));
                DefinitionModel definitionModel = new DefinitionModel();
                definitionModel.setVersion("2");
                DaseinObjectToXmlEntity<DefinitionModel> daseinEntity = new DaseinObjectToXmlEntity<DefinitionModel>(
                        definitionModel);
                return getHttpResponseMock(getStatusLineMock(HttpServletResponse.SC_OK), daseinEntity,
                        new Header[] { new BasicHeader("x-ms-request-id", UUID.randomUUID().toString()) });
            } else {
                throw new IOException("Request is not mocked");
            }
        }
    };
    loadBalancerSupport.removeServers(LB_NAME, ROLE_NAME_2);
    assertEquals("LoadBalancerSupport.removeServers() post count doesn't match", 1, postCount.get());
}
From source file: com.ibm.jaggr.core.impl.deps.DepTree.java
/**
 * Object constructor. Attempts to de-serialize the cached dependency lists
 * from disk and then validates the dependency lists based on last-modified
 * dates, looking for any new or removed files. If the cached dependency
 * list data cannot be de-serialized, new lists are constructed. Once the
 * dependency lists have been validated, the list data is serialized back
 * out to disk.
 *
 * @param paths
 *            Collection of URIs which specify the target resources
 *            to be scanned for javascript files.
 * @param aggregator
 *            The servlet instance for this object
 * @param stamp
 *            timestamp associated with external override/customization
 *            resources that are checked on every server restart
 * @param clean
 *            If true, then the dependency lists are generated from scratch
 *            rather than by de-serializing and then validating the cached
 *            dependency lists.
 * @param validateDeps
 *            If true, then validate existing cached dependencies using
 *            file last-modified times.
 * @throws IOException
 */
public DepTree(Collection<URI> paths, IAggregator aggregator, long stamp, boolean clean, boolean validateDeps)
        throws IOException {
    final String sourceMethod = "<ctor>"; //$NON-NLS-1$
    boolean isTraceLogging = log.isLoggable(Level.FINER);
    if (isTraceLogging) {
        log.entering(DepTree.class.getName(), sourceMethod,
                new Object[] { paths, aggregator, stamp, clean, validateDeps });
    }
    this.stamp = stamp;
    IConfig config = aggregator.getConfig();
    rawConfig = config.toString();
    cacheBust = AggregatorUtil.getCacheBust(aggregator);

    File cacheDir = new File(aggregator.getWorkingDirectory(), DEPCACHE_DIRNAME);
    File cacheFile = new File(cacheDir, CACHE_FILE);

    /*
     * The de-serialized dependency map. If we have a cached dependency map,
     * then it will be validated against the last-modified dates of the
     * current files and only the files that have changed will need to be
     * re-parsed to update the dependency lists.
     */
    DepTree cached = null;

    if (!clean) {
        // If we're not starting clean, try to de-serialize the map from cache
        try {
            ObjectInputStream is = new ObjectInputStream(new FileInputStream(cacheFile));
            try {
                if (isTraceLogging) {
                    log.finer("Attempting to read cached dependencies from " + cacheFile.toString()); //$NON-NLS-1$
                }
                cached = (DepTree) is.readObject();
            } finally {
                try {
                    is.close();
                } catch (Exception ignore) {
                }
            }
        } catch (FileNotFoundException e) {
            /*
             * Not an error. Just means that the cache file hasn't been
             * written yet or else it's been deleted.
             */
            if (log.isLoggable(Level.INFO))
                log.log(Level.INFO, Messages.DepTree_1);
        } catch (Exception e) {
            if (log.isLoggable(Level.SEVERE))
                log.log(Level.SEVERE, e.getMessage(), e);
        }
    }

    // If the cacheBust config param has changed, then do a clean build
    // of the dependencies.
    if (cached != null) {
        if (stamp == 0) {
            // no init stamp provided. Preserve the cached one.
            stamp = cached.stamp;
        }
        if (stamp > cached.stamp) {
            // init stamp has been updated. Validate dependencies.
            validateDeps = true;
        }
        if (!StringUtils.equals(cacheBust, cached.cacheBust)) {
            if (isTraceLogging) {
                log.finer("Current cacheBust = " + cacheBust + ", cached cacheBust = " + cached.cacheBust); //$NON-NLS-1$//$NON-NLS-2$
            }
            if (log.isLoggable(Level.INFO)) {
                log.info(Messages.DepTree_2);
            }
            cached = null;
        }
        if (cached != null && !StringUtils.equals(rawConfig, cached.rawConfig)) {
            if (isTraceLogging) {
                log.finer("Current config = " + rawConfig); //$NON-NLS-1$
                log.finer("Cached config = " + cached.rawConfig); //$NON-NLS-1$
            }
            validateDeps = true;
        }
    }

    /*
     * If we de-serialized a previously saved dependency map, then go with that.
     */
    if (cached != null && !validateDeps && !clean) {
        depMap = cached.depMap;
        fromCache = true;
        return;
    } else if (isTraceLogging) {
        log.finer("Building/validating deps: cached = " + cached + ", validateDeps = " + validateDeps //$NON-NLS-1$//$NON-NLS-2$
                + ", clean = " + clean); //$NON-NLS-1$
    }

    // Initialize the dependency map
    depMap = new ConcurrentHashMap<URI, DepTreeNode>();

    // This can take a while, so print something to the console
    String msg = MessageFormat.format(Messages.DepTree_3, new Object[] { aggregator.getName() });
    ConsoleService cs = new ConsoleService();
    cs.println(msg);
    if (log.isLoggable(Level.INFO)) {
        log.info(msg);
    }

    // Make sure that all the paths are unique and orthogonal
    paths = DepUtils.removeRedundantPaths(paths);

    /*
     * Create the thread pools, one for the tree builders and one for the
     * parsers. Since a tree builder thread will wait for all the outstanding
     * parser threads started by that builder to complete, we need to use two
     * independent thread pools to guard against the possibility of deadlock
     * caused by all the threads in the pool being consumed by tree builders
     * and leaving none available to service the parsers.
     */
    final ThreadGroup treeBuilderTG = new ThreadGroup(TREEBUILDER_TGNAME),
            parserTG = new ThreadGroup(JSPARSER_TGNAME);
    ExecutorService treeBuilderExc = Executors.newFixedThreadPool(10, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(treeBuilderTG, r, MessageFormat.format(THREADNAME,
                    new Object[] { treeBuilderTG.getName(), treeBuilderTG.activeCount() }));
        }
    }), parserExc = Executors.newFixedThreadPool(20, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(parserTG, r, MessageFormat.format(THREADNAME,
                    new Object[] { parserTG.getName(), parserTG.activeCount() }));
        }
    });

    // Counter to keep track of number of tree builder threads started
    AtomicInteger treeBuilderCount = new AtomicInteger(0);

    // The completion services for the thread pools
    final CompletionService<URI> parserCs = new ExecutorCompletionService<URI>(parserExc);
    CompletionService<DepTreeBuilder.Result> treeBuilderCs = new ExecutorCompletionService<DepTreeBuilder.Result>(
            treeBuilderExc);

    Set<String> nonJSExtensions = Collections.unmodifiableSet(getNonJSExtensions(aggregator));

    // Start the tree builder threads to process the paths
    for (final URI path : paths) {
        /*
         * Create or get from cache the root node for this path and
         * add it to the new map.
         */
        DepTreeNode root = new DepTreeNode("", path); //$NON-NLS-1$
        DepTreeNode cachedNode = null;
        if (cached != null) {
            cachedNode = cached.depMap.get(path);
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_4, new Object[] { path }));
            }
        } else {
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_5, new Object[] { path }));
            }
        }
        depMap.put(path, root);

        treeBuilderCount.incrementAndGet();
        treeBuilderCs.submit(new DepTreeBuilder(aggregator, parserCs, path, root, cachedNode, nonJSExtensions));
    }

    // List of parser exceptions
    LinkedList<Exception> parserExceptions = new LinkedList<Exception>();

    /*
     * Pull the completed tree builder tasks from the completion queue until
     * all the paths have been processed
     */
    while (treeBuilderCount.decrementAndGet() >= 0) {
        try {
            DepTreeBuilder.Result result = treeBuilderCs.take().get();
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_6,
                        new Object[] { result.parseCount, result.dirName }));
            }
        } catch (Exception e) {
            if (log.isLoggable(Level.SEVERE))
                log.log(Level.SEVERE, e.getMessage(), e);
            parserExceptions.add(e);
        }
    }

    // shutdown the thread pools now that we're done with them
    parserExc.shutdown();
    treeBuilderExc.shutdown();

    // If parser exceptions occurred, then rethrow the first one
    if (parserExceptions.size() > 0) {
        throw new RuntimeException(parserExceptions.get(0));
    }

    // Prune dead nodes (folder nodes with no children)
    for (Map.Entry<URI, DepTreeNode> entry : depMap.entrySet()) {
        entry.getValue().prune();
    }

    /*
     * Make sure the cache directory exists before we try to serialize the
     * dependency map.
     */
    if (!cacheDir.exists())
        if (!cacheDir.mkdirs()) {
            throw new IOException(
                    MessageFormat.format(Messages.DepTree_0, new Object[] { cacheDir.getAbsolutePath() }));
        }

    // Serialize the map to the cache directory
    ObjectOutputStream os;
    os = new ObjectOutputStream(new FileOutputStream(cacheFile));
    try {
        if (isTraceLogging) {
            log.finer("Writing cached dependencies to " + cacheFile.toString()); //$NON-NLS-1$
        }
        os.writeObject(this);
    } finally {
        try {
            os.close();
        } catch (Exception ignore) {
        }
    }

    msg = MessageFormat.format(Messages.DepTree_7, new Object[] { aggregator.getName() });

    // Output that we're done.
    cs.println(msg);
    if (log.isLoggable(Level.INFO)) {
        log.info(msg);
    }
    if (isTraceLogging) {
        log.exiting(DepTree.class.getName(), sourceMethod);
    }
}
From source file: org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.java
/**
 * Create an HRegion with the result of a WAL split and test we only see the
 * good edits
 * @throws Exception
 */
@Test
public void testReplayEditsWrittenIntoWAL() throws Exception {
    final TableName tableName = TableName.valueOf("testReplayEditsWrittenIntoWAL");
    final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = FSUtils.getTableDir(hbaseRootDir, tableName);
    deleteDir(basedir);

    final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region2 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
    HBaseTestingUtility.closeRegionAndWAL(region2);
    final WAL wal = createWAL(this.conf, hbaseRootDir, logName);
    final byte[] rowName = tableName.getName();
    final byte[] regionName = hri.getEncodedNameAsBytes();

    // Add 1k to each family.
    final int countPerFamily = 1000;
    Set<byte[]> familyNames = new HashSet<byte[]>();
    NavigableMap<byte[], Integer> scopes = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : htd.getFamiliesKeys()) {
        scopes.put(fam, 0);
    }
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee, wal, htd, mvcc, scopes);
        familyNames.add(hcd.getName());
    }

    // Add a cache flush, shouldn't have any effect
    wal.startCacheFlush(regionName, familyNames);
    wal.completeCacheFlush(regionName);

    // Add an edit to another family, should be skipped.
    WALEdit edit = new WALEdit();
    long now = ee.currentTime();
    edit.add(new KeyValue(rowName, Bytes.toBytes("another family"), rowName, now, rowName));
    wal.append(hri, new WALKey(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes), edit, true);

    // Delete the c family to verify deletes make it over.
    edit = new WALEdit();
    now = ee.currentTime();
    edit.add(new KeyValue(rowName, Bytes.toBytes("c"), null, now, KeyValue.Type.DeleteFamily));
    wal.append(hri, new WALKey(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes), edit, true);

    // Sync.
    wal.sync();

    // Make a new conf and a new fs for the splitter to run on so we can take
    // over old wal.
    final Configuration newConf = HBaseConfiguration.create(this.conf);
    User user = HBaseTestingUtility.getDifferentUser(newConf, ".replay.wal.secondtime");
    user.runAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            runWALSplit(newConf);
            FileSystem newFS = FileSystem.get(newConf);
            // 100k seems to make for about 4 flushes during HRegion#initialize.
            newConf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 100);
            // Make a new wal for new region.
            WAL newWal = createWAL(newConf, hbaseRootDir, logName);
            final AtomicInteger flushcount = new AtomicInteger(0);
            try {
                final HRegion region = new HRegion(basedir, newWal, newFS, newConf, hri, htd, null) {
                    @Override
                    protected FlushResult internalFlushcache(final WAL wal, final long myseqid,
                            final Collection<Store> storesToFlush, MonitoredTask status,
                            boolean writeFlushWalMarker) throws IOException {
                        LOG.info("InternalFlushCache Invoked");
                        FlushResult fs = super.internalFlushcache(wal, myseqid, storesToFlush,
                                Mockito.mock(MonitoredTask.class), writeFlushWalMarker);
                        flushcount.incrementAndGet();
                        return fs;
                    }
                };
                // The seq id this region has opened up with
                long seqid = region.initialize();
                // The mvcc readpoint from inserting data.
                long writePoint = mvcc.getWritePoint();

                // We flushed during init.
                assertTrue("Flushcount=" + flushcount.get(), flushcount.get() > 0);
                assertTrue((seqid - 1) == writePoint);

                Get get = new Get(rowName);
                Result result = region.get(get);
                // Make sure we only see the good edits
                assertEquals(countPerFamily * (htd.getFamilies().size() - 1), result.size());
                region.close();
            } finally {
                newWal.close();
            }
            return null;
        }
    });
}
From source file: net.myrrix.online.eval.AUCEvaluator.java
public EvaluationResult evaluate(final MyrrixRecommender recommender, final FastByIDMap<FastIDSet> testData)
        throws TasteException {
    final AtomicInteger underCurve = new AtomicInteger(0);
    final AtomicInteger total = new AtomicInteger(0);
    final long[] allItemIDs = recommender.getAllItemIDs().toArray();

    Processor<Long> processor = new Processor<Long>() {
        private final RandomGenerator random = RandomManager.getRandom();

        @Override
        public void process(Long userID, long count) throws ExecutionException {
            FastIDSet testItemIDs = testData.get(userID);
            int numTest = testItemIDs.size();
            for (int i = 0; i < numTest; i++) {
                long randomTestItemID;
                long randomTrainingItemID;
                synchronized (random) {
                    randomTestItemID = RandomUtils.randomFrom(testItemIDs, random);
                    do {
                        randomTrainingItemID = allItemIDs[random.nextInt(allItemIDs.length)];
                    } while (testItemIDs.contains(randomTrainingItemID));
                }
                float relevantEstimate;
                float nonRelevantEstimate;
                try {
                    relevantEstimate = recommender.estimatePreference(userID, randomTestItemID);
                    nonRelevantEstimate = recommender.estimatePreference(userID, randomTrainingItemID);
                } catch (NoSuchItemException nsie) {
                    // OK; it's possible item only showed up in test split
                    continue;
                } catch (NoSuchUserException nsue) {
                    // OK; it's possible user only showed up in test split
                    continue;
                } catch (TasteException te) {
                    throw new ExecutionException(te);
                }
                if (relevantEstimate > nonRelevantEstimate) {
                    underCurve.incrementAndGet();
                }
                total.incrementAndGet();

                if (count % 100000 == 0) {
                    log.info("AUC: {}", (double) underCurve.get() / total.get());
                }
            }
        }
    };

    try {
        new Paralleler<Long>(testData.keySetIterator(), processor, "AUCEval").runInParallel();
    } catch (InterruptedException ie) {
        throw new TasteException(ie);
    } catch (ExecutionException e) {
        throw new TasteException(e.getCause());
    }

    double score = (double) underCurve.get() / total.get();
    log.info("AUC: {}", score);
    return new EvaluationResultImpl(score);
}
From source file: com.heliosapm.tsdblite.metric.MetricCache.java
private MetricCache() {
    final String jmxDomain = ConfigurationHelper.getSystemThenEnvProperty(Constants.CONF_METRICS_MSERVER,
            Constants.DEFAULT_METRICS_MSERVER);
    if (!JMXHelper.getHeliosMBeanServer().getDefaultDomain().equals(jmxDomain)) {
        metricMBeanServer = JMXHelper.createMBeanServer(jmxDomain, true);
        final int port = ConfigurationHelper.getIntSystemThenEnvProperty(Constants.CONF_METRICS_JMXMP_PORT,
                Constants.DEFAULT_METRICS_JMXMP_PORT);
        if (port > -1) {
            final String iface = ConfigurationHelper.getSystemThenEnvProperty(
                    Constants.CONF_METRICS_JMXMP_IFACE, Constants.DEFAULT_METRICS_JMXMP_IFACE);
            final JMXServiceURL surl = JMXHelper.fireUpJMXMPServer(iface, port, metricMBeanServer);
            log.info("Metrics MBeanServer [{}] available at [{}]", jmxDomain, surl);
        }
    } else {
        metricMBeanServer = JMXHelper.getHeliosMBeanServer();
    }
    expiry = ConfigurationHelper.getLongSystemThenEnvProperty(Constants.CONF_METRIC_EXPIRY,
            Constants.DEFAULT_METRIC_EXPIRY);
    expiryPeriod = ConfigurationHelper.getLongSystemThenEnvProperty(Constants.CONF_METRIC_EXPIRY_PERIOD,
            Constants.DEFAULT_METRIC_EXPIRY_PERIOD);
    expiryThread = new Thread(new Runnable() {
        @Override
        public void run() {
            while (true) {
                final AtomicInteger expiredMetricCount = new AtomicInteger(0);
                SystemClock.sleep(expiryPeriod);
                final long startTime = System.currentTimeMillis();
                final ObjectName[] metricObjectNames = JMXHelper.query(JMXHelper.ALL_MBEANS_FILTER,
                        metricBeanQuery);
                final Collection<Future<?>> taskFutures = new ArrayList<Future<?>>(metricObjectNames.length);
                for (final ObjectName on : metricObjectNames) {
                    taskFutures.add(expiryService.submit(new Runnable() {
                        @Override
                        public void run() {
                            try {
                                final long now = System.currentTimeMillis();
                                final Map<String, Object> attrMap = JMXHelper.getAttributes(on,
                                        EXPIRY_ATTRIBUTES);
                                if (!attrMap.containsKey("LastActivity")) {
                                    log.warn("No LA for [{}], AttrMap: {}", on, attrMap);
                                    return;
                                }
                                final long lastActivity = (Long) attrMap.get("LastActivity");
                                final long age = now - lastActivity;
                                if (age > expiry) {
                                    expiredMetricCount.incrementAndGet();
                                    metricMBeanServer.unregisterMBean(on);
                                    expiredMetrics.increment();
                                    final long hc = (Long) attrMap.get("MetricHashCode");
                                    metricCache.remove(hc);
                                }
                            } catch (Exception x) {
                                log.error("Expiry Task Failure", x);
                            }
                        }
                    }));
                }
                final long dispatchElapsed = System.currentTimeMillis() - startTime;
                lastExpiryDispatchTime.set(dispatchElapsed);
                int fails = 0;
                for (Future<?> f : taskFutures) {
                    try {
                        f.get();
                    } catch (Exception x) {
                        fails++;
                    }
                }
                final long expiryElapsed = System.currentTimeMillis() - startTime;
                final int exp = expiredMetricCount.get();
                if (exp != 0) {
                    log.info("Expiry Dispatch for [{}] Metrics Completed in [{}] ms. Expired [{}] metrics.",
                            metricObjectNames.length, dispatchElapsed, exp);
                }
                lastExpiryTime.set(expiryElapsed);
                if (log.isDebugEnabled())
                    log.debug("Expiry Completed in [{}] ms. Tasks: {}, Fails: {}", expiryElapsed,
                            taskFutures.size(), fails);
            }
        }
    }, "MetricExpiryThread");
    expiryThread.setDaemon(true);
    expiryThread.setPriority(Thread.MAX_PRIORITY);
    expiryThread.start();
    JMXHelper.registerMBean(this, OBJECT_NAME);
}
From source file: com.revetkn.ios.analyzer.ArtworkAnalyzer.java
public void generateRetinaImages(final File projectRootDirectory, final File outputDirectory,
        final Set<File> nonretinaImageFiles, final RetinaImageGenerationProgressCallback progressCallback) {
    if (projectRootDirectory == null)
        throw new NullPointerException("The 'projectRootDirectory' parameter cannot be null.");
    if (!projectRootDirectory.exists())
        throw new IllegalArgumentException(format("Directory '%s' does not exist.", projectRootDirectory));
    if (!projectRootDirectory.isDirectory())
        throw new IllegalArgumentException(
                format("'%s' is a regular file - it must be a directory.", projectRootDirectory));
    if (outputDirectory == null)
        throw new NullPointerException("The 'outputDirectory' parameter cannot be null.");
    if (outputDirectory.exists() && !outputDirectory.isDirectory())
        throw new IllegalArgumentException(
                format("'%s' is a regular file - it must be a directory.", outputDirectory));
    if (nonretinaImageFiles == null)
        throw new NullPointerException("The 'nonretinaImageFiles' parameter cannot be null.");
    if (progressCallback == null)
        throw new NullPointerException("The 'progressCallback' parameter cannot be null.");

    Set<Callable<Object>> retinaScalingTasks = new HashSet<Callable<Object>>();
    final AtomicInteger imageFilesProcessed = new AtomicInteger(0);

    for (final File nonretinaImageFile : nonretinaImageFiles) {
        retinaScalingTasks.add(new Callable<Object>() {
            @Override
            public Object call() throws Exception {
                byte[] imageFileData = readFileToByteArray(nonretinaImageFile);
                ImageMetrics imageMetrics = ImageUtilities.extractImageMetrics(imageFileData);
                int retinaWidth = imageMetrics.getWidth() * 2;
                int retinaHeight = imageMetrics.getHeight() * 2;
                byte[] retinaImageData = scaleImageUpToFit(imageFileData, retinaWidth, retinaHeight,
                        IMAGE_TYPE_PNG);
                String absoluteRetinaImageFilename = retinaImageFilename(nonretinaImageFile.getAbsolutePath());
                int rootDirectoryPathLength = projectRootDirectory.getAbsolutePath().length();
                String relativeRetinaImageFilename = absoluteRetinaImageFilename
                        .substring(rootDirectoryPathLength + 1);
                String retinaImageFilename = outputDirectory.getAbsolutePath() + separator
                        + relativeRetinaImageFilename;
                File retinaImageFile = new File(retinaImageFilename);
                writeByteArrayToFile(retinaImageFile, retinaImageData);
                progressCallback.generatedRetinaImage(nonretinaImageFile, retinaImageFile,
                        imageFilesProcessed.incrementAndGet(), nonretinaImageFiles.size());
                return null;
            }
        });
    }

    try {
        for (Future<Object> future : getExecutorService().invokeAll(retinaScalingTasks))
            future.get();
    } catch (Throwable throwable) {
        throw new ArtworkProcessingException(throwable);
    }
}
From source file: com.btoddb.fastpersitentqueue.FpqIT.java
@Test
public void testThreading() throws Exception {
    final int numEntries = 1000;
    final int numPushers = 4;
    final int numPoppers = 4;
    final int entrySize = 1000;
    fpq1.setMaxTransactionSize(2000);
    final int popBatchSize = 100;
    fpq1.setMaxMemorySegmentSizeInBytes(10000000);
    fpq1.setMaxJournalFileSize(10000000);
    fpq1.setMaxJournalDurationInMs(30000);
    fpq1.setFlushPeriodInMs(1000);
    fpq1.setNumberOfFlushWorkers(4);

    final Random pushRand = new Random(1000L);
    final Random popRand = new Random(1000000L);
    final AtomicInteger pusherFinishCount = new AtomicInteger();
    final AtomicInteger numPops = new AtomicInteger();
    final AtomicLong counter = new AtomicLong();
    final AtomicLong pushSum = new AtomicLong();
    final AtomicLong popSum = new AtomicLong();

    fpq1.init();

    ExecutorService execSrvc = Executors.newFixedThreadPool(numPushers + numPoppers);
    Set<Future> futures = new HashSet<Future>();

    // start pushing
    for (int i = 0; i < numPushers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < numEntries; i++) {
                    try {
                        long x = counter.getAndIncrement();
                        pushSum.addAndGet(x);
                        ByteBuffer bb = ByteBuffer.wrap(new byte[entrySize]);
                        bb.putLong(x);

                        fpq1.beginTransaction();
                        fpq1.push(bb.array());
                        fpq1.commit();
                        if ((x + 1) % 500 == 0) {
                            System.out.println("pushed ID = " + x);
                        }
                        Thread.sleep(pushRand.nextInt(5));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
                pusherFinishCount.incrementAndGet();
            }
        });
        futures.add(future);
    }

    // start popping
    for (int i = 0; i < numPoppers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                while (pusherFinishCount.get() < numPushers || !fpq1.isEmpty()) {
                    try {
                        fpq1.beginTransaction();
                        try {
                            Collection<FpqEntry> entries = fpq1.pop(popBatchSize);
                            if (null == entries) {
                                Thread.sleep(100);
                                continue;
                            }

                            for (FpqEntry entry : entries) {
                                ByteBuffer bb = ByteBuffer.wrap(entry.getData());
                                popSum.addAndGet(bb.getLong());
                                if (entry.getId() % 500 == 0) {
                                    System.out.println("popped ID = " + entry.getId());
                                }
                            }
                            numPops.addAndGet(entries.size());
                            fpq1.commit();
                            entries.clear();
                        } finally {
                            if (fpq1.isTransactionActive()) {
                                fpq1.rollback();
                            }
                        }
                        Thread.sleep(popRand.nextInt(10));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        });
        futures.add(future);
    }

    boolean finished = false;
    while (!finished) {
        try {
            for (Future f : futures) {
                f.get();
            }
            finished = true;
        } catch (InterruptedException e) {
            // ignore
            Thread.interrupted();
        }
    }

    assertThat(numPops.get(), is(numEntries * numPushers));
    assertThat(fpq1.getNumberOfEntries(), is(0L));
    assertThat(pushSum.get(), is(popSum.get()));
    assertThat(fpq1.getMemoryMgr().getNumberOfActiveSegments(), is(1));
    assertThat(fpq1.getMemoryMgr().getSegments(), hasSize(1));
    assertThat(fpq1.getJournalMgr().getJournalFiles().entrySet(), hasSize(1));
    assertThat(FileUtils.listFiles(fpq1.getPagingDirectory(), TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE),
            is(empty()));
    assertThat(
            FileUtils.listFiles(fpq1.getJournalDirectory(), TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE),
            hasSize(1));
}
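Note that the pusher threads above advance their shared sequence with AtomicLong.getAndIncrement(), the post-increment counterpart of incrementAndGet(). A two-line sketch of the difference (the starting value is arbitrary, chosen only for illustration):

AtomicInteger n = new AtomicInteger(5);
int a = n.getAndIncrement(); // a == 5, n is now 6 (like n++)
int b = n.incrementAndGet(); // b == 7, n is now 7 (like ++n)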
From source file: org.apache.hadoop.hbase.client.TestAdmin.java
@Test(timeout = 300000)
public void testCreateBadTables() throws IOException {
    String msg = null;
    try {
        this.admin.createTable(HTableDescriptor.META_TABLEDESC);
    } catch (TableExistsException e) {
        msg = e.toString();
    }
    assertTrue("Unexpected exception message " + msg,
            msg != null && msg.startsWith(TableExistsException.class.getName())
                    && msg.contains(HTableDescriptor.META_TABLEDESC.getTableName().getNameAsString()));

    // Now try and do concurrent creation with a bunch of threads.
    final HTableDescriptor threadDesc = new HTableDescriptor(TableName.valueOf("threaded_testCreateBadTables"));
    threadDesc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    int count = 10;
    Thread[] threads = new Thread[count];
    final AtomicInteger successes = new AtomicInteger(0);
    final AtomicInteger failures = new AtomicInteger(0);
    final HBaseAdmin localAdmin = this.admin;
    for (int i = 0; i < count; i++) {
        threads[i] = new Thread(Integer.toString(i)) {
            @Override
            public void run() {
                try {
                    localAdmin.createTable(threadDesc);
                    successes.incrementAndGet();
                } catch (TableExistsException e) {
                    failures.incrementAndGet();
                } catch (IOException e) {
                    throw new RuntimeException("Failed threaded create" + getName(), e);
                }
            }
        };
    }
    for (int i = 0; i < count; i++) {
        threads[i].start();
    }
    for (int i = 0; i < count; i++) {
        while (threads[i].isAlive()) {
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                // continue
            }
        }
    }
    // All threads are now dead. Count up how many tables were created and
    // how many failed w/ appropriate exception.
    assertEquals(1, successes.get());
    assertEquals(count - 1, failures.get());
}