List of usage examples for java.util.concurrent.atomic.AtomicLong.get()
public final long get()
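Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the listed projects) of what get() does: it performs a volatile read of the current value and never modifies it, which is why the tests below call it only after their worker threads finish, to verify sums and counters.

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongGetExample {
    public static void main(String[] args) {
        AtomicLong counter = new AtomicLong(0L);

        counter.incrementAndGet(); // atomically adds 1 -> 1
        counter.addAndGet(41L);    // atomically adds 41 -> 42

        // get() returns the current value with volatile read semantics;
        // it does not change the counter.
        long current = counter.get();
        System.out.println("current = " + current); // prints 42
    }
}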
From source file: com.btoddb.fastpersitentqueue.JournalMgrIT.java
@Test
public void testThreading() throws IOException, ExecutionException {
    final int numEntries = 10000;
    final int numPushers = 3;
    int numPoppers = 3;
    final Random pushRand = new Random(1000L);
    final Random popRand = new Random(1000000L);
    final ConcurrentLinkedQueue<FpqEntry> events = new ConcurrentLinkedQueue<FpqEntry>();
    final AtomicInteger pusherFinishCount = new AtomicInteger();
    final AtomicInteger numPops = new AtomicInteger();
    final AtomicLong pushSum = new AtomicLong();
    final AtomicLong popSum = new AtomicLong();

    mgr.setMaxJournalFileSize(1000);
    mgr.init();

    ExecutorService execSrvc = Executors.newFixedThreadPool(numPushers + numPoppers);
    Set<Future> futures = new HashSet<Future>();

    // start pushing
    for (int i = 0; i < numPushers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < numEntries; i++) {
                    try {
                        long x = idGen.incrementAndGet();
                        FpqEntry entry = mgr.append(new FpqEntry(x, new byte[100]));
                        events.offer(entry);
                        pushSum.addAndGet(x);
                        if (x % 500 == 0) {
                            System.out.println("pushed ID = " + x);
                        }
                        Thread.sleep(pushRand.nextInt(5));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
                pusherFinishCount.incrementAndGet();
            }
        });
        futures.add(future);
    }

    // start popping
    for (int i = 0; i < numPoppers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                while (pusherFinishCount.get() < numPushers || !events.isEmpty()) {
                    try {
                        FpqEntry entry;
                        while (null != (entry = events.poll())) {
                            if (entry.getId() % 500 == 0) {
                                System.out.println("popped ID = " + entry.getId());
                            }
                            popSum.addAndGet(entry.getId());
                            numPops.incrementAndGet();
                            mgr.reportTake(entry);
                            Thread.sleep(popRand.nextInt(5));
                        }
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        });
        futures.add(future);
    }

    boolean finished = false;
    while (!finished) {
        try {
            for (Future f : futures) {
                f.get();
            }
            finished = true;
        } catch (InterruptedException e) {
            // ignore
            Thread.interrupted();
        }
    }

    assertThat(numPops.get(), is(numEntries * numPushers));
    assertThat(popSum.get(), is(pushSum.get()));
    assertThat(mgr.getJournalIdMap().entrySet(), hasSize(1));
    assertThat(FileUtils.listFiles(theDir, TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE), hasSize(1));
}
From source file: de.hybris.platform.jdbcwrapper.ConnectionPoolTest.java
private void doTestMultithreadedAccess(final int RUN_SECONDS, final int THREADS, final int PERCENT_NO_TX,
        final int PERCENT_TX_ROLLBACK, final boolean useInterrupt, final boolean sendDummyStatement) {
    HybrisDataSource dataSource = null;
    LOG.info("--- test multithreaded access to connection pool duration:" + RUN_SECONDS + "s threads:" + THREADS
            + " nonTx:" + PERCENT_NO_TX + "% rollback:" + PERCENT_TX_ROLLBACK + "% interrupt:" + useInterrupt
            + "-----------------------------------");
    try {
        final Collection<TestConnectionImpl> allConnections = new ConcurrentLinkedQueue<TestConnectionImpl>();
        final AtomicLong rollbackCounter = new AtomicLong(0);
        final AtomicLong connectionCounter = new AtomicLong(0);
        final AtomicBoolean finished = new AtomicBoolean(false);

        dataSource = createDataSource(Registry.getCurrentTenantNoFallback(), allConnections, connectionCounter,
                false, false);
        assertEquals(0, dataSource.getNumInUse());
        assertEquals(1, dataSource.getNumPhysicalOpen());
        assertEquals(1, dataSource.getMaxInUse());
        assertEquals(1, dataSource.getMaxPhysicalOpen());

        final int maxConnections = dataSource.getMaxAllowedPhysicalOpen();
        final String runId = "[" + RUN_SECONDS + "|" + THREADS + "|" + PERCENT_NO_TX + "|" + PERCENT_TX_ROLLBACK
                + "|" + useInterrupt + "]";
        final Runnable runnable = new ContinuousAccessRunnable(dataSource, PERCENT_NO_TX, PERCENT_TX_ROLLBACK,
                rollbackCounter, finished, runId, sendDummyStatement);

        final TestThreadsHolder threadsHolder = new TestThreadsHolder(THREADS, runnable) {
            @Override
            public void stopAll() {
                if (useInterrupt) {
                    super.stopAll();
                } else {
                    finished.set(true);
                }
            }
        };
        threadsHolder.startAll();

        waitDuration(RUN_SECONDS, maxConnections, dataSource, allConnections);

        threadsHolder.stopAll();
        final boolean allStoppedNormal = threadsHolder.waitForAll(30, TimeUnit.SECONDS);
        if (!allStoppedNormal) {
            // try fallback method
            finished.set(true);
            final boolean allStoppedFallback = threadsHolder.waitForAll(10, TimeUnit.SECONDS);
            if (allStoppedFallback) {
                LOG.error("Threads did not stop normally but only after using boolean flag!");
            } else {
                fail("db connection test threads did not stop correctly even after fallback method");
            }
        }

        // kill data source
        dataSource.destroy();
        assertTrue(dataSource.getConnectionPool().isPoolClosed());
        assertTrue(waitForAllInactive(dataSource.getConnectionPool(), 10, TimeUnit.SECONDS));

        if (PERCENT_TX_ROLLBACK > 0) {
            assertTrue(rollbackCounter.get() > 0);
        }

        final long maxAllowedConnections = maxConnections + rollbackCounter.get();
        final Stats stats = getStats(allConnections);

        LOG.info(//
                "max connections :" + maxConnections + "\n" + //
                "rollbacks :" + rollbackCounter.get() + "\n" + //
                "real connections :" + connectionCounter.get() + "\n" + //
                "closed:" + stats.closed + "\n" + //
                "open:" + stats.open + "\n" + //
                "borrowed :" + stats.borrowed + "\n" + //
                "returned :" + stats.returned + "\n" + //
                "invalidated :" + stats.invalidated + "\n");

        // we cannot be sure since not each rollbacked connections *must* be re-created
        assertTrue(
                "handed out more than max connections (got:" + connectionCounter.get() + " > max:"
                        + maxAllowedConnections + ")", //
                connectionCounter.get() <= maxAllowedConnections);
        assertEquals("still got " + stats.borrowed + " borrowed connections", 0, stats.borrowed);
        assertEquals(
                "connection count mismatch - total:" + connectionCounter.get() + " <> "
                        + (stats.returned + stats.invalidated) + " (returned:" + stats.returned
                        + " + invalidated:" + stats.invalidated + ")", //
                connectionCounter.get(), stats.returned + stats.invalidated);

        // make sure all connections have been finally closed
        assertEquals(
                "data source " + dataSource + " still got " + dataSource.getNumInUse() + " connections in use", //
                0, dataSource.getNumInUse());
        assertEquals(
                "data source " + dataSource + " still got " + dataSource.getNumPhysicalOpen()
                        + " physical connections open (despite none are in use)", //
                0, dataSource.getNumPhysicalOpen());
        assertTrue(
                "data source " + dataSource + " had more than max allowed connections (max:" + maxConnections
                        + ", max in use:" + dataSource.getMaxInUse() + ")", //
                maxConnections >= dataSource.getMaxInUse());
        assertTrue(
                "data source " + dataSource + " had more than max allowed physical connections (max:"
                        + maxConnections + ", max physical in use:" + dataSource.getMaxPhysicalOpen() + ")", //
                maxConnections >= dataSource.getMaxPhysicalOpen());
    } finally {
        destroyDataSource(dataSource);
    }
}
From source file: com.twitter.distributedlog.auditor.DLAuditor.java
private long calculateLedgerSpaceUsage(BookKeeperClient bkc, final ExecutorService executorService)
        throws IOException {
    final AtomicLong totalBytes = new AtomicLong(0);
    final AtomicLong totalEntries = new AtomicLong(0);
    final AtomicLong numLedgers = new AtomicLong(0);

    LedgerManager lm = BookKeeperAccessor.getLedgerManager(bkc.get());
    final SettableFuture<Void> doneFuture = SettableFuture.create();
    final BookKeeper bk = bkc.get();

    BookkeeperInternalCallbacks.Processor<Long> collector = new BookkeeperInternalCallbacks.Processor<Long>() {
        @Override
        public void process(final Long lid, final AsyncCallback.VoidCallback cb) {
            numLedgers.incrementAndGet();
            executorService.submit(new Runnable() {
                @Override
                public void run() {
                    bk.asyncOpenLedgerNoRecovery(lid, BookKeeper.DigestType.CRC32,
                            conf.getBKDigestPW().getBytes(UTF_8),
                            new org.apache.bookkeeper.client.AsyncCallback.OpenCallback() {
                                @Override
                                public void openComplete(int rc, LedgerHandle lh, Object ctx) {
                                    final int cbRc;
                                    if (BKException.Code.OK == rc) {
                                        totalBytes.addAndGet(lh.getLength());
                                        totalEntries.addAndGet(lh.getLastAddConfirmed() + 1);
                                        cbRc = rc;
                                    } else {
                                        cbRc = BKException.Code.ZKException;
                                    }
                                    executorService.submit(new Runnable() {
                                        @Override
                                        public void run() {
                                            cb.processResult(cbRc, null, null);
                                        }
                                    });
                                }
                            }, null);
                }
            });
        }
    };
    AsyncCallback.VoidCallback finalCb = new AsyncCallback.VoidCallback() {
        @Override
        public void processResult(int rc, String path, Object ctx) {
            if (BKException.Code.OK == rc) {
                doneFuture.set(null);
            } else {
                doneFuture.setException(BKException.create(rc));
            }
        }
    };
    lm.asyncProcessLedgers(collector, finalCb, null, BKException.Code.OK, BKException.Code.ZKException);
    try {
        doneFuture.get();
        logger.info("calculated {} ledgers\n\ttotal bytes = {}\n\ttotal entries = {}",
                new Object[] { numLedgers.get(), totalBytes.get(), totalEntries.get() });
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new DLInterruptedException("Interrupted on calculating ledger space : ", e);
    } catch (ExecutionException e) {
        if (e.getCause() instanceof IOException) {
            throw (IOException) (e.getCause());
        } else {
            throw new IOException("Failed to calculate ledger space : ", e.getCause());
        }
    }
    return totalBytes.get();
}
From source file: com.twitter.distributedlog.BKLogHandler.java
private void asyncGetLedgerListInternal(final Comparator<LogSegmentMetadata> comparator,
        final LogSegmentFilter segmentFilter, final Watcher watcher,
        final GenericCallback<List<LogSegmentMetadata>> finalCallback, final AtomicInteger numAttemptsLeft,
        final AtomicLong backoffMillis) {
    final Stopwatch stopwatch = Stopwatch.createStarted();
    try {
        if (LOG.isTraceEnabled()) {
            LOG.trace("Async getting ledger list for {}.", getFullyQualifiedName());
        }
        final GenericCallback<List<LogSegmentMetadata>> callback = new GenericCallback<List<LogSegmentMetadata>>() {
            @Override
            public void operationComplete(int rc, List<LogSegmentMetadata> result) {
                long elapsedMicros = stopwatch.stop().elapsed(TimeUnit.MICROSECONDS);
                if (KeeperException.Code.OK.intValue() != rc) {
                    getListStat.registerFailedEvent(elapsedMicros);
                } else {
                    if (LogSegmentFilter.DEFAULT_FILTER == segmentFilter) {
                        isFullListFetched.set(true);
                    }
                    getListStat.registerSuccessfulEvent(elapsedMicros);
                }
                finalCallback.operationComplete(rc, result);
            }
        };
        zooKeeperClient.get().getChildren(logMetadata.getLogSegmentsPath(), watcher,
                new AsyncCallback.Children2Callback() {
                    @Override
                    public void processResult(final int rc, final String path, final Object ctx,
                            final List<String> children, final Stat stat) {
                        if (KeeperException.Code.OK.intValue() != rc) {
                            if ((KeeperException.Code.CONNECTIONLOSS.intValue() == rc
                                    || KeeperException.Code.SESSIONEXPIRED.intValue() == rc
                                    || KeeperException.Code.SESSIONMOVED.intValue() == rc)
                                    && numAttemptsLeft.decrementAndGet() > 0) {
                                long backoffMs = backoffMillis.get();
                                backoffMillis.set(Math.min(conf.getZKRetryBackoffMaxMillis(), 2 * backoffMs));
                                scheduler.schedule(new Runnable() {
                                    @Override
                                    public void run() {
                                        asyncGetLedgerListInternal(comparator, segmentFilter, watcher,
                                                finalCallback, numAttemptsLeft, backoffMillis);
                                    }
                                }, backoffMs, TimeUnit.MILLISECONDS);
                                return;
                            }
                            callback.operationComplete(rc, null);
                            return;
                        }
                        if (LOG.isTraceEnabled()) {
                            LOG.trace("Got ledger list from {} : {}", logMetadata.getLogSegmentsPath(), children);
                        }
                        ledgerListWatchSet.set(true);
                        Set<String> segmentsReceived = new HashSet<String>();
                        segmentsReceived.addAll(segmentFilter.filter(children));
                        Set<String> segmentsAdded;
                        final Set<String> removedSegments = Collections.synchronizedSet(new HashSet<String>());
                        final Map<String, LogSegmentMetadata> addedSegments = Collections
                                .synchronizedMap(new HashMap<String, LogSegmentMetadata>());
                        Pair<Set<String>, Set<String>> segmentChanges = logSegmentCache.diff(segmentsReceived);
                        segmentsAdded = segmentChanges.getLeft();
                        removedSegments.addAll(segmentChanges.getRight());
                        if (segmentsAdded.isEmpty()) {
                            if (LOG.isTraceEnabled()) {
                                LOG.trace("No segments added for {}.", getFullyQualifiedName());
                            }
                            // update the cache before fetch
                            logSegmentCache.update(removedSegments, addedSegments);
                            List<LogSegmentMetadata> segmentList;
                            try {
                                segmentList = getCachedLogSegments(comparator);
                            } catch (UnexpectedException e) {
                                callback.operationComplete(KeeperException.Code.DATAINCONSISTENCY.intValue(), null);
                                return;
                            }
                            callback.operationComplete(KeeperException.Code.OK.intValue(), segmentList);
                            notifyUpdatedLogSegments(segmentList);
                            if (!removedSegments.isEmpty()) {
                                notifyOnOperationComplete();
                            }
                            return;
                        }
                        final AtomicInteger numChildren = new AtomicInteger(segmentsAdded.size());
                        final AtomicInteger numFailures = new AtomicInteger(0);
                        for (final String segment : segmentsAdded) {
                            metadataStore.getLogSegment(logMetadata.getLogSegmentPath(segment))
                                    .addEventListener(new FutureEventListener<LogSegmentMetadata>() {
                                        @Override
                                        public void onSuccess(LogSegmentMetadata result) {
                                            addedSegments.put(segment, result);
                                            complete();
                                        }

                                        @Override
                                        public void onFailure(Throwable cause) {
                                            // NONODE exception is possible in two cases
                                            // 1. A log segment was deleted by truncation between the call to
                                            //    getChildren and read attempt on the znode corresponding to the segment
                                            // 2. In progress segment has been completed => inprogress ZNode does not exist
                                            if (cause instanceof KeeperException
                                                    && KeeperException.Code.NONODE == ((KeeperException) cause).code()) {
                                                removedSegments.add(segment);
                                                complete();
                                            } else {
                                                // fail fast
                                                if (1 == numFailures.incrementAndGet()) {
                                                    int rcToReturn = KeeperException.Code.SYSTEMERROR.intValue();
                                                    if (cause instanceof KeeperException) {
                                                        rcToReturn = ((KeeperException) cause).code().intValue();
                                                    } else if (cause instanceof ZKException) {
                                                        rcToReturn = ((ZKException) cause).getKeeperExceptionCode().intValue();
                                                    }
                                                    // :( properly we need dlog related response code.
                                                    callback.operationComplete(rcToReturn, null);
                                                    return;
                                                }
                                            }
                                        }

                                        private void complete() {
                                            if (0 == numChildren.decrementAndGet() && numFailures.get() == 0) {
                                                // update the cache only when fetch completed
                                                logSegmentCache.update(removedSegments, addedSegments);
                                                List<LogSegmentMetadata> segmentList;
                                                try {
                                                    segmentList = getCachedLogSegments(comparator);
                                                } catch (UnexpectedException e) {
                                                    callback.operationComplete(
                                                            KeeperException.Code.DATAINCONSISTENCY.intValue(), null);
                                                    return;
                                                }
                                                callback.operationComplete(KeeperException.Code.OK.intValue(),
                                                        segmentList);
                                                notifyUpdatedLogSegments(segmentList);
                                                notifyOnOperationComplete();
                                            }
                                        }
                                    });
                        }
                    }
                }, null);
    } catch (ZooKeeperClient.ZooKeeperConnectionException e) {
        getListStat.registerFailedEvent(stopwatch.stop().elapsed(TimeUnit.MICROSECONDS));
        finalCallback.operationComplete(KeeperException.Code.CONNECTIONLOSS.intValue(), null);
    } catch (InterruptedException e) {
        getListStat.registerFailedEvent(stopwatch.stop().elapsed(TimeUnit.MICROSECONDS));
        finalCallback.operationComplete(KeeperException.Code.CONNECTIONLOSS.intValue(), null);
    }
}
From source file: org.apache.nifi.controller.repository.FileSystemRepository.java
private synchronized void initializeRepository() throws IOException {
    final Map<String, Path> realPathMap = new HashMap<>();
    final ExecutorService executor = Executors.newFixedThreadPool(containers.size());
    final List<Future<Long>> futures = new ArrayList<>();

    // Run through each of the containers. For each container, create the sections if necessary.
    // Then, we need to scan through the archived data so that we can determine what the oldest
    // archived data is, so that we know when we have to start aging data off.
    for (final Map.Entry<String, Path> container : containers.entrySet()) {
        final String containerName = container.getKey();
        final ContainerState containerState = containerStateMap.get(containerName);
        final Path containerPath = container.getValue();
        final boolean pathExists = Files.exists(containerPath);

        final Path realPath;
        if (pathExists) {
            realPath = containerPath.toRealPath();
        } else {
            realPath = Files.createDirectories(containerPath).toRealPath();
        }

        for (int i = 0; i < SECTIONS_PER_CONTAINER; i++) {
            Files.createDirectories(realPath.resolve(String.valueOf(i)));
        }

        realPathMap.put(containerName, realPath);

        // We need to scan the archive directories to find out the oldest timestamp so that know whether or not we
        // will have to delete archived data based on time threshold. Scanning all of the directories can be very
        // expensive because of all of the disk accesses. So we do this in multiple threads. Since containers are
        // often unique to a disk, we just map 1 thread to each container.
        final Callable<Long> scanContainer = new Callable<Long>() {
            @Override
            public Long call() throws IOException {
                final AtomicLong oldestDateHolder = new AtomicLong(0L);

                // the path already exists, so scan the path to find any files and update maxIndex to the max of
                // all filenames seen.
                Files.walkFileTree(realPath, new SimpleFileVisitor<Path>() {
                    @Override
                    public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException {
                        LOG.warn("Content repository contains un-readable file or directory '"
                                + file.getFileName() + "'. Skipping. ", exc);
                        return FileVisitResult.SKIP_SUBTREE;
                    }

                    @Override
                    public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs)
                            throws IOException {
                        if (attrs.isDirectory()) {
                            return FileVisitResult.CONTINUE;
                        }

                        // Check if this is an 'archive' directory
                        final Path relativePath = realPath.relativize(file);
                        if (relativePath.getNameCount() > 3
                                && ARCHIVE_DIR_NAME.equals(relativePath.subpath(1, 2).toString())) {
                            final long lastModifiedTime = getLastModTime(file);
                            if (lastModifiedTime < oldestDateHolder.get()) {
                                oldestDateHolder.set(lastModifiedTime);
                            }
                            containerState.incrementArchiveCount();
                        }
                        return FileVisitResult.CONTINUE;
                    }
                });

                return oldestDateHolder.get();
            }
        };

        // If the path didn't exist to begin with, there's no archive directory, so don't bother scanning.
        if (pathExists) {
            futures.add(executor.submit(scanContainer));
        }
    }

    executor.shutdown();

    for (final Future<Long> future : futures) {
        try {
            final Long oldestDate = future.get();
            if (oldestDate < oldestArchiveDate.get()) {
                oldestArchiveDate.set(oldestDate);
            }
        } catch (final ExecutionException | InterruptedException e) {
            if (e.getCause() instanceof IOException) {
                throw (IOException) e.getCause();
            } else {
                throw new RuntimeException(e);
            }
        }
    }

    containers.clear();
    containers.putAll(realPathMap);
}
From source file: org.apache.distributedlog.auditor.DLAuditor.java
private long calculateLedgerSpaceUsage(BookKeeperClient bkc, final ExecutorService executorService)
        throws IOException {
    final AtomicLong totalBytes = new AtomicLong(0);
    final AtomicLong totalEntries = new AtomicLong(0);
    final AtomicLong numLedgers = new AtomicLong(0);

    LedgerManager lm = BookKeeperAccessor.getLedgerManager(bkc.get());
    final CompletableFuture<Void> doneFuture = FutureUtils.createFuture();
    final BookKeeper bk = bkc.get();

    BookkeeperInternalCallbacks.Processor<Long> collector = new BookkeeperInternalCallbacks.Processor<Long>() {
        @Override
        public void process(final Long lid, final AsyncCallback.VoidCallback cb) {
            numLedgers.incrementAndGet();
            executorService.submit(new Runnable() {
                @Override
                public void run() {
                    bk.asyncOpenLedgerNoRecovery(lid, BookKeeper.DigestType.CRC32,
                            conf.getBKDigestPW().getBytes(UTF_8),
                            new org.apache.bookkeeper.client.AsyncCallback.OpenCallback() {
                                @Override
                                public void openComplete(int rc, LedgerHandle lh, Object ctx) {
                                    final int cbRc;
                                    if (BKException.Code.OK == rc) {
                                        totalBytes.addAndGet(lh.getLength());
                                        totalEntries.addAndGet(lh.getLastAddConfirmed() + 1);
                                        cbRc = rc;
                                    } else {
                                        cbRc = BKException.Code.ZKException;
                                    }
                                    executorService.submit(new Runnable() {
                                        @Override
                                        public void run() {
                                            cb.processResult(cbRc, null, null);
                                        }
                                    });
                                }
                            }, null);
                }
            });
        }
    };
    AsyncCallback.VoidCallback finalCb = new AsyncCallback.VoidCallback() {
        @Override
        public void processResult(int rc, String path, Object ctx) {
            if (BKException.Code.OK == rc) {
                doneFuture.complete(null);
            } else {
                doneFuture.completeExceptionally(BKException.create(rc));
            }
        }
    };
    lm.asyncProcessLedgers(collector, finalCb, null, BKException.Code.OK, BKException.Code.ZKException);
    try {
        doneFuture.get();
        logger.info("calculated {} ledgers\n\ttotal bytes = {}\n\ttotal entries = {}",
                new Object[] { numLedgers.get(), totalBytes.get(), totalEntries.get() });
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new DLInterruptedException("Interrupted on calculating ledger space : ", e);
    } catch (ExecutionException e) {
        if (e.getCause() instanceof IOException) {
            throw (IOException) (e.getCause());
        } else {
            throw new IOException("Failed to calculate ledger space : ", e.getCause());
        }
    }
    return totalBytes.get();
}
From source file: org.codice.ddf.commands.catalog.DumpCommand.java
@Override
protected Object executeWithSubject() throws Exception {
    final File dumpDir = new File(dirPath);

    if (!dumpDir.exists()) {
        printErrorMessage("Directory [" + dirPath + "] must exist.");
        console.println("If the directory does indeed exist, try putting the path in quotes.");
        return null;
    }

    if (!dumpDir.isDirectory()) {
        printErrorMessage("Path [" + dirPath + "] must be a directory.");
        return null;
    }

    if (!DEFAULT_TRANSFORMER_ID.matches(transformerId)) {
        transformers = getTransformers();
        if (transformers == null) {
            console.println(transformerId + " is an invalid metacard transformer.");
            return null;
        }
    }

    CatalogFacade catalog = getCatalog();
    FilterBuilder builder = getFilterBuilder();

    Filter createdFilter = null;
    if ((createdAfter != null) && (createdBefore != null)) {
        DateTime createStartDateTime = DateTime.parse(createdAfter);
        DateTime createEndDateTime = DateTime.parse(createdBefore);
        createdFilter = builder.attribute(Metacard.CREATED).is().during().dates(createStartDateTime.toDate(),
                createEndDateTime.toDate());
    } else if (createdAfter != null) {
        DateTime createStartDateTime = DateTime.parse(createdAfter);
        createdFilter = builder.attribute(Metacard.CREATED).is().after().date(createStartDateTime.toDate());
    } else if (createdBefore != null) {
        DateTime createEndDateTime = DateTime.parse(createdBefore);
        createdFilter = builder.attribute(Metacard.CREATED).is().before().date(createEndDateTime.toDate());
    }

    Filter modifiedFilter = null;
    if ((modifiedAfter != null) && (modifiedBefore != null)) {
        DateTime modifiedStartDateTime = DateTime.parse(modifiedAfter);
        DateTime modifiedEndDateTime = DateTime.parse(modifiedBefore);
        modifiedFilter = builder.attribute(Metacard.MODIFIED).is().during()
                .dates(modifiedStartDateTime.toDate(), modifiedEndDateTime.toDate());
    } else if (modifiedAfter != null) {
        DateTime modifiedStartDateTime = DateTime.parse(modifiedAfter);
        modifiedFilter = builder.attribute(Metacard.MODIFIED).is().after().date(modifiedStartDateTime.toDate());
    } else if (modifiedBefore != null) {
        DateTime modifiedEndDateTime = DateTime.parse(modifiedBefore);
        modifiedFilter = builder.attribute(Metacard.MODIFIED).is().before().date(modifiedEndDateTime.toDate());
    }

    Filter filter = null;
    if ((createdFilter != null) && (modifiedFilter != null)) {
        // Filter by both created and modified dates
        filter = builder.allOf(createdFilter, modifiedFilter);
    } else if (createdFilter != null) {
        // Only filter by created date
        filter = createdFilter;
    } else if (modifiedFilter != null) {
        // Only filter by modified date
        filter = modifiedFilter;
    } else {
        // Don't filter by date range
        filter = builder.attribute(Metacard.ID).is().like().text(WILDCARD);
    }

    if (cqlFilter != null) {
        filter = CQL.toFilter(cqlFilter);
    }

    QueryImpl query = new QueryImpl(filter);
    query.setRequestsTotalResultsCount(false);
    query.setPageSize(pageSize);

    Map<String, Serializable> props = new HashMap<String, Serializable>();
    // Avoid caching all results while dumping with native query mode
    props.put("mode", "native");

    final AtomicLong resultCount = new AtomicLong(0);
    long start = System.currentTimeMillis();

    SourceResponse response = catalog.query(new QueryRequestImpl(query, props));

    BlockingQueue<Runnable> blockingQueue = new ArrayBlockingQueue<Runnable>(multithreaded);
    RejectedExecutionHandler rejectedExecutionHandler = new ThreadPoolExecutor.CallerRunsPolicy();
    final ExecutorService executorService = new ThreadPoolExecutor(multithreaded, multithreaded, 0L,
            TimeUnit.MILLISECONDS, blockingQueue, rejectedExecutionHandler);

    while (response.getResults().size() > 0) {
        response = catalog.query(new QueryRequestImpl(query, props));

        if (multithreaded > 1) {
            final List<Result> results = new ArrayList<Result>(response.getResults());
            executorService.submit(new Runnable() {
                @Override
                public void run() {
                    boolean transformationFailed = false;
                    for (final Result result : results) {
                        Metacard metacard = result.getMetacard();
                        try {
                            exportMetacard(dumpDir, metacard);
                        } catch (IOException | CatalogTransformerException e) {
                            transformationFailed = true;
                            LOGGER.debug("Failed to dump metacard {}", metacard.getId(), e);
                            executorService.shutdownNow();
                        }
                        printStatus(resultCount.incrementAndGet());
                    }
                    if (transformationFailed) {
                        LOGGER.error(
                                "One or more metacards failed to transform. Enable debug log for more details.");
                    }
                }
            });
        } else {
            for (final Result result : response.getResults()) {
                Metacard metacard = result.getMetacard();
                exportMetacard(dumpDir, metacard);
                printStatus(resultCount.incrementAndGet());
            }
        }

        if (response.getResults().size() < pageSize || pageSize == -1) {
            break;
        }

        if (pageSize > 0) {
            query.setStartIndex(query.getStartIndex() + pageSize);
        }
    }

    executorService.shutdown();

    while (!executorService.isTerminated()) {
        try {
            TimeUnit.MILLISECONDS.sleep(100);
        } catch (InterruptedException e) {
            // ignore
        }
    }

    long end = System.currentTimeMillis();
    String elapsedTime = timeFormatter.print(new Period(start, end).withMillis(0));

    console.printf(" %d file(s) dumped in %s\t%n", resultCount.get(), elapsedTime);
    LOGGER.info("{} file(s) dumped in {}", resultCount.get(), elapsedTime);
    console.println();

    return null;
}
From source file: com.btoddb.fastpersitentqueue.flume.FpqChannelTest.java
@Test
public void testThreading() throws Exception {
    final int numEntries = 1000;
    final int numPushers = 4;
    final int numPoppers = 4;
    final int entrySize = 1000;
    channel.setMaxTransactionSize(2000);
    final int popBatchSize = 100;
    channel.setMaxMemorySegmentSizeInBytes(10000000);
    channel.setMaxJournalFileSize(10000000);
    channel.setMaxJournalDurationInMs(30000);
    channel.setFlushPeriodInMs(1000);
    channel.setNumberOfFlushWorkers(4);

    final Random pushRand = new Random(1000L);
    final Random popRand = new Random(1000000L);
    final AtomicInteger pusherFinishCount = new AtomicInteger();
    final AtomicInteger numPops = new AtomicInteger();
    final AtomicLong counter = new AtomicLong();
    final AtomicLong pushSum = new AtomicLong();
    final AtomicLong popSum = new AtomicLong();

    channel.start();

    ExecutorService execSrvc = Executors.newFixedThreadPool(numPushers + numPoppers);
    Set<Future> futures = new HashSet<Future>();

    // start pushing
    for (int i = 0; i < numPushers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < numEntries; i++) {
                    try {
                        long x = counter.getAndIncrement();
                        pushSum.addAndGet(x);
                        ByteBuffer bb = ByteBuffer.wrap(new byte[entrySize]);
                        bb.putLong(x);

                        Transaction tx = channel.getTransaction();
                        tx.begin();
                        MyEvent event1 = new MyEvent();
                        event1.addHeader("x", String.valueOf(x)).setBody(new byte[numEntries - 8]); // take out size of long
                        channel.put(event1);
                        tx.commit();
                        tx.close();

                        Thread.sleep(pushRand.nextInt(5));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
                pusherFinishCount.incrementAndGet();
            }
        });
        futures.add(future);
    }

    // start popping
    for (int i = 0; i < numPoppers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                while (pusherFinishCount.get() < numPushers || !channel.isEmpty()) {
                    try {
                        Transaction tx = channel.getTransaction();
                        tx.begin();
                        Event event;
                        int count = popBatchSize;
                        while (null != (event = channel.take()) && count-- > 0) {
                            popSum.addAndGet(Long.valueOf(event.getHeaders().get("x")));
                            numPops.incrementAndGet();
                        }
                        tx.commit();
                        tx.close();

                        Thread.sleep(popRand.nextInt(10));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        });
        futures.add(future);
    }

    boolean finished = false;
    while (!finished) {
        try {
            for (Future f : futures) {
                f.get();
            }
            finished = true;
        } catch (InterruptedException e) {
            // ignore
            Thread.interrupted();
        }
    }

    assertThat(numPops.get(), is(numEntries * numPushers));
    assertThat(channel.isEmpty(), is(true));
    assertThat(pushSum.get(), is(popSum.get()));
}
From source file: org.apache.hadoop.hbase.regionserver.TestFailedAppendAndSync.java
/**
 * Reproduce locking up that happens when we get an exception appending and syncing.
 * See HBASE-14317.
 * First I need to set up some mocks for Server and RegionServerServices. I also need to
 * set up a dodgy WAL that will throw an exception when we go to append to it.
 */
@Test(timeout = 300000)
public void testLockupAroundBadAssignSync() throws IOException {
    final AtomicLong rolls = new AtomicLong(0);
    // Dodgy WAL. Will throw exceptions when flags set.
    class DodgyFSLog extends FSHLog {
        volatile boolean throwSyncException = false;
        volatile boolean throwAppendException = false;

        public DodgyFSLog(FileSystem fs, Path root, String logDir, Configuration conf) throws IOException {
            super(fs, root, logDir, conf);
        }

        @Override
        public byte[][] rollWriter(boolean force) throws FailedLogCloseException, IOException {
            byte[][] regions = super.rollWriter(force);
            rolls.getAndIncrement();
            return regions;
        }

        @Override
        protected Writer createWriterInstance(Path path) throws IOException {
            final Writer w = super.createWriterInstance(path);
            return new Writer() {
                @Override
                public void close() throws IOException {
                    w.close();
                }

                @Override
                public void sync() throws IOException {
                    if (throwSyncException) {
                        throw new IOException("FAKE! Failed to replace a bad datanode...");
                    }
                    w.sync();
                }

                @Override
                public void append(Entry entry) throws IOException {
                    if (throwAppendException) {
                        throw new IOException("FAKE! Failed to replace a bad datanode...");
                    }
                    w.append(entry);
                }

                @Override
                public long getLength() throws IOException {
                    return w.getLength();
                }
            };
        }
    }

    // Make up mocked server and services.
    Server server = mock(Server.class);
    when(server.getConfiguration()).thenReturn(CONF);
    when(server.isStopped()).thenReturn(false);
    when(server.isAborted()).thenReturn(false);
    RegionServerServices services = mock(RegionServerServices.class);

    // OK. Now I have my mocked up Server and RegionServerServices and my dodgy WAL, go ahead with
    // the test.
    FileSystem fs = FileSystem.get(CONF);
    Path rootDir = new Path(dir + getName());
    DodgyFSLog dodgyWAL = new DodgyFSLog(fs, rootDir, getName(), CONF);
    LogRoller logRoller = new LogRoller(server, services);
    logRoller.addWAL(dodgyWAL);
    logRoller.start();

    boolean threwOnSync = false;
    boolean threwOnAppend = false;
    boolean threwOnBoth = false;

    HRegion region = initHRegion(tableName, null, null, dodgyWAL);
    try {
        // Get some random bytes.
        byte[] value = Bytes.toBytes(getName());
        try {
            // First get something into memstore
            Put put = new Put(value);
            put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("1"), value);
            region.put(put);
        } catch (IOException ioe) {
            fail();
        }
        long rollsCount = rolls.get();
        try {
            dodgyWAL.throwAppendException = true;
            dodgyWAL.throwSyncException = false;
            Put put = new Put(value);
            put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("3"), value);
            region.put(put);
        } catch (IOException ioe) {
            threwOnAppend = true;
        }
        while (rollsCount == rolls.get())
            Threads.sleep(100);
        rollsCount = rolls.get();

        // When we get to here.. we should be ok. A new WAL has been put in place. There were no
        // appends to sync. We should be able to continue.

        try {
            dodgyWAL.throwAppendException = true;
            dodgyWAL.throwSyncException = true;
            Put put = new Put(value);
            put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("4"), value);
            region.put(put);
        } catch (IOException ioe) {
            threwOnBoth = true;
        }
        while (rollsCount == rolls.get())
            Threads.sleep(100);

        // Again, all should be good. New WAL and no outstanding unsync'd edits so we should be able
        // to just continue.

        // So, should be no abort at this stage. Verify.
        Mockito.verify(server, Mockito.atLeast(0)).abort(Mockito.anyString(), (Throwable) Mockito.anyObject());
        try {
            dodgyWAL.throwAppendException = false;
            dodgyWAL.throwSyncException = true;
            Put put = new Put(value);
            put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("2"), value);
            region.put(put);
        } catch (IOException ioe) {
            threwOnSync = true;
        }
        // An append in the WAL but the sync failed is a server abort condition. That is our
        // current semantic. Verify. It takes a while for abort to be called. Just hang here till it
        // happens. If it don't we'll timeout the whole test. That is fine.
        while (true) {
            try {
                Mockito.verify(server, Mockito.atLeast(1)).abort(Mockito.anyString(),
                        (Throwable) Mockito.anyObject());
                break;
            } catch (WantedButNotInvoked t) {
                Threads.sleep(1);
            }
        }
    } finally {
        // To stop logRoller, its server has to say it is stopped.
        Mockito.when(server.isStopped()).thenReturn(true);
        if (logRoller != null)
            logRoller.interrupt();
        if (region != null) {
            try {
                region.close(true);
            } catch (DroppedSnapshotException e) {
                LOG.info("On way out; expected!", e);
            }
        }
        if (dodgyWAL != null)
            dodgyWAL.close();
        assertTrue("The regionserver should have thrown an exception", threwOnBoth);
        assertTrue("The regionserver should have thrown an exception", threwOnAppend);
        assertTrue("The regionserver should have thrown an exception", threwOnSync);
    }
}
From source file: org.tomitribe.tribestream.registryng.resources.ClientResource.java
@GET
@Path("invoke/stream")
@Produces("text/event-stream") // will be part of JAX-RS 2.1, for now just making it working
public void invokeScenario(@Suspended final AsyncResponse asyncResponse, @Context final Providers providers,
        @Context final HttpServletRequest httpServletRequest,
        // base64 encoded json with the request and identify since EventSource doesnt handle it very well
        // TODO: use a ciphering with a POST endpoint to avoid to have it readable (or other)
        @QueryParam("request") final String requestBytes) {
    final SseRequest in = loadPayload(SseRequest.class, providers, requestBytes);

    final String auth = in.getIdentity();
    security.check(auth, httpServletRequest, () -> {
    }, () -> {
        throw new WebApplicationException(Response.Status.FORBIDDEN);
    });

    final GenericClientService.Request req = toRequest(in.getHttp());
    final Scenario scenario = in.getHttp().getScenario();

    final MultivaluedHashMap<String, Object> fakeHttpHeaders = new MultivaluedHashMap<>();
    final ConcurrentMap<Future<?>, Boolean> computations = new ConcurrentHashMap<>();
    final MessageBodyWriter<LightHttpResponse> writerResponse = providers.getMessageBodyWriter(
            LightHttpResponse.class, LightHttpResponse.class, annotations, APPLICATION_JSON_TYPE);
    final MessageBodyWriter<ScenarioEnd> writerEnd = providers.getMessageBodyWriter(ScenarioEnd.class,
            ScenarioEnd.class, annotations, APPLICATION_JSON_TYPE);

    // not jaxrs one cause cxf wraps this one and prevents the flush() to works
    final HttpServletResponse httpServletResponse = HttpServletResponse.class
            .cast(httpServletRequest.getAttribute("tribe.registry.response"));
    httpServletResponse.setHeader("Content-Type", "text/event-stream");
    try {
        httpServletResponse.flushBuffer();
    } catch (final IOException e) {
        throw new IllegalStateException(e);
    }
    final ServletOutputStream out;
    try {
        out = httpServletResponse.getOutputStream();
    } catch (final IOException e) {
        throw new IllegalStateException(e);
    }

    mes.submit(() -> {
        final AtomicReference<Invoker.Handle> handleRef = new AtomicReference<>();
        try {
            // we compute some easy stats asynchronously
            final Map<Integer, AtomicInteger> sumPerResponse = new HashMap<>();
            final AtomicInteger total = new AtomicInteger();
            final AtomicLong min = new AtomicLong();
            final AtomicLong max = new AtomicLong();
            final AtomicLong sum = new AtomicLong();

            final AtomicInteger writeErrors = new AtomicInteger(0);

            final long start = System.currentTimeMillis();
            handleRef.set(invoker.invoke(scenario.getThreads(), scenario.getInvocations(),
                    scenario.getDuration(), timeout, () -> {
                        if (handleRef.get().isCancelled()) {
                            return;
                        }

                        LightHttpResponse resp;
                        try {
                            final GenericClientService.Response invoke = service.invoke(req);
                            resp = new LightHttpResponse(invoke.getStatus(), null,
                                    invoke.getClientExecutionDurationMs());
                        } catch (final RuntimeException e) {
                            resp = new LightHttpResponse(-1, e.getMessage(), -1);
                        }

                        // let's process it in an environment where synchronisation is fine
                        final LightHttpResponse respRef = resp;
                        computations.put(mes.submit(() -> {
                            synchronized (out) {
                                try {
                                    out.write(dataStart);
                                    writerResponse.writeTo(respRef, LightHttpResponse.class,
                                            LightHttpResponse.class, annotations, APPLICATION_JSON_TYPE,
                                            fakeHttpHeaders, out);
                                    out.write(dataEnd);
                                    out.flush();
                                } catch (final IOException e) {
                                    if (writeErrors.incrementAndGet() > toleratedWriteErrors) {
                                        handleRef.get().cancel();
                                    }
                                    throw new IllegalStateException(e);
                                }
                            }

                            if (handleRef.get().isCancelled()) {
                                return;
                            }

                            final long clientExecutionDurationMs = respRef.getClientExecutionDurationMs();

                            total.incrementAndGet();
                            sumPerResponse.computeIfAbsent(respRef.getStatus(), k -> new AtomicInteger())
                                    .incrementAndGet();
                            sum.addAndGet(clientExecutionDurationMs);
                            {
                                long m = min.get();
                                do {
                                    m = min.get();
                                    if (min.compareAndSet(m, clientExecutionDurationMs)) {
                                        break;
                                    }
                                } while (m > clientExecutionDurationMs);
                            }
                            {
                                long m = max.get();
                                do {
                                    m = max.get();
                                    if (max.compareAndSet(m, clientExecutionDurationMs)) {
                                        break;
                                    }
                                } while (m < clientExecutionDurationMs);
                            }
                        }), true);
                    }));
            handleRef.get().await();

            final long end = System.currentTimeMillis();

            do { // wait all threads finished to compute the stats
                final Iterator<Future<?>> iterator = computations.keySet().iterator();
                while (iterator.hasNext()) {
                    try {
                        iterator.next().get(timeout, TimeUnit.MILLISECONDS);
                    } catch (final InterruptedException e) {
                        Thread.interrupted();
                    } catch (final ExecutionException | TimeoutException e) {
                        throw new IllegalStateException(e.getCause());
                    } finally {
                        iterator.remove();
                    }
                }
            } while (!computations.isEmpty());

            if (handleRef.get().isCancelled()) {
                return;
            }

            try {
                out.write(dataStart);
                writerEnd.writeTo(
                        new ScenarioEnd(
                                sumPerResponse.entrySet().stream()
                                        .collect(toMap(Map.Entry::getKey, t -> t.getValue().get())),
                                end - start, total.get(), min.get(), max.get(), sum.get() * 1. / total.get()),
                        ScenarioEnd.class, ScenarioEnd.class, annotations, APPLICATION_JSON_TYPE,
                        new MultivaluedHashMap<>(), out);
                out.write(dataEnd);
                out.flush();
            } catch (final IOException e) {
                throw new IllegalStateException(e);
            }
        } finally {
            try {
                // cxf will skip it since we already write ourself
                asyncResponse.resume("");
            } catch (final RuntimeException re) {
                // no-op: not that important
            }
        }
    });
}