List of usage examples for the java.util.concurrent.atomic.AtomicLong constructor
public AtomicLong(long initialValue)
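As a quick orientation before the project examples below, here is a minimal, self-contained sketch (not taken from any of the projects) showing the constructor together with the methods the examples rely on:

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongConstructorDemo {
  public static void main(String[] args) {
    // The constructor seeds the counter with an explicit initial value.
    AtomicLong counter = new AtomicLong(5);

    long before = counter.getAndIncrement(); // returns 5, counter is now 6
    long after = counter.incrementAndGet();  // counter is now 7, returns 7
    counter.addAndGet(3);                    // counter is now 10
    counter.set(-1);                         // plain overwrite

    System.out.println(before + " " + after + " " + counter.get()); // 5 7 -1
  }
}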
From source file: fr.gouv.vitam.mdbes.QueryBench.java
private TypeField getField(JsonNode bfield, int level) throws InvalidParseOperationException {
  String name = bfield.get(FIELD_ARGS.__name.name()).asText();
  String type = bfield.get(FIELD_ARGS.__type.name()).asText();
  String sftype = bfield.path(FIELD_ARGS.__ftype.name()).asText();
  if (type == null || type.isEmpty()) {
    LOGGER.warn("Unknown empty type: {}", type);
    throw new InvalidParseOperationException("Unknown empty type: " + type);
  }
  TypeField field = new TypeField();
  FIELD fieldType = null;
  try {
    fieldType = FIELD.valueOf(type);
  } catch (IllegalArgumentException e) {
    LOGGER.warn("Unknown type: {}", bfield);
    throw new InvalidParseOperationException("Unknown type: " + bfield);
  }
  field.name = name;
  field.type = fieldType;
  FIELD_TYPE ftype = FIELD_TYPE.chaine;
  if (sftype != null && !sftype.isEmpty()) {
    try {
      ftype = FIELD_TYPE.valueOf(sftype);
    } catch (final IllegalArgumentException e) {
      LOGGER.error("Unknown ftype: " + bfield);
      ftype = FIELD_TYPE.chaine;
    }
  }
  field.ftype = ftype;
  switch (fieldType) {
  case setdistrib: {
    // no field but CPT level
    distribCpt = context.cpts.get(CPTLEVEL + level);
    return null;
  }
  case save: {
    break;
  }
  case liste:
  case listeorder: {
    ArrayNode liste = (ArrayNode) bfield.get("__" + fieldType.name());
    if (liste == null || !liste.has(0)) {
      LOGGER.warn("Empty List: {}", liste);
      throw new InvalidParseOperationException("Empty List: " + bfield);
    }
    field.listeValeurs = new String[liste.size()];
    for (int i = 0; i < liste.size(); i++) {
      field.listeValeurs[i] = liste.get(i).asText();
    }
    break;
  }
  case serie: {
    JsonNode bson = bfield.get(FIELD_ARGS.__serie.name());
    if (bson == null) {
      LOGGER.warn("Empty serie: {}", bfield);
      throw new InvalidParseOperationException("Empty serie: " + bfield);
    }
    if (bson.has(FIELD_ARGS.__prefix.name())) {
      String prefix = bson.get(FIELD_ARGS.__prefix.name()).asText();
      if (prefix == null) {
        prefix = "";
      }
      field.prefix = prefix;
    }
    if (bson.has(FIELD_ARGS.__idcpt.name())) {
      String idcpt = bson.get(FIELD_ARGS.__idcpt.name()).asText();
      if (idcpt != null && !idcpt.isEmpty()) {
        field.idcpt = idcpt;
        context.cpts.put(idcpt, new AtomicLong(0));
      }
    }
    field.modulo = -1;
    if (bson.has(FIELD_ARGS.__modulo.name())) {
      int modulo = bson.get(FIELD_ARGS.__modulo.name()).asInt();
      if (modulo > 0) {
        field.modulo = modulo;
      }
    }
    break;
  }
  case interval: {
    Integer low = bfield.get(FIELD_ARGS.__low.name()).asInt();
    if (low == null) {
      LOGGER.warn("Empty interval: {}", bfield);
      throw new InvalidParseOperationException("Empty interval: " + bfield);
    }
    Integer high = bfield.get(FIELD_ARGS.__high.name()).asInt();
    if (high == null) {
      LOGGER.warn("Empty interval: {}", bfield);
      throw new InvalidParseOperationException("Empty interval: " + bfield);
    }
    field.low = low;
    field.high = high;
    break;
  }
  default:
    LOGGER.warn("Incorrect type: {}", bfield);
    throw new InvalidParseOperationException("Incorrect type: " + bfield);
  }
  if (bfield.has(FIELD_ARGS.__save.name())) {
    String savename = bfield.get(FIELD_ARGS.__save.name()).asText();
    if (savename != null && !savename.isEmpty()) {
      field.saveName = savename;
    }
  }
  if (bfield.has(FIELD_ARGS.__subprefix.name())) {
    ArrayNode liste = (ArrayNode) bfield.get(FIELD_ARGS.__subprefix.name());
    if (liste == null || !liste.has(0)) {
      LOGGER.warn("Empty SubPrefix List: {}", liste);
      throw new InvalidParseOperationException("Empty SubPrefix List: " + bfield);
    }
    field.subprefixes = new String[liste.size()];
    for (int i = 0; i < liste.size(); i++) {
      field.subprefixes[i] = liste.get(i).asText();
    }
  }
  return field;
}
From source file: org.apache.hadoop.ipc.DecayRpcScheduler.java
/**
 * Get the number of occurrences and increment atomically.
 * @param identity the identity of the user to increment
 * @return the value before incrementation
 */
private long getAndIncrementCallCounts(Object identity) throws InterruptedException {
  // We will increment the count, or create it if no such count exists
  List<AtomicLong> count = this.callCounts.get(identity);
  if (count == null) {
    // Create the counts since no such count exists.
    // idx 0 for decayed call count
    // idx 1 for the raw call count
    count = new ArrayList<AtomicLong>(2);
    count.add(new AtomicLong(0));
    count.add(new AtomicLong(0));

    // Put it in, or get the AtomicInteger that was put in by another thread
    List<AtomicLong> otherCount = callCounts.putIfAbsent(identity, count);
    if (otherCount != null) {
      count = otherCount;
    }
  }

  // Update the total
  totalDecayedCallCount.getAndIncrement();
  totalRawCallCount.getAndIncrement();

  // At this point value is guaranteed to be not null. It may however have
  // been clobbered from callCounts. Nonetheless, we return what
  // we have.
  count.get(1).getAndIncrement();
  return count.get(0).getAndIncrement();
}
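The Hadoop example above creates two zero-initialized counters per identity and relies on ConcurrentMap.putIfAbsent to resolve races between threads that create the entry at the same time. A stripped-down sketch of the same create-if-absent counter pattern (class and field names here are illustrative, not from the Hadoop source):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

public class PerUserCounters {
  private final ConcurrentMap<String, AtomicLong> counts = new ConcurrentHashMap<>();

  /** Increments the counter for a user, creating it on first use; returns the value before the increment. */
  public long getAndIncrement(String user) {
    AtomicLong counter = counts.get(user);
    if (counter == null) {
      AtomicLong fresh = new AtomicLong(0);
      // Another thread may insert first; putIfAbsent tells us whose instance won.
      AtomicLong existing = counts.putIfAbsent(user, fresh);
      counter = (existing != null) ? existing : fresh;
    }
    return counter.getAndIncrement();
  }
}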
From source file: com.hubcap.task.TaskRunner.java
/**
 * Starts the ThreadPoolExecutor which builds a set of TaskRunner instances
 * which will wait for inputs (from the user)
 */
public static void startThreadPool() {
  if (!isTaskSystemReady) {
    System.out.println("startThreadPool()");
    isTaskSystemReady = true;

    // used to id the threads 'atomically'
    final AtomicLong count = new AtomicLong(0);
    if (TaskRunner.taskRunnerThreadFactory == null) {
      TaskRunner.taskRunnerThreadFactory = new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
          if (runningTasks.contains(r)) {
            throw new IllegalStateException("Cannot add duplicate runnable to running tasks");
          }
          Thread thread = new Thread(r);
          thread.setDaemon(false);
          thread.setName("HubcapTaskRunnerThread-" + count.getAndIncrement());
          taskThreads.add(thread);
          return thread;
        }
      };

      // calculates the current stable thread count based on the assumption
      // that it takes 'X' times the amount of time to transfer data (from github)
      // as it does to process said data (including Gson transformation)
      // and the limit of Y% use of CPU. MAX_THREADS provides a safe and stable cap for
      // systems that are so 'badass' that we would break the cap.
      // (i.e. i have 32 cores and 12 disks = (2*32*12*1(1+5/1) = 4600 threads, a bit high)...)
      int numThreads = ThreadUtils.getStableThreadCount(CPU_LOAD_TR, CPU_WAIT_TR, CPU_COMPUTE_TR,
          Constants.MAX_TASK_RUNNER_THREADS);

      System.out.println("creating: " + numThreads + " threads for hubcap");
      TaskRunner.taskRunnerThreadPool = Executors.newFixedThreadPool(numThreads,
          TaskRunner.taskRunnerThreadFactory);
      for (int i = 0; i < numThreads; ++i) {
        TaskRunner tr = new TaskRunner();
        taskRunnerThreadPool.execute(tr);
      }

      // pass the monitoring code to another thread
      // so we don't block the REPL loop
      monitorThread = new Thread(new Runnable() {
        @Override
        public void run() {
          while (!taskRunnerThreadPool.isShutdown()) {
            try {
              TaskRunner.rebalance();
              Thread.sleep(Constants.POOL_SHUTDOWN_CHECK_INTERVAL);
            } catch (InterruptedException ex) {
              if (ProcessModel.instance().getVerbose()) {
                ErrorUtils.printStackTrace(ex);
              }
              break;
            }
          }

          System.out.println("Thread Pool was shutdown");
          while (!taskRunnerThreadPool.isTerminated()) {
            try {
              Thread.sleep(Constants.POOL_TERM_CHECK_INTERVAL);
            } catch (InterruptedException ex) {
              ErrorUtils.printStackTrace(ex);
              break;
            }
          }
          System.out.println("Thread pool terminated.");
        }
      });
      monitorThread.setName("TaskMonDaemon");
      monitorThread.setDaemon(false);

      // start monitoring
      monitorThread.start();
      System.out.println("Thread pool started!");
    }
  } else {
    throw new IllegalStateException("Hubcap task runner can only be initialized once!");
  }
}
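In the example above, a single AtomicLong shared by the ThreadFactory gives every pool thread a unique numeric suffix. A minimal sketch of that naming pattern on its own (the class name and prefix are illustrative):

import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicLong;

public class NamedThreadFactory implements ThreadFactory {
  // Shared counter: getAndIncrement hands out 0, 1, 2, ... even when
  // several pool threads are created concurrently.
  private final AtomicLong count = new AtomicLong(0);
  private final String prefix;

  public NamedThreadFactory(String prefix) {
    this.prefix = prefix;
  }

  @Override
  public Thread newThread(Runnable r) {
    Thread t = new Thread(r);
    t.setName(prefix + "-" + count.getAndIncrement());
    return t;
  }
}

// Usage: ExecutorService pool = Executors.newFixedThreadPool(4, new NamedThreadFactory("worker"));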
From source file: org.apache.hadoop.hbase.quotas.TestSpaceQuotasWithSnapshots.java
void waitForStableQuotaSize(Connection conn, TableName tn, String ns) throws Exception {
  // For some stability in the value before proceeding
  // Helps make sure that we got the actual last value, not some inbetween
  AtomicLong lastValue = new AtomicLong(-1);
  AtomicInteger counter = new AtomicInteger(0);
  TEST_UTIL.waitFor(15_000, 500, new SpaceQuotaSnapshotPredicate(conn, tn, ns) {
    @Override
    boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
      LOG.debug("Last observed size=" + lastValue.get());
      if (snapshot.getUsage() == lastValue.get()) {
        int numMatches = counter.incrementAndGet();
        if (numMatches >= 5) {
          return true;
        }
        // Not yet..
        return false;
      }
      counter.set(0);
      lastValue.set(snapshot.getUsage());
      return false;
    }
  });
}
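The AtomicLong(-1) and AtomicInteger here are not about contention: they are mutable holders that the anonymous predicate can write to, since locals captured by an inner class must be effectively final. A compact sketch of the same stability-wait idea under that assumption (all names are illustrative):

import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.BooleanSupplier;
import java.util.function.LongSupplier;

public class StabilityWait {
  /** Polls until the condition holds or maxAttempts is reached. */
  static boolean waitFor(int maxAttempts, long sleepMillis, BooleanSupplier condition)
      throws InterruptedException {
    for (int i = 0; i < maxAttempts; i++) {
      if (condition.getAsBoolean()) {
        return true;
      }
      Thread.sleep(sleepMillis);
    }
    return false;
  }

  /** Waits until the sampled value repeats requiredMatches times in a row. */
  static boolean waitForStableValue(LongSupplier sampler, int requiredMatches) throws InterruptedException {
    // Locals captured by the lambda must be effectively final, so mutable
    // progress lives in atomic holders seeded with sentinel initial values.
    AtomicLong lastValue = new AtomicLong(-1);
    AtomicInteger matches = new AtomicInteger(0);
    return waitFor(30, 500, () -> {
      long current = sampler.getAsLong();
      if (current == lastValue.get()) {
        return matches.incrementAndGet() >= requiredMatches;
      }
      matches.set(0);
      lastValue.set(current);
      return false;
    });
  }
}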
From source file: com.facebook.presto.accumulo.tools.RewriteMetricsTask.java
private void incrementTimestampMetric(Map<Text, Map<Text, Map<ColumnVisibility, AtomicLong>>> rowMap,
    Text family, ColumnVisibility visibility, Text timestampValue) {
  for (Entry<TimestampPrecision, Long> entry : getTruncatedTimestamps(
      serializer.decode(TIMESTAMP, timestampValue.copyBytes())).entrySet()) {
    Text timestampFamily = new Text(
        Bytes.concat(family.copyBytes(), TIMESTAMP_CARDINALITY_FAMILIES.get(entry.getKey())));
    Text row = new Text(serializer.encode(TIMESTAMP, entry.getValue()));

    Map<Text, Map<ColumnVisibility, AtomicLong>> familyMap = rowMap.get(row);
    if (familyMap == null) {
      familyMap = new HashMap<>();
      rowMap.put(row, familyMap);
    }

    Map<ColumnVisibility, AtomicLong> visibilityMap = familyMap.get(timestampFamily);
    if (visibilityMap == null) {
      visibilityMap = new HashMap<>();
      visibilityMap.put(new ColumnVisibility(), new AtomicLong(0));
      familyMap.put(timestampFamily, visibilityMap);
    }

    if (visibilityMap.containsKey(visibility)) {
      visibilityMap.get(visibility).incrementAndGet();
    } else {
      visibilityMap.put(visibility, new AtomicLong(1));
    }
  }
}
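The create-or-increment branch at the end of this method can be collapsed with computeIfAbsent on Java 8+, seeding a new AtomicLong(0) the first time a key is seen. A small, single-threaded sketch (illustrative names, not the Presto code):

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

public class CounterMapDemo {
  public static void main(String[] args) {
    // Single-threaded counting with a plain HashMap of AtomicLong counters.
    // computeIfAbsent seeds a new AtomicLong(0) on first sight of a key,
    // so "create with 1" and "increment existing" collapse into one line.
    Map<String, AtomicLong> counts = new HashMap<>();
    for (String key : new String[] {"a", "b", "a", "a"}) {
      counts.computeIfAbsent(key, k -> new AtomicLong(0)).incrementAndGet();
    }
    System.out.println(counts); // e.g. {a=3, b=1}
  }
}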
From source file: org.apache.hadoop.raid.RaidShell.java
private long estimateSaving(final Codec codec, final List<Path> files, final int targetReplication,
    final int numThreads, final boolean isDebug) throws IOException {
  final AtomicLong totalSavingSize = new AtomicLong(0);
  ExecutorService executor = Executors.newFixedThreadPool(numThreads);
  LOG.info("Processing " + files.size() + " files/dirs for " + codec.id + " in " + numThreads + " threads");
  if (isDebug) {
    System.out.println("oldDiskSize | oldParitySize | newDiskSize | newParitySize"
        + "| savingSize | totalSavingSize | path ");
  }
  final AtomicInteger finishNum = new AtomicInteger(0);
  for (int i = 0; i < numThreads; i++) {
    final int startIdx = i;
    Runnable work = new Runnable() {
      public void run() {
        try {
          for (int idx = startIdx; idx < files.size(); idx += numThreads) {
            try {
              Path p = files.get(idx);
              FileSystem fs = FileSystem.get(conf);
              p = fs.makeQualified(p);
              FileStatus stat = null;
              try {
                stat = fs.getFileStatus(p);
              } catch (FileNotFoundException e) {
                LOG.warn("Path " + p + " does not exist", e);
              }
              if (stat == null) {
                continue;
              }
              short repl = 0;
              List<FileStatus> lfs = null;
              if (codec.isDirRaid) {
                if (!stat.isDir()) {
                  continue;
                }
                lfs = RaidNode.listDirectoryRaidFileStatus(conf, fs, p);
                if (lfs == null) {
                  continue;
                }
                repl = DirectoryStripeReader.getReplication(lfs);
              } else {
                repl = stat.getReplication();
              }
              // if should not raid, will not put the file into the write list.
              if (!RaidNode.shouldRaid(conf, fs, stat, codec, lfs)) {
                LOG.info("Should not raid file: " + p);
                continue;
              }
              // check the replication.
              boolean add = false;
              if (repl > targetReplication) {
                add = true;
              } else if (repl == targetReplication && !ParityFilePair.parityExists(stat, codec, conf)) {
                add = true;
              }
              if (add) {
                long oldDiskSize = 0L;
                long newDiskSize = 0L;
                long numBlocks = 0L;
                long parityBlockSize = 0L;
                if (codec.isDirRaid) {
                  for (FileStatus fsStat : lfs) {
                    oldDiskSize += fsStat.getLen() * (fsStat.getReplication());
                    newDiskSize += fsStat.getLen() * targetReplication;
                  }
                  numBlocks = DirectoryStripeReader.getBlockNum(lfs);
                  parityBlockSize = DirectoryStripeReader.getParityBlockSize(conf, lfs);
                } else {
                  oldDiskSize = stat.getLen() * stat.getReplication();
                  newDiskSize = stat.getLen() * targetReplication;
                  numBlocks = RaidNode.getNumBlocks(stat);
                  parityBlockSize = stat.getBlockSize();
                }
                long numStripes = RaidNode.numStripes(numBlocks, codec.stripeLength);
                long newParitySize = numStripes * codec.parityLength * parityBlockSize * targetReplication;
                long oldParitySize = 0L;
                for (Codec other : Codec.getCodecs()) {
                  if (other.priority < codec.priority) {
                    Path parityPath = new Path(other.parityDirectory, RaidNode.makeRelative(stat.getPath()));
                    long logicalSize = 0;
                    try {
                      logicalSize = fs.getContentSummary(parityPath).getSpaceConsumed();
                    } catch (IOException ioe) {
                      // doesn't exist
                      continue;
                    }
                    oldParitySize += logicalSize;
                  }
                }
                long savingSize = oldDiskSize + oldParitySize - newDiskSize - newParitySize;
                totalSavingSize.addAndGet(savingSize);
                if (isDebug) {
                  System.out.println(oldDiskSize + " " + oldParitySize + " " + newDiskSize + " "
                      + newParitySize + " " + savingSize + " " + totalSavingSize.get() + " " + stat.getPath());
                }
              }
            } catch (IOException ioe) {
              LOG.warn("Get IOException", ioe);
            }
          }
        } finally {
          finishNum.incrementAndGet();
        }
      }
    };
    if (executor != null) {
      executor.execute(work);
    }
  }
  if (executor != null) {
    try {
      while (finishNum.get() < numThreads) {
        try {
          Thread.sleep(2000);
        } catch (InterruptedException ie) {
          LOG.warn("EstimateSaving get exception ", ie);
          throw new IOException(ie);
        }
      }
    } finally {
      executor.shutdown(); // Waits for submitted tasks to finish.
    }
  }
  return totalSavingSize.get();
}
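The pattern to note here is the shared AtomicLong(0) total that every worker thread updates with addAndGet while a separate AtomicInteger tracks completion. A minimal, self-contained sketch of that accumulation idea (names are illustrative):

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

public class ParallelSum {
  public static void main(String[] args) throws InterruptedException {
    List<Long> sizes = List.of(10L, 20L, 30L, 40L);
    // Shared total, seeded at 0; addAndGet is safe to call from many worker threads.
    AtomicLong total = new AtomicLong(0);
    ExecutorService pool = Executors.newFixedThreadPool(2);
    for (long size : sizes) {
      pool.execute(() -> total.addAndGet(size));
    }
    pool.shutdown();
    pool.awaitTermination(1, TimeUnit.MINUTES);
    System.out.println("total=" + total.get()); // total=100
  }
}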
From source file: com.netflix.discovery.shared.Applications.java
/**
 * Shuffle the instances and filter for only {@link InstanceStatus#UP} if
 * required.
 */
private void shuffleAndFilterInstances(Map<String, AbstractQueue<InstanceInfo>> srcMap,
    Map<String, AtomicReference<List<InstanceInfo>>> destMap, Map<String, AtomicLong> vipIndexMap,
    boolean filterUpInstances) {
  for (Map.Entry<String, AbstractQueue<InstanceInfo>> entries : srcMap.entrySet()) {
    AbstractQueue<InstanceInfo> instanceInfoQueue = entries.getValue();
    List<InstanceInfo> l = new ArrayList<InstanceInfo>(instanceInfoQueue);
    if (filterUpInstances) {
      Iterator<InstanceInfo> it = l.iterator();
      while (it.hasNext()) {
        InstanceInfo instanceInfo = it.next();
        if (!InstanceStatus.UP.equals(instanceInfo.getStatus())) {
          it.remove();
        }
      }
    }
    Collections.shuffle(l);
    AtomicReference<List<InstanceInfo>> instanceInfoList = destMap.get(entries.getKey());
    if (instanceInfoList == null) {
      instanceInfoList = new AtomicReference<List<InstanceInfo>>(l);
      destMap.put(entries.getKey(), instanceInfoList);
    }
    instanceInfoList.set(l);
    vipIndexMap.put(entries.getKey(), new AtomicLong(0));
  }

  // finally remove all vips that are completed deleted (i.e. missing) from the srcSet
  Set<String> srcVips = srcMap.keySet();
  Set<String> destVips = destMap.keySet();
  destVips.retainAll(srcVips);
}
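Each VIP key is given a fresh AtomicLong(0) that later serves as a round-robin cursor over the shuffled instance list. A small sketch of that cursor idea in isolation (this is an illustration, not the Eureka client code):

import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

public class RoundRobinPicker<T> {
  // Seeded at 0; getAndIncrement hands every caller a distinct tick,
  // and floorMod wraps it onto the candidate list.
  private final AtomicLong index = new AtomicLong(0);

  public T next(List<T> candidates) {
    if (candidates.isEmpty()) {
      return null;
    }
    long slot = Math.floorMod(index.getAndIncrement(), (long) candidates.size());
    return candidates.get((int) slot);
  }
}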
From source file: com.antsdb.saltedfish.nosql.Gobbler.java
/**
 * return -1 if there is no valid sp found meaning this is an empty database
 * @throws Exception
 */
public long getLatestSp() {
  long sp = this.spaceman.getAllocationPointer();
  int spaceId = SpaceManager.getSpaceId(sp);
  long spaceStartSp = this.spaceman.getSpaceStartSp(spaceId);
  if (spaceStartSp == sp) {
    // if current space is empty, wait a little
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
    }
  }
  AtomicLong result = new AtomicLong(-1);
  try {
    this.replay(spaceStartSp, true, new ReplayHandler() {
      @Override
      public void all(LogEntry entry) {
        result.set(entry.getSpacePointer());
      }
    });
  } catch (Exception ignored) {
  }
  return result.get();
}
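Here AtomicLong(-1) acts as a result holder that the anonymous ReplayHandler writes into, with -1 kept as the "nothing found" sentinel. A stand-alone sketch of capturing the last value seen by a callback (the replay helper below is a made-up stand-in):

import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.LongConsumer;

public class LastValueCapture {
  /** Replays values through a callback; a stand-in for a log-replay API. */
  static void replay(List<Long> entries, LongConsumer handler) {
    entries.forEach(handler::accept);
  }

  public static void main(String[] args) {
    // Seed with -1 as the "nothing seen" sentinel; the callback overwrites it
    // with the last value it observes, and the caller reads it afterwards.
    AtomicLong last = new AtomicLong(-1);
    replay(List.of(3L, 7L, 42L), last::set);
    System.out.println(last.get()); // 42, or -1 if the list had been empty
  }
}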
From source file: org.apache.accumulo.tserver.tablet.Tablet.java
public Tablet(final TabletServer tabletServer, final KeyExtent extent, final TabletResourceManager trm,
    TabletData data) throws IOException {
  this.tabletServer = tabletServer;
  this.extent = extent;
  this.tabletResources = trm;
  this.lastLocation = data.getLastLocation();
  this.lastFlushID = data.getFlushID();
  this.lastCompactID = data.getCompactID();
  this.splitCreationTime = data.getSplitTime();
  this.tabletTime = TabletTime.getInstance(data.getTime());
  this.persistedTime = tabletTime.getTime();
  this.logId = tabletServer.createLogId(extent);

  TableConfiguration tblConf = tabletServer.getTableConfiguration(extent);
  if (null == tblConf) {
    Tables.clearCache(tabletServer.getInstance());
    tblConf = tabletServer.getTableConfiguration(extent);
    requireNonNull(tblConf, "Could not get table configuration for " + extent.getTableId());
  }
  this.tableConfiguration = tblConf;

  // translate any volume changes
  VolumeManager fs = tabletServer.getFileSystem();
  boolean replicationEnabled = ReplicationConfigurationUtil.isEnabled(extent, this.tableConfiguration);
  TabletFiles tabletPaths = new TabletFiles(data.getDirectory(), data.getLogEntris(), data.getDataFiles());
  tabletPaths = VolumeUtil.updateTabletVolumes(tabletServer, tabletServer.getLock(), fs, extent, tabletPaths,
      replicationEnabled);

  // deal with relative path for the directory
  Path locationPath;
  if (tabletPaths.dir.contains(":")) {
    locationPath = new Path(tabletPaths.dir);
  } else {
    locationPath = tabletServer.getFileSystem().getFullPath(FileType.TABLE,
        extent.getTableId() + tabletPaths.dir);
  }
  this.location = locationPath;
  this.tabletDirectory = tabletPaths.dir;
  for (Entry<Long, List<FileRef>> entry : data.getBulkImported().entrySet()) {
    this.bulkImported.put(entry.getKey(), new CopyOnWriteArrayList<FileRef>(entry.getValue()));
  }
  setupDefaultSecurityLabels(extent);

  final List<LogEntry> logEntries = tabletPaths.logEntries;
  final SortedMap<FileRef, DataFileValue> datafiles = tabletPaths.datafiles;

  tableConfiguration.addObserver(configObserver = new ConfigurationObserver() {

    private void reloadConstraints() {
      log.debug("Reloading constraints for extent: " + extent);
      constraintChecker.set(new ConstraintChecker(tableConfiguration));
    }

    @Override
    public void propertiesChanged() {
      reloadConstraints();
      try {
        setupDefaultSecurityLabels(extent);
      } catch (Exception e) {
        log.error("Failed to reload default security labels for extent: " + extent.toString());
      }
    }

    @Override
    public void propertyChanged(String prop) {
      if (prop.startsWith(Property.TABLE_CONSTRAINT_PREFIX.getKey()))
        reloadConstraints();
      else if (prop.equals(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY.getKey())) {
        try {
          log.info("Default security labels changed for extent: " + extent.toString());
          setupDefaultSecurityLabels(extent);
        } catch (Exception e) {
          log.error("Failed to reload default security labels for extent: " + extent.toString());
        }
      }
    }

    @Override
    public void sessionExpired() {
      log.debug("Session expired, no longer updating per table props...");
    }
  });
  tableConfiguration.getNamespaceConfiguration().addObserver(configObserver);
  tabletMemory = new TabletMemory(this);

  // Force a load of any per-table properties
  configObserver.propertiesChanged();

  if (!logEntries.isEmpty()) {
    log.info("Starting Write-Ahead Log recovery for " + this.extent);
    final AtomicLong entriesUsedOnTablet = new AtomicLong(0);
    // track max time from walog entries without timestamps
    final AtomicLong maxTime = new AtomicLong(Long.MIN_VALUE);
    final CommitSession commitSession = getTabletMemory().getCommitSession();
    try {
      Set<String> absPaths = new HashSet<String>();
      for (FileRef ref : datafiles.keySet())
        absPaths.add(ref.path().toString());

      tabletServer.recover(this.getTabletServer().getFileSystem(), extent, tableConfiguration, logEntries,
          absPaths, new MutationReceiver() {
            @Override
            public void receive(Mutation m) {
              // LogReader.printMutation(m);
              Collection<ColumnUpdate> muts = m.getUpdates();
              for (ColumnUpdate columnUpdate : muts) {
                if (!columnUpdate.hasTimestamp()) {
                  // if it is not a user set timestamp, it must have been set
                  // by the system
                  maxTime.set(Math.max(maxTime.get(), columnUpdate.getTimestamp()));
                }
              }
              getTabletMemory().mutate(commitSession, Collections.singletonList(m));
              entriesUsedOnTablet.incrementAndGet();
            }
          });

      if (maxTime.get() != Long.MIN_VALUE) {
        tabletTime.useMaxTimeFromWALog(maxTime.get());
      }
      commitSession.updateMaxCommittedTime(tabletTime.getTime());

      if (entriesUsedOnTablet.get() == 0) {
        log.debug("No replayed mutations applied, removing unused entries for " + extent);
        MetadataTableUtil.removeUnusedWALEntries(getTabletServer(), extent, logEntries,
            tabletServer.getLock());

        // No replication update to be made because the fact that this tablet didn't use any mutations
        // from the WAL implies nothing about use of this WAL by other tablets. Do nothing.
        logEntries.clear();
      } else if (ReplicationConfigurationUtil.isEnabled(extent,
          tabletServer.getTableConfiguration(extent))) {
        // The logs are about to be re-used by this tablet, we need to record that they have data for this extent,
        // but that they may get more data. logEntries is not cleared which will cause the elements
        // in logEntries to be added to the currentLogs for this Tablet below.
        //
        // This update serves the same purpose as an update during a MinC. We know that the WAL was defined
        // (written when the WAL was opened) but this lets us know there are mutations written to this WAL
        // that could potentially be replicated. Because the Tablet is using this WAL, we can be sure that
        // the WAL isn't closed (WRT replication Status) and thus we're safe to update its progress.
        Status status = StatusUtil.openWithUnknownLength();
        for (LogEntry logEntry : logEntries) {
          log.debug("Writing updated status to metadata table for " + logEntry.filename + " "
              + ProtobufUtil.toString(status));
          ReplicationTableUtil.updateFiles(tabletServer, extent, logEntry.filename, status);
        }
      }
    } catch (Throwable t) {
      if (tableConfiguration.getBoolean(Property.TABLE_FAILURES_IGNORE)) {
        log.warn("Error recovering from log files: ", t);
      } else {
        throw new RuntimeException(t);
      }
    }

    // make some closed references that represent the recovered logs
    currentLogs = new ConcurrentSkipListSet<DfsLogger>();
    for (LogEntry logEntry : logEntries) {
      currentLogs.add(new DfsLogger(tabletServer.getServerConfig(), logEntry.filename,
          logEntry.getColumnQualifier().toString()));
    }

    log.info("Write-Ahead Log recovery complete for " + this.extent + " (" + entriesUsedOnTablet.get()
        + " mutations applied, " + getTabletMemory().getNumEntries() + " entries created)");
  }

  String contextName = tableConfiguration.get(Property.TABLE_CLASSPATH);
  if (contextName != null && !contextName.equals("")) {
    // initialize context classloader, instead of possibly waiting for it to initialize for a scan
    // TODO this could hang, causing other tablets to fail to load - ACCUMULO-1292
    AccumuloVFSClassLoader.getContextManager().getClassLoader(contextName);
  }

  // do this last after tablet is completely setup because it
  // could cause major compaction to start
  datafileManager = new DatafileManager(this, datafiles);

  computeNumEntries();

  getDatafileManager().removeFilesAfterScan(data.getScanFiles());

  // look for hints of a failure on the previous tablet server
  if (!logEntries.isEmpty() || needsMajorCompaction(MajorCompactionReason.NORMAL)) {
    // look for any temp files hanging around
    removeOldTemporaryFiles();
  }

  log.log(TLevel.TABLET_HIST, extent + " opened");
}
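The recovery callback above keeps a running maximum in an AtomicLong seeded with Long.MIN_VALUE by re-reading and setting it. On Java 8+ the same idea can be expressed with accumulateAndGet, which performs the compare-and-store as one atomic step; a brief sketch (not the Accumulo code):

import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.LongStream;

public class RunningMax {
  public static void main(String[] args) {
    // Long.MIN_VALUE doubles as the "no observation yet" sentinel.
    AtomicLong maxSeen = new AtomicLong(Long.MIN_VALUE);
    LongStream.of(17, 3, 99, 42).parallel()
        .forEach(v -> maxSeen.accumulateAndGet(v, Math::max));
    if (maxSeen.get() != Long.MIN_VALUE) {
      System.out.println("max=" + maxSeen.get()); // max=99
    }
  }
}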
From source file: org.apache.hadoop.hbase.quotas.TestSpaceQuotasWithSnapshots.java
void waitForStableRegionSizeReport(Connection conn, TableName tn) throws Exception {
  // For some stability in the value before proceeding
  // Helps make sure that we got the actual last value, not some inbetween
  AtomicLong lastValue = new AtomicLong(-1);
  AtomicInteger counter = new AtomicInteger(0);
  TEST_UTIL.waitFor(15_000, 500, new Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      LOG.debug("Last observed size=" + lastValue.get());
      long actual = getRegionSizeReportForTable(conn, tn);
      if (actual == lastValue.get()) {
        int numMatches = counter.incrementAndGet();
        if (numMatches >= 5) {
          return true;
        }
        // Not yet..
        return false;
      }
      counter.set(0);
      lastValue.set(actual);
      return false;
    }
  });
}