List of usage examples for java.util.concurrent.atomic.AtomicLong
public AtomicLong(long initialValue)
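Each example below constructs an AtomicLong with an explicit initial value rather than relying on the zero-argument default. As a minimal, self-contained illustration of the constructor (the class name and values here are hypothetical, not from any example below):

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongConstructorExample {
    public static void main(String[] args) {
        // Start counting from an explicit initial value instead of 0.
        AtomicLong counter = new AtomicLong(1000L);

        System.out.println(counter.get());             // 1000
        System.out.println(counter.incrementAndGet()); // 1001
        System.out.println(counter.addAndGet(-500L));  // 501
    }
}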
From source file:org.apache.hadoop.hbase.io.hfile.slab.SingleSizeCache.java
/**
 * Default constructor. Specify the size of the blocks, number of blocks, and
 * the SlabCache this cache will be assigned to.
 *
 * @param blockSize the size of each block, in bytes
 * @param numBlocks the number of blocks of blockSize this cache will hold.
 * @param master the SlabCache this SingleSlabCache is assigned to.
 */
public SingleSizeCache(int blockSize, int numBlocks, SlabItemActionWatcher master) {
    this.blockSize = blockSize;
    this.numBlocks = numBlocks;
    backingStore = new Slab(blockSize, numBlocks);
    this.stats = new CacheStats();
    this.actionWatcher = master;
    this.size = new AtomicLong(CACHE_FIXED_OVERHEAD + backingStore.heapSize());
    this.timeSinceLastAccess = new AtomicLong();

    // This evictionListener is called whenever the cache automatically
    // evicts something.
    RemovalListener<BlockCacheKey, CacheablePair> listener = new RemovalListener<BlockCacheKey, CacheablePair>() {
        @Override
        public void onRemoval(RemovalNotification<BlockCacheKey, CacheablePair> notification) {
            if (!notification.wasEvicted()) {
                // Only process removals by eviction, not by replacement or
                // explicit removal
                return;
            }
            CacheablePair value = notification.getValue();
            timeSinceLastAccess.set(System.nanoTime() - value.recentlyAccessed.get());
            stats.evict();
            doEviction(notification.getKey(), value);
        }
    };

    backingMap = CacheBuilder.newBuilder().maximumSize(numBlocks - 1).removalListener(listener)
            .<BlockCacheKey, CacheablePair>build().asMap();
}
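The idiom worth noting here is seeding the AtomicLong with a precomputed baseline (CACHE_FIXED_OVERHEAD plus the backing store's heap size) so that later size changes are simple atomic deltas. A minimal sketch of that accounting pattern, with hypothetical names and values:

import java.util.concurrent.atomic.AtomicLong;

public class HeapSizeTracker {
    private static final long FIXED_OVERHEAD = 128L; // hypothetical baseline cost

    // Seed with the fixed overhead; entries adjust the total atomically.
    private final AtomicLong size = new AtomicLong(FIXED_OVERHEAD);

    // Called when a block is cached; returns the new total for logging.
    long onAdd(long blockHeapSize) {
        return size.addAndGet(blockHeapSize);
    }

    // Called from an eviction listener; the delta is negative.
    long onEvict(long blockHeapSize) {
        return size.addAndGet(-blockHeapSize);
    }

    long heapSize() {
        return size.get();
    }
}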
From source file:org.apache.bookkeeper.metadata.etcd.Etcd64bitIdGeneratorTest.java
/**
 * Test generating id in parallel and ensure there is no duplicated id.
 */
@Test
public void testGenerateIdParallel() throws Exception {
    final int numThreads = 10;
    @Cleanup("shutdown")
    ExecutorService executor = Executors.newFixedThreadPool(numThreads);

    final int numIds = 10000;
    final AtomicLong totalIds = new AtomicLong(numIds);
    final Set<Long> ids = Collections.newSetFromMap(new ConcurrentHashMap<>());
    final RateLimiter limiter = RateLimiter.create(1000);
    final CompletableFuture<Void> doneFuture = new CompletableFuture<>();
    for (int i = 0; i < numThreads; i++) {
        executor.submit(() -> {
            Client client = Client.builder().endpoints(etcdContainer.getClientEndpoint()).build();
            Etcd64bitIdGenerator gen = new Etcd64bitIdGenerator(client.getKVClient(), scope);

            AtomicBoolean running = new AtomicBoolean(true);

            while (running.get()) {
                limiter.acquire();

                GenericCallbackFuture<Long> genFuture = new GenericCallbackFuture<>();
                gen.generateLedgerId(genFuture);

                genFuture.thenAccept(lid -> {
                    boolean duplicatedFound = !(ids.add(lid));
                    if (duplicatedFound) {
                        running.set(false);
                        doneFuture.completeExceptionally(
                                new IllegalStateException("Duplicated id " + lid + " generated : " + ids));
                        return;
                    } else {
                        if (totalIds.decrementAndGet() <= 0) {
                            running.set(false);
                            doneFuture.complete(null);
                        }
                    }
                }).exceptionally(cause -> {
                    running.set(false);
                    doneFuture.completeExceptionally(cause);
                    return null;
                });
            }
        });
    }

    FutureUtils.result(doneFuture);
    assertTrue(totalIds.get() <= 0);
    assertTrue(ids.size() >= numIds);
}
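Here the AtomicLong is initialized to the total amount of expected work and counted down from multiple threads; whichever thread drives it to zero (or below) completes the shared future. A minimal sketch of that countdown idiom, with hypothetical names:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicLong;

public class CountdownExample {
    public static void main(String[] args) throws Exception {
        final int tasks = 100;
        final AtomicLong remaining = new AtomicLong(tasks);
        final CompletableFuture<Void> done = new CompletableFuture<>();

        for (int i = 0; i < tasks; i++) {
            // In real code these callbacks would fire on worker threads.
            Runnable onTaskFinished = () -> {
                // Exactly one caller observes the transition to zero.
                if (remaining.decrementAndGet() <= 0) {
                    done.complete(null);
                }
            };
            onTaskFinished.run();
        }

        done.get(); // returns once all tasks have reported in
    }
}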
From source file:org.apache.hadoop.mapred.LocalDistributedCacheManager.java
/**
 * Set up the distributed cache by localizing the resources, and updating
 * the configuration with references to the localized resources.
 * @param conf
 * @throws IOException
 */
public void setup(JobConf conf) throws IOException {
    File workDir = new File(System.getProperty("user.dir"));

    // Generate YARN local resources objects corresponding to the distributed
    // cache configuration
    Map<String, LocalResource> localResources = new LinkedHashMap<String, LocalResource>();
    MRApps.setupDistributedCache(conf, localResources);

    // Generating unique numbers for FSDownload.
    AtomicLong uniqueNumberGenerator = new AtomicLong(System.currentTimeMillis());

    // Find which resources are to be put on the local classpath
    Map<String, Path> classpaths = new HashMap<String, Path>();
    Path[] archiveClassPaths = DistributedCache.getArchiveClassPaths(conf);
    if (archiveClassPaths != null) {
        for (Path p : archiveClassPaths) {
            classpaths.put(p.toUri().getPath().toString(), p);
        }
    }
    Path[] fileClassPaths = DistributedCache.getFileClassPaths(conf);
    if (fileClassPaths != null) {
        for (Path p : fileClassPaths) {
            classpaths.put(p.toUri().getPath().toString(), p);
        }
    }

    // Localize the resources
    LocalDirAllocator localDirAllocator = new LocalDirAllocator(MRConfig.LOCAL_DIR);
    FileContext localFSFileContext = FileContext.getLocalFSFileContext();
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

    ExecutorService exec = null;
    try {
        ThreadFactory tf = new ThreadFactoryBuilder()
                .setNameFormat("LocalDistributedCacheManager Downloader #%d").build();
        exec = Executors.newCachedThreadPool(tf);
        Path destPath = localDirAllocator.getLocalPathForWrite(".", conf);
        Map<LocalResource, Future<Path>> resourcesToPaths = Maps.newHashMap();
        for (LocalResource resource : localResources.values()) {
            Callable<Path> download = new FSDownload(localFSFileContext, ugi, conf,
                    new Path(destPath, Long.toString(uniqueNumberGenerator.incrementAndGet())), resource);
            Future<Path> future = exec.submit(download);
            resourcesToPaths.put(resource, future);
        }
        for (Entry<String, LocalResource> entry : localResources.entrySet()) {
            LocalResource resource = entry.getValue();
            Path path;
            try {
                path = resourcesToPaths.get(resource).get();
            } catch (InterruptedException e) {
                throw new IOException(e);
            } catch (ExecutionException e) {
                throw new IOException(e);
            }
            String pathString = path.toUri().toString();
            String link = entry.getKey();
            String target = new File(path.toUri()).getPath();
            symlink(workDir, target, link);

            if (resource.getType() == LocalResourceType.ARCHIVE) {
                localArchives.add(pathString);
            } else if (resource.getType() == LocalResourceType.FILE) {
                localFiles.add(pathString);
            } else if (resource.getType() == LocalResourceType.PATTERN) {
                // PATTERN is not currently used in local mode
                throw new IllegalArgumentException(
                        "Resource type PATTERN is not " + "implemented yet. " + resource.getResource());
            }
            Path resourcePath;
            try {
                resourcePath = resource.getResource().toPath();
            } catch (URISyntaxException e) {
                throw new IOException(e);
            }
            LOG.info(String.format("Localized %s as %s", resourcePath, path));
            String cp = resourcePath.toUri().getPath();
            if (classpaths.keySet().contains(cp)) {
                localClasspaths.add(path.toUri().getPath().toString());
            }
        }
    } finally {
        if (exec != null) {
            exec.shutdown();
        }
    }

    // Update the configuration object with localized data.
    if (!localArchives.isEmpty()) {
        conf.set(MRJobConfig.CACHE_LOCALARCHIVES,
                StringUtils.arrayToString(localArchives.toArray(new String[localArchives.size()])));
    }
    if (!localFiles.isEmpty()) {
        // Size the target array from localFiles itself.
        conf.set(MRJobConfig.CACHE_LOCALFILES,
                StringUtils.arrayToString(localFiles.toArray(new String[localFiles.size()])));
    }
    setupCalled = true;
}
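Seeding the AtomicLong with System.currentTimeMillis() gives each incrementAndGet() call a number that is unique within the process and unlikely to collide with names left over from earlier runs. A minimal sketch of that naming idiom, with hypothetical names:

import java.util.concurrent.atomic.AtomicLong;

public class UniqueNameGenerator {
    // Seeding with the current time makes collisions with prior runs unlikely;
    // incrementAndGet() guarantees uniqueness within this process.
    private final AtomicLong counter = new AtomicLong(System.currentTimeMillis());

    String nextWorkDirName() {
        return "download-" + counter.incrementAndGet();
    }

    public static void main(String[] args) {
        UniqueNameGenerator gen = new UniqueNameGenerator();
        System.out.println(gen.nextWorkDirName());
        System.out.println(gen.nextWorkDirName());
    }
}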
From source file:com.baidu.fsg.uid.utils.NamingThreadFactory.java
/**
 * Get sequence for different naming prefix
 *
 * @param invoker
 * @return
 */
private long getSequence(String invoker) {
    AtomicLong r = this.sequences.get(invoker);
    if (r == null) {
        r = new AtomicLong(0);
        AtomicLong previous = this.sequences.putIfAbsent(invoker, r);
        if (previous != null) {
            r = previous;
        }
    }
    return r.incrementAndGet();
}
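This is the classic race-safe lazy initialization of a per-key counter: if another thread wins the putIfAbsent, its AtomicLong is used instead of the freshly created one. On Java 8+ the same pattern collapses into computeIfAbsent. A minimal sketch, with hypothetical names:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

public class PerKeySequences {
    private final ConcurrentMap<String, AtomicLong> sequences = new ConcurrentHashMap<>();

    // computeIfAbsent performs the create-or-reuse dance atomically.
    long next(String key) {
        return sequences.computeIfAbsent(key, k -> new AtomicLong(0)).incrementAndGet();
    }

    public static void main(String[] args) {
        PerKeySequences seq = new PerKeySequences();
        System.out.println(seq.next("worker")); // 1
        System.out.println(seq.next("worker")); // 2
        System.out.println(seq.next("poller")); // 1
    }
}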
From source file:edu.mayo.cts2.framework.webapp.rest.controller.MethodTimingAspect.java
/**
 * Execute.
 *
 * @param pjp the pjp
 * @return the object
 * @throws Throwable the throwable
 */
@Around("execution(public *"
        + " edu.mayo.cts2.framework.webapp.rest.controller.*.*(..,edu.mayo.cts2.framework.webapp.rest.command.QueryControl,..))")
public Object execute(final ProceedingJoinPoint pjp) throws Throwable {

    QueryControl queryControl = null;

    // this should never happen
    if (ArrayUtils.isEmpty(pjp.getArgs())) {
        throw new IllegalStateException("Pointcut failure!");
    }

    for (Object arg : pjp.getArgs()) {
        if (arg.getClass() == QueryControl.class) {
            queryControl = (QueryControl) arg;
            break;
        }
    }

    // this also should never happen
    if (queryControl == null) {
        throw new IllegalStateException("Pointcut failure!");
    }

    final AtomicLong threadId = new AtomicLong(-1);

    Future<Object> future = this.executorService.submit(new Callable<Object>() {
        @Override
        public Object call() {
            try {
                threadId.set(Thread.currentThread().getId());
                /*
                 * The model here is that we clear any previous timeout before we launch the job. A design flaw is that we
                 * can't tell if we are clearing a previous timeout that simply hadn't been cleaned up yet, or if we are
                 * clearing a timeout meant for this thread that happened before this thread even launched. The second scenario
                 * seems unlikely as the minimum timeout is 1 second - hard to believe it would take more than 1 second to
                 * launch this thread. Plus, this thread would have to launch in the exact window in between the timeout and
                 * the future.cancel()
                 *
                 * If the above scenario did defy all odds and happen, it shouldn't cause much harm, as the end result would
                 * be that this thread wouldn't see the cancelled flag - and would churn away for no reason, wasting some cpu
                 * cycles, but doing no other harm.
                 */
                Timeout.clearThreadFlag(threadId.get());

                return pjp.proceed();
            } catch (Throwable e) {
                if (e instanceof Error) {
                    throw (Error) e;
                }
                if (e instanceof RuntimeException) {
                    throw (RuntimeException) e;
                }
                throw new RuntimeException(e);
            }
        }
    });

    long time = queryControl.getTimelimit();

    try {
        if (time < 0) {
            return future.get();
        } else {
            return future.get(time, TimeUnit.SECONDS);
        }
    } catch (ExecutionException e) {
        throw e.getCause();
    } catch (TimeoutException e) {
        try {
            // Set the flag for the processing thread to read
            Timeout.setTimeLimitExceeded(threadId.get());

            // Schedule another future to make sure we don't cause a memory leak if the thread IDs aren't being
            // reused (though, they should be) and therefore don't get cleared up by the next run. Give the running
            // thread 30 seconds to see the cancelled flag before this cleanup takes place.
            this.scheduledExecutorService.schedule(new Runnable() {
                @Override
                public void run() {
                    Timeout.clearThreadFlag(threadId.get());
                }
            }, 30, TimeUnit.SECONDS);

            // Interrupt the processing thread so it has an opportunity to check the flag and stop.
            future.cancel(true);
        } catch (Exception e1) {
            // don't think this is possible, but just in case...
        }
        throw ExceptionFactory.createTimeoutException(e.getMessage());
    }
}
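Here new AtomicLong(-1) serves two purposes: -1 is a sentinel for "the worker has not started yet", and the effectively-final AtomicLong lets the anonymous Callable publish its thread id back to the submitting method. A minimal sketch of that handoff, with hypothetical names:

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicLong;

public class ThreadIdHandoff {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newSingleThreadExecutor();

        // -1 means "worker not started yet"; the holder is effectively final,
        // so the lambda can write to it while the caller reads it.
        final AtomicLong workerThreadId = new AtomicLong(-1);

        Future<String> f = pool.submit((Callable<String>) () -> {
            workerThreadId.set(Thread.currentThread().getId());
            return "done";
        });

        f.get();
        System.out.println("worker ran on thread " + workerThreadId.get());
        pool.shutdown();
    }
}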
From source file:com.amazonaws.services.iot.client.shadow.AbstractAwsIotDevice.java
protected AbstractAwsIotDevice(String thingName) {
    this.thingName = thingName;

    reportedProperties = getDeviceProperties(true, false);
    updatableProperties = getDeviceProperties(false, true);

    commandManager = new AwsIotDeviceCommandManager(this);

    deviceSubscriptions = new ConcurrentHashMap<>();
    for (String topic : getDeviceTopics()) {
        deviceSubscriptions.put(topic, false);
    }

    jsonObjectMapper = new ObjectMapper();
    SimpleModule module = new SimpleModule();
    module.addSerializer(AbstractAwsIotDevice.class, new AwsIotJsonSerializer());
    jsonObjectMapper.registerModule(module);

    localVersion = new AtomicLong(-1);
}
From source file:com.norconex.committer.core.AbstractCommitter.java
private synchronized void ensureInitialDocCount() {
    if (docCount == null) {
        docCount = new AtomicLong(getInitialQueueDocCount());
    }
}
From source file:com.indeed.lsmtree.recordcache.PersistentRecordCache.java
/**
 * Use {@link com.indeed.lsmtree.recordcache.PersistentRecordCache.Builder#build()} to create instances.
 *
 * @param index lsm tree
 * @param recordLogDirectory record log directory
 * @param checkpointDir checkpoint directory
 * @throws IOException thrown if an I/O error occurs
 */
private PersistentRecordCache(final Store<K, Long> index,
        final RecordLogDirectory<Operation> recordLogDirectory, final File checkpointDir) throws IOException {
    this.index = index;
    this.comparator = index.getComparator();
    this.recordLogDirectory = recordLogDirectory;

    indexUpdateFunctions = new RecordLogDirectoryPoller.Functions() {

        AtomicLong indexPutTime = new AtomicLong(0);
        AtomicLong indexDeleteTime = new AtomicLong(0);
        AtomicInteger indexPuts = new AtomicInteger(0);
        AtomicInteger indexDeletes = new AtomicInteger(0);
        AtomicInteger count = new AtomicInteger(0);

        @Override
        public void process(final long position, Operation op) throws IOException {
            count.incrementAndGet();
            if (count.get() % 1000 == 0) {
                final int puts = indexPuts.get();
                if (puts > 0)
                    log.debug("avg index put time: " + indexPutTime.get() / puts / 1000d + " us");
                final int deletes = indexDeletes.get();
                if (deletes > 0)
                    log.debug("avg index delete time: " + indexDeleteTime.get() / deletes / 1000d + " us");
            }
            if (op.getClass() == Put.class) {
                final Put<K, V> put = (Put) op;
                final long start = System.nanoTime();
                synchronized (index) {
                    index.put(put.getKey(), position);
                }
                indexPutTime.addAndGet(System.nanoTime() - start);
                indexPuts.incrementAndGet();
            } else if (op.getClass() == Delete.class) {
                final Delete<K> delete = (Delete) op;
                for (K k : delete.getKeys()) {
                    final long start = System.nanoTime();
                    synchronized (index) {
                        index.delete(k);
                    }
                    indexDeleteTime.addAndGet(System.nanoTime() - start);
                    indexDeletes.incrementAndGet();
                }
            } else if (op.getClass() == Checkpoint.class) {
                final Checkpoint checkpoint = (Checkpoint) op;
                if (checkpointDir != null) {
                    sync();
                    index.checkpoint(new File(checkpointDir, String.valueOf(checkpoint.getTimestamp())));
                }
            } else {
                log.warn("operation class unknown");
            }
        }

        @Override
        public void sync() throws IOException {
            final long start = System.nanoTime();
            index.sync();
            log.debug("sync time: " + (System.nanoTime() - start) / 1000d + " us");
        }
    };
}
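AtomicLong(0) is used above as an accumulator: each operation adds its elapsed nanoseconds with addAndGet, and a companion AtomicInteger counts operations so an average can be computed on demand. A minimal sketch of that timing pattern, with hypothetical names:

import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;

public class LatencyAccumulator {
    private final AtomicLong totalNanos = new AtomicLong(0);
    private final AtomicInteger samples = new AtomicInteger(0);

    // Record one timed operation; safe to call from many threads.
    void record(long elapsedNanos) {
        totalNanos.addAndGet(elapsedNanos);
        samples.incrementAndGet();
    }

    // Average in microseconds, matching the log format above.
    double averageMicros() {
        int n = samples.get();
        return n == 0 ? 0d : totalNanos.get() / (double) n / 1000d;
    }

    public static void main(String[] args) {
        LatencyAccumulator acc = new LatencyAccumulator();
        long start = System.nanoTime();
        Math.sqrt(42); // stand-in for real work
        acc.record(System.nanoTime() - start);
        System.out.println("avg: " + acc.averageMicros() + " us");
    }
}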
From source file:org.apache.nutch.host.HostDb.java
public HostDb(Configuration conf) throws GoraException {
    try {
        hostStore = StorageUtils.createWebStore(conf, String.class, Host.class);
    } catch (ClassNotFoundException e) {
        throw new RuntimeException(e);
    }

    // Create a cache.
    // We add a removal listener to see if we need to flush the store,
    // in order to adhere to the put-flush-get semantic
    // ("read your own write") of DataStore.
    long lruSize = conf.getLong(HOSTDB_LRU_SIZE, DEFAULT_LRU_SIZE);
    int concurrencyLevel = conf.getInt(HOSTDB_CONCURRENCY_LEVEL, DEFAULT_HOSTDB_CONCURRENCY_LEVEL);
    RemovalListener<String, CacheHost> listener = new RemovalListener<String, CacheHost>() {
        @Override
        public void onRemoval(RemovalNotification<String, CacheHost> notification) {
            CacheHost removeFromCacheHost = notification.getValue();
            if (removeFromCacheHost != NULL_HOST) {
                if (removeFromCacheHost.timestamp < lastFlush.get()) {
                    try {
                        hostStore.flush();
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                    lastFlush.set(System.currentTimeMillis());
                }
            }
        }
    };

    cache = CacheBuilder.newBuilder().maximumSize(lruSize).removalListener(listener)
            .concurrencyLevel(concurrencyLevel).build();
    lastFlush = new AtomicLong(System.currentTimeMillis());
}
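The AtomicLong here holds a wall-clock timestamp of the last flush; the removal listener compares an entry's timestamp against it to decide whether a flush is still needed. A minimal sketch of that timestamp-guard idiom, with hypothetical names (note that, as in the original, the check and the flush are not a single atomic step):

import java.util.concurrent.atomic.AtomicLong;

public class FlushGuard {
    private final AtomicLong lastFlush = new AtomicLong(System.currentTimeMillis());

    // Flush only if the entry predates the last flush; otherwise its
    // write has already been persisted.
    void maybeFlush(long entryTimestamp) {
        if (entryTimestamp < lastFlush.get()) {
            flushStore();
            lastFlush.set(System.currentTimeMillis());
        }
    }

    private void flushStore() {
        System.out.println("flushing backing store");
    }

    public static void main(String[] args) {
        FlushGuard guard = new FlushGuard();
        guard.maybeFlush(System.currentTimeMillis() - 10_000); // stale entry -> flush
        guard.maybeFlush(System.currentTimeMillis() + 10_000); // fresh entry -> no flush
    }
}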
From source file:com.github.naoghuman.testdata.abclist.service.ExerciseTermService.java
@Override
protected Task<Void> createTask() {
    return new Task<Void>() {
        {
            updateProgress(0, saveMaxEntities);
        }

        @Override
        protected Void call() throws Exception {
            LoggerFacade.getDefault().deactivate(Boolean.TRUE);

            final StopWatch stopWatch = new StopWatch();
            stopWatch.start();

            final ObservableList<Topic> topics = SqlProvider.getDefault().findAllTopics();
            final ObservableList<Term> terms = SqlProvider.getDefault().findAllTerms();
            final int sizeTerms = terms.size();
            final AtomicInteger index = new AtomicInteger(0);

            final CrudService crudService = DatabaseFacade.getDefault().getCrudService(entityName);
            final AtomicLong id = new AtomicLong(
                    -1_000_000_000L + DatabaseFacade.getDefault().getCrudService().count(entityName));
            topics.stream().forEach(topic -> {
                final ObservableList<Exercise> exercises = SqlProvider.getDefault()
                        .findAllExercisesWithTopicId(topic.getId());
                exercises.stream().filter(exercise -> exercise.isReady()).forEach(exercise -> {
                    final int maxExerciseTerms = TestdataGenerator.RANDOM.nextInt(70) + 10;
                    for (int i = 0; i < maxExerciseTerms; i++) {
                        final Term term = terms.get(TestdataGenerator.RANDOM.nextInt(sizeTerms));
                        final ExerciseTerm exerciseTerm = ModelProvider.getDefault().getExerciseTerm();
                        exerciseTerm.setExerciseId(exercise.getId());
                        exerciseTerm.setId(id.getAndIncrement());
                        exerciseTerm.setTermId(term.getId());

                        crudService.create(exerciseTerm);
                    }
                });

                updateProgress(index.getAndIncrement(), saveMaxEntities);
            });

            LoggerFacade.getDefault().deactivate(Boolean.FALSE);
            stopWatch.split();
            LoggerFacade.getDefault().debug(this.getClass(),
                    " + " + stopWatch.toSplitString() + " for " + saveMaxEntities + " ExerciseTerms."); // NOI18N
            stopWatch.stop();

            return null;
        }
    };
}