List of usage examples for the java.util.concurrent.atomic.AtomicLong constructor
public AtomicLong(long initialValue)
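Before the project examples, a minimal, self-contained illustration of the constructor itself (not taken from any of the projects below):

    import java.util.concurrent.atomic.AtomicLong;

    public class AtomicLongConstructorDemo {
        public static void main(String[] args) {
            // Seed the counter with an initial value.
            AtomicLong counter = new AtomicLong(42L);

            System.out.println(counter.get());              // 42
            System.out.println(counter.incrementAndGet());  // 43
            System.out.println(counter.addAndGet(7L));      // 50
            System.out.println(counter.getAndSet(0L));      // 50; counter is now 0
        }
    }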
From source file:org.apache.pulsar.client.impl.BinaryProtoLookupService.java
    @Override
    public CompletableFuture<List<String>> getTopicsUnderNamespace(NamespaceName namespace, Mode mode) {
        CompletableFuture<List<String>> topicsFuture = new CompletableFuture<List<String>>();

        AtomicLong opTimeoutMs = new AtomicLong(client.getConfiguration().getOperationTimeoutMs());
        Backoff backoff = new Backoff(100, TimeUnit.MILLISECONDS, opTimeoutMs.get() * 2, TimeUnit.MILLISECONDS,
                0, TimeUnit.MILLISECONDS);
        getTopicsUnderNamespace(serviceNameResolver.resolveHost(), namespace, backoff, opTimeoutMs,
                topicsFuture, mode);
        return topicsFuture;
    }
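The Pulsar example passes the remaining operation timeout around as an AtomicLong so that recursive retry calls can decrement a single shared budget. A stripped-down sketch of that idea, with hypothetical names rather than the Pulsar API:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.atomic.AtomicLong;

    public class RetryWithSharedTimeout {

        static CompletableFuture<String> lookupWithRetry(AtomicLong remainingMs) {
            CompletableFuture<String> result = new CompletableFuture<>();
            attempt(remainingMs, result);
            return result;
        }

        private static void attempt(AtomicLong remainingMs, CompletableFuture<String> result) {
            long backoffMs = 100; // fixed backoff for the sketch
            doLookup().whenComplete((value, error) -> {
                if (error == null) {
                    result.complete(value);
                } else if (remainingMs.addAndGet(-backoffMs) <= 0) {
                    result.completeExceptionally(error); // shared budget exhausted
                } else {
                    attempt(remainingMs, result); // retry against the same budget
                }
            });
        }

        private static CompletableFuture<String> doLookup() {
            return CompletableFuture.completedFuture("topic-list"); // stand-in for a real lookup
        }

        public static void main(String[] args) {
            System.out.println(lookupWithRetry(new AtomicLong(30_000)).join());
        }
    }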
From source file:org.apache.accumulo.core.file.blockfile.cache.LruBlockCache.java
    /**
     * Configurable constructor. Use this constructor if not using defaults.
     *
     * @param maxSize maximum size of this cache, in bytes
     * @param blockSize expected average size of blocks, in bytes
     * @param evictionThread whether to run evictions in a bg thread or not
     * @param mapInitialSize initial size of backing ConcurrentHashMap
     * @param mapLoadFactor initial load factor of backing ConcurrentHashMap
     * @param mapConcurrencyLevel initial concurrency factor for backing CHM
     * @param minFactor percentage of total size that eviction will evict until
     * @param acceptableFactor percentage of total size that triggers eviction
     * @param singleFactor percentage of total size for single-access blocks
     * @param multiFactor percentage of total size for multiple-access blocks
     * @param memoryFactor percentage of total size for in-memory blocks
     */
    public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, int mapInitialSize,
            float mapLoadFactor, int mapConcurrencyLevel, float minFactor, float acceptableFactor,
            float singleFactor, float multiFactor, float memoryFactor) {
        if (singleFactor + multiFactor + memoryFactor != 1) {
            throw new IllegalArgumentException("Single, multi, and memory factors should total 1.0");
        }
        if (minFactor >= acceptableFactor) {
            throw new IllegalArgumentException("minFactor must be smaller than acceptableFactor");
        }
        if (minFactor >= 1.0f || acceptableFactor >= 1.0f) {
            throw new IllegalArgumentException("all factors must be < 1");
        }
        this.maxSize = maxSize;
        this.blockSize = blockSize;
        map = new ConcurrentHashMap<String, CachedBlock>(mapInitialSize, mapLoadFactor, mapConcurrencyLevel);
        this.minFactor = minFactor;
        this.acceptableFactor = acceptableFactor;
        this.singleFactor = singleFactor;
        this.multiFactor = multiFactor;
        this.memoryFactor = memoryFactor;
        this.stats = new CacheStats();
        this.count = new AtomicLong(0);
        this.elements = new AtomicLong(0);
        this.overhead = calculateOverhead(maxSize, blockSize, mapConcurrencyLevel);
        this.size = new AtomicLong(this.overhead);
        if (evictionThread) {
            this.evictionThread = new EvictionThread(this);
            this.evictionThread.start();
            while (!this.evictionThread.running()) {
                try {
                    Thread.sleep(10);
                } catch (InterruptedException ex) {
                    throw new RuntimeException(ex);
                }
            }
        } else {
            this.evictionThread = null;
        }
        this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this), statThreadPeriod,
                statThreadPeriod, TimeUnit.SECONDS);
    }
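In the Accumulo cache above, `count`, `elements`, and `size` are AtomicLong fields so the bookkeeping stays consistent under concurrent inserts and evictions. A minimal sketch of the same accounting idea, using a made-up block type rather than Accumulo's CachedBlock:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.AtomicLong;

    class TinyBlockCache {
        private final ConcurrentHashMap<String, byte[]> map = new ConcurrentHashMap<>();
        private final AtomicLong size = new AtomicLong(0);      // total bytes cached
        private final AtomicLong elements = new AtomicLong(0);  // number of blocks

        void cacheBlock(String name, byte[] data) {
            if (map.putIfAbsent(name, data) == null) {
                size.addAndGet(data.length);
                elements.incrementAndGet();
            }
        }

        void evictBlock(String name) {
            byte[] removed = map.remove(name);
            if (removed != null) {
                size.addAndGet(-removed.length);
                elements.decrementAndGet();
            }
        }

        long sizeInBytes() {
            return size.get();
        }
    }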
From source file:com.ebay.pulsar.analytics.metricstore.druid.postaggregator.PostAggregatorTest.java
    public void testConstantPostAggregator() {
        String postAggrName = "ConstantPostAggrTest";

        Long valueLong = 1001L;
        ConstantPostAggregator constantPostAggr = new ConstantPostAggregator(postAggrName, valueLong);
        byte[] cacheKeyLong = constantPostAggr.cacheKey();
        String hashCacheKeyExpectedL = "c14b848f4656cea09df71e42c7c989132ba8aa6f";
        String hashCacheKeyGotL = DigestUtils.shaHex(cacheKeyLong);
        assertEquals("Hash of cacheKey NOT Equals", hashCacheKeyExpectedL, hashCacheKeyGotL);

        Integer valueInt = 1002;
        constantPostAggr = new ConstantPostAggregator(postAggrName, valueInt);
        byte[] cacheKeyInt = constantPostAggr.cacheKey();
        String hashCacheKeyExpectedI = "580228bb63a75f7b07c84dcf2099b0cdaec085ce";
        String hashCacheKeyGotI = DigestUtils.shaHex(cacheKeyInt);
        assertEquals("Hash of cacheKey NOT Equals", hashCacheKeyExpectedI, hashCacheKeyGotI);
        Integer valueGot = (Integer) constantPostAggr.getValue();
        assertEquals("Values NOT Equals", valueInt, valueGot);

        Short valueShort = 1003;
        constantPostAggr = new ConstantPostAggregator(postAggrName, valueShort);
        byte[] cacheKeyShort = constantPostAggr.cacheKey();
        String hashCacheKeyExpectedS = "fd73cdc5e039a24a5441b19d158edeb015f10f0c";
        String hashCacheKeyGotS = DigestUtils.shaHex(cacheKeyShort);
        assertEquals("Hash of cacheKey NOT Equals", hashCacheKeyExpectedS, hashCacheKeyGotS);

        Byte valueByte = 'a';
        constantPostAggr = new ConstantPostAggregator(postAggrName, valueByte);
        byte[] cacheKeyByte = constantPostAggr.cacheKey();
        String hashCacheKeyExpectedB = "2c0c2af089b139171def55e59d680259fd259f55";
        String hashCacheKeyGotB = DigestUtils.shaHex(cacheKeyByte);
        assertEquals("Hash of cacheKey NOT Equals", hashCacheKeyExpectedB, hashCacheKeyGotB);

        Double valueDouble = (double) 1004;
        constantPostAggr = new ConstantPostAggregator(postAggrName, valueDouble);
        byte[] cacheKeyDouble = constantPostAggr.cacheKey();
        String hashCacheKeyExpectedD = "5e1cc5c3150172c4aa8866a6a82b1c4f6f6c200d";
        String hashCacheKeyGotD = DigestUtils.shaHex(cacheKeyDouble);
        assertEquals("Hash of cacheKey NOT Equals", hashCacheKeyExpectedD, hashCacheKeyGotD);

        Float valueFloat = (float) 1005;
        constantPostAggr = new ConstantPostAggregator(postAggrName, valueFloat);
        byte[] cacheKeyFloat = constantPostAggr.cacheKey();
        String hashCacheKeyExpectedF = "8c45dc9f4dc79a2aadedf5a48482ae30a6146b02";
        String hashCacheKeyGotF = DigestUtils.shaHex(cacheKeyFloat);
        assertEquals("Hash of cacheKey NOT Equals", hashCacheKeyExpectedF, hashCacheKeyGotF);

        BigDecimal valueBigDec = new BigDecimal(1006);
        constantPostAggr = new ConstantPostAggregator(postAggrName, valueBigDec);
        byte[] cacheKeyBigDec = constantPostAggr.cacheKey();
        String hashCacheKeyExpectedBD = "067646bcb514d58a7f9649e2b3101ebd093f8726";
        String hashCacheKeyGotBD = DigestUtils.shaHex(cacheKeyBigDec);
        assertEquals("Hash of cacheKey NOT Equals", hashCacheKeyExpectedBD, hashCacheKeyGotBD);

        BigInteger valueBigInt = new BigInteger("1007");
        constantPostAggr = new ConstantPostAggregator(postAggrName, valueBigInt);
        byte[] cacheKeyBigInt = constantPostAggr.cacheKey();
        String hashCacheKeyExpectedBI = "b278c39b38033eb31d5cddc033cc74f7872e968b";
        String hashCacheKeyGotBI = DigestUtils.shaHex(cacheKeyBigInt);
        assertEquals("Hash of cacheKey NOT Equals", hashCacheKeyExpectedBI, hashCacheKeyGotBI);

        AtomicInteger valueAtomInt = new AtomicInteger(1008);
        constantPostAggr = new ConstantPostAggregator(postAggrName, valueAtomInt);
        byte[] cacheKeyAtomInt = constantPostAggr.cacheKey();
        String hashCacheKeyExpectedAI = "45f2a9cf39e5c2ce13180b800b26d5fe09d4553e";
        String hashCacheKeyGotAI = DigestUtils.shaHex(cacheKeyAtomInt);
        assertEquals("Hash of cacheKey NOT Equals", hashCacheKeyExpectedAI, hashCacheKeyGotAI);

        AtomicLong valueAtomLong = new AtomicLong(1009L);
        constantPostAggr = new ConstantPostAggregator(postAggrName, valueAtomLong);
        byte[] cacheKeyAtomLong = constantPostAggr.cacheKey();
        String hashCacheKeyExpectedAL = "071e76f10db52ee7f98552e53d974f1879499428";
        String hashCacheKeyGotAL = DigestUtils.shaHex(cacheKeyAtomLong);
        assertEquals("Hash of cacheKey NOT Equals", hashCacheKeyExpectedAL, hashCacheKeyGotAL);

        PostAggregatorType type = constantPostAggr.getType();
        assertEquals("Type NOT Equals", PostAggregatorType.constant, type);

        ConstantPostAggregator postAggr0 = new ConstantPostAggregator(postAggrName, 1001L);
        assertTrue(postAggr0.equals(postAggr0));
        ConstantPostAggregator postAggr = new ConstantPostAggregator(null, null);
        assertTrue(!postAggr.equals(postAggr0));
        assertTrue(!postAggr0.equals(postAggr));
        postAggr = new ConstantPostAggregator(postAggrName, null);
        assertTrue(!postAggr.equals(postAggr0));
        assertTrue(!postAggr0.equals(postAggr));
        postAggr = new ConstantPostAggregator(postAggrName, valueFloat);
        assertTrue(!postAggr.equals(postAggr0));
        assertTrue(!postAggr0.equals(postAggr));
        postAggr = new ConstantPostAggregator(postAggrName, 1001L);
        assertTrue(postAggr.equals(postAggr0));
        assertTrue(postAggr0.equals(postAggr));
        assertTrue(postAggr.hashCode() == postAggr0.hashCode());
        assertTrue(!postAggr.equals(new Object()));
    }
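The Druid test can pass an AtomicLong where it previously passed Long, Integer, or BigDecimal because AtomicLong extends java.lang.Number. A small standalone illustration of that property (names are hypothetical):

    import java.util.concurrent.atomic.AtomicLong;

    public class AtomicLongAsNumber {
        static String describe(Number n) {
            return n.getClass().getSimpleName() + " -> long value " + n.longValue();
        }

        public static void main(String[] args) {
            // AtomicLong flows through Number-typed parameters just like Long or Integer.
            System.out.println(describe(new AtomicLong(1009L))); // AtomicLong -> long value 1009
            System.out.println(describe(Long.valueOf(1001L)));   // Long -> long value 1001
        }
    }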
From source file:org.lendingclub.mercator.aws.ELBScanner.java
    protected void mapElbToInstance(JsonNode instances, String elbArn, String region) {
        AtomicLong oldestRelationshipTs = new AtomicLong(Long.MAX_VALUE);
        for (JsonNode i : instances) {
            String instanceName = i.path("instanceId").asText();
            String instanceArn = String.format("arn:aws:ec2:%s:%s:instance/%s", region, getAccountId(),
                    instanceName);
            // logger.info("{} instanceArn: {}", elbArn, instanceArn);
            String cypher = "match (x:AwsElb {aws_arn:{elbArn}}), (y:AwsEc2Instance {aws_arn:{instanceArn}}) "
                    + "merge (x)-[r:DISTRIBUTES_TRAFFIC_TO]->(y) set r.updateTs=timestamp() return x,r,y";

            getNeoRxClient().execCypher(cypher, "elbArn", elbArn, "instanceArn", instanceArn).forEach(r -> {
                oldestRelationshipTs
                        .set(Math.min(r.path("r").path("updateTs").asLong(), oldestRelationshipTs.get()));
            });

            if (oldestRelationshipTs.get() > 0 && oldestRelationshipTs.get() < Long.MAX_VALUE) {
                cypher = "match (x:AwsElb {aws_arn:{elbArn}})-[r:DISTRIBUTES_TRAFFIC_TO]-(y:AwsEc2Instance) "
                        + "where r.updateTs<{oldest} delete r";
                getNeoRxClient().execCypher(cypher, "elbArn", elbArn, "oldest", oldestRelationshipTs.get());
            }
        }
    }
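The Mercator scanner needs a mutable "minimum seen so far" inside a lambda, where a plain local long would have to be effectively final; the AtomicLong fills that role. The same idea in isolation, using accumulateAndGet as a slightly more compact alternative to the get/set pair above:

    import java.util.List;
    import java.util.concurrent.atomic.AtomicLong;

    public class MinInsideLambda {
        public static void main(String[] args) {
            List<Long> timestamps = List.of(1_700_000_300L, 1_700_000_100L, 1_700_000_200L);

            // Locals captured by a lambda must be effectively final,
            // so a mutable AtomicLong holder stands in for a plain long.
            AtomicLong oldest = new AtomicLong(Long.MAX_VALUE);
            timestamps.forEach(ts -> oldest.accumulateAndGet(ts, Math::min));

            System.out.println(oldest.get()); // 1700000100
        }
    }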
From source file:org.apache.hadoop.hbase.wal.TestWALSplit.java
    /**
     * Simulates splitting a WAL out from under a regionserver that is still trying to write it.
     * Ensures we do not lose edits.
     * @throws IOException
     * @throws InterruptedException
     */
    @Test(timeout = 300000)
    public void testLogCannotBeWrittenOnceParsed() throws IOException, InterruptedException {
        final AtomicLong counter = new AtomicLong(0);
        AtomicBoolean stop = new AtomicBoolean(false);
        // Region we'll write edits to and then later examine to make sure they all made it in.
        final String region = REGIONS.get(0);
        final int numWriters = 3;
        Thread zombie = new ZombieLastLogWriterRegionServer(counter, stop, region, numWriters);
        try {
            long startCount = counter.get();
            zombie.start();
            // Wait till writer starts going.
            while (startCount == counter.get())
                Threads.sleep(1);
            // Give it a second to write a few appends.
            Threads.sleep(1000);
            final Configuration conf2 = HBaseConfiguration.create(this.conf);
            final User robber = User.createUserForTesting(conf2, ROBBER, GROUP);
            int count = robber.runAs(new PrivilegedExceptionAction<Integer>() {
                @Override
                public Integer run() throws Exception {
                    StringBuilder ls = new StringBuilder("Contents of WALDIR (").append(WALDIR).append("):\n");
                    for (FileStatus status : fs.listStatus(WALDIR)) {
                        ls.append("\t").append(status.toString()).append("\n");
                    }
                    LOG.debug(ls);
                    LOG.info("Splitting WALs out from under zombie. Expecting " + numWriters + " files.");
                    WALSplitter.split(HBASEDIR, WALDIR, OLDLOGDIR, fs, conf2, wals);
                    LOG.info("Finished splitting out from under zombie.");
                    Path[] logfiles = getLogForRegion(HBASEDIR, TABLE_NAME, region);
                    assertEquals("wrong number of split files for region", numWriters, logfiles.length);
                    int count = 0;
                    for (Path logfile : logfiles) {
                        count += countWAL(logfile);
                    }
                    return count;
                }
            });
            LOG.info("zombie=" + counter.get() + ", robber=" + count);
            assertTrue(
                    "The log file could have at most 1 extra log entry, but can't have less. "
                            + "Zombie could write " + counter.get() + " and logfile had only " + count,
                    counter.get() == count || counter.get() + 1 == count);
        } finally {
            stop.set(true);
            zombie.interrupt();
            Threads.threadDumpingIsAlive(zombie);
        }
    }
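In the HBase test, the AtomicLong is a shared edit counter: the "zombie" writer thread increments it while the test thread polls it for progress. A minimal sketch of that producer/observer pattern, with hypothetical names:

    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.concurrent.atomic.AtomicLong;

    public class SharedCounterDemo {
        public static void main(String[] args) throws InterruptedException {
            AtomicLong written = new AtomicLong(0);
            AtomicBoolean stop = new AtomicBoolean(false);

            Thread writer = new Thread(() -> {
                while (!stop.get()) {
                    written.incrementAndGet(); // simulate appending one edit
                }
            });
            writer.start();

            // Wait until the writer has made visible progress, then stop it.
            while (written.get() == 0) {
                Thread.sleep(1);
            }
            stop.set(true);
            writer.join();

            System.out.println("edits written: " + written.get());
        }
    }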
From source file:io.druid.server.namespace.cache.NamespaceExtractionCacheManagerExecutorsTest.java
    public void testDelete(final String ns) throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(5);
        final CountDownLatch latchMore = new CountDownLatch(10);
        final AtomicLong runs = new AtomicLong(0);
        long prior = 0;
        final URIExtractionNamespace namespace = new URIExtractionNamespace(ns, tmpFile.toURI(),
                new URIExtractionNamespace.ObjectMapperFlatDataParser(
                        URIExtractionNamespaceTest.registerTypes(new ObjectMapper())),
                new Period(1l), null);
        final String cacheId = UUID.randomUUID().toString();
        final CountDownLatch latchBeforeMore = new CountDownLatch(1);
        ListenableFuture<?> future = manager.schedule(namespace, factory, new Runnable() {
            @Override
            public void run() {
                try {
                    if (!Thread.interrupted()) {
                        manager.getPostRunnable(namespace, factory, cacheId).run();
                    } else {
                        Thread.currentThread().interrupt();
                    }
                    if (!Thread.interrupted()) {
                        runs.incrementAndGet();
                    } else {
                        Thread.currentThread().interrupt();
                    }
                } finally {
                    latch.countDown();
                    try {
                        if (latch.getCount() == 0) {
                            latchBeforeMore.await();
                        }
                    } catch (InterruptedException e) {
                        log.debug("Interrupted");
                        Thread.currentThread().interrupt();
                    } finally {
                        latchMore.countDown();
                    }
                }
            }
        }, cacheId);
        latch.await();
        prior = runs.get();
        latchBeforeMore.countDown();
        Assert.assertFalse(future.isCancelled());
        Assert.assertFalse(future.isDone());
        Assert.assertTrue(fnCache.containsKey(ns));
        latchMore.await();
        Assert.assertTrue(runs.get() > prior);
        Assert.assertTrue(manager.implData.containsKey(ns));
        manager.delete("ns");
        Assert.assertFalse(manager.implData.containsKey(ns));
        Assert.assertFalse(fnCache.containsKey(ns));
        Assert.assertTrue(future.isCancelled());
        Assert.assertTrue(future.isDone());
        prior = runs.get();
        Thread.sleep(20);
        Assert.assertEquals(prior, runs.get());
    }
From source file:org.apache.nifi.processors.standard.ExecuteSQL.java
    @Override
    public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
        FlowFile fileToProcess = null;
        if (context.hasIncomingConnection()) {
            fileToProcess = session.get();
            // If we have no FlowFile, and all incoming connections are self-loops then we can continue on.
            // However, if we have no FlowFile and we have connections coming from other Processors, then
            // we know that we should run only if we have a FlowFile.
            if (fileToProcess == null && context.hasNonLoopConnection()) {
                return;
            }
        }
        final ComponentLog logger = getLogger();
        final DBCPService dbcpService = context.getProperty(DBCP_SERVICE).asControllerService(DBCPService.class);
        final Integer queryTimeout = context.getProperty(QUERY_TIMEOUT).asTimePeriod(TimeUnit.SECONDS).intValue();
        final boolean convertNamesForAvro = context.getProperty(NORMALIZE_NAMES_FOR_AVRO).asBoolean();
        final StopWatch stopWatch = new StopWatch(true);
        final String selectQuery;
        if (context.getProperty(SQL_SELECT_QUERY).isSet()) {
            selectQuery = context.getProperty(SQL_SELECT_QUERY).evaluateAttributeExpressions(fileToProcess)
                    .getValue();
        } else {
            // If the query is not set, then an incoming flow file is required, and expected to contain a valid SQL select query.
            // If there is no incoming connection, onTrigger will not be called as the processor will fail when scheduled.
            final StringBuilder queryContents = new StringBuilder();
            session.read(fileToProcess, new InputStreamCallback() {
                @Override
                public void process(InputStream in) throws IOException {
                    queryContents.append(IOUtils.toString(in));
                }
            });
            selectQuery = queryContents.toString();
        }
        try (final Connection con = dbcpService.getConnection(); final Statement st = con.createStatement()) {
            st.setQueryTimeout(queryTimeout); // timeout in seconds
            final AtomicLong nrOfRows = new AtomicLong(0L);
            if (fileToProcess == null) {
                fileToProcess = session.create();
            }
            fileToProcess = session.write(fileToProcess, new OutputStreamCallback() {
                @Override
                public void process(final OutputStream out) throws IOException {
                    try {
                        logger.debug("Executing query {}", new Object[] { selectQuery });
                        final ResultSet resultSet = st.executeQuery(selectQuery);
                        nrOfRows.set(JdbcCommon.convertToAvroStream(resultSet, out, convertNamesForAvro));
                    } catch (final SQLException e) {
                        throw new ProcessException(e);
                    }
                }
            });
            // set attribute how many rows were selected
            fileToProcess = session.putAttribute(fileToProcess, RESULT_ROW_COUNT, String.valueOf(nrOfRows.get()));
            logger.info("{} contains {} Avro records; transferring to 'success'",
                    new Object[] { fileToProcess, nrOfRows.get() });
            session.getProvenanceReporter().modifyContent(fileToProcess, "Retrieved " + nrOfRows.get() + " rows",
                    stopWatch.getElapsed(TimeUnit.MILLISECONDS));
            session.transfer(fileToProcess, REL_SUCCESS);
        } catch (final ProcessException | SQLException e) {
            if (fileToProcess == null) {
                // This can happen if any exceptions occur while setting up the connection, statement, etc.
                logger.error("Unable to execute SQL select query {} due to {}. No FlowFile to route to failure",
                        new Object[] { selectQuery, e });
                context.yield();
            } else {
                if (context.hasIncomingConnection()) {
                    logger.error("Unable to execute SQL select query {} for {} due to {}; routing to failure",
                            new Object[] { selectQuery, fileToProcess, e });
                    fileToProcess = session.penalize(fileToProcess);
                } else {
                    logger.error("Unable to execute SQL select query {} due to {}; routing to failure",
                            new Object[] { selectQuery, e });
                    context.yield();
                }
                session.transfer(fileToProcess, REL_FAILURE);
            }
        }
    }
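ExecuteSQL uses `nrOfRows` to carry a row count out of the OutputStreamCallback, because the anonymous class cannot assign to a local variable of the enclosing method. A compact sketch of that "result holder" pattern, with made-up names:

    import java.util.concurrent.atomic.AtomicLong;
    import java.util.function.Consumer;

    public class CallbackResultHolder {
        // A stand-in for an API that only lets you react through a callback.
        static void processRows(Consumer<Long> perRowCallback) {
            for (long row = 1; row <= 5; row++) {
                perRowCallback.accept(row);
            }
        }

        public static void main(String[] args) {
            final AtomicLong nrOfRows = new AtomicLong(0L);

            // The callback cannot write to a plain local long, so it updates the holder instead.
            processRows(row -> nrOfRows.incrementAndGet());

            System.out.println("rows processed: " + nrOfRows.get()); // 5
        }
    }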
From source file:ch.algotrader.ordermgmt.DefaultOrderBook.java
    @Override
    public String getNextOrderIdRevision(final String intId) {
        if (intId == null) {
            return null;
        }
        int i = intId.indexOf('.');
        String baseId = i != -1 ? intId.substring(0, i) : null;
        if (baseId == null) {
            throw new OrderRegistryException("Unexpected internal order ID format: " + intId);
        }
        String s = intId.substring(baseId.length() + 1);
        long revision;
        try {
            revision = Long.parseLong(s);
        } catch (NumberFormatException ex) {
            throw new OrderRegistryException("Unexpected internal order ID format: " + intId);
        }
        AtomicLong count = this.revisionMap.compute(baseId, (key, existing) -> {
            if (existing != null) {
                if (existing.get() < revision) {
                    existing.set(revision);
                }
                return existing;
            }
            return new AtomicLong(revision);
        });
        long nextRevision = count.incrementAndGet();
        return baseId + '.' + nextRevision;
    }
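The order book keeps one AtomicLong per base order ID inside a ConcurrentHashMap and bumps it atomically via compute. A reduced sketch of that per-key revision counter, using hypothetical map and method names:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.AtomicLong;

    public class PerKeyRevisions {
        private final ConcurrentHashMap<String, AtomicLong> revisionMap = new ConcurrentHashMap<>();

        String nextRevision(String baseId, long observedRevision) {
            // Keep the stored counter at least as high as the revision just observed,
            // creating it on first use, then hand out the next value.
            AtomicLong counter = revisionMap.compute(baseId, (key, existing) -> {
                if (existing == null) {
                    return new AtomicLong(observedRevision);
                }
                if (existing.get() < observedRevision) {
                    existing.set(observedRevision);
                }
                return existing;
            });
            return baseId + '.' + counter.incrementAndGet();
        }

        public static void main(String[] args) {
            PerKeyRevisions book = new PerKeyRevisions();
            System.out.println(book.nextRevision("order-1", 3)); // order-1.4
            System.out.println(book.nextRevision("order-1", 3)); // order-1.5
        }
    }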
From source file:org.apache.nifi.controller.StandardProcessorNode.java
    public StandardProcessorNode(final Processor processor, final String uuid,
            final ValidationContextFactory validationContextFactory, final ProcessScheduler scheduler,
            final ControllerServiceProvider controllerServiceProvider, final String componentType,
            final String componentCanonicalClass, final NiFiProperties nifiProperties,
            final VariableRegistry variableRegistry, final ComponentLog logger) {

        super(processor, uuid, validationContextFactory, controllerServiceProvider, componentType,
                componentCanonicalClass, variableRegistry, logger);

        this.processor = processor;
        identifier = new AtomicReference<>(uuid);
        destinations = new HashMap<>();
        connections = new HashMap<>();
        incomingConnectionsRef = new AtomicReference<>(new ArrayList<>());
        lossTolerant = new AtomicBoolean(false);
        final Set<Relationship> emptySetOfRelationships = new HashSet<>();
        undefinedRelationshipsToTerminate = new AtomicReference<>(emptySetOfRelationships);
        comments = new AtomicReference<>("");
        schedulingPeriod = new AtomicReference<>("0 sec");
        schedulingNanos = new AtomicLong(MINIMUM_SCHEDULING_NANOS);
        yieldPeriod = new AtomicReference<>(DEFAULT_YIELD_PERIOD);
        yieldExpiration = new AtomicLong(0L);
        concurrentTaskCount = new AtomicInteger(1);
        position = new AtomicReference<>(new Position(0D, 0D));
        style = new AtomicReference<>(Collections.unmodifiableMap(new HashMap<String, String>()));
        this.processGroup = new AtomicReference<>();
        processScheduler = scheduler;
        penalizationPeriod = new AtomicReference<>(DEFAULT_PENALIZATION_PERIOD);
        this.nifiProperties = nifiProperties;

        final Class<?> procClass = processor.getClass();
        triggerWhenEmpty = procClass.isAnnotationPresent(TriggerWhenEmpty.class);
        sideEffectFree = procClass.isAnnotationPresent(SideEffectFree.class);
        batchSupported = procClass.isAnnotationPresent(SupportsBatching.class);
        triggeredSerially = procClass.isAnnotationPresent(TriggerSerially.class);
        triggerWhenAnyDestinationAvailable = procClass
                .isAnnotationPresent(TriggerWhenAnyDestinationAvailable.class);
        eventDrivenSupported = procClass.isAnnotationPresent(EventDriven.class) && !triggeredSerially
                && !triggerWhenEmpty;

        final boolean inputRequirementPresent = procClass.isAnnotationPresent(InputRequirement.class);
        if (inputRequirementPresent) {
            inputRequirement = procClass.getAnnotation(InputRequirement.class).value();
        } else {
            inputRequirement = Requirement.INPUT_ALLOWED;
        }

        schedulingStrategy = SchedulingStrategy.TIMER_DRIVEN;
        executionNode = ExecutionNode.ALL;
        try {
            if (procClass.isAnnotationPresent(DefaultSchedule.class)) {
                DefaultSchedule dsc = procClass.getAnnotation(DefaultSchedule.class);
                try {
                    this.setSchedulingStrategy(dsc.strategy());
                } catch (Throwable ex) {
                    LOG.error(String.format(
                            "Error while setting scheduling strategy from DefaultSchedule annotation: %s",
                            ex.getMessage()), ex);
                }
                try {
                    this.setScheduldingPeriod(dsc.period());
                } catch (Throwable ex) {
                    this.setSchedulingStrategy(SchedulingStrategy.TIMER_DRIVEN);
                    LOG.error(String.format(
                            "Error while setting scheduling period from DefaultSchedule annotation: %s",
                            ex.getMessage()), ex);
                }
                if (!triggeredSerially) {
                    try {
                        setMaxConcurrentTasks(dsc.concurrentTasks());
                    } catch (Throwable ex) {
                        LOG.error(String.format(
                                "Error while setting max concurrent tasks from DefaultSchedule annotation: %s",
                                ex.getMessage()), ex);
                    }
                }
            }
        } catch (Throwable ex) {
            LOG.error(String.format("Error while setting default schedule from DefaultSchedule annotation: %s",
                    ex.getMessage()), ex);
        }
    }
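`yieldExpiration` above is an AtomicLong holding a wall-clock deadline that other threads can read without locking. A tiny sketch of that yield/expiration idea, with hypothetical names:

    import java.util.concurrent.atomic.AtomicLong;

    class YieldTracker {
        private final AtomicLong yieldExpiration = new AtomicLong(0L);

        // Ask the component to stay idle for the given period.
        void yieldFor(long millis) {
            yieldExpiration.set(System.currentTimeMillis() + millis);
        }

        // Any thread can cheaply check whether the yield period has passed.
        boolean isYielded() {
            return yieldExpiration.get() > System.currentTimeMillis();
        }
    }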
From source file:com.grepcurl.random.BaseGenerator.java
    public Long nextId(String arbitraryGeneratorName) {
        Validate.notNull(arbitraryGeneratorName);
        return _idGenerator.computeIfAbsent(arbitraryGeneratorName, s -> new AtomicLong(1)).getAndIncrement();
    }
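A short usage sketch of the pattern above: one independent, thread-safe sequence per generator name, each starting at 1 (the map field name is assumed for illustration):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.AtomicLong;

    public class NamedIdGenerators {
        private final ConcurrentHashMap<String, AtomicLong> idGenerator = new ConcurrentHashMap<>();

        Long nextId(String name) {
            // The first caller for a name installs a counter seeded at 1; later callers share it.
            return idGenerator.computeIfAbsent(name, s -> new AtomicLong(1)).getAndIncrement();
        }

        public static void main(String[] args) {
            NamedIdGenerators gen = new NamedIdGenerators();
            System.out.println(gen.nextId("person")); // 1
            System.out.println(gen.nextId("person")); // 2
            System.out.println(gen.nextId("order"));  // 1
        }
    }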