List of usage examples for the java.util.concurrent.atomic.AtomicLong constructor
public AtomicLong(long initialValue)
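The constructor creates a new AtomicLong whose current value is the given initial value. Before the real-world examples below, here is a minimal, self-contained sketch of the constructor in isolation; the RequestCounter class and its field names are illustrative only and do not come from any of the projects listed afterwards.

import java.util.concurrent.atomic.AtomicLong;

public class RequestCounter {

    // Start counting from a known baseline instead of zero.
    private final AtomicLong requests = new AtomicLong(100L);

    public long recordRequest() {
        // Atomically adds one and returns the updated value; safe to call from multiple threads.
        return requests.incrementAndGet();
    }

    public static void main(String[] args) {
        RequestCounter counter = new RequestCounter();
        System.out.println(counter.recordRequest()); // prints 101
    }
}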
From source file: org.apache.hadoop.hbase.regionserver.DefaultMemStore.java

/**
 * Constructor.
 * @param c Comparator
 */
public DefaultMemStore(final Configuration conf, final KeyValue.KVComparator c) {
    this.conf = conf;
    this.comparator = c;
    this.kvset = new KeyValueSkipListSet(c);
    this.snapshot = new KeyValueSkipListSet(c);
    timeRangeTracker = new TimeRangeTracker();
    snapshotTimeRangeTracker = new TimeRangeTracker();
    // Track the memstore's heap size, starting from the fixed object overhead.
    this.size = new AtomicLong(DEEP_OVERHEAD);
    this.snapshotSize = 0;
    if (conf.getBoolean(USEMSLAB_KEY, USEMSLAB_DEFAULT)) {
        String className = conf.get(MSLAB_CLASS_NAME, HeapMemStoreLAB.class.getName());
        this.allocator = ReflectionUtils.instantiateWithCustomCtor(className,
                new Class[] { Configuration.class }, new Object[] { conf });
    } else {
        this.allocator = null;
    }
}
From source file: com.microsoft.applicationinsights.internal.channel.common.TransmissionFileSystemOutput.java

public TransmissionFileSystemOutput(String folderPath, String maxTransmissionStorageCapacity) {
    if (folderPath == null) {
        folderPath = new File(LocalFileSystemUtils.getTempDir(), TRANSMISSION_DEFAULT_FOLDER).getPath();
    }

    capacityEnforcer = LimitsEnforcer.createWithClosestLimitOnError(MIN_CAPACITY_MEGABYTES,
            MAX_CAPACITY_MEGABYTES, DEFAULT_CAPACITY_MEGABYTES, MAX_TRANSMISSION_STORAGE_CAPACITY_NAME,
            maxTransmissionStorageCapacity);
    capacityInKB = capacityEnforcer.getCurrentValue() * 1024;

    folder = new File(folderPath);
    if (!folder.exists()) {
        folder.mkdir();
    }

    if (!folder.exists() || !folder.canRead() || !folder.canWrite()) {
        throw new IllegalArgumentException("Folder must exist with read and write permissions");
    }

    long currentSize = getTotalSizeOfTransmissionFiles();
    size = new AtomicLong(currentSize);
}
From source file: com.addthis.hydra.data.tree.nonconcurrent.NonConcurrentTree.java

public NonConcurrentTree(File root, int maxCacheSize, int maxPageSize, PageFactory factory) throws Exception {
    LessFiles.initDirectory(root);
    this.root = root;
    long start = System.currentTimeMillis();

    // setup metering
    meter = new Meter<>(METERTREE.values());
    for (METERTREE m : METERTREE.values()) {
        meter.addCountMetric(m, m.toString());
    }

    // create meter logging thread
    if (TreeCommonParameters.meterLogging > 0) {
        logger = new MeterFileLogger(this, root, "tree-metrics", TreeCommonParameters.meterLogging,
                TreeCommonParameters.meterLogLines);
    } else {
        logger = null;
    }

    source = new PageDB.Builder<>(root, NonConcurrentTreeNode.class, maxPageSize, maxCacheSize)
            .pageFactory(factory).build();
    source.setCacheMem(TreeCommonParameters.maxCacheMem);
    source.setPageMem(TreeCommonParameters.maxPageMem);
    source.setMemSampleInterval(TreeCommonParameters.memSample);

    // get stored next db id
    File idFile = new File(root, "nextID");
    if (idFile.exists() && idFile.isFile() && idFile.length() > 0) {
        nextDBID = new AtomicLong(Long.parseLong(LessBytes.toString(LessFiles.read(idFile))));
    } else {
        nextDBID = new AtomicLong(1);
    }

    // get tree root
    NonConcurrentTreeNode dummyRoot = NonConcurrentTreeNode.getTreeRoot(this);
    treeRootNode = dummyRoot.getOrCreateEditableNode("root");
    treeTrashNode = dummyRoot.getOrCreateEditableNode("trash");

    long openTime = System.currentTimeMillis() - start;
    log.info("dir={} root={} nextdb={} openms={}", root, treeRootNode, nextDBID, openTime);
}
From source file: org.apache.flume.channel.recoverable.memory.wal.TestWAL.java

@Test
public void testThreadedAppend() throws IOException, InterruptedException {
    int numThreads = 10;
    final CountDownLatch startLatch = new CountDownLatch(numThreads);
    final CountDownLatch stopLatch = new CountDownLatch(numThreads);
    final AtomicLong seqid = new AtomicLong(0);
    final List<String> globalExpected = Collections.synchronizedList(new ArrayList<String>());
    final List<Exception> errors = Collections.synchronizedList(new ArrayList<Exception>());
    for (int i = 0; i < numThreads; i++) {
        final int id = i;
        Thread t = new Thread() {
            @Override
            public void run() {
                try {
                    List<String> expected = strings(100);
                    globalExpected.addAll(expected);
                    startLatch.countDown();
                    startLatch.await();
                    // half of the threads batch their writes, half do not
                    if (id % 2 == 0) {
                        for (String s : expected) {
                            wal.writeEntry(new WALEntry<Text>(new Text(s), seqid.incrementAndGet()));
                        }
                    } else {
                        List<WALEntry<Text>> batch = Lists.newArrayList();
                        for (String s : expected) {
                            batch.add(new WALEntry<Text>(new Text(s), seqid.incrementAndGet()));
                        }
                        wal.writeEntries(batch);
                    }
                } catch (Exception e) {
                    logger.warn("Error doing appends", e);
                    errors.add(e);
                } finally {
                    stopLatch.countDown();
                }
            }
        };
        t.setDaemon(true);
        t.start();
    }
    Assert.assertTrue(stopLatch.await(30, TimeUnit.SECONDS));
    Assert.assertEquals(Collections.EMPTY_LIST, errors);
    wal.close();
    wal = new WAL<Text>(dataDir, Text.class);
    WALReplayResult<Text> result = wal.replay();
    Assert.assertEquals(1000, result.getSequenceID());
    List<String> actual = toStringList(result.getResults());
    // We don't know in what order the threads appended to the WAL,
    // so sort both lists before comparing them.
    Collections.sort(actual);
    Collections.sort(globalExpected);
    Assert.assertEquals(globalExpected, actual);
}
From source file: de.javakaffee.web.msm.serializer.javolution.AaltoTranscoderTest.java

@DataProvider(name = "typesAsSessionAttributesProvider")
protected Object[][] createTypesAsSessionAttributesData() {
    return new Object[][] {
            { int.class, 42 },
            { long.class, 42 },
            { Boolean.class, Boolean.TRUE },
            { String.class, "42" },
            { Class.class, String.class },
            { Long.class, new Long(42) },
            { Integer.class, new Integer(42) },
            { Character.class, new Character('c') },
            { Byte.class, new Byte("b".getBytes()[0]) },
            { Double.class, new Double(42d) },
            { Float.class, new Float(42f) },
            { Short.class, new Short((short) 42) },
            { BigDecimal.class, new BigDecimal(42) },
            { AtomicInteger.class, new AtomicInteger(42) },
            { AtomicLong.class, new AtomicLong(42) },
            { MutableInt.class, new MutableInt(42) },
            { Integer[].class, new Integer[] { 42 } },
            { Date.class, new Date(System.currentTimeMillis() - 10000) },
            { Calendar.class, Calendar.getInstance() },
            { ArrayList.class, new ArrayList<String>(Arrays.asList("foo")) },
            { int[].class, new int[] { 1, 2 } },
            { long[].class, new long[] { 1, 2 } },
            { short[].class, new short[] { 1, 2 } },
            { float[].class, new float[] { 1, 2 } },
            { double[].class, new double[] { 1, 2 } },
            { boolean[].class, new boolean[] { true, false } },
            { byte[].class, "42".getBytes() },
            { char[].class, "42".toCharArray() },
            { String[].class, new String[] { "23", "42" } },
            { Person[].class, new Person[] { createPerson("foo bar", Gender.MALE, 42) } } };
}
From source file: org.apache.nifi.controller.AbstractPort.java

public AbstractPort(final String id, final String name, final ProcessGroup processGroup,
        final ConnectableType type, final ProcessScheduler scheduler) {
    this.id = requireNonNull(id);
    this.name = new AtomicReference<>(requireNonNull(name));
    position = new AtomicReference<>(new Position(0D, 0D));
    outgoingConnections = new HashSet<>();
    incomingConnections = new ArrayList<>();
    comments = new AtomicReference<>();
    lossTolerant = new AtomicBoolean(false);
    concurrentTaskCount = new AtomicInteger(1);
    processScheduler = scheduler;

    final List<Relationship> relationshipList = new ArrayList<>();
    relationshipList.add(PORT_RELATIONSHIP);
    relationships = Collections.unmodifiableList(relationshipList);

    this.processGroup = new AtomicReference<>(processGroup);
    this.type = type;

    penalizationPeriod = new AtomicReference<>("30 sec");
    yieldPeriod = new AtomicReference<>("1 sec");
    yieldExpiration = new AtomicLong(0L);
    schedulingPeriod = new AtomicReference<>("0 millis");
    schedulingNanos = new AtomicLong(MINIMUM_SCHEDULING_NANOS);
    scheduledState = new AtomicReference<>(ScheduledState.STOPPED);
}
From source file: com.joyent.manta.client.MantaSeekableByteChannel.java

/**
 * Creates a new instance of a read-only seekable byte channel.
 *
 * @param path path of the object on the Manta API
 * @param position starting position in bytes from the start of the file
 * @param httpHelper helper class providing useful HTTP functions
 */
public MantaSeekableByteChannel(final String path, final long position, final HttpHelper httpHelper) {
    this.path = path;
    this.position = new AtomicLong(position);
    this.httpHelper = httpHelper;
    this.requestRef = new AtomicReference<>();
    this.responseStream = new AtomicReference<>();
}
From source file: com.ning.arecibo.collector.persistent.TestTimelineAggregator.java

private void checkSamplesForATimeline(final Integer startTimeMinutesAgo, final Integer endTimeMinutesAgo,
        final long expectedChunks) throws InterruptedException {
    final AtomicLong timelineChunkSeen = new AtomicLong(0);
    timelineDAO.getSamplesByHostIdsAndSampleKindIds(ImmutableList.<Integer>of(hostId),
            ImmutableList.<Integer>of(minHeapUsedKindId, maxHeapUsedKindId),
            START_TIME.minusMinutes(startTimeMinutesAgo), START_TIME.minusMinutes(endTimeMinutesAgo),
            new TimelineChunkConsumer() {
                @Override
                public void processTimelineChunk(final TimelineChunk chunk) {
                    Assert.assertEquals((Integer) chunk.getHostId(), hostId);
                    Assert.assertTrue(chunk.getSampleKindId() == minHeapUsedKindId
                            || chunk.getSampleKindId() == maxHeapUsedKindId);
                    timelineChunkSeen.incrementAndGet();
                }
            });
    Assert.assertEquals(timelineChunkSeen.get(), expectedChunks);
}
From source file: com.opengamma.engine.cache.BerkeleyDBValueSpecificationIdentifierBinaryDataStoreTest.java

public void parallelPutGetTest() throws InterruptedException {
    final int numEntries = 5000;
    final int numCycles = 1;
    final int numGets = numCycles * numEntries;
    final Random random = new Random();

    File dbDir = createDbDir("parallelPutGetTest");
    Environment dbEnvironment = BerkeleyDBViewComputationCacheSource.constructDatabaseEnvironment(dbDir, false);
    final BerkeleyDBBinaryDataStore dataStore = new BerkeleyDBBinaryDataStore(dbEnvironment, "parallelPutGetTest");
    dataStore.start();

    final AtomicLong currentMaxIdentifier = new AtomicLong(0L);
    final byte[] bytes = new byte[100];
    random.nextBytes(bytes);

    Thread tPut = new Thread(new Runnable() {
        @Override
        public void run() {
            OperationTimer timer = new OperationTimer(s_logger, "Putting {} entries", numEntries);
            for (int i = 0; i < numEntries; i++) {
                random.nextBytes(bytes);
                dataStore.put(i, bytes);
                currentMaxIdentifier.set(i);
            }
            long numMillis = timer.finished();
            double msPerPut = ((double) numMillis) / ((double) numGets);
            double putsPerSecond = 1000.0 / msPerPut;
            s_logger.info("for {} puts, {} ms/put, {} puts/sec",
                    new Object[] { numEntries, msPerPut, putsPerSecond });
        }
    }, "Putter");

    class GetRunner implements Runnable {
        @Override
        public void run() {
            OperationTimer timer = new OperationTimer(s_logger, "Getting {} entries", numGets);
            for (int i = 0; i < numGets; i++) {
                int maxIdentifier = (int) currentMaxIdentifier.get();
                long actualIdentifier = random.nextInt(maxIdentifier);
                dataStore.get(actualIdentifier);
            }
            long numMillis = timer.finished();
            double msPerGet = ((double) numMillis) / ((double) numGets);
            double getsPerSecond = 1000.0 / msPerGet;
            s_logger.info("for {} gets, {} ms/get, {} gets/sec",
                    new Object[] { numGets, msPerGet, getsPerSecond });
        }
    }

    Thread tGet1 = new Thread(new GetRunner(), "getter-1");
    Thread tGet2 = new Thread(new GetRunner(), "getter-2");
    //Thread tGet3 = new Thread(new GetRunner(), "getter-3");
    //Thread tGet4 = new Thread(new GetRunner(), "getter-4");
    //Thread tGet5 = new Thread(new GetRunner(), "getter-5");

    tPut.start();
    Thread.sleep(5L);
    tGet1.start();
    tGet2.start();
    //tGet3.start();
    //tGet4.start();
    //tGet5.start();

    tPut.join();
    tGet1.join();
    tGet2.join();
    //tGet3.join();
    //tGet4.join();
    //tGet5.join();

    dataStore.delete();
    dataStore.stop();
    dbEnvironment.close();
}
From source file: org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo.java

public AppSchedulingInfo(ApplicationAttemptId appAttemptId, String user, Queue queue,
        ActiveUsersManager activeUsersManager, long epoch, ResourceUsage appResourceUsage,
        RMContext rmContext) {
    this.applicationAttemptId = appAttemptId;
    this.applicationId = appAttemptId.getApplicationId();
    this.queue = queue;
    this.user = user;
    this.activeUsersManager = activeUsersManager;
    // Seed the container id counter with the epoch in the high-order bits so that
    // container ids stay unique across ResourceManager restarts.
    this.containerIdCounter = new AtomicLong(epoch << EPOCH_BIT_SHIFT);
    this.appResourceUsage = appResourceUsage;
    try {
        MessageDigest digest = MessageDigest.getInstance(rmContext.getUserFolderHashAlgo());
        byte[] userBytes = user.getBytes(StandardCharsets.UTF_8);
        byte[] hashBase = ArrayUtils.addAll(userBytes, rmContext.getSeed());
        byte[] hash = digest.digest(hashBase);
        userFolder = Base64.encodeBase64URLSafeString(hash);
    } catch (NoSuchAlgorithmException ex) {
        LOG.error("error while creating userFolder random string", ex);
        throw new Error("error while creating userFolder random string", ex);
    }
}