List of usage examples for the java.util.concurrent.atomic.AtomicLong constructor
public AtomicLong(long initialValue)
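The constructor simply stores the given value as the initial value of the counter. A minimal sketch of typical use (the variable names and printed values here are illustrative, not taken from any of the examples below):

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongBasics {
    public static void main(String[] args) {
        AtomicLong counter = new AtomicLong(100L);        // starts at 100
        long next = counter.incrementAndGet();            // atomically adds 1, returns 101
        counter.addAndGet(5);                             // atomically adds 5 -> 106
        boolean swapped = counter.compareAndSet(106, 0);  // CAS back to 0
        System.out.println(next + " " + counter.get() + " " + swapped); // 101 0 true
    }
}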
From source file:org.lendingclub.mercator.solarwinds.SolarwindsScanner.java
public void getNodeInformation() {
    try {
        ObjectNode response = querySolarwinds("SELECT Nodes.NodeID, Nodes.SysName, Nodes.Caption, "
                + "Nodes.Description, Nodes.IOSVersion, Nodes.CustomProperties.SerialNumber, Nodes.MachineType, "
                + "Nodes.Vendor, Nodes.IPAddress, Nodes.SysObjectID, Nodes.DNS, Nodes.ObjectSubType, "
                + "Nodes.Status, Nodes.StatusDescription, Nodes.CustomProperties.Department, Nodes.Location,"
                + " Nodes.CustomProperties.City FROM Orion.Nodes ORDER BY Nodes.SysName");
        AtomicLong earlistUpdate = new AtomicLong(Long.MAX_VALUE);
        AtomicBoolean error = new AtomicBoolean(false);
        response.path("results").forEach(v -> {
            try {
                // solarwindsID is the hashed URL + nodeID
                getProjector().getNeoRxClient().execCypher(
                        "merge(a: SolarwindsNode {solarwindsID:{solarwindsID}}) set a+={props}, a.updateTs=timestamp() return a",
                        "solarwindsID", solarwindsScannerBuilder.hashURL + v.path("NodeID"),
                        "props", flattenNode(v)).blockingFirst(MissingNode.getInstance());
            } catch (Exception e) {
                logger.warn("problem", e);
                error.set(true);
            }
        });
        if (!error.get()) {
            getNeoRxClient().execCypher(
                    "match(a: SolarwindsNode) where a.solarwindsID={solarwindsID} and a.updateTs<{cutoff} detach delete a",
                    "solarwindsID", solarwindsScannerBuilder.hashURL, "cutoff", earlistUpdate.get());
        }
    } catch (Exception e) {
        logger.info(e.toString());
    }
}
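In the example above the AtomicLong acts as a mutable holder that a lambda can write to, because local variables captured by a lambda must be effectively final. A minimal, hypothetical sketch of that pattern (the earliestUpdate method and its inputs are invented for illustration, not part of the scanner above):

import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

public class MinTimestampExample {
    // A plain local long cannot be reassigned inside the lambda,
    // so an AtomicLong holds the running minimum instead.
    static long earliestUpdate(List<Long> timestamps) {
        AtomicLong earliest = new AtomicLong(Long.MAX_VALUE);
        timestamps.forEach(ts -> earliest.getAndUpdate(prev -> Math.min(prev, ts)));
        return earliest.get();
    }

    public static void main(String[] args) {
        System.out.println(earliestUpdate(List.of(42L, 7L, 19L))); // prints 7
    }
}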
From source file:org.apache.bookkeeper.benchmark.BenchThroughputLatency.java
public BenchThroughputLatency(int ensemble, int writeQuorumSize, int ackQuorumSize, byte[] passwd,
        int numberOfLedgers, int sendLimit, ClientConfiguration conf)
        throws KeeperException, IOException, InterruptedException {
    this.sem = new Semaphore(conf.getThrottleValue());
    bk = new BookKeeper(conf);
    this.counter = new AtomicLong(0);
    this.numberOfLedgers = numberOfLedgers;
    this.sendLimit = sendLimit;
    this.latencies = new long[sendLimit];
    try {
        lh = new LedgerHandle[this.numberOfLedgers];
        for (int i = 0; i < this.numberOfLedgers; i++) {
            lh[i] = bk.createLedger(ensemble, writeQuorumSize, ackQuorumSize,
                    BookKeeper.DigestType.CRC32, passwd);
            LOG.debug("Ledger Handle: " + lh[i].getId());
        }
    } catch (BKException e) {
        e.printStackTrace();
    }
}
From source file:org.apache.pulsar.compaction.CompactedTopicTest.java
/**
 * Build a compacted ledger and return the id of the ledger, the positions of the
 * entries in the ledger, and a list of gaps along with the entry that should be
 * returned after each gap.
 */
private Triple<Long, List<Pair<MessageIdData, Long>>, List<Pair<MessageIdData, Long>>> buildCompactedLedger(
        BookKeeper bk, int count) throws Exception {
    LedgerHandle lh = bk.createLedger(1, 1, Compactor.COMPACTED_TOPIC_LEDGER_DIGEST_TYPE,
            Compactor.COMPACTED_TOPIC_LEDGER_PASSWORD);
    List<Pair<MessageIdData, Long>> positions = new ArrayList<>();
    List<Pair<MessageIdData, Long>> idsInGaps = new ArrayList<>();

    AtomicLong ledgerIds = new AtomicLong(10L);
    AtomicLong entryIds = new AtomicLong(0L);
    CompletableFuture.allOf(IntStream.range(0, count).mapToObj((i) -> {
        List<MessageIdData> idsInGap = new ArrayList<MessageIdData>();
        if (r.nextInt(10) == 1) {
            long delta = r.nextInt(10) + 1;
            idsInGap.add(MessageIdData.newBuilder().setLedgerId(ledgerIds.get())
                    .setEntryId(entryIds.get() + 1).build());
            ledgerIds.addAndGet(delta);
            entryIds.set(0);
        }
        long delta = r.nextInt(5);
        if (delta != 0) {
            idsInGap.add(MessageIdData.newBuilder().setLedgerId(ledgerIds.get())
                    .setEntryId(entryIds.get() + 1).build());
        }
        MessageIdData id = MessageIdData.newBuilder().setLedgerId(ledgerIds.get())
                .setEntryId(entryIds.addAndGet(delta + 1)).build();

        @Cleanup
        RawMessage m = new RawMessageImpl(id, Unpooled.EMPTY_BUFFER);
        CompletableFuture<Void> f = new CompletableFuture<>();
        ByteBuf buffer = m.serialize();
        lh.asyncAddEntry(buffer, (rc, ledger, eid, ctx) -> {
            if (rc != BKException.Code.OK) {
                f.completeExceptionally(BKException.create(rc));
            } else {
                positions.add(Pair.of(id, eid));
                idsInGap.forEach((gid) -> idsInGaps.add(Pair.of(gid, eid)));
                f.complete(null);
            }
        }, null);
        return f;
    }).toArray(CompletableFuture[]::new)).get();
    lh.close();

    return Triple.of(lh.getId(), positions, idsInGaps);
}
From source file:com.flowpowered.engine.scheduler.FlowTask.java
/**
 * Creates a new task with the specified period between consecutive calls to {@link #run()}.
 */
public FlowTask(FlowTaskManager manager, Scheduler scheduler, Object owner, Runnable task, boolean sync,
        long delay, long period, TaskPriority priority) {
    super(task, null);
    Validate.isTrue(!sync || priority != null, "Priority cannot be null if sync!");
    this.taskId = nextTaskId.getAndIncrement();
    this.nextCallTime = new AtomicLong(manager.getUpTime() + delay);
    this.executing = new AtomicBoolean(false);
    this.owner = owner;
    this.delay = delay;
    this.period = period;
    this.sync = sync;
    this.priority = priority;
    this.manager = manager;
    this.scheduler = scheduler;
}
From source file:org.apache.hadoop.hbase.client.metrics.ScanMetrics.java
private AtomicLong createCounter(String counterName) {
    AtomicLong c = new AtomicLong(0);
    counters.put(counterName, c);
    return c;
}
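This registers a fresh counter, starting at zero, under a name so callers can increment it later. A short sketch of how such a registry is commonly wired up, assuming a ConcurrentHashMap-backed counters field like the one implied above (the class name and the "RPC_CALLS" key are invented for illustration):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

public class CounterRegistry {
    // Assumed shape of the registry; the real ScanMetrics class may differ.
    private final Map<String, AtomicLong> counters = new ConcurrentHashMap<>();

    private AtomicLong createCounter(String counterName) {
        AtomicLong c = new AtomicLong(0);
        counters.put(counterName, c);
        return c;
    }

    public static void main(String[] args) {
        CounterRegistry metrics = new CounterRegistry();
        AtomicLong rpcCalls = metrics.createCounter("RPC_CALLS");
        rpcCalls.incrementAndGet();
        rpcCalls.addAndGet(4);
        System.out.println(metrics.counters.get("RPC_CALLS").get()); // prints 5
    }
}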
From source file:org.apache.hadoop.hbase.procedure2.store.wal.TestStressWALProcedureStore.java
@Test
public void testInsertUpdateDelete() throws Exception {
    final long LAST_PROC_ID = 19999;
    final Thread[] thread = new Thread[PROCEDURE_STORE_SLOTS];
    final AtomicLong procCounter = new AtomicLong((long) Math.round(Math.random() * 100));
    for (int i = 0; i < thread.length; ++i) {
        thread[i] = new Thread() {
            @Override
            public void run() {
                Random rand = new Random();
                TestProcedure proc;
                do {
                    // After HBASE-15579 there may be gaps in the procId sequence; simulate that here.
                    long procId = procCounter.addAndGet(1 + rand.nextInt(3));
                    proc = new TestProcedure(procId);
                    // Insert
                    procStore.insert(proc, null);
                    // Update
                    for (int i = 0, nupdates = rand.nextInt(10); i <= nupdates; ++i) {
                        try {
                            Thread.sleep(0, rand.nextInt(15));
                        } catch (InterruptedException e) {
                        }
                        procStore.update(proc);
                    }
                    // Delete
                    procStore.delete(proc.getProcId());
                } while (proc.getProcId() < LAST_PROC_ID);
            }
        };
        thread[i].start();
    }
    for (int i = 0; i < thread.length; ++i) {
        thread[i].join();
    }
    procStore.getStoreTracker().dump();
    assertTrue(procCounter.get() >= LAST_PROC_ID);
    assertTrue(procStore.getStoreTracker().isEmpty());
    assertEquals(1, procStore.getActiveLogs().size());
}
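Here a single AtomicLong (procCounter) hands out non-overlapping, strictly increasing ids to several worker threads at once; addAndGet is the atomic read-modify-write that makes this safe without locks. A stripped-down, hypothetical sketch of just that id-generation pattern (the class and loop bounds are invented for illustration):

import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicLong;

public class SharedIdGenerator {
    public static void main(String[] args) throws InterruptedException {
        // Every worker skips ahead by a random step of 1..3, so ids are unique
        // across threads but the sequence may contain gaps.
        AtomicLong nextId = new AtomicLong(0);
        Runnable worker = () -> {
            for (int i = 0; i < 1000; i++) {
                long id = nextId.addAndGet(1 + ThreadLocalRandom.current().nextInt(3));
                // ... the id would be used here ...
            }
        };
        Thread[] threads = new Thread[4];
        for (int i = 0; i < threads.length; i++) {
            threads[i] = new Thread(worker);
            threads[i].start();
        }
        for (Thread t : threads) {
            t.join();
        }
        System.out.println("highest id handed out: " + nextId.get());
    }
}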
From source file:org.apache.hadoop.hbase.regionserver.wal.TestSecureHLog.java
@Test
public void testSecureHLog() throws Exception {
    TableName tableName = TableName.valueOf("TestSecureHLog");
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor(tableName.getName()));
    HRegionInfo regioninfo = new HRegionInfo(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
            false);
    final int total = 10;
    final byte[] row = Bytes.toBytes("row");
    final byte[] family = Bytes.toBytes("family");
    final byte[] value = Bytes.toBytes("Test value");
    FileSystem fs = TEST_UTIL.getTestFileSystem();
    Path logDir = TEST_UTIL.getDataTestDir("log");
    final AtomicLong sequenceId = new AtomicLong(1);

    // Write the WAL
    HLog wal = new FSHLog(fs, TEST_UTIL.getDataTestDir(), logDir.toString(), TEST_UTIL.getConfiguration());
    for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(row, family, Bytes.toBytes(i), value));
        wal.append(regioninfo, tableName, kvs, System.currentTimeMillis(), htd, sequenceId);
    }
    final Path walPath = ((FSHLog) wal).computeFilename();
    wal.close();

    // Ensure edits are not plaintext
    long length = fs.getFileStatus(walPath).getLen();
    FSDataInputStream in = fs.open(walPath);
    byte[] fileData = new byte[(int) length];
    IOUtils.readFully(in, fileData);
    in.close();
    assertFalse("Cells appear to be plaintext", Bytes.contains(fileData, value));

    // Confirm the WAL can be read back
    HLog.Reader reader = HLogFactory.createReader(TEST_UTIL.getTestFileSystem(), walPath,
            TEST_UTIL.getConfiguration());
    int count = 0;
    HLog.Entry entry = new HLog.Entry();
    while (reader.next(entry) != null) {
        count++;
        List<KeyValue> kvs = entry.getEdit().getKeyValues();
        assertTrue("Should be one KV per WALEdit", kvs.size() == 1);
        for (KeyValue kv : kvs) {
            byte[] thisRow = kv.getRow();
            assertTrue("Incorrect row", Bytes.equals(thisRow, row));
            byte[] thisFamily = kv.getFamily();
            assertTrue("Incorrect family", Bytes.equals(thisFamily, family));
            byte[] thisValue = kv.getValue();
            assertTrue("Incorrect value", Bytes.equals(thisValue, value));
        }
    }
    assertEquals("Should have read back as many KVs as written", total, count);
    reader.close();
}
From source file:nz.co.fortytwo.signalk.processor.FullExportProcessor.java
public FullExportProcessor(String wsSession, String routeId) {
    super();
    this.wsSession = wsSession;
    this.routeId = routeId;
    lastSend = new AtomicLong(System.currentTimeMillis());
    signalkModel.getEventBus().register(this);
    // set up JMX: register the MBean
    try {
        ObjectName name = new ObjectName(getClass().getName(), "id", UUID.randomUUID().toString());
        ManagementFactory.getPlatformMBeanServer().registerMBean(this, name);
    } catch (Exception e) {
        logger.error(e.getMessage(), e);
    }
}
From source file:com.chinamobile.bcbsp.comm.BDBMap.java
/**
 * Create a database.
 * @param dbDir
 * @param dbName
 * @param keyClass
 * @param valueClass
 */
public BDBMap(BSPJob job, File dbDir, String dbName, Class<K> keyClass, Class<V> valueClass) {
    this.setDbDir(dbDir);
    this.setDbName(dbName);
    bdbMapSize = new AtomicLong(0);
    createAndBindDatabase(dbDir, dbName, keyClass, valueClass);
}
From source file:com.ganji.cateye.flume.kestrel.KestrelRpcClient.java
public KestrelRpcClient() {
    stateLock = new ReentrantLock(true);
    connState = State.INIT;
    threadCounter = new AtomicLong(0);
    // OK to use a cached thread pool, because this is simply meant to time out
    // the calls - and is IO bound.
    callTimeoutPool = Executors.newCachedThreadPool(new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setName(KestrelRpcClient.this.sinkName + "-" + String.valueOf(threadCounter.incrementAndGet()));
            return t;
        }
    });
}
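The AtomicLong here only exists to give each thread created by the factory a unique, monotonically increasing name suffix. A minimal standalone sketch of the same pattern (the "my-sink" pool name is invented for illustration):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicLong;

public class NamedThreadFactoryExample {
    public static void main(String[] args) {
        // Each new thread gets the next value from the counter in its name.
        AtomicLong threadCounter = new AtomicLong(0);
        ThreadFactory factory = r -> {
            Thread t = new Thread(r);
            t.setName("my-sink-" + threadCounter.incrementAndGet());
            return t;
        };
        ExecutorService pool = Executors.newCachedThreadPool(factory);
        pool.submit(() -> System.out.println(Thread.currentThread().getName())); // e.g. my-sink-1
        pool.shutdown();
    }
}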