List of usage examples for the java.util.concurrent.atomic.AtomicLong constructor
public AtomicLong(long initialValue)
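For orientation, a minimal self-contained sketch (not taken from any of the projects below) showing the constructor together with the most common operations on the resulting counter:

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongBasics {
  public static void main(String[] args) {
    AtomicLong counter = new AtomicLong(42L); // seeded with an initial value
    long next = counter.incrementAndGet();    // 43
    counter.addAndGet(7L);                    // 50
    long previous = counter.getAndSet(0L);    // returns 50, counter is now 0
    System.out.println(next + " " + previous + " " + counter.get()); // 43 50 0
  }
}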
From source file: org.apache.hadoop.hbase.client.TestAsyncTable.java
@Test
public void testIncrement() throws InterruptedException, ExecutionException {
  AsyncTableBase table = getTable.get();
  int count = 100;
  CountDownLatch latch = new CountDownLatch(count);
  AtomicLong sum = new AtomicLong(0L);
  IntStream.range(0, count)
      .forEach(i -> table.incrementColumnValue(row, FAMILY, QUALIFIER, 1).thenAccept(x -> {
        sum.addAndGet(x);
        latch.countDown();
      }));
  latch.await();
  assertEquals(count, Bytes
      .toLong(table.get(new Get(row).addColumn(FAMILY, QUALIFIER)).get().getValue(FAMILY, QUALIFIER)));
  assertEquals((1 + count) * count / 2, sum.get());
}
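The test above accumulates asynchronous increment results into a shared AtomicLong and uses a CountDownLatch to wait for all callbacks. A simplified, HBase-free sketch of the same pattern (the CompletableFuture plumbing is illustrative, not part of the original test):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.IntStream;

public class AsyncSumSketch {
  public static void main(String[] args) throws InterruptedException {
    int count = 100;
    CountDownLatch latch = new CountDownLatch(count);
    AtomicLong sum = new AtomicLong(0L);
    IntStream.rangeClosed(1, count)
        .forEach(i -> CompletableFuture.supplyAsync(() -> (long) i).thenAccept(x -> {
          sum.addAndGet(x); // safe to call from any callback thread
          latch.countDown();
        }));
    latch.await();
    System.out.println(sum.get()); // (1 + count) * count / 2 = 5050
  }
}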
From source file: com.b5m.raindrop.source.metaq.MetaSpout.java
public void open(final Map conf, final TopologyContext context, final SpoutOutputCollector collector) {
  final String topic = (String) conf.get(TOPIC);
  if (topic == null) {
    throw new IllegalArgumentException(TOPIC + " is null");
  }
  Integer maxSize = (Integer) conf.get(FETCH_MAX_SIZE);
  if (maxSize == null) {
    log.warn("Using default FETCH_MAX_SIZE");
    maxSize = DEFAULT_MAX_SIZE;
  }
  this.id2wrapperMap = new ConcurrentHashMap<Long, MetaMessageWrapper>();
  this.messageQueue = new LinkedTransferQueue<MetaMessageWrapper>();
  receivedCount = new AtomicLong(0);
  try {
    this.collector = collector;
    logConsumerConfig(this.consumerConfig);
    logMetaClientConfig(this.metaClientConfig);
    this.setUpMeta(topic, maxSize);
  } catch (final MetaClientException e) {
    log.error("Setup meta consumer failed", e);
  }
}
From source file: org.apache.hadoop.raid.SmokeTestThread.java
@Override
public Boolean call() throws Exception {
  Path testPath = null;
  try {
    fileSys = FileSystem.get(distRaidNode.getConf());
    // Create a small file with 3 blocks
    String testFile = testFileBase + rand.nextLong();
    testPath = new Path(testFile);
    if (fileSys.exists(testPath)) {
      fileSys.delete(testPath, true);
    }
    long blockSize = BLOCK_SIZE;
    FSDataOutputStream stm = fileSys.create(testPath, true,
        fileSys.getConf().getInt("io.file.buffer.size", 4096), (short) 3, blockSize);
    // Write 3 blocks.
    byte[] b = new byte[(int) blockSize];
    for (int i = 0; i < NUM_SOURCE_BLOCKS; i++) {
      rand.nextBytes(b);
      stm.write(b);
      checksum.update(b);
    }
    stm.close();
    LOG.info("[SMOKETEST] Created a test file: " + testFile + " with CRC32 checksum " + checksum.getValue());
    PolicyInfo info = new PolicyInfo(testFile, distRaidNode.getConf());
    info.setCodecId(TEST_CODEC);
    info.setSrcPath(testFileDirectory);
    info.setShouldRaid(true);
    info.setProperty("modTimePeriod", "0");
    info.setProperty("targetReplication", "1");
    info.setProperty("metaReplication", "1");
    FileStatus stat = fileSys.getFileStatus(testPath);
    ArrayList<FileStatus> fstats = new ArrayList<FileStatus>();
    fstats.add(stat);
    // Raid it using rs
    DistRaid dr = DistRaidNode.raidFiles(distRaidNode.getConf(), distRaidNode.jobMonitor, fstats, info);
    LOG.info("[SMOKETEST] RS Raid test file: " + testFile);
    if (dr == null) {
      throw new IOException("Failed to start a raiding job");
    }
    long startTime = System.currentTimeMillis();
    while (!dr.checkComplete() && System.currentTimeMillis() - startTime < timeOut) {
      Thread.sleep(SLEEP_TIME);
    }
    if (!dr.checkComplete()) {
      throw new IOException("Failed to finish the raiding job in " + (timeOut / 1000) + " seconds");
    }
    if (!dr.successful()) {
      throw new IOException("Failed to raid the file " + testFile);
    }
    LOG.info("[SMOKETEST] Finish raiding test file: " + testFile);
    // Verify parity file exists
    Codec codec = Codec.getCodec(TEST_CODEC);
    Path parityPath = new Path(codec.getParityPrefix(), RaidNode.makeRelative(testPath));
    FileStatus parityStat = fileSys.getFileStatus(parityPath);
    long numParityBlocks = RaidNode.numBlocks(parityStat);
    long expectedNumParityBlocks = RaidNode.numStripes(NUM_SOURCE_BLOCKS, codec.stripeLength) * codec.parityLength;
    if (numParityBlocks != expectedNumParityBlocks
        || parityStat.getLen() != expectedNumParityBlocks * BLOCK_SIZE) {
      throw new IOException("[SMOKETEST] Parity file " + parityPath + " has " + numParityBlocks
          + " blocks and " + parityStat.getLen() + " bytes, but we expect " + expectedNumParityBlocks
          + " blocks and " + (expectedNumParityBlocks * BLOCK_SIZE) + " bytes");
    }
    LOG.info("[SMOKETEST] Verification of parity file " + parityPath + " succeeded");
    LocatedBlock[] blocks = new LocatedBlock[1];
    LocatedBlocks lbs = ((DistributedFileSystem) fileSys).getLocatedBlocks(testPath, 0, Integer.MAX_VALUE);
    // Corrupt the first block
    blocks[0] = lbs.get(0);
    ((DistributedFileSystem) fileSys).getClient().reportBadBlocks(blocks);
    LOG.info("[SMOKETEST] Finish corrupting the first block " + lbs.get(0).getBlock());
    // submit a job to "fix" it
    Set<String> jobFiles = new HashSet<String>();
    jobFiles.add(testFile);
    Job job = DistBlockIntegrityMonitor.startOneJob(
        (DistBlockIntegrityMonitor.Worker) distRaidNode.blockIntegrityMonitor.getCorruptionMonitor(),
        Priority.HIGH, jobFiles, System.currentTimeMillis(), new AtomicLong(0),
        new AtomicLong(System.currentTimeMillis()), Integer.MAX_VALUE);
    startTime = System.currentTimeMillis();
    while (!job.isComplete() && System.currentTimeMillis() - startTime < timeOut) {
      Thread.sleep(SLEEP_TIME);
    }
    if (!job.isComplete()) {
      throw new IOException("Failed to finish the blockfixing job in " + (timeOut / 1000) + " seconds");
    }
    if (!job.isSuccessful()) {
      throw new IOException("Failed to fix the file " + testFile);
    }
    LOG.info("[SMOKETEST] Finish blockfixing test file: " + testFile);
    // wait for the block to be reported
    startTime = System.currentTimeMillis();
    while (((DistributedFileSystem) fileSys).getLocatedBlocks(testPath, 0, Integer.MAX_VALUE).get(0)
        .isCorrupt() && System.currentTimeMillis() - startTime < timeOut) {
      Thread.sleep(SLEEP_TIME);
    }
    CRC32 newChk = new CRC32();
    FSDataInputStream readStm = fileSys.open(testPath);
    int num = 0;
    while (num >= 0) {
      num = readStm.read(b);
      if (num < 0) {
        break;
      }
      newChk.update(b, 0, num);
    }
    readStm.close();
    if (newChk.getValue() != checksum.getValue()) {
      throw new IOException(
          "Fixed file's checksum " + newChk.getValue() + " != original one " + checksum.getValue());
    }
    LOG.info("[SMOKETEST] Verification of fixed test file: " + testFile);
    return true;
  } catch (IOException ex) {
    LOG.error("Get IOException in SmokeTestThread", ex);
    ioe = ex;
    return false;
  } catch (Throwable ex) {
    LOG.error("Get Error in SmokeTestThread", ex);
    ioe = new IOException(ex);
    return false;
  } finally {
    try {
      if (fileSys != null) {
        fileSys.delete(testPath, true);
      }
    } catch (IOException ioe) {
      LOG.error("Get error during deletion", ioe);
    }
  }
}
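In the smoke test above, freshly constructed AtomicLong instances are handed to DistBlockIntegrityMonitor.startOneJob as mutable counters that the job can update in place. A minimal sketch of that "mutable long argument" idiom, with hypothetical method and parameter names:

import java.util.concurrent.atomic.AtomicLong;

public class OutParamSketch {
  // Hypothetical worker that reports progress through the counters it is given.
  static void processBatch(int items, AtomicLong processed, AtomicLong lastUpdateMillis) {
    for (int i = 0; i < items; i++) {
      processed.incrementAndGet();
      lastUpdateMillis.set(System.currentTimeMillis());
    }
  }

  public static void main(String[] args) {
    AtomicLong processed = new AtomicLong(0);
    AtomicLong lastUpdateMillis = new AtomicLong(System.currentTimeMillis());
    processBatch(10, processed, lastUpdateMillis);
    System.out.println(processed.get() + " items, last update at " + lastUpdateMillis.get());
  }
}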
From source file: org.apache.flume.sink.hdfs.BucketWriter.java
BucketWriter(long rollInterval, long rollSize, long rollCount, long batchSize, Context context,
    String filePath, String fileName, String inUsePrefix, String inUseSuffix, String fileSuffix,
    CompressionCodec codeC, CompressionType compType, HDFSWriter writer,
    ScheduledExecutorService timedRollerPool, PrivilegedExecutor proxyUser, SinkCounter sinkCounter,
    int idleTimeout, WriterCallback onCloseCallback, String onCloseCallbackPath, long callTimeout,
    ExecutorService callTimeoutPool, long retryInterval, int maxCloseTries) {
  this.rollInterval = rollInterval;
  this.rollSize = rollSize;
  this.rollCount = rollCount;
  this.batchSize = batchSize;
  this.filePath = filePath;
  this.fileName = fileName;
  this.inUsePrefix = inUsePrefix;
  this.inUseSuffix = inUseSuffix;
  this.fileSuffix = fileSuffix;
  this.codeC = codeC;
  this.compType = compType;
  this.writer = writer;
  this.timedRollerPool = timedRollerPool;
  this.proxyUser = proxyUser;
  this.sinkCounter = sinkCounter;
  this.idleTimeout = idleTimeout;
  this.onCloseCallback = onCloseCallback;
  this.onCloseCallbackPath = onCloseCallbackPath;
  this.callTimeout = callTimeout;
  this.callTimeoutPool = callTimeoutPool;
  fileExtensionCounter = new AtomicLong(clock.currentTimeMillis());
  this.retryInterval = retryInterval;
  this.maxRenameTries = maxCloseTries;
  isOpen = false;
  isUnderReplicated = false;
  this.writer.configure(context);
}
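BucketWriter seeds fileExtensionCounter with the current clock time so that each file roll gets a new, monotonically increasing extension. A stripped-down sketch of that naming scheme (the class and method names here are illustrative, not Flume's):

import java.util.concurrent.atomic.AtomicLong;

public class RollingFileNamer {
  // Seeding with the current time keeps names unique across process restarts,
  // as long as the counter cannot catch up to a later start time.
  private final AtomicLong fileExtensionCounter = new AtomicLong(System.currentTimeMillis());

  String nextFileName(String prefix) {
    return prefix + "." + fileExtensionCounter.getAndIncrement();
  }

  public static void main(String[] args) {
    RollingFileNamer namer = new RollingFileNamer();
    System.out.println(namer.nextFileName("events"));
    System.out.println(namer.nextFileName("events")); // strictly larger suffix
  }
}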
From source file: org.apache.druid.client.cache.MemcachedCache.java
public static MemcachedCache create(final MemcachedCacheConfig config) {
  final ConcurrentMap<String, AtomicLong> counters = new ConcurrentHashMap<>();
  final ConcurrentMap<String, AtomicLong> meters = new ConcurrentHashMap<>();
  final AbstractMonitor monitor = new AbstractMonitor() {
    final AtomicReference<Map<String, Long>> priorValues = new AtomicReference<Map<String, Long>>(
        new HashMap<String, Long>());

    @Override
    public boolean doMonitor(ServiceEmitter emitter) {
      final Map<String, Long> priorValues = this.priorValues.get();
      final Map<String, Long> currentValues = getCurrentValues();
      final ServiceMetricEvent.Builder builder = ServiceMetricEvent.builder();
      for (Map.Entry<String, Long> entry : currentValues.entrySet()) {
        emitter.emit(builder.setDimension("memcached metric", entry.getKey())
            .build("query/cache/memcached/total", entry.getValue()));
        final Long prior = priorValues.get(entry.getKey());
        if (prior != null) {
          emitter.emit(builder.setDimension("memcached metric", entry.getKey())
              .build("query/cache/memcached/delta", entry.getValue() - prior));
        }
      }
      if (!this.priorValues.compareAndSet(priorValues, currentValues)) {
        log.error("Prior value changed while I was reporting! updating anyways");
        this.priorValues.set(currentValues);
      }
      return true;
    }

    private Map<String, Long> getCurrentValues() {
      final ImmutableMap.Builder<String, Long> builder = ImmutableMap.builder();
      for (Map.Entry<String, AtomicLong> entry : counters.entrySet()) {
        builder.put(entry.getKey(), entry.getValue().get());
      }
      for (Map.Entry<String, AtomicLong> entry : meters.entrySet()) {
        builder.put(entry.getKey(), entry.getValue().get());
      }
      return builder.build();
    }
  };
  try {
    LZ4Transcoder transcoder = new LZ4Transcoder(config.getMaxObjectSize());
    // always use compression
    transcoder.setCompressionThreshold(0);
    OperationQueueFactory opQueueFactory;
    long maxQueueBytes = config.getMaxOperationQueueSize();
    if (maxQueueBytes > 0) {
      opQueueFactory = new MemcachedOperationQueueFactory(maxQueueBytes);
    } else {
      opQueueFactory = new LinkedOperationQueueFactory();
    }
    final Predicate<String> interesting = new Predicate<String>() {
      // See net.spy.memcached.MemcachedConnection.registerMetrics()
      private final Set<String> interestingMetrics = ImmutableSet.of(
          "[MEM] Reconnecting Nodes (ReconnectQueue)",
          //"[MEM] Shutting Down Nodes (NodesToShutdown)", // Busted
          "[MEM] Request Rate: All",
          "[MEM] Average Bytes written to OS per write",
          "[MEM] Average Bytes read from OS per read",
          "[MEM] Average Time on wire for operations (s)",
          "[MEM] Response Rate: All (Failure + Success + Retry)",
          "[MEM] Response Rate: Retry",
          "[MEM] Response Rate: Failure",
          "[MEM] Response Rate: Success");

      @Override
      public boolean apply(@Nullable String input) {
        return input != null && interestingMetrics.contains(input);
      }
    };
    final MetricCollector metricCollector = new MetricCollector() {
      @Override
      public void addCounter(String name) {
        if (!interesting.apply(name)) { return; }
        counters.putIfAbsent(name, new AtomicLong(0L));
        if (log.isDebugEnabled()) { log.debug("Add Counter [%s]", name); }
      }

      @Override
      public void removeCounter(String name) {
        if (log.isDebugEnabled()) { log.debug("Ignoring request to remove [%s]", name); }
      }

      @Override
      public void incrementCounter(String name) {
        if (!interesting.apply(name)) { return; }
        AtomicLong counter = counters.get(name);
        if (counter == null) {
          counters.putIfAbsent(name, new AtomicLong(0));
          counter = counters.get(name);
        }
        counter.incrementAndGet();
        if (log.isDebugEnabled()) { log.debug("Increment [%s]", name); }
      }

      @Override
      public void incrementCounter(String name, int amount) {
        if (!interesting.apply(name)) { return; }
        AtomicLong counter = counters.get(name);
        if (counter == null) {
          counters.putIfAbsent(name, new AtomicLong(0));
          counter = counters.get(name);
        }
        counter.addAndGet(amount);
        if (log.isDebugEnabled()) { log.debug("Increment [%s] %d", name, amount); }
      }

      @Override
      public void decrementCounter(String name) {
        if (!interesting.apply(name)) { return; }
        AtomicLong counter = counters.get(name);
        if (counter == null) {
          counters.putIfAbsent(name, new AtomicLong(0));
          counter = counters.get(name);
        }
        counter.decrementAndGet();
        if (log.isDebugEnabled()) { log.debug("Decrement [%s]", name); }
      }

      @Override
      public void decrementCounter(String name, int amount) {
        if (!interesting.apply(name)) { return; }
        AtomicLong counter = counters.get(name);
        if (counter == null) {
          counters.putIfAbsent(name, new AtomicLong(0L));
          counter = counters.get(name);
        }
        counter.addAndGet(-amount);
        if (log.isDebugEnabled()) { log.debug("Decrement [%s] %d", name, amount); }
      }

      @Override
      public void addMeter(String name) {
        if (!interesting.apply(name)) { return; }
        meters.putIfAbsent(name, new AtomicLong(0L));
        if (log.isDebugEnabled()) { log.debug("Adding meter [%s]", name); }
      }

      @Override
      public void removeMeter(String name) {
        if (!interesting.apply(name)) { return; }
        if (log.isDebugEnabled()) { log.debug("Ignoring request to remove meter [%s]", name); }
      }

      @Override
      public void markMeter(String name) {
        if (!interesting.apply(name)) { return; }
        AtomicLong meter = meters.get(name);
        if (meter == null) {
          meters.putIfAbsent(name, new AtomicLong(0L));
          meter = meters.get(name);
        }
        meter.incrementAndGet();
        if (log.isDebugEnabled()) { log.debug("Increment counter [%s]", name); }
      }

      @Override
      public void addHistogram(String name) {
        log.debug("Ignoring add histogram [%s]", name);
      }

      @Override
      public void removeHistogram(String name) {
        log.debug("Ignoring remove histogram [%s]", name);
      }

      @Override
      public void updateHistogram(String name, int amount) {
        log.debug("Ignoring update histogram [%s]: %d", name, amount);
      }
    };
    final ConnectionFactory connectionFactory = new MemcachedCustomConnectionFactoryBuilder()
        // 1000 repetitions gives us good distribution with murmur3_128
        // (approx < 5% difference in counts across nodes, with 5 cache nodes)
        .setKetamaNodeRepetitions(1000).setHashAlg(MURMUR3_128)
        .setProtocol(ConnectionFactoryBuilder.Protocol
            .valueOf(StringUtils.toUpperCase(config.getProtocol())))
        .setLocatorType(
            ConnectionFactoryBuilder.Locator.valueOf(StringUtils.toUpperCase(config.getLocator())))
        .setDaemon(true).setFailureMode(FailureMode.Cancel).setTranscoder(transcoder)
        .setShouldOptimize(true).setOpQueueMaxBlockTime(config.getTimeout())
        .setOpTimeout(config.getTimeout()).setReadBufferSize(config.getReadBufferSize())
        .setOpQueueFactory(opQueueFactory).setMetricCollector(metricCollector)
        .setEnableMetrics(MetricType.DEBUG) // Not as scary as it sounds
        .build();
    final List<InetSocketAddress> hosts = AddrUtil.getAddresses(config.getHosts());
    final Supplier<ResourceHolder<MemcachedClientIF>> clientSupplier;
    if (config.getNumConnections() > 1) {
      clientSupplier = new MemcacheClientPool(config.getNumConnections(),
          new Supplier<MemcachedClientIF>() {
            @Override
            public MemcachedClientIF get() {
              try {
                return new MemcachedClient(connectionFactory, hosts);
              } catch (IOException e) {
                log.error(e, "Unable to create memcached client");
                throw Throwables.propagate(e);
              }
            }
          });
    } else {
      clientSupplier = Suppliers
          .ofInstance(StupidResourceHolder.create(new MemcachedClient(connectionFactory, hosts)));
    }
    return new MemcachedCache(clientSupplier, config, monitor);
  } catch (IOException e) {
    throw Throwables.propagate(e);
  }
}
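The metric collector above lazily creates per-metric counters with a putIfAbsent-then-get sequence. On Java 8+ the same map-of-AtomicLong pattern is usually written with computeIfAbsent; this is not what the Druid code does, but it is an equivalent, more compact sketch of the idea:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

public class MetricCounters {
  private final ConcurrentMap<String, AtomicLong> counters = new ConcurrentHashMap<>();

  void increment(String name) {
    // Creates the counter at most once per key, even under concurrent callers.
    counters.computeIfAbsent(name, k -> new AtomicLong(0L)).incrementAndGet();
  }

  long get(String name) {
    AtomicLong counter = counters.get(name);
    return counter == null ? 0L : counter.get();
  }

  public static void main(String[] args) {
    MetricCounters metrics = new MetricCounters();
    metrics.increment("[MEM] Request Rate: All");
    metrics.increment("[MEM] Request Rate: All");
    System.out.println(metrics.get("[MEM] Request Rate: All")); // 2
  }
}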
From source file: org.apache.bookkeeper.common.util.TestBackoff.java
@Test
public void testDecorrelatedJitteredPolicy() throws Exception {
  long startMs = ThreadLocalRandom.current().nextLong(1L, 1000L);
  long maxMs = ThreadLocalRandom.current().nextLong(startMs, startMs * 2);
  Stream<Long> backoffs = Backoff.Jitter.of(DECORRELATED, startMs, maxMs, 10).toBackoffs();
  Iterator<Long> backoffIter = backoffs.iterator();
  assertTrue(backoffIter.hasNext());
  assertEquals(startMs, backoffIter.next().longValue());
  AtomicLong prevMs = new AtomicLong(startMs);
  backoffIter.forEachRemaining(backoffMs -> {
    assertTrue(backoffMs >= startMs);
    assertTrue(backoffMs <= prevMs.get() * 3);
    assertTrue(backoffMs <= maxMs);
    prevMs.set(backoffMs);
  });
}
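Here AtomicLong is not used for thread safety at all: it acts as a mutable long holder so the lambda passed to forEachRemaining can carry state between iterations, since local variables captured by a lambda must be effectively final. A minimal standalone sketch of that idiom:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

public class LambdaStateSketch {
  public static void main(String[] args) {
    List<Long> values = Arrays.asList(3L, 5L, 2L, 8L);
    AtomicLong prev = new AtomicLong(Long.MIN_VALUE);
    // A plain `long prev` could not be reassigned inside the lambda.
    values.forEach(v -> {
      if (v <= prev.get()) {
        System.out.println(v + " does not increase on " + prev.get());
      }
      prev.set(v);
    });
  }
}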
From source file: com.pinterest.pinlater.client.PinLaterQueryIssuer.java
private void issueEnqueueRequests(PinLater.ServiceIface iface) throws InterruptedException {
  Preconditions.checkNotNull(queueName, "Queue was not specified.");
  final AtomicLong queriesIssued = new AtomicLong(0);
  final Semaphore permits = new Semaphore(concurrency);
  while (numQueries == -1 || queriesIssued.get() < numQueries) {
    final PinLaterEnqueueRequest request = new PinLaterEnqueueRequest();
    request.setQueueName(queueName);
    for (int i = 0; i < batchSize; i++) {
      PinLaterJob job = new PinLaterJob(
          ByteBuffer.wrap(new String("task_" + random.nextInt(Integer.MAX_VALUE)).getBytes()));
      job.setPriority(priority);
      request.addToJobs(job);
    }
    final long startTimeNanos = System.nanoTime();
    queriesIssued.incrementAndGet();
    permits.acquire();
    iface.enqueueJobs(REQUEST_CONTEXT, request)
        .respond(new Function<Try<PinLaterEnqueueResponse>, BoxedUnit>() {
          @Override
          public BoxedUnit apply(Try<PinLaterEnqueueResponse> responseTry) {
            permits.release();
            statsLogger.requestComplete(Duration.fromNanoseconds(System.nanoTime() - startTimeNanos));
            if (responseTry.isThrow()) {
              LOG.info("Exception for request: " + request + " : " + ((Throw) responseTry).e());
            }
            return BoxedUnit.UNIT;
          }
        });
  }
  permits.acquire(concurrency);
  LOG.info("Enqueue queries issued: " + queriesIssued);
}
From source file: eu.eubrazilcc.lvl.service.rest.TaskResource.java
@Path("progress/{id}") @GET/*from ww w . j a va2 s .c om*/ @Produces(SseFeature.SERVER_SENT_EVENTS) public EventOutput getServerSentEvents(final @PathParam("id") String id, final @QueryParam("refresh") @DefaultValue("30") int refresh, final @QueryParam("token") @DefaultValue("") String token, final @Context HttpServletRequest request, final @Context HttpHeaders headers) { if (isBlank(id) || !REFRESH_RANGE.contains(refresh)) { throw new WebApplicationException("Missing required parameters", Response.Status.BAD_REQUEST); } OAuth2SecurityManager.login(request, null, isBlank(token) ? headers : ssehHttpHeaders(token), RESOURCE_NAME) .requiresPermissions("tasks:*:*:" + id.trim() + ":view"); // get from task storage final CancellableTask<?> task = TASK_STORAGE.get(fromString(id)); if (task == null) { throw new WebApplicationException("Element not found", Response.Status.NOT_FOUND); } final String client = getClientAddress(request); LOGGER.info("Subscribed to progress events: " + client); final AtomicLong eventId = new AtomicLong(0l); final EventOutput eventOutput = new EventOutput(); TASK_RUNNER.submit(new Callable<Void>() { @Override public Void call() throws Exception { try { do { final ListenableScheduledFuture<?> future = TASK_SCHEDULER.schedule( checkTaskProgress(eventOutput, eventId, task), eventId.getAndIncrement() == 0 ? 0 : refresh, SECONDS); future.get(); } while (!task.isDone()); } catch (Exception e) { LOGGER.error("Failed to get task status", e); } finally { try { eventOutput.close(); } catch (Exception ignored) { } LOGGER.info("Closing progress events where subscriber is: " + client); } return null; } }); return eventOutput; }
From source file: org.apache.hadoop.hbase.mapreduce.TestWALRecordReader.java
/**
 * Test basic functionality
 * @throws Exception
 */
@Test
public void testWALRecordReader() throws Exception {
  final WALFactory walfactory = new WALFactory(conf, null, getName());
  WAL log = walfactory.getWAL(info.getEncodedNameAsBytes());
  byte[] value = Bytes.toBytes("value");
  final AtomicLong sequenceId = new AtomicLong(0);
  WALEdit edit = new WALEdit();
  edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), System.currentTimeMillis(), value));
  long txid = log.append(htd, info, getWalKey(System.currentTimeMillis()), edit, sequenceId, true, null);
  log.sync(txid);
  Thread.sleep(1); // make sure 2nd log gets a later timestamp
  long secondTs = System.currentTimeMillis();
  log.rollWriter();
  edit = new WALEdit();
  edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), System.currentTimeMillis(), value));
  txid = log.append(htd, info, getWalKey(System.currentTimeMillis()), edit, sequenceId, true, null);
  log.sync(txid);
  log.shutdown();
  walfactory.shutdown();
  long thirdTs = System.currentTimeMillis();
  // should have 2 log files now
  WALInputFormat input = new WALInputFormat();
  Configuration jobConf = new Configuration(conf);
  jobConf.set("mapreduce.input.fileinputformat.inputdir", logDir.toString());
  // make sure both logs are found
  List<InputSplit> splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf));
  assertEquals(2, splits.size());
  // should return exactly one KV
  testSplit(splits.get(0), Bytes.toBytes("1"));
  // same for the 2nd split
  testSplit(splits.get(1), Bytes.toBytes("2"));
  // now test basic time ranges:
  // set an endtime, the 2nd log file can be ignored completely.
  jobConf.setLong(WALInputFormat.END_TIME_KEY, secondTs - 1);
  splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf));
  assertEquals(1, splits.size());
  testSplit(splits.get(0), Bytes.toBytes("1"));
  // now set a start time
  jobConf.setLong(WALInputFormat.END_TIME_KEY, Long.MAX_VALUE);
  jobConf.setLong(WALInputFormat.START_TIME_KEY, thirdTs);
  splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf));
  // both logs need to be considered
  assertEquals(2, splits.size());
  // but both readers skip all edits
  testSplit(splits.get(0));
  testSplit(splits.get(1));
}
From source file: com.addthis.hydra.data.tree.concurrent.ConcurrentTree.java
ConcurrentTree(File root, int numDeletionThreads, int cleanQSize, int maxCacheSize, int maxPageSize,
    PageFactory factory) throws Exception {
  LessFiles.initDirectory(root);
  this.root = root;
  long start = System.currentTimeMillis();
  // setup metering
  meter = new Meter<>(METERTREE.values());
  for (METERTREE m : METERTREE.values()) {
    meter.addCountMetric(m, m.toString());
  }
  // create meter logging thread
  if (TreeCommonParameters.meterLogging > 0) {
    logger = new MeterFileLogger(this, root, "tree-metrics", TreeCommonParameters.meterLogging,
        TreeCommonParameters.meterLogLines);
  } else {
    logger = null;
  }
  source = new PageDB.Builder<>(root, ConcurrentTreeNode.class, maxPageSize, maxCacheSize)
      .pageFactory(factory).build();
  source.setCacheMem(TreeCommonParameters.maxCacheMem);
  source.setPageMem(TreeCommonParameters.maxPageMem);
  source.setMemSampleInterval(TreeCommonParameters.memSample);
  // create cache
  cache = new MediatedEvictionConcurrentHashMap.Builder<CacheKey, ConcurrentTreeNode>()
      .mediator(new CacheMediator(source)).maximumWeightedCapacity(cleanQSize).build();
  // get stored next db id
  idFile = new File(root, "nextID");
  if (idFile.exists() && idFile.isFile() && idFile.length() > 0) {
    nextDBID = new AtomicLong(Long.parseLong(LessBytes.toString(LessFiles.read(idFile))));
  } else {
    nextDBID = new AtomicLong(1);
  }
  // get tree root
  ConcurrentTreeNode dummyRoot = ConcurrentTreeNode.getTreeRoot(this);
  treeRootNode = dummyRoot.getOrCreateEditableNode("root");
  treeTrashNode = dummyRoot.getOrCreateEditableNode("trash");
  treeTrashNode.requireNodeDB();
  deletionThreadPool = Executors.newScheduledThreadPool(numDeletionThreads,
      new NamedThreadFactory(scope + "-deletion-", true));
  for (int i = 0; i < numDeletionThreads; i++) {
    deletionThreadPool.scheduleAtFixedRate(
        new ConcurrentTreeDeletionTask(this, closed::get, LoggerFactory
            .getLogger(ConcurrentTreeDeletionTask.class.getName() + ".Background")),
        i, deletionThreadSleepMillis, TimeUnit.MILLISECONDS);
  }
  long openTime = System.currentTimeMillis() - start;
  log.info("dir={} root={} trash={} cache={} nextdb={} openms={}", root, treeRootNode, treeTrashNode,
      TreeCommonParameters.cleanQMax, nextDBID, openTime);
}
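ConcurrentTree restores nextDBID from a small nextID file when one exists and otherwise starts at 1, so database ids keep increasing across restarts. A self-contained sketch of that load-or-default step using only java.nio (LessFiles/LessBytes are Hydra utilities and are not assumed here):

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.concurrent.atomic.AtomicLong;

public class PersistentIdCounter {
  static AtomicLong loadNextId(Path idFile) throws IOException {
    if (Files.isRegularFile(idFile) && Files.size(idFile) > 0) {
      String stored = new String(Files.readAllBytes(idFile), StandardCharsets.UTF_8).trim();
      return new AtomicLong(Long.parseLong(stored)); // resume where the last run stopped
    }
    return new AtomicLong(1); // fresh tree: ids start at 1
  }

  public static void main(String[] args) throws IOException {
    Path idFile = Files.createTempFile("nextID", null);
    Files.write(idFile, "42".getBytes(StandardCharsets.UTF_8));
    System.out.println(loadNextId(idFile).get()); // 42
  }
}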