Example usage for the java.util.concurrent.atomic.AtomicLong constructor AtomicLong(long)

List of usage examples for the java.util.concurrent.atomic.AtomicLong constructor AtomicLong(long)

Introduction

On this page you can find example usage for the java.util.concurrent.atomic.AtomicLong constructor AtomicLong(long initialValue).

Prototype

public AtomicLong(long initialValue) 

Document

Creates a new AtomicLong with the given initial value.
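
A minimal sketch of this constructor in isolation (class and variable names here are illustrative, not taken from the examples below): the counter starts at the given value and can then be read and updated atomically from multiple threads.

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongExample {
    public static void main(String[] args) {
        // Start the counter at 100 instead of the default 0 used by the no-arg constructor.
        AtomicLong counter = new AtomicLong(100L);

        long before = counter.getAndIncrement(); // returns 100, counter is now 101
        counter.addAndGet(5L);                   // counter is now 106

        System.out.println(before + " -> " + counter.get()); // prints "100 -> 106"
    }
}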

Usage

From source file:sx.blah.discord.handle.impl.obj.Channel.java

@Override
public MessageHistory getMessageHistory(int messageCount) {
    if (messageCount <= messages.size()) { // we already have all of the wanted messages in the cache
        return new MessageHistory(messages.values().stream().sorted(new MessageComparator(true))
                .limit(messageCount).collect(Collectors.toList()));
    } else {
        List<IMessage> retrieved = new ArrayList<>(messageCount);
        AtomicLong lastMessage = new AtomicLong(DiscordUtils.getSnowflakeFromTimestamp(Instant.now()));
        int chunkSize = messageCount < MESSAGE_CHUNK_COUNT ? messageCount : MESSAGE_CHUNK_COUNT;

        while (retrieved.size() < messageCount) { // while we don't yet have messageCount messages
            IMessage[] chunk = getHistory(lastMessage.get(), chunkSize);

            if (chunk.length == 0)
                break;

            lastMessage.set(chunk[chunk.length - 1].getLongID());
            Collections.addAll(retrieved, chunk);
        }

        return new MessageHistory(
                retrieved.size() > messageCount ? retrieved.subList(0, messageCount) : retrieved);
    }
}

From source file:io.druid.data.input.impl.PrefetchableTextFilesFirehoseFactory.java

@Override
public Firehose connect(StringInputRowParser firehoseParser, File temporaryDirectory) throws IOException {
    if (maxCacheCapacityBytes == 0 && maxFetchCapacityBytes == 0) {
        return super.connect(firehoseParser, temporaryDirectory);
    }

    if (objects == null) {
        objects = ImmutableList.copyOf(Preconditions.checkNotNull(initObjects(), "objects"));
    }

    Preconditions.checkState(temporaryDirectory.exists(), "temporaryDirectory[%s] does not exist",
            temporaryDirectory);
    Preconditions.checkState(temporaryDirectory.isDirectory(), "temporaryDirectory[%s] is not a directory",
            temporaryDirectory);

    // fetchExecutor is responsible for background data fetching
    final ExecutorService fetchExecutor = createFetchExecutor();

    return new FileIteratingFirehose(new Iterator<LineIterator>() {
        // When prefetching is enabled, fetchFiles and nextFetchIndex are updated by the fetchExecutor thread, but
        // read by both the main thread (in hasNext()) and the fetchExecutor thread (in fetch()). To guarantee that
        // fetchFiles and nextFetchIndex are updated atomically, this lock must be held before updating
        // them.
        private final Object fetchLock = new Object();
        private final LinkedBlockingQueue<FetchedFile> fetchFiles = new LinkedBlockingQueue<>();

        // Number of bytes of the currently fetched files.
        // This is updated when a file is successfully fetched or a fetched file is deleted.
        private final AtomicLong fetchedBytes = new AtomicLong(0);
        private final boolean cacheInitialized;
        private final boolean prefetchEnabled;

        private Future<Void> fetchFuture;
        private int cacheIterateIndex;
        // nextFetchIndex indicates which object should be downloaded when fetch is triggered.
        private int nextFetchIndex;

        {
            cacheInitialized = totalCachedBytes > 0;
            prefetchEnabled = maxFetchCapacityBytes > 0;

            if (cacheInitialized) {
                nextFetchIndex = cacheFiles.size();
            }
            if (prefetchEnabled) {
                fetchIfNeeded(totalCachedBytes);
            }
        }

        private void fetchIfNeeded(long remainingBytes) {
            if ((fetchFuture == null || fetchFuture.isDone()) && remainingBytes <= prefetchTriggerBytes) {
                fetchFuture = fetchExecutor.submit(() -> {
                    fetch();
                    return null;
                });
            }
        }

        /**
         * Fetch objects to a local disk up to {@link PrefetchableTextFilesFirehoseFactory#maxFetchCapacityBytes}.
         * This method is not thread safe and must be called by a single thread.  Note that even if
         * {@link PrefetchableTextFilesFirehoseFactory#maxFetchCapacityBytes} is 0, at least one file is always fetched.
         * This is to simplify the design, and should be improved when our client implementations for cloud storage
         * like S3 support range scans.
         */
        private void fetch() throws Exception {
            for (int i = nextFetchIndex; i < objects.size()
                    && fetchedBytes.get() <= maxFetchCapacityBytes; i++) {
                final ObjectType object = objects.get(i);
                LOG.info("Fetching object[%s], fetchedBytes[%d]", object, fetchedBytes.get());
                final File outFile = File.createTempFile(FETCH_FILE_PREFIX, null, temporaryDirectory);
                fetchedBytes.addAndGet(download(object, outFile, 0));
                synchronized (fetchLock) {
                    fetchFiles.put(new FetchedFile(object, outFile));
                    nextFetchIndex++;
                }
            }
        }

        /**
         * Downloads an object. It retries downloading up to {@link PrefetchableTextFilesFirehoseFactory#maxFetchRetry}
         * times and throws an exception if every attempt fails.
         *
         * @param object   an object to be downloaded
         * @param outFile  a file in which the object data is stored
         * @param tryCount current retry count
         *
         * @return number of downloaded bytes
         *
         * @throws IOException
         */
        private long download(ObjectType object, File outFile, int tryCount) throws IOException {
            try (final InputStream is = openObjectStream(object);
                    final CountingOutputStream cos = new CountingOutputStream(new FileOutputStream(outFile))) {
                IOUtils.copy(is, cos);
                return cos.getCount();
            } catch (IOException e) {
                final int nextTry = tryCount + 1;
                if (!Thread.currentThread().isInterrupted() && nextTry < maxFetchRetry) {
                    LOG.error(e, "Failed to download object[%s], retrying (%d of %d)", object, nextTry,
                            maxFetchRetry);
                    outFile.delete();
                    return download(object, outFile, nextTry);
                } else {
                    LOG.error(e, "Failed to download object[%s], retries exhausted, aborting", object);
                    throw e;
                }
            }
        }

        @Override
        public boolean hasNext() {
            synchronized (fetchLock) {
                return (cacheInitialized && cacheIterateIndex < cacheFiles.size()) || !fetchFiles.isEmpty()
                        || nextFetchIndex < objects.size();
            }
        }

        @Override
        public LineIterator next() {
            if (!hasNext()) {
                throw new NoSuchElementException();
            }

            // If fetch() fails, hasNext() always returns true because nextFetchIndex must be smaller than the number
            // of objects, which means next() is always called. The method below checks whether fetch() threw an
            // exception and propagates it if one exists.
            checkFetchException();

            final OpenedObject openedObject;

            try {
                // Check cache first
                if (cacheInitialized && cacheIterateIndex < cacheFiles.size()) {
                    final FetchedFile fetchedFile = cacheFiles.get(cacheIterateIndex++);
                    openedObject = new OpenedObject(fetchedFile, getNoopCloser());
                } else if (prefetchEnabled) {
                    openedObject = openObjectFromLocal();
                } else {
                    openedObject = openObjectFromRemote();
                }

                final InputStream stream = wrapObjectStream(openedObject.object, openedObject.objectStream);

                return new ResourceCloseableLineIterator(new InputStreamReader(stream, Charsets.UTF_8),
                        openedObject.resourceCloser);
            } catch (IOException e) {
                throw Throwables.propagate(e);
            }
        }

        private void checkFetchException() {
            if (fetchFuture != null && fetchFuture.isDone()) {
                try {
                    fetchFuture.get();
                    fetchFuture = null;
                } catch (InterruptedException | ExecutionException e) {
                    throw Throwables.propagate(e);
                }
            }
        }

        private OpenedObject openObjectFromLocal() throws IOException {
            final FetchedFile fetchedFile;
            final Closeable resourceCloser;

            if (!fetchFiles.isEmpty()) {
                // If there are already fetched files, use them
                fetchedFile = fetchFiles.poll();
                resourceCloser = cacheIfPossibleAndGetCloser(fetchedFile, fetchedBytes);
                fetchIfNeeded(fetchedBytes.get());
            } else {
                // Otherwise, wait for fetching
                try {
                    fetchIfNeeded(fetchedBytes.get());
                    fetchedFile = fetchFiles.poll(fetchTimeout, TimeUnit.MILLISECONDS);
                    if (fetchedFile == null) {
                        // Check whether the latest fetch failed
                        checkFetchException();
                        // Or throw a timeout exception
                        throw new RuntimeException(new TimeoutException());
                    }
                    resourceCloser = cacheIfPossibleAndGetCloser(fetchedFile, fetchedBytes);
                    // trigger fetch again for subsequent next() calls
                    fetchIfNeeded(fetchedBytes.get());
                } catch (InterruptedException e) {
                    throw Throwables.propagate(e);
                }
            }
            return new OpenedObject(fetchedFile, resourceCloser);
        }

        private OpenedObject openObjectFromRemote() throws IOException {
            final OpenedObject openedObject;
            final Closeable resourceCloser = getNoopCloser();

            if (totalCachedBytes < maxCacheCapacityBytes) {
                LOG.info("Caching object[%s]", objects.get(nextFetchIndex));
                try {
                    // Since maxFetchCapacityBytes is 0, at most one file is fetched.
                    fetch();
                    FetchedFile fetchedFile = fetchFiles.poll();
                    if (fetchedFile == null) {
                        throw new ISE("Cannot fetch object[%s]", objects.get(nextFetchIndex));
                    }
                    cacheIfPossible(fetchedFile);
                    fetchedBytes.addAndGet(-fetchedFile.length());
                    openedObject = new OpenedObject(fetchedFile, resourceCloser);
                } catch (Exception e) {
                    throw Throwables.propagate(e);
                }
            } else {
                final ObjectType object = objects.get(nextFetchIndex++);
                LOG.info("Reading object[%s]", object);
                openedObject = new OpenedObject(object, openObjectStream(object), resourceCloser);
            }
            return openedObject;
        }
    }, firehoseParser, () -> {
        fetchExecutor.shutdownNow();
        try {
            Preconditions.checkState(fetchExecutor.awaitTermination(fetchTimeout, TimeUnit.MILLISECONDS));
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new ISE("Failed to shutdown fetch executor during close");
        }
    });
}

From source file:org.apache.usergrid.persistence.index.impl.EntityIndexTest.java

@Test
public void testIndexThreads() throws IOException {

    long now = System.currentTimeMillis();
    final int threads = 20;
    final int size = 30;

    final String entityType = "thing";

    final CountDownLatch latch = new CountDownLatch(threads);
    final AtomicLong failTime = new AtomicLong(0);
    InputStream is = this.getClass().getResourceAsStream("/sample-large.json");
    ObjectMapper mapper = new ObjectMapper();
    final List<Object> sampleJson = mapper.readValue(is, new TypeReference<List<Object>>() {
    });
    for (int i = 0; i < threads; i++) {

        final IndexEdge indexEdge = new IndexEdgeImpl(appId, "things", SearchEdge.NodeType.SOURCE, i);

        Thread thread = new Thread(() -> {
            try {

                EntityIndexBatch batch = entityIndex.createBatch();
                insertJsonBlob(sampleJson, batch, entityType, indexEdge, size, 0);
                indexProducer.put(batch.build()).subscribe();
            } catch (Exception e) {
                synchronized (failTime) {
                    if (failTime.get() == 0) {
                        failTime.set(System.currentTimeMillis());
                    }
                }
                System.out.println(e.toString());
                fail("threw exception");
            } finally {
                latch.countDown();
            }
        });
        thread.start();
    }
    try {
        latch.await();
    } catch (InterruptedException ie) {
        throw new RuntimeException(ie);
    }
    assertTrue("system must have failed at " + (failTime.get() - now), failTime.get() == 0);
}

From source file:org.apache.hadoop.hbase.client.TestAsyncTable.java

@Test
public void testAppend() throws InterruptedException, ExecutionException {
    AsyncTableBase table = getTable.get();
    int count = 10;
    CountDownLatch latch = new CountDownLatch(count);
    char suffix = ':';
    AtomicLong suffixCount = new AtomicLong(0L);
    IntStream.range(0, count).forEachOrdered(i -> table
            .append(new Append(row).add(FAMILY, QUALIFIER, Bytes.toBytes("" + i + suffix))).thenAccept(r -> {
                suffixCount.addAndGet(
                        Bytes.toString(r.getValue(FAMILY, QUALIFIER)).chars().filter(x -> x == suffix).count());
                latch.countDown();
            }));
    latch.await();
    assertEquals((1 + count) * count / 2, suffixCount.get());
    String value = Bytes
            .toString(table.get(new Get(row).addColumn(FAMILY, QUALIFIER)).get().getValue(FAMILY, QUALIFIER));
    int[] actual = Arrays.asList(value.split("" + suffix)).stream().mapToInt(Integer::parseInt).sorted()
            .toArray();
    assertArrayEquals(IntStream.range(0, count).toArray(), actual);
}
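
A common reason an AtomicLong appears in tests like the two above: local variables captured by a lambda must be effectively final, so a mutable holder such as AtomicLong is used to accumulate results from work running on other threads. A minimal sketch of that pattern, with hypothetical names (not taken from either test):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicLong;

public class LambdaAccumulatorSketch {
    public static void main(String[] args) throws InterruptedException {
        int threads = 10;
        CountDownLatch latch = new CountDownLatch(threads);
        // The reference is effectively final, so lambdas can capture it,
        // while the value it holds is updated atomically from every thread.
        AtomicLong total = new AtomicLong(0L);

        for (int i = 0; i < threads; i++) {
            final long value = i;
            new Thread(() -> {
                total.addAndGet(value);
                latch.countDown();
            }).start();
        }

        latch.await();
        System.out.println(total.get()); // 45 = 0 + 1 + ... + 9
    }
}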

From source file:org.a3badran.platform.logging.writer.MetricsWriter.java

private void updateMetrics(RequestScope scope, String prefixName, long tt, long count, boolean updateCounters) {
    String name = Strings.isNullOrEmpty(prefixName) ? scope.getName() : prefixName + "." + scope.getName();

    String metricTotalCount = name + ".totalCount";
    scopeTotalMetrics.putIfAbsent(metricTotalCount, new AtomicLong(0));
    scopeTotalMetrics.get(metricTotalCount).addAndGet(count);

    String serviceTotalCount = appName + ".totalCount";
    appTotalMetrics.putIfAbsent(serviceTotalCount, new AtomicLong(0));
    appTotalMetrics.get(serviceTotalCount).addAndGet(count);

    if (!Strings.isNullOrEmpty(scope.getError())) {
        String metricErrorCount = name + ".errorCount";
        scopeTotalMetrics.putIfAbsent(metricErrorCount, new AtomicLong(0));
        scopeTotalMetrics.get(metricErrorCount).addAndGet(1);

        String serviceErrorCount = appName + ".errorCount";
        appTotalMetrics.putIfAbsent(serviceErrorCount, new AtomicLong(0));
        appTotalMetrics.get(serviceErrorCount).addAndGet(1);
    } else if (!Strings.isNullOrEmpty(scope.getWarninge())) {
        String metricWarningCount = name + ".warningCount";
        scopeTotalMetrics.putIfAbsent(metricWarningCount, new AtomicLong(0));
        scopeTotalMetrics.get(metricWarningCount).addAndGet(1);

        String serviceWarningCount = appName + ".warningCount";
        appTotalMetrics.putIfAbsent(serviceWarningCount, new AtomicLong(0));
        appTotalMetrics.get(serviceWarningCount).addAndGet(1);
    }

    String metricTotalTime = name + ".totalTime";
    scopeTotalMetrics.putIfAbsent(metricTotalTime, new AtomicLong(0));
    scopeTotalMetrics.get(metricTotalTime).addAndGet(tt);

    // sample data
    if (random.nextFloat() <= sampleRate) {
        sampleMetrics.putIfAbsent(name, new SynchronizedDescriptiveStatistics(sampleWindow));
        sampleMetrics.get(name).addValue(tt);

        // sample counters
        if (updateCounters == true && scope.getCounters() != null) {
            for (Map.Entry<String, AtomicLong> entry : scope.getCounters().entrySet()) {
                String counterName = String.format("%s.%s", name, entry.getKey());
                sampleCounterMetrics.putIfAbsent(counterName,
                        new SynchronizedDescriptiveStatistics(sampleWindow));
                sampleCounterMetrics.get(counterName).addValue(entry.getValue().doubleValue());
            }
        }
    }

}
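
A side note on the pattern above: putIfAbsent followed by get allocates a throwaway new AtomicLong(0) on every call, even when the counter already exists. Assuming the metric maps are ConcurrentHashMaps (their declarations are not shown in this snippet), the same update can be written with computeIfAbsent, which only creates the AtomicLong for a missing key. A hedged sketch, not the library's actual code:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

public class CounterMapSketch {
    // Hypothetical stand-in for scopeTotalMetrics / appTotalMetrics.
    private final ConcurrentMap<String, AtomicLong> totals = new ConcurrentHashMap<>();

    public void add(String name, long delta) {
        // The mapping function runs only when the key is absent,
        // so no AtomicLong is allocated for counters that already exist.
        totals.computeIfAbsent(name, k -> new AtomicLong(0)).addAndGet(delta);
    }
}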

From source file:org.apache.hadoop.hbase.wal.TestWALFactory.java

/**
 * Just write multiple logs then split.  Before fix for HADOOP-2283, this
 * would fail.
 * @throws IOException
 */
@Test
public void testSplit() throws IOException {
    final TableName tableName = TableName.valueOf(currentTest.getMethodName());
    final byte[] rowName = tableName.getName();
    final Path logdir = new Path(hbaseDir, DefaultWALProvider.getWALDirectoryName(currentTest.getMethodName()));
    Path oldLogDir = new Path(hbaseDir, HConstants.HREGION_OLDLOGDIR_NAME);
    final int howmany = 3;
    HRegionInfo[] infos = new HRegionInfo[3];
    Path tabledir = FSUtils.getTableDir(hbaseDir, tableName);
    fs.mkdirs(tabledir);
    for (int i = 0; i < howmany; i++) {
        infos[i] = new HRegionInfo(tableName, Bytes.toBytes("" + i), Bytes.toBytes("" + (i + 1)), false);
        fs.mkdirs(new Path(tabledir, infos[i].getEncodedName()));
        LOG.info("allo " + new Path(tabledir, infos[i].getEncodedName()).toString());
    }
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("column"));

    // Add edits for three regions.
    final AtomicLong sequenceId = new AtomicLong(1);
    for (int ii = 0; ii < howmany; ii++) {
        for (int i = 0; i < howmany; i++) {
            final WAL log = wals.getWAL(infos[i].getEncodedNameAsBytes());
            for (int j = 0; j < howmany; j++) {
                WALEdit edit = new WALEdit();
                byte[] family = Bytes.toBytes("column");
                byte[] qualifier = Bytes.toBytes(Integer.toString(j));
                byte[] column = Bytes.toBytes("column:" + Integer.toString(j));
                edit.add(new KeyValue(rowName, family, qualifier, System.currentTimeMillis(), column));
                LOG.info("Region " + i + ": " + edit);
                log.append(htd, infos[i],
                        new WALKey(infos[i].getEncodedNameAsBytes(), tableName, System.currentTimeMillis()),
                        edit, sequenceId, true, null);
            }
            log.sync();
            log.rollWriter();
        }
    }
    wals.shutdown();
    List<Path> splits = WALSplitter.split(hbaseDir, logdir, oldLogDir, fs, conf, wals);
    verifySplits(splits, howmany);
}

From source file:org.apache.hadoop.hbase.wal.TestDefaultWALProvider.java

@Test
public void testLogCleaning() throws Exception {
    LOG.info("testLogCleaning");
    final HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testLogCleaning"))
            .addFamily(new HColumnDescriptor("row"));
    final HTableDescriptor htd2 = new HTableDescriptor(TableName.valueOf("testLogCleaning2"))
            .addFamily(new HColumnDescriptor("row"));
    final Configuration localConf = new Configuration(conf);
    localConf.set(WALFactory.WAL_PROVIDER, DefaultWALProvider.class.getName());
    final WALFactory wals = new WALFactory(localConf, null, currentTest.getMethodName());
    final AtomicLong sequenceId = new AtomicLong(1);
    try {
        HRegionInfo hri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW,
                HConstants.EMPTY_END_ROW);
        HRegionInfo hri2 = new HRegionInfo(htd2.getTableName(), HConstants.EMPTY_START_ROW,
                HConstants.EMPTY_END_ROW);
        // we want to mix edits from regions, so pick our own identifier.
        final WAL log = wals.getWAL(UNSPECIFIED_REGION);

        // Add a single edit and make sure that rolling won't remove the file
        // Before HBASE-3198 it used to delete it
        addEdits(log, hri, htd, 1, sequenceId);
        log.rollWriter();
        assertEquals(1, DefaultWALProvider.getNumRolledLogFiles(log));

        // See if there's anything wrong with more than 1 edit
        addEdits(log, hri, htd, 2, sequenceId);
        log.rollWriter();
        assertEquals(2, DefaultWALProvider.getNumRolledLogFiles(log));

        // Now mix edits from 2 regions, still no flushing
        addEdits(log, hri, htd, 1, sequenceId);
        addEdits(log, hri2, htd2, 1, sequenceId);
        addEdits(log, hri, htd, 1, sequenceId);
        addEdits(log, hri2, htd2, 1, sequenceId);
        log.rollWriter();
        assertEquals(3, DefaultWALProvider.getNumRolledLogFiles(log));

        // Flush the first region, we expect to see the first two files getting
        // archived. We need to append something or writer won't be rolled.
        addEdits(log, hri2, htd2, 1, sequenceId);
        log.startCacheFlush(hri.getEncodedNameAsBytes(), htd.getFamiliesKeys());
        log.completeCacheFlush(hri.getEncodedNameAsBytes());
        log.rollWriter();
        assertEquals(2, DefaultWALProvider.getNumRolledLogFiles(log));

        // Flush the second region, which removes all the remaining output files
        // since the oldest was completely flushed and the two others only contain
        // flush information
        addEdits(log, hri2, htd2, 1, sequenceId);
        log.startCacheFlush(hri2.getEncodedNameAsBytes(), htd2.getFamiliesKeys());
        log.completeCacheFlush(hri2.getEncodedNameAsBytes());
        log.rollWriter();
        assertEquals(0, DefaultWALProvider.getNumRolledLogFiles(log));
    } finally {
        if (wals != null) {
            wals.close();
        }
    }
}

From source file:org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.java

OpenFileCtx(HdfsDataOutputStream fos, Nfs3FileAttributes latestAttr, String dumpFilePath, DFSClient client,
        IdUserGroup iug) {
    this.fos = fos;
    this.latestAttr = latestAttr;
    // We use the ReverseComparatorOnMin as the comparator of the map. In this
    // way, we first dump the data with larger offset. In the meanwhile, we
    // retrieve the last element to write back to HDFS.
    pendingWrites = new ConcurrentSkipListMap<OffsetRange, WriteCtx>(OffsetRange.ReverseComparatorOnMin);

    pendingCommits = new ConcurrentSkipListMap<Long, CommitCtx>();

    updateLastAccessTime();
    activeState = true;
    asyncStatus = false;
    asyncWriteBackStartOffset = 0;
    dumpOut = null;
    raf = null;
    nonSequentialWriteInMemory = new AtomicLong(0);

    this.dumpFilePath = dumpFilePath;
    enabledDump = dumpFilePath == null ? false : true;
    nextOffset = new AtomicLong();
    nextOffset.set(latestAttr.getSize());
    try {
        assert (nextOffset.get() == this.fos.getPos());
    } catch (IOException e) {
        // best-effort position check; ignore failures reading the stream position
    }
    dumpThread = null;
    this.client = client;
    this.iug = iug;
}

From source file:com.amazon.alexa.avs.AVSController.java

public AVSController(ExpectSpeechListener listenHandler, AVSAudioPlayerFactory audioFactory,
        AlertManagerFactory alarmFactory, AVSClientFactory avsClientFactory,
        DialogRequestIdAuthority dialogRequestIdAuthority, boolean wakeWordAgentEnabled,
        WakeWordIPCFactory wakewordIPCFactory, WakeWordDetectedHandler wakeWakeDetectedHandler)
        throws Exception {

    this.wakeWordAgentEnabled = wakeWordAgentEnabled;
    this.wakeWordDetectedHandler = wakeWakeDetectedHandler;

    if (this.wakeWordAgentEnabled) {
        try {
            log.info("Creating Wake Word IPC | port number: " + WAKE_WORD_AGENT_PORT_NUMBER);
            this.wakeWordIPC = wakewordIPCFactory.createWakeWordIPC(this, WAKE_WORD_AGENT_PORT_NUMBER);
            this.wakeWordIPC.init();
            Thread.sleep(1000);
            log.info("Created Wake Word IPC ok.");
        } catch (IOException e) {
            log.error("Error creating Wake Word IPC ok.", e);
        }
    }

    initializeMicrophone();

    this.player = audioFactory.getAudioPlayer(this);
    this.player.registerAlexaSpeechListener(this);
    this.dialogRequestIdAuthority = dialogRequestIdAuthority;
    speechRequestAudioPlayerPauseController = new SpeechRequestAudioPlayerPauseController(player);

    expectSpeechListeners = new HashSet<ExpectSpeechListener>(
            Arrays.asList(listenHandler, speechRequestAudioPlayerPauseController));
    dependentQueue = new LinkedBlockingDeque<>();

    independentQueue = new LinkedBlockingDeque<>();

    DirectiveEnqueuer directiveEnqueuer = new DirectiveEnqueuer(dialogRequestIdAuthority, dependentQueue,
            independentQueue);

    avsClient = avsClientFactory.getAVSClient(directiveEnqueuer, this);

    alertManager = alarmFactory.getAlertManager(this, this, AlertsFileDataStore.getInstance());

    // Ensure that we have attempted to finish loading all alarms from file before sending
    // synchronize state
    alertManager.loadFromDisk(new ResultListener() {
        @Override
        public void onSuccess() {
            sendSynchronizeStateEvent();
        }

        @Override
        public void onFailure() {
            sendSynchronizeStateEvent();
        }
    });

    // ensure we notify AVS of playbackStopped on app exit
    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            player.stop();
            avsClient.shutdown();
        }
    });

    dependentDirectiveThread = new BlockableDirectiveThread(dependentQueue, this, "DependentDirectiveThread");
    independentDirectiveThread = new BlockableDirectiveThread(independentQueue, this,
            "IndependentDirectiveThread");

    lastUserInteractionTimestampSeconds = new AtomicLong(System.currentTimeMillis() / MILLISECONDS_PER_SECOND);
    scheduledExecutor.scheduleAtFixedRate(new UserInactivityReport(), USER_INACTIVITY_REPORT_PERIOD_HOURS,
            USER_INACTIVITY_REPORT_PERIOD_HOURS, TimeUnit.HOURS);
}

From source file:com.alibaba.napoli.metamorphosis.client.extension.producer.LocalMessageStorageManager.java

/**
 * Recover messages stored locally for the given topic and partition.
 * Only one recover task runs per topic/partition at a time; if a task is
 * already running for this key, no new task is submitted.
 * 
 * @param topic
 * @param partition
 * @param recoverer
 *            callback used to handle each recovered message
 * @return true if a new recover task was submitted, false if one was already running
 * */
@Override
public boolean recover(final String topic, final Partition partition, final MessageRecoverer recoverer) {

    final String name = this.generateKey(topic, partition);
    final FutureTask<Boolean> recoverTask = new FutureTask<Boolean>(new Callable<Boolean>() {

        @Override
        public Boolean call() throws Exception {
            final AtomicLong count = new AtomicLong(0);
            try {

                final Store store = LocalMessageStorageManager.this.getOrCreateStore(topic, partition);

                this.innerRecover(store, recoverer, count, name);
            } catch (final Throwable e) {
                log.error("SendRecover?????,name=" + name, e);
            } finally {
                log.info("SendRecover????,name=" + name + ",???"
                        + count.get() + "?");
                LocalMessageStorageManager.this.topicRecoverTaskMap.remove(name);
            }
            return true;
        }

        private void innerRecover(final Store store, final MessageRecoverer recoverer, final AtomicLong count,
                final String name) throws IOException, Exception {
            final Iterator<byte[]> it = store.iterator();
            while (it.hasNext()) {
                final byte[] key = it.next();
                final Message msg = (Message) LocalMessageStorageManager.this.deserializer
                        .decodeObject(store.get(key));
                recoverer.handle(msg);
                try {
                    store.remove(key);
                    count.incrementAndGet();
                    if (count.get() % 20000 == 0) {
                        log.info("SendRecover " + name + "????:" + count.get());
                    }
                } catch (final IOException e) {
                    log.error("SendRecover remove message failed", e);
                }
            }
        }
    });

    final FutureTask<Boolean> ret = this.topicRecoverTaskMap.putIfAbsent(name, recoverTask);
    if (ret == null) {
        this.threadPoolExecutor.submit(recoverTask);
        return true;
    } else {
        if (log.isDebugEnabled()) {
            log.debug("SendRecover?????,????,name=" + name);
        }
        return false;
    }

}