Example usage for java.util.concurrent.atomic.AtomicLong#AtomicLong(long)

Introduction

On this page you can find usage examples for the java.util.concurrent.atomic.AtomicLong#AtomicLong(long) constructor.

Prototype

public AtomicLong(long initialValue) 

Document

Creates a new AtomicLong with the given initial value.
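
A minimal self-contained sketch of the constructor before the full examples (the class name and values here are illustrative):

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongDemo {
    public static void main(String[] args) {
        AtomicLong counter = new AtomicLong(100L); // start at 100 instead of the default 0
        counter.incrementAndGet();                 // atomically adds 1: value is now 101
        counter.addAndGet(9L);                     // atomically adds 9: value is now 110
        System.out.println(counter.get());         // prints 110
    }
}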

Usage

From source file:com.indeed.lsmtree.recordcache.StandalonePersistentRecordCache.java

private StandalonePersistentRecordCache(final Store<K, V> index, final File checkpointDir) throws IOException {
    this.index = index;
    indexUpdateFunctions = new RecordLogDirectoryPoller.Functions() {

        AtomicLong indexPutTime = new AtomicLong(0);

        AtomicLong indexDeleteTime = new AtomicLong(0);

        AtomicInteger indexPuts = new AtomicInteger(0);

        AtomicInteger indexDeletes = new AtomicInteger(0);

        AtomicInteger count = new AtomicInteger(0);

        @Override
        public void process(final long position, Operation op) throws IOException {

            count.incrementAndGet();
            if (count.get() % 1000 == 0) {
                final int puts = indexPuts.get();
                if (log.isDebugEnabled() && puts > 0) {
                    log.debug("avg index put time: " + indexPutTime.get() / puts / 1000d + " us");
                }
                final int deletes = indexDeletes.get();
                if (log.isDebugEnabled() && deletes > 0) {
                    log.debug("avg index delete time: " + indexDeleteTime.get() / deletes / 1000d + " us");
                }
            }

            if (op.getClass() == Put.class) {
                final Put<K, V> put = (Put) op;
                final long start = System.nanoTime();
                synchronized (index) {
                    index.put(put.getKey(), put.getValue());
                }
                indexPutTime.addAndGet(System.nanoTime() - start);
                indexPuts.incrementAndGet();
            } else if (op.getClass() == Delete.class) {
                final Delete<K> delete = (Delete) op;
                for (K k : delete.getKeys()) {
                    final long start = System.nanoTime();
                    synchronized (index) {
                        index.delete(k);
                    }
                    indexDeleteTime.addAndGet(System.nanoTime() - start);
                    indexDeletes.incrementAndGet();
                }
            } else if (op.getClass() == Checkpoint.class) {
                final Checkpoint checkpoint = (Checkpoint) op;
                if (checkpointDir != null) {
                    sync();
                    index.checkpoint(new File(checkpointDir, String.valueOf(checkpoint.getTimestamp())));
                }
            } else {
                log.warn("operation class unknown");
            }
        }

        @Override
        public void sync() throws IOException {
            final long start = System.nanoTime();
            index.sync();
            log.debug("sync time: " + (System.nanoTime() - start) / 1000d + " us");
        }
    };
}
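
The snippet above accumulates per-operation latencies in an AtomicLong via addAndGet and divides by an operation counter to report averages. A condensed sketch of that pattern, with illustrative names:

import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;

public class TimingDemo {
    private final AtomicLong totalNanos = new AtomicLong(0);
    private final AtomicInteger operations = new AtomicInteger(0);

    void timed(Runnable work) {
        final long start = System.nanoTime();
        work.run();
        totalNanos.addAndGet(System.nanoTime() - start); // accumulate elapsed nanoseconds
        operations.incrementAndGet();
    }

    double averageMicros() {
        int ops = operations.get();
        return ops == 0 ? 0d : totalNanos.get() / ops / 1000d; // nanoseconds to microseconds
    }
}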

From source file:org.apache.cxf.transport.jms.JMSConduit.java

public JMSConduit(EndpointInfo endpointInfo, EndpointReferenceType target, JMSConfiguration jmsConfig) {
    super(target);
    this.jmsConfig = jmsConfig;
    this.endpointInfo = endpointInfo;
    correlationMap = new ConcurrentHashMap<String, Exchange>();
    conduitId = UUID.randomUUID().toString().replaceAll("-", "");
    messageCount = new AtomicLong(0);
}
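
A counter like messageCount is a common way to derive unique, ordered message identifiers per conduit. A hedged sketch of that idea (the naming scheme is an assumption, not necessarily what JMSConduit does internally):

import java.util.UUID;
import java.util.concurrent.atomic.AtomicLong;

public class CorrelationIdDemo {
    private final String conduitId = UUID.randomUUID().toString().replaceAll("-", "");
    private final AtomicLong messageCount = new AtomicLong(0);

    String nextCorrelationId() {
        // Combining a per-conduit prefix with an atomically increasing count
        // guarantees concurrent senders never produce the same id.
        return conduitId + messageCount.incrementAndGet();
    }
}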

From source file:org.apache.spark.network.client.TransportResponseHandler.java

public TransportResponseHandler(Channel channel) {
    this.channel = channel;
    this.outstandingFetches = new ConcurrentHashMap<>();
    this.outstandingRpcs = new ConcurrentHashMap<>();
    this.streamCallbacks = new ConcurrentLinkedQueue<>();
    this.timeOfLastRequestNs = new AtomicLong(0);
}
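
Here timeOfLastRequestNs records the timestamp of the most recent request so other threads can check for idleness. A minimal sketch of that pattern (method names are illustrative):

import java.util.concurrent.atomic.AtomicLong;

public class LastRequestTracker {
    private final AtomicLong timeOfLastRequestNs = new AtomicLong(0);

    void recordRequest() {
        timeOfLastRequestNs.set(System.nanoTime()); // atomic write, visible to all threads
    }

    boolean idleLongerThan(long timeoutNs) {
        long last = timeOfLastRequestNs.get();
        return last != 0 && System.nanoTime() - last > timeoutNs;
    }
}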

From source file:org.apache.hadoop.raid.PurgeMonitor.java

public PurgeMonitor(Configuration conf, PlacementMonitor placementMonitor, final RaidNode raidNode) {
    this.conf = conf;
    this.placementMonitor = placementMonitor;
    this.directoryTraversalShuffle = conf.getBoolean(RaidNode.RAID_DIRECTORYTRAVERSAL_SHUFFLE, true);
    this.directoryTraversalThreads = conf.getInt(RaidNode.RAID_DIRECTORYTRAVERSAL_THREADS, 4);
    this.purgeMonitorSleepTime = conf.getLong(PURGE_MONITOR_SLEEP_TIME_KEY, PURGE_MONITOR_SLEEP_TIME_DEFAULT);
    this.entriesProcessed = new AtomicLong(0);
    this.raidNode = raidNode;
}

From source file:org.coursera.cmbrehm.kewlvideo.server.VideoController.java

public VideoController() {
    idGenerator = new AtomicLong(1L);
    videoList = new HashMap<Long, Video>();
}
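
Seeding the counter with 1 hands out positive IDs starting from 1. A minimal sketch of the ID-generator pattern:

import java.util.concurrent.atomic.AtomicLong;

public class IdGeneratorDemo {
    private final AtomicLong idGenerator = new AtomicLong(1L);

    long nextId() {
        // getAndIncrement returns the current value and then adds 1,
        // so the first caller gets 1, the next 2, and so on.
        return idGenerator.getAndIncrement();
    }
}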

From source file:de.dfki.kiara.jsonrpc.JsonRpcProtocol.java

public JsonRpcProtocol() {
    nextId = new AtomicLong(1);
    objectMapper = new ObjectMapper();
    objectMapper.registerModule(createSerializationModule());
    objectMapper.configure(SerializationFeature.WRITE_ENUMS_USING_INDEX, true);
    objectReader = objectMapper.reader();
    objectWriter = objectMapper.writer();
}

From source file:com.joyent.manta.client.MantaSeekableByteChannel.java

/**
 * Creates a new instance of a read-only seekable byte channel.
 *
 * @param path path of the object on the Manta API
 * @param position starting position in bytes from the start of the file
 * @param connectionFactory connection factory instance used for building requests to Manta
 * @param httpHelper helper class providing useful HTTP functions
 */
@Deprecated
public MantaSeekableByteChannel(final String path, final long position,
        final MantaConnectionFactory connectionFactory, final HttpHelper httpHelper) {
    this.path = path;
    this.position = new AtomicLong(position);
    this.httpHelper = httpHelper;
    this.requestRef = new AtomicReference<>();
    this.responseStream = new AtomicReference<>();
}
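
Keeping the channel position in an AtomicLong lets reads and seeks update it atomically. A simplified sketch of that pattern (not the actual Manta implementation):

import java.util.concurrent.atomic.AtomicLong;

public class PositionDemo {
    private final AtomicLong position;

    PositionDemo(long startingPosition) {
        this.position = new AtomicLong(startingPosition);
    }

    long position() {
        return position.get();
    }

    void advance(int bytesRead) {
        position.addAndGet(bytesRead); // move forward by the bytes consumed
    }

    void seek(long newPosition) {
        position.set(newPosition); // jump to an absolute offset
    }
}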

From source file:com.chinamobile.bcbsp.comm.BDBMap.java

/**
 * Opens an existing database and binds it to this map.
 * @param job the BSP job this map belongs to
 * @param db the database to open
 * @param keyClass the class of the map's keys
 * @param valueClass the class of the map's values
 * @param classCatalog the catalog used to bind the key and value classes
 */
public BDBMap(BSPJob job, Database db, Class<K> keyClass, Class<V> valueClass,
        StoredClassCatalog classCatalog) {
    mapDb = db;
    setDbName(db.getDatabaseName());
    bdbMapSize = new AtomicLong(0);
    bindDatabase(mapDb, keyClass, valueClass, classCatalog);
}

From source file:org.apache.hadoop.hbase.regionserver.wal.TestWALActionsListener.java

/**
 * Add a bunch of dummy data and roll the logs every two inserts. We
 * should end up with 10 rolled files (plus the roll called in
 * the constructor). Also test adding a listener while it's running.
 */
@Test
public void testActionListener() throws Exception {
    DummyWALActionsListener observer = new DummyWALActionsListener();
    List<WALActionsListener> list = new ArrayList<WALActionsListener>();
    list.add(observer);
    DummyWALActionsListener laterobserver = new DummyWALActionsListener();
    HLog hlog = HLogFactory.createHLog(fs, TEST_UTIL.getDataTestDir(), logName, conf, list, null);
    final AtomicLong sequenceId = new AtomicLong(1);
    HRegionInfo hri = new HRegionInfo(TableName.valueOf(SOME_BYTES), SOME_BYTES, SOME_BYTES, false);

    for (int i = 0; i < 20; i++) {
        byte[] b = Bytes.toBytes(i + "");
        KeyValue kv = new KeyValue(b, b, b);
        WALEdit edit = new WALEdit();
        edit.add(kv);
        HTableDescriptor htd = new HTableDescriptor();
        htd.addFamily(new HColumnDescriptor(b));

        hlog.append(hri, TableName.valueOf(b), edit, 0, htd, sequenceId);
        if (i == 10) {
            hlog.registerWALActionsListener(laterobserver);
        }
        if (i % 2 == 0) {
            hlog.rollWriter();
        }
    }

    hlog.close();
    hlog.closeAndDelete();

    assertEquals(11, observer.preLogRollCounter);
    assertEquals(11, observer.postLogRollCounter);
    assertEquals(5, laterobserver.preLogRollCounter);
    assertEquals(5, laterobserver.postLogRollCounter);
    assertEquals(1, observer.closedCount);
}
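
The sequenceId above is shared by reference: every append draws the next number from the same AtomicLong, which keeps sequence numbers unique and increasing across edits. A minimal sketch of that shared-sequence pattern:

import java.util.concurrent.atomic.AtomicLong;

public class SequenceDemo {
    static long append(String edit, AtomicLong sequenceId) {
        long seq = sequenceId.getAndIncrement(); // unique, monotonically increasing
        System.out.println(seq + ": " + edit);
        return seq;
    }

    public static void main(String[] args) {
        AtomicLong sequenceId = new AtomicLong(1);
        append("first", sequenceId);  // assigned sequence number 1
        append("second", sequenceId); // assigned sequence number 2
    }
}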

From source file:org.axonframework.migration.eventstore.JpaEventStoreMigrator.java

public boolean run() throws Exception {
    final AtomicInteger updateCount = new AtomicInteger();
    final AtomicInteger skipCount = new AtomicInteger();
    final AtomicLong lastId = new AtomicLong(
            Long.parseLong(configuration.getProperty("lastProcessedId", "-1")));
    try {
        TransactionTemplate template = new TransactionTemplate(txManager);
        template.setReadOnly(true);
        System.out.println("Starting conversion. Fetching batches of " + QUERY_BATCH_SIZE + " items.");
        while (template.execute(new TransactionCallback<Boolean>() {
            @Override
            public Boolean doInTransaction(TransactionStatus status) {
                final Session hibernate = entityManager.unwrap(Session.class);
                Iterator<Object[]> results = hibernate.createQuery(
                        "SELECT e.aggregateIdentifier, e.sequenceNumber, e.type, e.id FROM DomainEventEntry e "
                                + "WHERE e.id > :lastIdentifier ORDER BY e.id ASC")
                        .setFetchSize(1000).setMaxResults(QUERY_BATCH_SIZE).setReadOnly(true)
                        .setParameter("lastIdentifier", lastId.get()).iterate();
                if (!results.hasNext()) {
                    System.out.println("Empty batch. Assuming we're done.");
                    return false;
                } else if (Thread.interrupted()) {
                    System.out.println("Received an interrupt. Stopping...");
                    return false;
                }
                while (results.hasNext()) {
                    List<ConversionItem> conversionBatch = new ArrayList<ConversionItem>();
                    while (conversionBatch.size() < CONVERSION_BATCH_SIZE && results.hasNext()) {
                        Object[] item = results.next();
                        String aggregateIdentifier = (String) item[0];
                        long sequenceNumber = (Long) item[1];
                        String type = (String) item[2];
                        Long entryId = (Long) item[3];
                        lastId.set(entryId);
                        conversionBatch
                                .add(new ConversionItem(sequenceNumber, aggregateIdentifier, type, entryId));
                    }
                    if (!conversionBatch.isEmpty()) {
                        executor.submit(new TransformationTask(conversionBatch, skipCount));
                    }
                }
                return true;
            }
        })) {
            System.out.println("Reading next batch, starting at ID " + lastId.get() + ".");
            System.out.println(
                    "Estimated backlog size is currently: " + (workQueue.size() * CONVERSION_BATCH_SIZE));
        }
    } finally {
        executor.shutdown();
        executor.awaitTermination(5, TimeUnit.MINUTES);
        if (lastId.get() >= 0) {
            System.out.println(
                    "Processed events from old event store up to (and including) id = " + lastId.get());
        }
    }
    System.out.println("In total " + updateCount.get() + " items have been converted.");
    return skipCount.get() == 0;
}
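
Note the second role AtomicLong plays above: local variables captured by an anonymous inner class must be (effectively) final, so the final AtomicLong lastId doubles as a mutable long holder that the TransactionCallback can update. A minimal sketch of that trick:

import java.util.concurrent.atomic.AtomicLong;

public class CaptureDemo {
    public static void main(String[] args) {
        // A plain long local could not be reassigned from inside the
        // anonymous class; the AtomicLong holder can be.
        final AtomicLong lastId = new AtomicLong(-1);
        Runnable batch = new Runnable() {
            @Override
            public void run() {
                lastId.set(42L); // update the captured holder
            }
        };
        batch.run();
        System.out.println("last processed id: " + lastId.get());
    }
}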