Example usage for java.util.concurrent.atomic AtomicLong AtomicLong

List of usage examples for java.util.concurrent.atomic AtomicLong AtomicLong

Introduction

This page collects example usages of the java.util.concurrent.atomic.AtomicLong(long) constructor.

Prototype

public AtomicLong(long initialValue) 

Document

Creates a new AtomicLong with the given initial value.
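
A minimal, self-contained sketch of the constructor on its own; the class and variable names are illustrative and not taken from any of the examples below.

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongConstructorExample {
    public static void main(String[] args) {
        // Start the counter at 100 instead of the no-arg default of 0
        AtomicLong counter = new AtomicLong(100L);

        counter.incrementAndGet();          // atomically 100 -> 101
        counter.addAndGet(5L);              // atomically 101 -> 106
        long snapshot = counter.get();      // reads the current value

        System.out.println(snapshot);       // prints 106
    }
}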

Usage

From source file:io.druid.client.cache.CacheDistributionTest.java

@Test
public void testDistribution() throws Exception {
    KetamaNodeLocator locator = new KetamaNodeLocator(ImmutableList.of(dummyNode("druid-cache.0001", 11211),
            dummyNode("druid-cache.0002", 11211), dummyNode("druid-cache.0003", 11211),
            dummyNode("druid-cache.0004", 11211), dummyNode("druid-cache.0005", 11211)), hash,
            new DefaultKetamaNodeLocatorConfiguration() {
                @Override
                public int getNodeRepetitions() {
                    return reps;
                }
            });

    Map<MemcachedNode, AtomicLong> counter = Maps.newHashMap();
    long t = 0;
    for (int i = 0; i < KEY_COUNT; ++i) {
        final String k = DigestUtils.sha1Hex("abc" + i) + ":" + DigestUtils.sha1Hex("xyz" + i);
        long t0 = System.nanoTime();
        MemcachedNode node = locator.getPrimary(k);
        t += System.nanoTime() - t0;
        if (counter.containsKey(node)) {
            counter.get(node).incrementAndGet();
        } else {
            counter.put(node, new AtomicLong(1));
        }
    }

    long min = Long.MAX_VALUE;
    long max = 0;
    System.out.printf("%25s\t%5d\t", hash, reps);
    for (AtomicLong count : counter.values()) {
        System.out.printf("%10d\t", count.get());
        min = Math.min(min, count.get());
        max = Math.max(max, count.get());
    }
    System.out.printf("%7.2f\t%5.0f\n", (double) min / (double) max, (double) t / KEY_COUNT);
}
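
The containsKey/put branch above works for a single-threaded test, but the same per-key AtomicLong counter can be written more compactly, and without the check-then-act race under concurrent access, using Map.computeIfAbsent. A hedged sketch, not part of the original test:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

public class CounterMapSketch {
    public static void main(String[] args) {
        // Per-key counters; computeIfAbsent creates the AtomicLong on first use
        Map<String, AtomicLong> counter = new ConcurrentHashMap<>();
        for (String key : new String[] { "node-1", "node-2", "node-1", "node-1" }) {
            counter.computeIfAbsent(key, k -> new AtomicLong(0)).incrementAndGet();
        }
        System.out.println(counter); // prints something like {node-1=3, node-2=1}
    }
}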

From source file:org.apache.hadoop.hbase.wal.TestSecureWAL.java

@Test
public void testSecureWAL() throws Exception {
    TableName tableName = TableName.valueOf("TestSecureWAL");
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor(tableName.getName()));
    HRegionInfo regioninfo = new HRegionInfo(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
            false);
    final int total = 10;
    final byte[] row = Bytes.toBytes("row");
    final byte[] family = Bytes.toBytes("family");
    final byte[] value = Bytes.toBytes("Test value");
    FileSystem fs = TEST_UTIL.getTestFileSystem();
    final WALFactory wals = new WALFactory(TEST_UTIL.getConfiguration(), null, "TestSecureWAL");
    final AtomicLong sequenceId = new AtomicLong(1);

    // Write the WAL
    final WAL wal = wals.getWAL(regioninfo.getEncodedNameAsBytes());

    for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(row, family, Bytes.toBytes(i), value));
        wal.append(htd, regioninfo,
                new WALKey(regioninfo.getEncodedNameAsBytes(), tableName, System.currentTimeMillis()), kvs,
                sequenceId, true, null);
    }
    wal.sync();
    final Path walPath = DefaultWALProvider.getCurrentFileName(wal);
    wals.shutdown();

    // Ensure edits are not plaintext
    long length = fs.getFileStatus(walPath).getLen();
    FSDataInputStream in = fs.open(walPath);
    byte[] fileData = new byte[(int) length];
    IOUtils.readFully(in, fileData);
    in.close();
    assertFalse("Cells appear to be plaintext", Bytes.contains(fileData, value));

    // Confirm the WAL can be read back
    WAL.Reader reader = wals.createReader(TEST_UTIL.getTestFileSystem(), walPath);
    int count = 0;
    WAL.Entry entry = new WAL.Entry();
    while (reader.next(entry) != null) {
        count++;
        List<Cell> cells = entry.getEdit().getCells();
        assertTrue("Should be one KV per WALEdit", cells.size() == 1);
        for (Cell cell : cells) {
            assertTrue("Incorrect row", Bytes.equals(cell.getRowArray(), cell.getRowOffset(),
                    cell.getRowLength(), row, 0, row.length));
            assertTrue("Incorrect family", Bytes.equals(cell.getFamilyArray(), cell.getFamilyOffset(),
                    cell.getFamilyLength(), family, 0, family.length));
            assertTrue("Incorrect value", Bytes.equals(cell.getValueArray(), cell.getValueOffset(),
                    cell.getValueLength(), value, 0, value.length));
        }
    }
    assertEquals("Should have read back as many KVs as written", total, count);
    reader.close();
}

From source file:org.apache.hadoop.ipc.FairCallQueue.java

/**
 * Create a FairCallQueue.
 * @param priorityLevels the number of priority sub-queues
 * @param capacity the total size of all sub-queues
 * @param ns the prefix to use for configuration
 * @param conf the configuration to read from
 * Notes: Each sub-queue has a capacity of `capacity / numSubqueues`.
 * The first (highest priority) sub-queue gets the excess capacity
 * of `capacity % numSubqueues`.
 */
public FairCallQueue(int priorityLevels, int capacity, String ns, Configuration conf) {
    if (priorityLevels < 1) {
        throw new IllegalArgumentException("Number of Priority Levels must be at least 1");
    }
    int numQueues = priorityLevels;
    LOG.info("FairCallQueue is in use with " + numQueues + " queues with total capacity of " + capacity);

    this.queues = new ArrayList<BlockingQueue<E>>(numQueues);
    this.overflowedCalls = new ArrayList<AtomicLong>(numQueues);
    int queueCapacity = capacity / numQueues;
    int capacityForFirstQueue = queueCapacity + (capacity % numQueues);
    for (int i = 0; i < numQueues; i++) {
        if (i == 0) {
            this.queues.add(new LinkedBlockingQueue<E>(capacityForFirstQueue));
        } else {
            this.queues.add(new LinkedBlockingQueue<E>(queueCapacity));
        }
        this.overflowedCalls.add(new AtomicLong(0));
    }

    this.multiplexer = new WeightedRoundRobinMultiplexer(numQueues, ns, conf);
    // Make this the active source of metrics
    MetricsProxy mp = MetricsProxy.getInstance(ns);
    mp.setDelegate(this);
}
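
To make the capacity split concrete: with capacity = 10 and priorityLevels = 4, queueCapacity is 10 / 4 = 2 and the remainder 10 % 4 = 2 is added to the first (highest-priority) queue, so the four sub-queues get capacities 4, 2, 2, 2, and each one receives its own AtomicLong(0) overflow counter.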

From source file:org.apache.hadoop.hdfs.hoss.db.HosMetaData.java

private void initialize(String metaDir, int warmCapacity, int hotCapacity) {
    objectsMap = new ObjectsMap(new File(metaDir));
    objId = new ObjectId();
    currentId = new AtomicLong(objId.getCurrentId());
    ids = objId.getDeletedIDSet();
    LOG.info("current id: " + currentId);
    hosBloomFilter = new HosBloomFilter();
    ps = new PathStore();
    hs = new HotStore();
    if (!disablecache) {
        hossCache = new HossCache(warmCapacity, hotCapacity);
    }
}

From source file:com.aol.advertising.qiao.management.metrics.StatisticsStore.java

@Override
public void set(String key, long value) {
    if (stats.containsKey(key)) {
        stats.get(key).set(value);
    } else {
        stats.put(key, new AtomicLong(value));
    }
}
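
Note that when the stats map is shared between threads, the containsKey/put pair above is a check-then-act race: two threads can both miss, and one put can overwrite the other. If stats is a ConcurrentMap, the same create-on-first-use-then-set behaviour can be expressed race-free with computeIfAbsent. A standalone sketch under that assumption; the names are illustrative, not taken from the original class:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

public class StatsSetSketch {
    private final ConcurrentMap<String, AtomicLong> stats = new ConcurrentHashMap<>();

    public void set(String key, long value) {
        // Atomically creates the counter on first use, then stores the given value
        stats.computeIfAbsent(key, k -> new AtomicLong(0)).set(value);
    }

    public static void main(String[] args) {
        StatsSetSketch s = new StatsSetSketch();
        s.set("files.processed", 42L);
        System.out.println(s.stats.get("files.processed")); // prints 42
    }
}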

From source file:org.apache.carbondata.processing.loading.sort.impl.UnsafeParallelReadMergeSorterWithColumnRangeImpl.java

@Override
public void initialize(SortParameters sortParameters) {
    this.originSortParameters = sortParameters;
    int totalInMemoryChunkSizeInMB = CarbonProperties.getInstance().getSortMemoryChunkSizeInMB();
    inMemoryChunkSizeInMB = totalInMemoryChunkSizeInMB / columnRangeInfo.getNumOfRanges();
    if (inMemoryChunkSizeInMB < 5) {
        inMemoryChunkSizeInMB = 5;
    }
    this.insideRowCounterList = new ArrayList<>(columnRangeInfo.getNumOfRanges());
    for (int i = 0; i < columnRangeInfo.getNumOfRanges(); i++) {
        insideRowCounterList.add(new AtomicLong(0));
    }
}

From source file:org.apache.hadoop.hbase.regionserver.wal.TestHLogReaderOnSecureHLog.java

private Path writeWAL(String tblName) throws IOException {
    Configuration conf = TEST_UTIL.getConfiguration();
    String clsName = conf.get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
    conf.setClass(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, CustomWALCellCodec.class, WALCellCodec.class);
    TableName tableName = TableName.valueOf(tblName);
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor(tableName.getName()));
    HRegionInfo regioninfo = new HRegionInfo(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
            false);
    final int total = 10;
    final byte[] row = Bytes.toBytes("row");
    final byte[] family = Bytes.toBytes("family");
    // "value" is used in the append loop below; declared here (mirroring the TestSecureWAL example above) so the snippet compiles
    final byte[] value = Bytes.toBytes("Test value");
    FileSystem fs = TEST_UTIL.getTestFileSystem();
    Path logDir = TEST_UTIL.getDataTestDir(tblName);
    final AtomicLong sequenceId = new AtomicLong(1);

    // Write the WAL
    FSHLog wal = new FSHLog(fs, TEST_UTIL.getDataTestDir(), logDir.toString(), conf);
    for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(row, family, Bytes.toBytes(i), value));
        wal.append(regioninfo, tableName, kvs, System.currentTimeMillis(), htd, sequenceId);
    }
    final Path walPath = wal.computeFilename();
    wal.close();
    // restore the cell codec class
    conf.set(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, clsName);

    return walPath;
}

From source file:org.apache.hadoop.hbase.regionserver.TestFailedAppendAndSync.java

/**
 * Reproduce locking up that happens when we get an exceptions appending and syncing.
 * See HBASE-14317.
 * First I need to set up some mocks for Server and RegionServerServices. I also need to
 * set up a dodgy WAL that will throw an exception when we go to append to it.
 */
@Test(timeout = 300000)
public void testLockupAroundBadAssignSync() throws IOException {
    final AtomicLong rolls = new AtomicLong(0);
    // Dodgy WAL. Will throw exceptions when flags set.
    class DodgyFSLog extends FSHLog {
        volatile boolean throwSyncException = false;
        volatile boolean throwAppendException = false;

        public DodgyFSLog(FileSystem fs, Path root, String logDir, Configuration conf) throws IOException {
            super(fs, root, logDir, conf);
        }

        @Override
        public byte[][] rollWriter(boolean force) throws FailedLogCloseException, IOException {
            byte[][] regions = super.rollWriter(force);
            rolls.getAndIncrement();
            return regions;
        }

        @Override
        protected Writer createWriterInstance(Path path) throws IOException {
            final Writer w = super.createWriterInstance(path);
            return new Writer() {
                @Override
                public void close() throws IOException {
                    w.close();
                }

                @Override
                public void sync() throws IOException {
                    if (throwSyncException) {
                        throw new IOException("FAKE! Failed to replace a bad datanode...");
                    }
                    w.sync();
                }

                @Override
                public void append(Entry entry) throws IOException {
                    if (throwAppendException) {
                        throw new IOException("FAKE! Failed to replace a bad datanode...");
                    }
                    w.append(entry);
                }

                @Override
                public long getLength() throws IOException {
                    return w.getLength();
                }
            };
        }
    }

    // Make up mocked server and services.
    Server server = mock(Server.class);
    when(server.getConfiguration()).thenReturn(CONF);
    when(server.isStopped()).thenReturn(false);
    when(server.isAborted()).thenReturn(false);
    RegionServerServices services = mock(RegionServerServices.class);
    // OK. Now I have my mocked up Server and RegionServerServices and my dodgy WAL, go ahead with
    // the test.
    FileSystem fs = FileSystem.get(CONF);
    Path rootDir = new Path(dir + getName());
    DodgyFSLog dodgyWAL = new DodgyFSLog(fs, rootDir, getName(), CONF);
    LogRoller logRoller = new LogRoller(server, services);
    logRoller.addWAL(dodgyWAL);
    logRoller.start();

    boolean threwOnSync = false;
    boolean threwOnAppend = false;
    boolean threwOnBoth = false;

    HRegion region = initHRegion(tableName, null, null, dodgyWAL);
    try {
        // Get some random bytes.
        byte[] value = Bytes.toBytes(getName());
        try {
            // First get something into memstore
            Put put = new Put(value);
            put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("1"), value);
            region.put(put);
        } catch (IOException ioe) {
            fail();
        }
        long rollsCount = rolls.get();
        try {
            dodgyWAL.throwAppendException = true;
            dodgyWAL.throwSyncException = false;
            Put put = new Put(value);
            put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("3"), value);
            region.put(put);
        } catch (IOException ioe) {
            threwOnAppend = true;
        }
        while (rollsCount == rolls.get())
            Threads.sleep(100);
        rollsCount = rolls.get();

        // When we get to here.. we should be ok. A new WAL has been put in place. There were no
        // appends to sync. We should be able to continue.

        try {
            dodgyWAL.throwAppendException = true;
            dodgyWAL.throwSyncException = true;
            Put put = new Put(value);
            put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("4"), value);
            region.put(put);
        } catch (IOException ioe) {
            threwOnBoth = true;
        }
        while (rollsCount == rolls.get())
            Threads.sleep(100);

        // Again, all should be good. New WAL and no outstanding unsync'd edits so we should be able
        // to just continue.

        // So, should be no abort at this stage. Verify.
        Mockito.verify(server, Mockito.atLeast(0)).abort(Mockito.anyString(), (Throwable) Mockito.anyObject());
        try {
            dodgyWAL.throwAppendException = false;
            dodgyWAL.throwSyncException = true;
            Put put = new Put(value);
            put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("2"), value);
            region.put(put);
        } catch (IOException ioe) {
            threwOnSync = true;
        }
        // An append that made it into the WAL but then failed to sync is a server abort
        // condition; that is our current semantic. Verify. It takes a while for abort to be
        // called, so just hang here until it happens. If it doesn't, we'll time out the whole
        // test. That is fine.
        while (true) {
            try {
                Mockito.verify(server, Mockito.atLeast(1)).abort(Mockito.anyString(),
                        (Throwable) Mockito.anyObject());
                break;
            } catch (WantedButNotInvoked t) {
                Threads.sleep(1);
            }
        }
    } finally {
        // To stop logRoller, its server has to say it is stopped.
        Mockito.when(server.isStopped()).thenReturn(true);
        if (logRoller != null)
            logRoller.interrupt();
        if (region != null) {
            try {
                region.close(true);
            } catch (DroppedSnapshotException e) {
                LOG.info("On way out; expected!", e);
            }
        }
        if (dodgyWAL != null)
            dodgyWAL.close();
        assertTrue("The regionserver should have thrown an exception", threwOnBoth);
        assertTrue("The regionserver should have thrown an exception", threwOnAppend);
        assertTrue("The regionserver should have thrown an exception", threwOnSync);
    }
}

From source file:ch.algotrader.service.ib.IBNativeHistoricalDataServiceImpl.java

public IBNativeHistoricalDataServiceImpl(final IBSession iBSession, final IBConfig iBConfig,
        final IBPendingRequests pendingRequests, final IdGenerator requestIdGenerator,
        final SecurityDao securityDao, final BarDao barDao) {

    super(barDao);

    Validate.notNull(iBSession, "IBSession is null");
    Validate.notNull(iBConfig, "IBConfig is null");
    Validate.notNull(pendingRequests, "IBPendingRequests is null");
    Validate.notNull(requestIdGenerator, "IdGenerator is null");
    Validate.notNull(securityDao, "SecurityDao is null");

    this.lastTimeStamp = new AtomicLong(0L);
    this.iBSession = iBSession;
    this.iBConfig = iBConfig;
    this.pendingRequests = pendingRequests;
    this.requestIdGenerator = requestIdGenerator;
    this.securityDao = securityDao;
}

From source file:de.hybris.platform.jdbcwrapper.ConnectionPoolTest.java

private void doTestMultithreadedAccess(final int RUN_SECONDS, final int THREADS, final int PERCENT_NO_TX,
        final int PERCENT_TX_ROLLBACK, final boolean useInterrupt, final boolean sendDummyStatement) {
    HybrisDataSource dataSource = null;

    LOG.info("--- test multithreaded access to connection pool duration:" + RUN_SECONDS + "s threads:" + THREADS
            + " nonTx:" + PERCENT_NO_TX + "% rollback:" + PERCENT_TX_ROLLBACK + "% interrupt:" + useInterrupt
            + "-----------------------------------");
    try {
        final Collection<TestConnectionImpl> allConnections = new ConcurrentLinkedQueue<TestConnectionImpl>();

        final AtomicLong rollbackCounter = new AtomicLong(0);
        final AtomicLong connectionCounter = new AtomicLong(0);
        final AtomicBoolean finished = new AtomicBoolean(false);

        dataSource = createDataSource(Registry.getCurrentTenantNoFallback(), allConnections, connectionCounter,
                false, false);

        assertEquals(0, dataSource.getNumInUse());
        assertEquals(1, dataSource.getNumPhysicalOpen());
        assertEquals(1, dataSource.getMaxInUse());
        assertEquals(1, dataSource.getMaxPhysicalOpen());

        final int maxConnections = dataSource.getMaxAllowedPhysicalOpen();

        final String runId = "[" + RUN_SECONDS + "|" + THREADS + "|" + PERCENT_NO_TX + "|" + PERCENT_TX_ROLLBACK
                + "|" + useInterrupt + "]";

        final Runnable runnable = new ContinuousAccessRunnable(dataSource, PERCENT_NO_TX, PERCENT_TX_ROLLBACK,
                rollbackCounter, finished, runId, sendDummyStatement);

        final TestThreadsHolder threadsHolder = new TestThreadsHolder(THREADS, runnable) {
            @Override
            public void stopAll() {
                if (useInterrupt) {
                    super.stopAll();
                } else {
                    finished.set(true);
                }
            }
        };

        threadsHolder.startAll();

        waitDuration(RUN_SECONDS, maxConnections, dataSource, allConnections);

        threadsHolder.stopAll();
        final boolean allStoppedNormal = threadsHolder.waitForAll(30, TimeUnit.SECONDS);

        if (!allStoppedNormal) {
            // try fallback method
            finished.set(true);
            final boolean allStoppedFallback = threadsHolder.waitForAll(10, TimeUnit.SECONDS);
            if (allStoppedFallback) {
                LOG.error("Threads did not stop normally but only after using boolean flag!");
            } else {
                fail("db connection test threads did not stop correctly even after fallback method");
            }
        }

        // kill data source
        dataSource.destroy();
        assertTrue(dataSource.getConnectionPool().isPoolClosed());
        assertTrue(waitForAllInactive(dataSource.getConnectionPool(), 10, TimeUnit.SECONDS));

        if (PERCENT_TX_ROLLBACK > 0) {
            assertTrue(rollbackCounter.get() > 0);
        }

        final long maxAllowedConnections = maxConnections + rollbackCounter.get();

        final Stats stats = getStats(allConnections);

        LOG.info(//
                "max connections :" + maxConnections + "\n" + //
                        "rollbacks :" + rollbackCounter.get() + "\n" + //
                        "real connections :" + connectionCounter.get() + "\n" + //
                        "closed:" + stats.closed + "\n" + //
                        "open:" + stats.open + "\n" + //
                        "borrowed :" + stats.borrowed + "\n" + //
                        "returned :" + stats.returned + "\n" + //
                        "invalidated :" + stats.invalidated + "\n");

        // we cannot be sure since not each rollbacked connections *must* be re-created
        assertTrue(
                "handed out more than max connections (got:" + connectionCounter.get() + " > max:"
                        + maxAllowedConnections + ")", //
                connectionCounter.get() <= maxAllowedConnections);
        assertEquals("still got " + stats.borrowed + "borrowed connections", 0, stats.borrowed);
        assertEquals(
                "connection count mismatch - total:" + connectionCounter.get() + " <> "
                        + (stats.returned + stats.invalidated) + " (returned:" + stats.returned
                        + " + invalidated:" + stats.invalidated + ")", //
                connectionCounter.get(), stats.returned + stats.invalidated);

        // make sure all connections have been finally closed

        assertEquals(
                "data source " + dataSource + "still got " + dataSource.getNumInUse() + " connections in use", //
                0, dataSource.getNumInUse());
        assertEquals(
                "data source " + dataSource + "still got " + dataSource.getNumPhysicalOpen()
                        + " physical connections open (despite none are in use)", //
                0, dataSource.getNumPhysicalOpen());
        assertTrue(
                "data source " + dataSource + " had more than max allowed connections (max:" + maxConnections
                        + ", max in use:" + dataSource.getMaxInUse() + ")", //
                maxConnections >= dataSource.getMaxInUse());
        assertTrue(
                "data source " + dataSource + " had more than max allowed physical connections (max:"
                        + maxConnections + ", max physical in use:" + dataSource.getMaxPhysicalOpen() + ")", //
                maxConnections >= dataSource.getMaxPhysicalOpen());
    } finally {
        destroyDataSource(dataSource);
    }

}