Example usage for java.util.concurrent.atomic AtomicLong get

Introduction

This page lists example usages of java.util.concurrent.atomic AtomicLong.get() drawn from several open source projects.

Prototype

public final long get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
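
Before the project excerpts below, here is a minimal, self-contained sketch (class and variable names are illustrative, not taken from any of the quoted projects) of the typical pattern: one thread updates an AtomicLong while another reads it with get(), which performs a volatile read.

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongGetExample {
    public static void main(String[] args) throws InterruptedException {
        final AtomicLong counter = new AtomicLong();

        // A worker thread increments the counter one thousand times.
        Thread worker = new Thread(() -> {
            for (int i = 0; i < 1_000; i++) {
                counter.incrementAndGet();
            }
        });
        worker.start();

        // get() is a volatile read: it returns the most recent value published
        // by any thread, here somewhere between 0 and 1000.
        System.out.println("in progress: " + counter.get());

        worker.join();
        // join() establishes happens-before, so this read observes every increment.
        System.out.println("final value: " + counter.get()); // prints 1000
    }
}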

Usage

From source file:reactor.bus.SelectorUnitTests.java
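
In this throughput test, the counter starts at selectors * iterations and is decremented once per dispatched event; get() backs the final assertion that every registered consumer was invoked.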

@SuppressWarnings("unchecked")
private void runTest(String type, Function<Integer, Tuple2<Selector, Object>> fn) {
    final AtomicLong counter = new AtomicLong(selectors * iterations);
    Registry<Object, Consumer<?>> registry = Registries.create();

    Consumer<?> countDown = new Consumer<Object>() {
        @Override
        public void accept(Object obj) {
            counter.decrementAndGet();
        }
    };

    Selector<Object>[] sels = new Selector[selectors];
    Object[] keys = new Object[selectors];

    for (int i = 0; i < selectors; i++) {
        Tuple2<Selector, Object> tup = fn.apply(i);
        sels[i] = tup.getT1();
        keys[i] = tup.getT2();
        registry.register(sels[i], countDown);
    }

    long start = System.currentTimeMillis();
    for (int i = 0; i < selectors * iterations; i++) {
        int j = i % selectors;
        for (Registration<?, ? extends Consumer<?>> reg : registry.select(keys[j])) {
            reg.getObject().accept(null);
        }
    }
    long end = System.currentTimeMillis();
    double elapsed = (end - start);
    long throughput = Math.round((selectors * iterations) / (elapsed / 1000));
    LOG.info("{} throughput: {}M/s in {}ms", type, throughput, Math.round(elapsed));

    assertThat("All handlers have been found and executed.", counter.get() == 0);
}

From source file:io.warp10.continuum.egress.EgressFetchHandler.java
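
Here get() reads the datapoint count carried across calls in the lastCount argument; the count is tracked locally while iterating over the GTS decoders and written back with set() at the end.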

private static void wrapperDump(PrintWriter pw, GTSDecoderIterator iter, boolean dedup, boolean signed,
        byte[] fetchPSK, long timespan, AtomicReference<Metadata> lastMeta, AtomicLong lastCount)
        throws IOException {

    if (!signed) {
        throw new IOException("Unsigned request.");
    }

    // Labels for Sensision
    Map<String, String> labels = new HashMap<String, String>();

    StringBuilder sb = new StringBuilder();

    Metadata lastMetadata = lastMeta.get();
    long currentCount = lastCount.get();

    while (iter.hasNext()) {
        GTSDecoder decoder = iter.next();

        if (dedup) {
            decoder = decoder.dedup();
        }

        if (!decoder.next()) {
            continue;
        }

        long toDecodeCount = Long.MAX_VALUE;

        if (timespan < 0) {
            Metadata meta = decoder.getMetadata();
            if (!meta.equals(lastMetadata)) {
                lastMetadata = meta;
                currentCount = 0;
            }
            toDecodeCount = Math.max(0, -timespan - currentCount);
        }

        GTSEncoder encoder = decoder.getEncoder(true);

        if (encoder.getCount() > toDecodeCount) {
            // We have too much data, shrink the encoder
            GTSEncoder enc = new GTSEncoder();
            enc.safeSetMetadata(decoder.getMetadata());
            while (decoder.next() && toDecodeCount > 0) {
                enc.addValue(decoder.getTimestamp(), decoder.getLocation(), decoder.getElevation(),
                        decoder.getValue());
                toDecodeCount--;
            }
            encoder = enc;
        }

        if (timespan < 0) {
            currentCount += encoder.getCount();
        }

        if (encoder.size() <= 0) {
            continue;
        }

        //
        // Build a GTSWrapper
        //

        GTSWrapper wrapper = GTSWrapperHelper.fromGTSEncoderToGTSWrapper(encoder, true);

        //      GTSWrapper wrapper = new GTSWrapper();
        //      wrapper.setBase(encoder.getBaseTimestamp());
        //      wrapper.setMetadata(encoder.getMetadata());
        //      wrapper.setCount(encoder.getCount());
        //      wrapper.setEncoded(encoder.getBytes());

        //
        // Serialize the wrapper
        //

        TSerializer serializer = new TSerializer(new TCompactProtocol.Factory());
        byte[] data = null;

        try {
            data = serializer.serialize(wrapper);
        } catch (TException te) {
            throw new IOException(te);
        }

        //
        // Output is GTSWrapperId <WSP> HASH <WSP> GTSWrapper
        //

        pw.write(Hex.encodeHex(GTSWrapperHelper.getId(wrapper)));

        pw.write(' ');

        if (null != fetchPSK) {
            //
            // Compute HMac for the wrapper
            //

            long hash = SipHashInline.hash24(fetchPSK, data);

            //
            // Output the MAC before the data, as hex digits
            //
            pw.write(Hex.encodeHex(Longs.toByteArray(hash)));
        } else {
            pw.write('-');
        }

        pw.write(' ');

        //
        // Base64 encode the wrapper
        //

        OrderPreservingBase64.encodeToWriter(data, pw);
        pw.write('\r');
        pw.write('\n');

        //
        // Sensision metrics
        //

        labels.clear();
        labels.put(SensisionConstants.SENSISION_LABEL_APPLICATION,
                wrapper.getMetadata().getLabels().get(Constants.APPLICATION_LABEL));

        Sensision.update(SensisionConstants.SENSISION_CLASS_CONTINUUM_SFETCH_WRAPPERS, Sensision.EMPTY_LABELS,
                1);
        Sensision.update(SensisionConstants.SENSISION_CLASS_CONTINUUM_SFETCH_WRAPPERS_PERAPP, labels, 1);

        Sensision.update(SensisionConstants.SENSISION_CLASS_CONTINUUM_SFETCH_WRAPPERS_SIZE,
                Sensision.EMPTY_LABELS, data.length);
        Sensision.update(SensisionConstants.SENSISION_CLASS_CONTINUUM_SFETCH_WRAPPERS_SIZE_PERAPP, labels,
                data.length);

        Sensision.update(SensisionConstants.SENSISION_CLASS_CONTINUUM_SFETCH_WRAPPERS_DATAPOINTS,
                Sensision.EMPTY_LABELS, wrapper.getCount());
        Sensision.update(SensisionConstants.SENSISION_CLASS_CONTINUUM_SFETCH_WRAPPERS_DATAPOINTS_PERAPP, labels,
                wrapper.getCount());

    }

    lastMeta.set(lastMetadata);
    lastCount.set(currentCount);
}

From source file:com.opengamma.engine.cache.BerkeleyDBValueSpecificationIdentifierBinaryDataStoreTest.java
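
The putter thread publishes the highest identifier written so far via set(), and the two getter threads call get() to bound the random keys they read while puts and gets run concurrently.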

public void parallelPutGetTest() throws InterruptedException {
    final int numEntries = 5000;
    final int numCycles = 1;
    final int numGets = numCycles * numEntries;
    final Random random = new Random();

    File dbDir = createDbDir("parallelPutGetTest");
    Environment dbEnvironment = BerkeleyDBViewComputationCacheSource.constructDatabaseEnvironment(dbDir, false);

    final BerkeleyDBBinaryDataStore dataStore = new BerkeleyDBBinaryDataStore(dbEnvironment,
            "parallelPutGetTest");
    dataStore.start();

    final AtomicLong currentMaxIdentifier = new AtomicLong(0L);
    final byte[] bytes = new byte[100];
    random.nextBytes(bytes);
    Thread tPut = new Thread(new Runnable() {
        @Override
        public void run() {
            OperationTimer timer = new OperationTimer(s_logger, "Putting {} entries", numEntries);
            for (int i = 0; i < numEntries; i++) {
                random.nextBytes(bytes);
                dataStore.put(i, bytes);
                currentMaxIdentifier.set(i);
            }
            long numMillis = timer.finished();

            double msPerPut = ((double) numMillis) / ((double) numEntries);
            double putsPerSecond = 1000.0 / msPerPut;

            s_logger.info("for {} puts, {} ms/put, {} puts/sec",
                    new Object[] { numEntries, msPerPut, putsPerSecond });
        }

    }, "Putter");

    class GetRunner implements Runnable {
        @Override
        public void run() {
            OperationTimer timer = new OperationTimer(s_logger, "Getting {} entries", numGets);
            for (int i = 0; i < numGets; i++) {
                int maxIdentifier = (int) currentMaxIdentifier.get();
                long actualIdentifier = random.nextInt(maxIdentifier);
                dataStore.get(actualIdentifier);
            }
            long numMillis = timer.finished();

            double msPerGet = ((double) numMillis) / ((double) numGets);
            double getsPerSecond = 1000.0 / msPerGet;

            s_logger.info("for {} gets, {} ms/get, {} gets/sec",
                    new Object[] { numGets, msPerGet, getsPerSecond });
        }
    }
    Thread tGet1 = new Thread(new GetRunner(), "getter-1");
    Thread tGet2 = new Thread(new GetRunner(), "getter-2");
    //Thread tGet3 = new Thread(new GetRunner(), "getter-3");
    //Thread tGet4 = new Thread(new GetRunner(), "getter-4");
    //Thread tGet5 = new Thread(new GetRunner(), "getter-5");

    tPut.start();
    Thread.sleep(5L);
    tGet1.start();
    tGet2.start();
    //tGet3.start();
    //tGet4.start();
    //tGet5.start();

    tPut.join();
    tGet1.join();
    tGet2.join();
    //tGet3.join();
    //tGet4.join();
    //tGet5.join();

    dataStore.delete();
    dataStore.stop();
    dbEnvironment.close();
}

From source file:org.apache.hadoop.hdfs.server.datanode.TestBatchIbr.java
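
createFileTime and verifyFileTime accumulate per-task durations with addAndGet(), and get() is called at the end to log the totals.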

static void runIbrTest(final long ibrInterval) throws Exception {
    final ExecutorService executor = createExecutor();
    final Random ran = new Random();

    final Configuration conf = newConf(ibrInterval);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
    final DistributedFileSystem dfs = cluster.getFileSystem();

    try {
        final String dirPathString = "/dir";
        final Path dir = new Path(dirPathString);
        dfs.mkdirs(dir);

        // start testing
        final long testStartTime = Time.monotonicNow();
        final ExecutorCompletionService<Path> createService = new ExecutorCompletionService<>(executor);
        final AtomicLong createFileTime = new AtomicLong();
        final AtomicInteger numBlockCreated = new AtomicInteger();

        // create files
        for (int i = 0; i < NUM_FILES; i++) {
            createService.submit(new Callable<Path>() {
                @Override
                public Path call() throws Exception {
                    final long start = Time.monotonicNow();
                    try {
                        final long seed = ran.nextLong();
                        final int numBlocks = ran.nextInt(MAX_BLOCK_NUM) + 1;
                        numBlockCreated.addAndGet(numBlocks);
                        return createFile(dir, numBlocks, seed, dfs);
                    } finally {
                        createFileTime.addAndGet(Time.monotonicNow() - start);
                    }
                }
            });
        }

        // verify files
        final ExecutorCompletionService<Boolean> verifyService = new ExecutorCompletionService<>(executor);
        final AtomicLong verifyFileTime = new AtomicLong();
        for (int i = 0; i < NUM_FILES; i++) {
            final Path file = createService.take().get();
            verifyService.submit(new Callable<Boolean>() {
                @Override
                public Boolean call() throws Exception {
                    final long start = Time.monotonicNow();
                    try {
                        return verifyFile(file, dfs);
                    } finally {
                        verifyFileTime.addAndGet(Time.monotonicNow() - start);
                    }
                }
            });
        }
        for (int i = 0; i < NUM_FILES; i++) {
            Assert.assertTrue(verifyService.take().get());
        }
        final long testEndTime = Time.monotonicNow();

        LOG.info("ibrInterval=" + ibrInterval + " ("
                + toConfString(DFS_BLOCKREPORT_INCREMENTAL_INTERVAL_MSEC_KEY, conf) + "), numBlockCreated="
                + numBlockCreated);
        LOG.info("duration=" + toSecondString(testEndTime - testStartTime) + ", createFileTime="
                + toSecondString(createFileTime.get()) + ", verifyFileTime="
                + toSecondString(verifyFileTime.get()));
        LOG.info("NUM_FILES=" + NUM_FILES + ", MAX_BLOCK_NUM=" + MAX_BLOCK_NUM + ", BLOCK_SIZE=" + BLOCK_SIZE
                + ", NUM_THREADS=" + NUM_THREADS + ", NUM_DATANODES=" + NUM_DATANODES);
        logIbrCounts(cluster.getDataNodes());
    } finally {
        executor.shutdown();
        cluster.shutdown();
    }
}

From source file:org.apache.pulsar.client.impl.BinaryProtoLookupService.java
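
remainingTime holds the time budget for the lookup; get() is combined with the backoff delay through Math.min() to decide whether to retry or fail with a timeout.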

private void getTopicsUnderNamespace(InetSocketAddress socketAddress, NamespaceName namespace, Backoff backoff,
        AtomicLong remainingTime, CompletableFuture<List<String>> topicsFuture, Mode mode) {
    client.getCnxPool().getConnection(socketAddress).thenAccept(clientCnx -> {
        long requestId = client.newRequestId();
        ByteBuf request = Commands.newGetTopicsOfNamespaceRequest(namespace.toString(), requestId, mode);

        clientCnx.newGetTopicsOfNamespace(request, requestId).thenAccept(topicsList -> {
            if (log.isDebugEnabled()) {
                log.debug("[namespace: {}] Success get topics list in request: {}", namespace.toString(),
                        requestId);
            }

            // do not keep partition part of topic name
            List<String> result = Lists.newArrayList();
            topicsList.forEach(topic -> {
                String filtered = TopicName.get(topic).getPartitionedTopicName();
                if (!result.contains(filtered)) {
                    result.add(filtered);
                }
            });

            topicsFuture.complete(result);
        }).exceptionally((e) -> {
            topicsFuture.completeExceptionally(e);
            return null;
        });
    }).exceptionally((e) -> {
        long nextDelay = Math.min(backoff.next(), remainingTime.get());
        if (nextDelay <= 0) {
            topicsFuture.completeExceptionally(new PulsarClientException.TimeoutException(
                    "Could not getTopicsUnderNamespace within configured timeout."));
            return null;
        }

        ((ScheduledExecutorService) executor).schedule(() -> {
            log.warn(
                    "[namespace: {}] Could not get connection while getTopicsUnderNamespace -- Will try again in {} ms",
                    namespace, nextDelay);
            remainingTime.addAndGet(-nextDelay);
            getTopicsUnderNamespace(socketAddress, namespace, backoff, remainingTime, topicsFuture, mode);
        }, nextDelay, TimeUnit.MILLISECONDS);
        return null;
    });
}

From source file:org.apache.hadoop.hbase.procedure2.store.wal.TestStressWALProcedureStore.java
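
Each worker thread advances the shared procedure-id counter with addAndGet(), and get() is asserted at the end to confirm the counter passed LAST_PROC_ID.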

@Test
public void testInsertUpdateDelete() throws Exception {
    final long LAST_PROC_ID = 19999;
    final Thread[] thread = new Thread[PROCEDURE_STORE_SLOTS];
    final AtomicLong procCounter = new AtomicLong((long) Math.round(Math.random() * 100));
    for (int i = 0; i < thread.length; ++i) {
        thread[i] = new Thread() {
            @Override
            public void run() {
                Random rand = new Random();
                TestProcedure proc;
                do {
                    // After HBASE-15579 there may be gap in the procId sequence, trying to simulate that.
                    long procId = procCounter.addAndGet(1 + rand.nextInt(3));
                    proc = new TestProcedure(procId);
                    // Insert
                    procStore.insert(proc, null);
                    // Update
                    for (int i = 0, nupdates = rand.nextInt(10); i <= nupdates; ++i) {
                        try {
                            Thread.sleep(0, rand.nextInt(15));
                        } catch (InterruptedException e) {
                        }
                        procStore.update(proc);
                    }
                    // Delete
                    procStore.delete(proc.getProcId());
                } while (proc.getProcId() < LAST_PROC_ID);
            }
        };
        thread[i].start();
    }

    for (int i = 0; i < thread.length; ++i) {
        thread[i].join();
    }

    procStore.getStoreTracker().dump();
    assertTrue(procCounter.get() >= LAST_PROC_ID);
    assertTrue(procStore.getStoreTracker().isEmpty());
    assertEquals(1, procStore.getActiveLogs().size());
}

From source file:com.jivesoftware.os.amza.service.storage.binary.BinaryRowReaderWriterTest.java
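
The scanner tasks increment the scanned counter for every row they see, and the writer task calls get() every 10,000 writes to log scan progress.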

@Test(enabled = false)
public void testConcurrency() throws Exception {
    MemoryBackedWALFiler walFiler = new MemoryBackedWALFiler(
            new MultiAutoGrowingByteBufferBackedFiler(32, 1_024 * 1_024, new HeapByteBufferFactory()));
    IoStats ioStats = new IoStats();
    BinaryRowReader binaryRowReader = new BinaryRowReader(walFiler);
    BinaryRowWriter binaryRowWriter = new BinaryRowWriter(walFiler);

    ExecutorService executors = Executors.newFixedThreadPool(9);
    AtomicBoolean running = new AtomicBoolean(true);
    AtomicLong scanned = new AtomicLong();
    List<Future<?>> futures = Lists.newArrayList();
    for (int i = 0; i < 8; i++) {
        futures.add(executors.submit(() -> {
            try {
                while (running.get()) {
                    binaryRowReader.scan(ioStats, 0, false, (rowFP, rowTxId, rowType, row) -> {
                        scanned.incrementAndGet();
                        return true;
                    });
                }
                return true;
            } catch (Throwable t) {
                t.printStackTrace();
                throw t;
            }
        }));
    }
    futures.add(executors.submit(() -> {
        try {
            for (int i = 0; i < 1_000_000; i++) {
                byte[] row = UIO.intBytes(i);
                binaryRowWriter.write(ioStats, i, RowType.primary, 1, 16, stream -> stream.stream(row),
                        stream -> true,
                        (txId, prefix, key, value, valueTimestamp, valueTombstoned, valueVersion, fp) -> true,
                        false, false);
                if (i % 10_000 == 0) {
                    System.out.println("Finished i:" + i + " scanned:" + scanned.get());
                }
            }
        } finally {
            running.set(false);
        }
        return null;
    }));

    for (Future<?> future : futures) {
        future.get();
    }
}

From source file:edu.mayo.cts2.framework.webapp.rest.controller.MethodTimingAspect.java
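
threadId carries the worker thread's id out of the Callable; get() is used from the calling thread to set and clear per-thread timeout flags.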

/**
 * Execute.
 *
 * @param pjp the pjp
 * @return the object
 * @throws Throwable the throwable
 */
@Around("execution(public *"
        + " edu.mayo.cts2.framework.webapp.rest.controller.*.*(..,edu.mayo.cts2.framework.webapp.rest.command.QueryControl,..))")
public Object execute(final ProceedingJoinPoint pjp) throws Throwable {

    QueryControl queryControl = null;

    //this should never happen
    if (ArrayUtils.isEmpty(pjp.getArgs())) {
        throw new IllegalStateException("Pointcut failure!");
    }

    for (Object arg : pjp.getArgs()) {
        if (arg.getClass() == QueryControl.class) {
            queryControl = (QueryControl) arg;
            break;
        }
    }

    //this also should never happen
    if (queryControl == null) {
        throw new IllegalStateException("Pointcut failure!");
    }

    final AtomicLong threadId = new AtomicLong(-1);

    Future<Object> future = this.executorService.submit(new Callable<Object>() {

        @Override
        public Object call() {
            try {
                threadId.set(Thread.currentThread().getId());

                /*
                 * The model here is that we clear any previous timeout before we launch the job. A design flaw is that we
                 * can't tell if we are clearing a previous timeout that simply hadn't been cleaned up yet, or if we are
                 * clearing a timeout meant for this thread that happened before this thread even launched. The second scenario 
                 * seems unlikely as the minimum timeout is 1 second - hard to believe it would take more than 1 second to 
                 * launch this thread. Plus, this thread would have to launch in the exact window in between the timeout and 
                 * the future.cancel()
                 * 
                 * If the above scenario did defy all odds and happen, it shouldn't cause much harm, as the end result would
                 * be that this thread wouldn't see the cancelled flag - and would churn away for no reason, wasting some cpu
                 * cycles, but doing no other harm.
                 */

                Timeout.clearThreadFlag(threadId.get());
                return pjp.proceed();
            } catch (Throwable e) {

                if (e instanceof Error) {
                    throw (Error) e;
                }

                if (e instanceof RuntimeException) {
                    throw (RuntimeException) e;
                }

                throw new RuntimeException(e);
            }
        }
    });

    long time = queryControl.getTimelimit();

    try {
        if (time < 0) {
            return future.get();
        } else {
            return future.get(time, TimeUnit.SECONDS);
        }
    } catch (ExecutionException e) {
        throw e.getCause();
    } catch (TimeoutException e) {
        try {
            //Set the flag for the processing thread to read
            Timeout.setTimeLimitExceeded(threadId.get());

            //Schedule another future to make sure we don't cause a memory leak if the thread IDs aren't being reused (though, they should be)
            //and therefore don't get cleared up by the next run.  Give the running thread 30 seconds to see the cancelled flag before this 
            //cleanup takes place.
            this.scheduledExecutorService.schedule(new Runnable() {
                @Override
                public void run() {
                    Timeout.clearThreadFlag(threadId.get());
                }
            }, 30, TimeUnit.SECONDS);

            //Interrupt the processing thread so it has an opportunity to check the flag and stop.
            future.cancel(true);
        } catch (Exception e1) {
            // don't think this is possible, but just in case...
        }
        throw ExceptionFactory.createTimeoutException(e.getMessage());
    }
}

From source file:org.apache.usergrid.services.assets.data.GoogleBinaryStore.java
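
writtenSize tracks the number of bytes uploaded; get() is compared against the chunk size to choose a direct upload and against maxSizeBytes to abort oversized files.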

@Override
public void write(UUID appId, Entity entity, InputStream inputStream) throws Exception {

    getService();

    final AtomicLong writtenSize = new AtomicLong();

    final int chunkSize = 1024; // one KB

    // determine the maximum file size allowed, defaulting to 50 MB
    long maxSizeBytes = 50 * FileUtils.ONE_MB;
    String maxSizeMbString = properties.getProperty("usergrid.binary.max-size-mb", "50");
    if (StringUtils.isNumeric(maxSizeMbString)) {
        maxSizeBytes = Long.parseLong(maxSizeMbString) * FileUtils.ONE_MB;
    }

    byte[] firstData = new byte[chunkSize];
    int firstSize = inputStream.read(firstData);
    writtenSize.addAndGet(firstSize);

    // from the first sample chunk, determine the content type
    final String contentType = AssetMimeHandler.get().getMimeType(entity, firstData);

    // Convert to the Google Cloud Storage Blob
    final BlobId blobId = BlobId.of(bucketName, AssetUtils.buildAssetKey(appId, entity));
    final BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType(contentType).build();

    // always allow files up to 5mb
    if (maxSizeBytes < 5 * FileUtils.ONE_MB) {
        maxSizeBytes = 5 * FileUtils.ONE_MB;
    }

    EntityManager em = entityManagerFactory.getEntityManager(appId);
    Map<String, Object> fileMetadata = AssetUtils.getFileMetadata(entity);

    // directly upload files that are smaller than the chunk size
    if (writtenSize.get() < chunkSize) {

        // Upload to Google cloud Storage
        instance.create(blobInfo, firstData);

    } else {

        WriteChannel writer = instance.writer(blobInfo);

        // write the initial sample data used to determine file type
        writer.write(ByteBuffer.wrap(firstData, 0, firstData.length));

        // start writing remaining chunks from the stream
        byte[] buffer = new byte[chunkSize];
        int limit;
        while ((limit = inputStream.read(buffer)) >= 0) {

            writtenSize.addAndGet(limit);
            if (writtenSize.get() > maxSizeBytes) {
                try {
                    fileMetadata.put("error", "Asset size is larger than max size of " + maxSizeBytes);
                    em.update(entity);

                } catch (Exception e) {
                    logger.error("Error updating entity with error message", e);
                }
                return;
            }

            try {
                writer.write(ByteBuffer.wrap(buffer, 0, limit));

            } catch (Exception ex) {
                logger.error("Error writing chunk to Google Cloud Storage for asset ");
            }
        }

        writer.close();
    }

    fileMetadata.put(AssetUtils.CONTENT_LENGTH, writtenSize.get());
    fileMetadata.put(AssetUtils.LAST_MODIFIED, System.currentTimeMillis());
    fileMetadata.put(AssetUtils.E_TAG, RandomStringUtils.randomAlphanumeric(10));
    fileMetadata.put(AssetUtils.CONTENT_TYPE, contentType);

    try {
        em.update(entity);
    } catch (Exception e) {
        throw new IOException("Unable to update entity filedata", e);
    }

}

From source file:org.apache.hadoop.hbase.replication.regionserver.TestRegionReplicaReplicationEndpoint.java
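
A mocked sink returns the skippedEdits counter, and get() verifies that both edits destined for the disabled or dropped table were skipped.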

public void testRegionReplicaReplicationIgnoresDisabledTables(boolean dropTable) throws Exception {
    // tests that edits from a disabled or dropped table are handled correctly by skipping those
    // entries, and that further edits after the dropped/disabled table's edits can still be
    // replicated without problems.
    TableName tableName = TableName.valueOf("testRegionReplicaReplicationIgnoresDisabledTables" + dropTable);
    HTableDescriptor htd = HTU.createTableDescriptor(tableName.toString());
    int regionReplication = 3;
    htd.setRegionReplication(regionReplication);
    HTU.deleteTableIfAny(tableName);
    HTU.getHBaseAdmin().createTable(htd);
    TableName toBeDisabledTable = TableName.valueOf(dropTable ? "droppedTable" : "disabledTable");
    HTU.deleteTableIfAny(toBeDisabledTable);
    htd = HTU.createTableDescriptor(toBeDisabledTable.toString());
    htd.setRegionReplication(regionReplication);
    HTU.getHBaseAdmin().createTable(htd);

    // both tables are created, now pause replication
    ReplicationAdmin admin = new ReplicationAdmin(HTU.getConfiguration());
    admin.disablePeer(ServerRegionReplicaUtil.getReplicationPeerId());

    // now that the replication is disabled, write to the table to be dropped, then drop the table.

    Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
    Table table = connection.getTable(tableName);
    Table tableToBeDisabled = connection.getTable(toBeDisabledTable);

    HTU.loadNumericRows(tableToBeDisabled, HBaseTestingUtility.fam1, 6000, 7000);

    AtomicLong skippedEdits = new AtomicLong();
    RegionReplicaReplicationEndpoint.RegionReplicaOutputSink sink = mock(
            RegionReplicaReplicationEndpoint.RegionReplicaOutputSink.class);
    when(sink.getSkippedEditsCounter()).thenReturn(skippedEdits);
    RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter sinkWriter = new RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter(
            sink, (ClusterConnection) connection, Executors.newSingleThreadExecutor(), Integer.MAX_VALUE);
    RegionLocator rl = connection.getRegionLocator(toBeDisabledTable);
    HRegionLocation hrl = rl.getRegionLocation(HConstants.EMPTY_BYTE_ARRAY);
    byte[] encodedRegionName = hrl.getRegionInfo().getEncodedNameAsBytes();

    Entry entry = new Entry(new WALKey(encodedRegionName, toBeDisabledTable, 1), new WALEdit());

    HTU.getHBaseAdmin().disableTable(toBeDisabledTable); // disable the table
    if (dropTable) {
        HTU.getHBaseAdmin().deleteTable(toBeDisabledTable);
    }

    sinkWriter.append(toBeDisabledTable, encodedRegionName, HConstants.EMPTY_BYTE_ARRAY,
            Lists.newArrayList(entry, entry));

    assertEquals(2, skippedEdits.get());

    try {
        // load some data to the to-be-dropped table

        // load the data to the table
        HTU.loadNumericRows(table, HBaseTestingUtility.fam1, 0, 1000);

        // now enable the replication
        admin.enablePeer(ServerRegionReplicaUtil.getReplicationPeerId());

        verifyReplication(tableName, regionReplication, 0, 1000);

    } finally {
        admin.close();
        table.close();
        rl.close();
        tableToBeDisabled.close();
        HTU.deleteTableIfAny(toBeDisabledTable);
        connection.close();
    }
}