Example usage for java.util.concurrent.atomic AtomicLong AtomicLong

Introduction

On this page you can find example usage for the java.util.concurrent.atomic.AtomicLong(long) constructor, collected from open source projects.

Prototype

public AtomicLong(long initialValue) 

Document

Creates a new AtomicLong with the given initial value.
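
A minimal sketch of the constructor on its own, before the project examples below (class and variable names here are illustrative):

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongConstructorDemo {
    public static void main(String[] args) {
        // Start the counter at 100 instead of the 0 the no-arg constructor would use.
        AtomicLong counter = new AtomicLong(100L);
        counter.incrementAndGet();          // atomically 100 -> 101
        counter.addAndGet(9L);              // atomically 101 -> 110
        System.out.println(counter.get());  // prints 110
    }
}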

Usage

From source file:org.apache.nifi.controller.repository.FileSystemRepository.java

public FileSystemRepository(final NiFiProperties nifiProperties) throws IOException {
    this.nifiProperties = nifiProperties;
    // determine the file repository paths and ensure they exist
    final Map<String, Path> fileRespositoryPaths = nifiProperties.getContentRepositoryPaths();
    for (final Path path : fileRespositoryPaths.values()) {
        Files.createDirectories(path);
    }

    this.containers = new HashMap<>(fileRespositoryPaths);
    this.containerNames = new ArrayList<>(containers.keySet());
    index = new AtomicLong(0L);

    for (final String containerName : containerNames) {
        reclaimable.put(containerName, new LinkedBlockingQueue<>(10000));
        archivedFiles.put(containerName, new LinkedBlockingQueue<>(100000));
    }

    final String enableArchiving = nifiProperties.getProperty(NiFiProperties.CONTENT_ARCHIVE_ENABLED);
    final String maxArchiveRetentionPeriod = nifiProperties
            .getProperty(NiFiProperties.CONTENT_ARCHIVE_MAX_RETENTION_PERIOD);
    final String maxArchiveSize = nifiProperties
            .getProperty(NiFiProperties.CONTENT_ARCHIVE_MAX_USAGE_PERCENTAGE);
    final String archiveBackPressureSize = nifiProperties
            .getProperty(NiFiProperties.CONTENT_ARCHIVE_BACK_PRESSURE_PERCENTAGE);

    if ("true".equalsIgnoreCase(enableArchiving)) {
        archiveData = true;

        if (maxArchiveSize == null) {
            throw new RuntimeException("No value specified for property '"
                    + NiFiProperties.CONTENT_ARCHIVE_MAX_USAGE_PERCENTAGE
                    + "' but archiving is enabled. You must configure the max disk usage in order to enable archiving.");
        }

        if (!MAX_ARCHIVE_SIZE_PATTERN.matcher(maxArchiveSize.trim()).matches()) {
            throw new RuntimeException(
                    "Invalid value specified for the '" + NiFiProperties.CONTENT_ARCHIVE_MAX_USAGE_PERCENTAGE
                            + "' property. Value must be in format: <XX>%");
        }
    } else if ("false".equalsIgnoreCase(enableArchiving)) {
        archiveData = false;
    } else {
        LOG.warn("No property set for '{}'; will not archive content", NiFiProperties.CONTENT_ARCHIVE_ENABLED);
        archiveData = false;
    }

    double maxArchiveRatio = 0D;
    double archiveBackPressureRatio = 0.01D;

    if (maxArchiveSize != null && MAX_ARCHIVE_SIZE_PATTERN.matcher(maxArchiveSize.trim()).matches()) {
        maxArchiveRatio = getRatio(maxArchiveSize);

        if (archiveBackPressureSize != null
                && MAX_ARCHIVE_SIZE_PATTERN.matcher(archiveBackPressureSize.trim()).matches()) {
            archiveBackPressureRatio = getRatio(archiveBackPressureSize);
        } else {
            archiveBackPressureRatio = maxArchiveRatio + 0.02D;
        }
    }

    if (maxArchiveRatio > 0D) {
        for (final Map.Entry<String, Path> container : containers.entrySet()) {
            final String containerName = container.getKey();

            final long capacity = container.getValue().toFile().getTotalSpace();
            if (capacity == 0) {
                throw new RuntimeException("System returned total space of the partition for " + containerName
                        + " is zero byte. Nifi can not create a zero sized FileSystemRepository");
            }
            final long maxArchiveBytes = (long) (capacity * (1D - (maxArchiveRatio - 0.02)));
            minUsableContainerBytesForArchive.put(container.getKey(), Long.valueOf(maxArchiveBytes));
            LOG.info(
                    "Maximum Threshold for Container {} set to {} bytes; if volume exceeds this size, archived data will be deleted until it no longer exceeds this size",
                    containerName, maxArchiveBytes);

            final long backPressureBytes = (long) (container.getValue().toFile().getTotalSpace()
                    * archiveBackPressureRatio);
            final ContainerState containerState = new ContainerState(containerName, true, backPressureBytes,
                    capacity);
            containerStateMap.put(containerName, containerState);
        }
    } else {
        for (final String containerName : containerNames) {
            containerStateMap.put(containerName,
                    new ContainerState(containerName, false, Long.MAX_VALUE, Long.MAX_VALUE));
        }
    }

    if (maxArchiveRatio <= 0D) {
        maxArchiveMillis = 0L;
    } else {
        maxArchiveMillis = StringUtils.isEmpty(maxArchiveRetentionPeriod) ? Long.MAX_VALUE
                : FormatUtils.getTimeDuration(maxArchiveRetentionPeriod, TimeUnit.MILLISECONDS);
    }

    this.alwaysSync = Boolean.parseBoolean(nifiProperties.getProperty("nifi.content.repository.always.sync"));
    LOG.info("Initializing FileSystemRepository with 'Always Sync' set to {}", alwaysSync);
    initializeRepository();

    containerCleanupExecutor = new FlowEngine(containers.size(), "Cleanup FileSystemRepository Container",
            true);
}
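
In the constructor above, index = new AtomicLong(0L) is only initialized; a counter like this is typically used for round-robin selection across the configured containers. A stripped-down sketch of that idea (class and method names are illustrative, not NiFi's):

import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

class RoundRobinPicker {
    private final List<String> containerNames;
    private final AtomicLong index = new AtomicLong(0L);

    RoundRobinPicker(List<String> containerNames) {
        this.containerNames = containerNames;
    }

    // getAndIncrement is atomic, so concurrent callers each draw a distinct counter
    // value and choices rotate evenly across containers (overflow ignored in this sketch).
    String nextContainer() {
        long i = index.getAndIncrement();
        return containerNames.get((int) (i % containerNames.size()));
    }
}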

From source file:org.apache.hadoop.hive.llap.cache.BuddyAllocator.java

@VisibleForTesting
public BuddyAllocator(boolean isDirectVal, boolean isMappedVal, int minAllocVal, int maxAllocVal,
        int arenaCount, long maxSizeVal, long defragHeadroom, String mapPath, MemoryManager memoryManager,
        LlapDaemonCacheMetrics metrics, String discardMethod) {
    isDirect = isDirectVal;
    isMapped = isMappedVal;
    minAllocation = minAllocVal;
    maxAllocation = maxAllocVal;
    if (isMapped) {
        try {
            cacheDir = Files.createTempDirectory(FileSystems.getDefault().getPath(mapPath), "llap-", RWX);
        } catch (IOException ioe) {
            // conf validator already checks this, so it will never trigger usually
            throw new AssertionError("Configured mmap directory should be writable", ioe);
        }
    } else {
        cacheDir = null;
    }

    arenaSize = validateAndDetermineArenaSize(arenaCount, maxSizeVal);
    maxSize = validateAndDetermineMaxSize(maxSizeVal);
    memoryManager.updateMaxSize(determineMaxMmSize(defragHeadroom, maxSize));

    minAllocLog2 = 31 - Integer.numberOfLeadingZeros(minAllocation);
    maxAllocLog2 = 31 - Integer.numberOfLeadingZeros(maxAllocation);
    arenaSizeLog2 = 63 - Long.numberOfLeadingZeros(arenaSize);
    maxArenas = (int) (maxSize / arenaSize);
    arenas = new Arena[maxArenas];
    for (int i = 0; i < maxArenas; ++i) {
        arenas[i] = new Arena();
    }
    Arena firstArena = arenas[0];
    firstArena.init(0);
    allocatedArenas.set(1);
    this.memoryManager = memoryManager;
    defragCounters = new AtomicLong[maxAllocLog2 - minAllocLog2 + 1];
    for (int i = 0; i < defragCounters.length; ++i) {
        defragCounters[i] = new AtomicLong(0);
    }
    this.metrics = metrics;
    metrics.incrAllocatedArena();
    boolean isBoth = null == discardMethod || "both".equalsIgnoreCase(discardMethod);
    doUseFreeListDiscard = isBoth || "freelist".equalsIgnoreCase(discardMethod);
    doUseBruteDiscard = isBoth || "brute".equalsIgnoreCase(discardMethod);
    ctxPool = new FixedSizedObjectPool<DiscardContext>(32,
            new FixedSizedObjectPool.PoolObjectHelper<DiscardContext>() {
                @Override
                public DiscardContext create() {
                    return new DiscardContext();
                }

                @Override
                public void resetBeforeOffer(DiscardContext t) {
                }
            });
}
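
The allocator above creates one AtomicLong(0) per allocation-size bucket (defragCounters, sized maxAllocLog2 - minAllocLog2 + 1), presumably so defragmentation statistics can be bumped from many threads without a shared lock. A reduced sketch of that per-bucket counter idea (the bucket layout here is simplified and not Hive's):

import java.util.concurrent.atomic.AtomicLong;

class PerBucketCounters {
    private final AtomicLong[] counters;

    PerBucketCounters(int buckets) {
        counters = new AtomicLong[buckets];
        for (int i = 0; i < buckets; i++) {
            counters[i] = new AtomicLong(0);   // array slots must be populated explicitly
        }
    }

    // Each thread increments its bucket independently; no synchronized block needed.
    void record(int bucket) {
        counters[bucket].incrementAndGet();
    }

    long total() {
        long sum = 0;
        for (AtomicLong c : counters) {
            sum += c.get();
        }
        return sum;
    }
}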

From source file:org.apache.hadoop.hbase.wal.TestFSHLogProvider.java

@Test
public void testLogCleaning() throws Exception {
    LOG.info("testLogCleaning");
    final HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testLogCleaning"))
            .addFamily(new HColumnDescriptor("row"));
    final HTableDescriptor htd2 = new HTableDescriptor(TableName.valueOf("testLogCleaning2"))
            .addFamily(new HColumnDescriptor("row"));
    NavigableMap<byte[], Integer> scopes1 = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : htd.getFamiliesKeys()) {
        scopes1.put(fam, 0);
    }
    NavigableMap<byte[], Integer> scopes2 = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : htd2.getFamiliesKeys()) {
        scopes2.put(fam, 0);
    }
    final Configuration localConf = new Configuration(conf);
    localConf.set(WALFactory.WAL_PROVIDER, FSHLogProvider.class.getName());
    final WALFactory wals = new WALFactory(localConf, null, currentTest.getMethodName());
    final AtomicLong sequenceId = new AtomicLong(1);
    try {
        HRegionInfo hri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW,
                HConstants.EMPTY_END_ROW);
        HRegionInfo hri2 = new HRegionInfo(htd2.getTableName(), HConstants.EMPTY_START_ROW,
                HConstants.EMPTY_END_ROW);
        // we want to mix edits from regions, so pick our own identifier.
        final WAL log = wals.getWAL(UNSPECIFIED_REGION, null);

        // Add a single edit and make sure that rolling won't remove the file
        // Before HBASE-3198 it used to delete it
        addEdits(log, hri, htd, 1, scopes1);
        log.rollWriter();
        assertEquals(1, AbstractFSWALProvider.getNumRolledLogFiles(log));

        // See if there's anything wrong with more than 1 edit
        addEdits(log, hri, htd, 2, scopes1);
        log.rollWriter();
        assertEquals(2, FSHLogProvider.getNumRolledLogFiles(log));

        // Now mix edits from 2 regions, still no flushing
        addEdits(log, hri, htd, 1, scopes1);
        addEdits(log, hri2, htd2, 1, scopes2);
        addEdits(log, hri, htd, 1, scopes1);
        addEdits(log, hri2, htd2, 1, scopes2);
        log.rollWriter();
        assertEquals(3, AbstractFSWALProvider.getNumRolledLogFiles(log));

        // Flush the first region, we expect to see the first two files getting
        // archived. We need to append something or writer won't be rolled.
        addEdits(log, hri2, htd2, 1, scopes2);
        log.startCacheFlush(hri.getEncodedNameAsBytes(), htd.getFamiliesKeys());
        log.completeCacheFlush(hri.getEncodedNameAsBytes());
        log.rollWriter();
        assertEquals(2, AbstractFSWALProvider.getNumRolledLogFiles(log));

        // Flush the second region, which removes all the remaining output files
        // since the oldest was completely flushed and the two others only contain
        // flush information
        addEdits(log, hri2, htd2, 1, scopes2);
        log.startCacheFlush(hri2.getEncodedNameAsBytes(), htd2.getFamiliesKeys());
        log.completeCacheFlush(hri2.getEncodedNameAsBytes());
        log.rollWriter();
        assertEquals(0, AbstractFSWALProvider.getNumRolledLogFiles(log));
    } finally {
        if (wals != null) {
            wals.close();
        }
    }
}

From source file:org.apache.hadoop.hbase.coprocessor.TestWALObserver.java

/**
 * Test WAL replay behavior with WALObserver.
 */
@Test
public void testWALCoprocessorReplay() throws Exception {
    // WAL replay is handled at HRegion::replayRecoveredEdits(), which is
    // ultimately called by HRegion::initialize()
    TableName tableName = TableName.valueOf("testWALCoprocessorReplay");
    final HTableDescriptor htd = getBasic3FamilyHTableDescriptor(tableName);
    final AtomicLong sequenceId = new AtomicLong(0);
    // final HRegionInfo hri =
    // createBasic3FamilyHRegionInfo(Bytes.toString(tableName));
    // final HRegionInfo hri1 =
    // createBasic3FamilyHRegionInfo(Bytes.toString(tableName));
    final HRegionInfo hri = new HRegionInfo(tableName, null, null);

    final Path basedir = FSUtils.getTableDir(this.hbaseRootDir, tableName);
    deleteDir(basedir);
    fs.mkdirs(new Path(basedir, hri.getEncodedName()));

    final Configuration newConf = HBaseConfiguration.create(this.conf);

    // HLog wal = new HLog(this.fs, this.dir, this.oldLogDir, this.conf);
    HLog wal = createWAL(this.conf);
    // Put p = creatPutWith2Families(TEST_ROW);
    WALEdit edit = new WALEdit();
    long now = EnvironmentEdgeManager.currentTimeMillis();
    // addFamilyMapToWALEdit(p.getFamilyMap(), edit);
    final int countPerFamily = 1000;
    // for (HColumnDescriptor hcd: hri.getTableDesc().getFamilies()) {
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        // addWALEdits(tableName, hri, TEST_ROW, hcd.getName(), countPerFamily,
        // EnvironmentEdgeManager.getDelegate(), wal);
        addWALEdits(tableName, hri, TEST_ROW, hcd.getName(), countPerFamily,
                EnvironmentEdgeManager.getDelegate(), wal, htd, sequenceId);
    }
    wal.append(hri, tableName, edit, now, htd, sequenceId);
    // sync to fs.
    wal.sync();

    User user = HBaseTestingUtility.getDifferentUser(newConf, ".replay.wal.secondtime");
    user.runAs(new PrivilegedExceptionAction() {
        public Object run() throws Exception {
            Path p = runWALSplit(newConf);
            LOG.info("WALSplit path == " + p);
            FileSystem newFS = FileSystem.get(newConf);
            // Make a new wal for new region open.
            HLog wal2 = createWAL(newConf);
            HRegion region = HRegion.openHRegion(newConf, FileSystem.get(newConf), hbaseRootDir, hri, htd, wal2,
                    TEST_UTIL.getHBaseCluster().getRegionServer(0), null);
            long seqid2 = region.getOpenSeqNum();

            SampleRegionWALObserver cp2 = (SampleRegionWALObserver) region.getCoprocessorHost()
                    .findCoprocessor(SampleRegionWALObserver.class.getName());
            // TODO: asserting here is problematic.
            assertNotNull(cp2);
            assertTrue(cp2.isPreWALRestoreCalled());
            assertTrue(cp2.isPostWALRestoreCalled());
            region.close();
            wal2.closeAndDelete();
            return null;
        }
    });
}
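
In the replay test above, a single AtomicLong(0) (sequenceId) is handed to every append so that the edits can share one monotonically increasing sequence counter. A minimal sketch of that shared-generator pattern (the append method and edit type are illustrative):

import java.util.concurrent.atomic.AtomicLong;

class SequenceIdDemo {
    // One shared generator; every append draws the next id atomically.
    private final AtomicLong sequenceId = new AtomicLong(0);

    long append(String edit) {
        long seq = sequenceId.incrementAndGet();
        // ... write (seq, edit) to the log here ...
        return seq;
    }
}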

From source file:org.apache.nifi.processors.kite.ConvertCSVToAvro.java

@Override
public void onTrigger(ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile incomingCSV = session.get();
    if (incomingCSV == null) {
        return;
    }

    CSVProperties props = new CSVProperties.Builder()
            .charset(context.getProperty(CHARSET).evaluateAttributeExpressions(incomingCSV).getValue())
            .delimiter(context.getProperty(DELIMITER).evaluateAttributeExpressions(incomingCSV).getValue())
            .quote(context.getProperty(QUOTE).evaluateAttributeExpressions(incomingCSV).getValue())
            .escape(context.getProperty(ESCAPE).evaluateAttributeExpressions(incomingCSV).getValue())
            .hasHeader(context.getProperty(HAS_HEADER).evaluateAttributeExpressions(incomingCSV).asBoolean())
            .linesToSkip(
                    context.getProperty(LINES_TO_SKIP).evaluateAttributeExpressions(incomingCSV).asInteger())
            .build();

    String schemaProperty = context.getProperty(SCHEMA).evaluateAttributeExpressions(incomingCSV).getValue();
    final Schema schema;
    try {
        schema = getSchema(schemaProperty, DefaultConfiguration.get());
    } catch (SchemaNotFoundException e) {
        getLogger().error("Cannot find schema: " + schemaProperty);
        session.transfer(incomingCSV, FAILURE);
        return;
    }

    try (final DataFileWriter<Record> writer = new DataFileWriter<>(
            AvroUtil.newDatumWriter(schema, Record.class))) {
        writer.setCodec(getCodecFactory(context.getProperty(COMPRESSION_TYPE).getValue()));

        try {
            final AtomicLong written = new AtomicLong(0L);
            final FailureTracker failures = new FailureTracker();

            FlowFile badRecords = session.clone(incomingCSV);
            FlowFile outgoingAvro = session.write(incomingCSV, new StreamCallback() {
                @Override
                public void process(InputStream in, OutputStream out) throws IOException {
                    try (CSVFileReader<Record> reader = new CSVFileReader<>(in, props, schema, Record.class)) {
                        reader.initialize();
                        try (DataFileWriter<Record> w = writer.create(schema, out)) {
                            while (reader.hasNext()) {
                                try {
                                    Record record = reader.next();
                                    w.append(record);
                                    written.incrementAndGet();
                                } catch (DatasetRecordException e) {
                                    failures.add(e);
                                }
                            }
                        }
                    }
                }
            });

            long errors = failures.count();

            session.adjustCounter("Converted records", written.get(),
                    false /* update only if file transfer is successful */);
            session.adjustCounter("Conversion errors", errors,
                    false /* update only if file transfer is successful */);

            if (written.get() > 0L) {
                session.transfer(outgoingAvro, SUCCESS);

                if (errors > 0L) {
                    getLogger().warn("Failed to convert {}/{} records from CSV to Avro",
                            new Object[] { errors, errors + written.get() });
                    badRecords = session.putAttribute(badRecords, "errors", failures.summary());
                    session.transfer(badRecords, INCOMPATIBLE);
                } else {
                    session.remove(badRecords);
                }

            } else {
                session.remove(outgoingAvro);

                if (errors > 0L) {
                    getLogger().warn("Failed to convert {}/{} records from CSV to Avro",
                            new Object[] { errors, errors });
                    badRecords = session.putAttribute(badRecords, "errors", failures.summary());
                } else {
                    badRecords = session.putAttribute(badRecords, "errors", "No incoming records");
                }

                session.transfer(badRecords, FAILURE);
            }

        } catch (ProcessException | DatasetIOException e) {
            getLogger().error("Failed reading or writing", e);
            session.transfer(incomingCSV, FAILURE);
        } catch (DatasetException e) {
            getLogger().error("Failed to read FlowFile", e);
            session.transfer(incomingCSV, FAILURE);
        }
    } catch (final IOException ioe) {
        throw new RuntimeException("Unable to close Avro Writer", ioe);
    }
}
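
The processor above counts converted records with an AtomicLong rather than a plain long because the counter is mutated inside an anonymous StreamCallback, where captured locals must be effectively final; the AtomicLong reference stays final while its value changes. A compact sketch of that capture pattern (a plain Runnable stands in for NiFi's callback):

import java.util.concurrent.atomic.AtomicLong;

class CallbackCounterDemo {
    long countInCallback(final int items) {
        final AtomicLong written = new AtomicLong(0L);
        Runnable callback = new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < items; i++) {
                    written.incrementAndGet();  // mutate through the (final) reference
                }
            }
        };
        callback.run();
        return written.get();
    }
}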

From source file:com.pinterest.pinlater.client.PinLaterQueryIssuer.java

private void issueDequeueAckRequests(final PinLater.ServiceIface iface) throws InterruptedException {
    Preconditions.checkNotNull(queueName, "Queue was not specified.");
    final AtomicLong queriesIssued = new AtomicLong(0);
    final Semaphore permits = new Semaphore(concurrency);
    while (numQueries == -1 || queriesIssued.get() < numQueries) {
        final PinLaterDequeueRequest request = new PinLaterDequeueRequest();
        request.setQueueName(queueName);
        request.setLimit(batchSize);
        final long startTimeNanos = System.nanoTime();
        queriesIssued.incrementAndGet();
        permits.acquire();
        iface.dequeueJobs(REQUEST_CONTEXT, request)
                .flatMap(new Function<PinLaterDequeueResponse, Future<Void>>() {
                    @Override
                    public Future<Void> apply(PinLaterDequeueResponse response) {
                        if (response.getJobsSize() == 0) {
                            return Future.Void();
                        }

                        PinLaterJobAckRequest jobAckRequest = new PinLaterJobAckRequest(queueName);
                        for (String job : response.getJobs().keySet()) {
                            if (random.nextInt(100) < dequeueSuccessPercent) {
                                jobAckRequest.addToJobsSucceeded(new PinLaterJobAckInfo(job));
                            } else {
                                jobAckRequest.addToJobsFailed(new PinLaterJobAckInfo(job));
                            }
                        }
                        return iface.ackDequeuedJobs(REQUEST_CONTEXT, jobAckRequest);
                    }
                }).respond(new Function<Try<Void>, BoxedUnit>() {
                    @Override
                    public BoxedUnit apply(Try<Void> voidTry) {
                        permits.release();
                        statsLogger
                                .requestComplete(Duration.fromNanoseconds(System.nanoTime() - startTimeNanos));
                        if (voidTry.isThrow()) {
                            LOG.info("Exception for request: " + request + " : " + ((Throw) voidTry).e());
                        }
                        return BoxedUnit.UNIT;
                    }
                });
    }
    permits.acquire(concurrency);
    LOG.info("Dequeue/ack queries issued: " + queriesIssued);
}

From source file:com.buaa.cfs.nfs3.OpenFileCtx.java

OpenFileCtx(DataOutputStream fos, Nfs3FileAttributes latestAttr, String dumpFilePath, DFSClient client,
        IdMappingServiceProvider iug, boolean aixCompatMode, NfsConfiguration config) {
    this.fos = fos;
    this.latestAttr = latestAttr;
    this.aixCompatMode = aixCompatMode;
    // We use the ReverseComparatorOnMin as the comparator of the map. In this
    // way, we first dump the data with larger offset. In the meanwhile, we
    // retrieve the last element to write back to HDFS.
    pendingWrites = new ConcurrentSkipListMap<OffsetRange, WriteCtx>(OffsetRange.ReverseComparatorOnMin);

    pendingCommits = new ConcurrentSkipListMap<Long, CommitCtx>();

    updateLastAccessTime();
    activeState = true;
    asyncStatus = false;
    asyncWriteBackStartOffset = 0;
    dumpOut = null;
    raf = null;
    nonSequentialWriteInMemory = new AtomicLong(0);

    this.dumpFilePath = dumpFilePath;
    enabledDump = dumpFilePath != null;
    nextOffset = new AtomicLong();
    nextOffset.set(latestAttr.getSize());
    //        try {
    //            assert (nextOffset.get() == this.fos.getPos());
    //        } catch (IOException e) {
    //        }
    dumpThread = null;
    this.client = client;
    this.iug = iug;
    this.uploadLargeFile = config.getBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD,
            NfsConfigKeys.LARGE_FILE_UPLOAD_DEFAULT);
}
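
The comment in this constructor explains why pendingWrites uses a reverse comparator: with reverse ordering, the first entry is the pending write with the largest offset (dumped first) and the last entry is the smallest (written back to HDFS first). A simplified sketch of that ordering, using plain Long offsets in place of the gateway's OffsetRange:

import java.util.Comparator;
import java.util.concurrent.ConcurrentSkipListMap;

class ReverseOffsetMapDemo {
    public static void main(String[] args) {
        // Reverse ordering: firstKey() is the largest offset, lastKey() the smallest.
        ConcurrentSkipListMap<Long, String> pendingWrites =
                new ConcurrentSkipListMap<>(Comparator.<Long>reverseOrder());
        pendingWrites.put(0L, "write@0");
        pendingWrites.put(4096L, "write@4096");
        pendingWrites.put(1024L, "write@1024");
        System.out.println(pendingWrites.firstKey()); // 4096 -> dumped first
        System.out.println(pendingWrites.lastKey());  // 0    -> written back first
    }
}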

From source file:com.github.jackygurui.vertxredissonrepository.repository.Impl.RedisRepositoryImpl.java

private void getByListBlocking(List<String> ids, AsyncResultHandler<List<T>> resultHandler) {
    if (ids == null) {
        resultHandler.handle(Future.failedFuture(new IllegalArgumentException("List of ids can't be null.")));
        return;
    } else if (ids.isEmpty()) {
        resultHandler.handle(Future.succeededFuture(Collections.emptyList()));
        return;
    }
    AtomicLong c = new AtomicLong(0);
    ArrayList<T> l = new ArrayList<>(ids.size());
    IntStream.range(0, ids.size()).forEach(i -> l.add(null));
    ids.stream().forEach(e -> {
        getBlocking(e, r -> {
            l.set(ids.indexOf(e), r.result());
            if (c.incrementAndGet() == ids.size()) {
                resultHandler.handle(Future.succeededFuture(
                        l.stream().filter(s -> s != null).collect(Collectors.toCollection(ArrayList::new))));
            }
        });
    });
}
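
The helper above fires one asynchronous get per id and uses the AtomicLong purely as a completion latch: the callback whose increment brings the count up to ids.size() is the one that reports the combined result. A stripped-down, thread-based sketch of that fan-in idea (plain threads replace the Vert.x callbacks):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Consumer;

class FanInDemo {
    static void fetchAll(List<String> ids, Consumer<List<String>> onDone) {
        AtomicLong completed = new AtomicLong(0);
        String[] results = new String[ids.size()];
        for (int i = 0; i < ids.size(); i++) {
            final int slot = i;
            // Stand-in for an asynchronous call; each callback may run on its own thread.
            new Thread(() -> {
                results[slot] = "value-of-" + ids.get(slot);
                // incrementAndGet provides a happens-before edge, so the callback that
                // sees the final count also sees every slot filled before it.
                if (completed.incrementAndGet() == ids.size()) {
                    onDone.accept(Arrays.asList(results));
                }
            }).start();
        }
    }

    public static void main(String[] args) {
        fetchAll(Arrays.asList("a", "b", "c"), r -> System.out.println(r));
    }
}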

From source file:org.apache.hadoop.hbase.regionserver.wal.TestHLogSplit.java

/**
 * Simulates splitting a WAL out from under a regionserver that is still trying to write it.  Ensures we do not
 * lose edits.
 * @throws IOException
 * @throws InterruptedException
 */
@Test(timeout = 300000)
public void testLogCannotBeWrittenOnceParsed() throws IOException, InterruptedException {
    final AtomicLong counter = new AtomicLong(0);
    AtomicBoolean stop = new AtomicBoolean(false);
    // Region we'll write edits too and then later examine to make sure they all made it in.
    final String region = REGIONS.get(0);
    Thread zombie = new ZombieLastLogWriterRegionServer(this.conf, counter, stop, region);
    try {
        long startCount = counter.get();
        zombie.start();
        // Wait till writer starts going.
        while (startCount == counter.get())
            Threads.sleep(1);
        // Give it a second to write a few appends.
        Threads.sleep(1000);
        final Configuration conf2 = HBaseConfiguration.create(this.conf);
        final User robber = User.createUserForTesting(conf2, ROBBER, GROUP);
        int count = robber.runAs(new PrivilegedExceptionAction<Integer>() {
            @Override
            public Integer run() throws Exception {
                FileSystem fs = FileSystem.get(conf2);
                int expectedFiles = fs.listStatus(HLOGDIR).length;
                HLogSplitter.split(HBASEDIR, HLOGDIR, OLDLOGDIR, fs, conf2);
                Path[] logfiles = getLogForRegion(HBASEDIR, TABLE_NAME, region);
                assertEquals(expectedFiles, logfiles.length);
                int count = 0;
                for (Path logfile : logfiles) {
                    count += countHLog(logfile, fs, conf2);
                }
                return count;
            }
        });
        LOG.info("zombie=" + counter.get() + ", robber=" + count);
        assertTrue(
                "The log file could have at most 1 extra log entry, but can't have less. Zombie could write "
                        + counter.get() + " and logfile had only " + count,
                counter.get() == count || counter.get() + 1 == count);
    } finally {
        stop.set(true);
        zombie.interrupt();
        Threads.threadDumpingIsAlive(zombie);
    }
}

From source file:com.linkedin.pinot.perf.QueryRunner.java

/**
 * Use multiple threads to run query at an increasing target QPS.
 *
 * Use a concurrent linked queue to buffer the queries to be sent. Use the main thread to insert queries into the
 * queue at the target QPS, and start {numThreads} worker threads to fetch queries from the queue and send them.
 * We start with the start QPS, and keep adding delta QPS to the start QPS during the test. The main thread is
 * responsible for collecting the statistic information and log them periodically.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numThreads number of threads sending queries.
 * @param startQPS start QPS
 * @param deltaQPS delta QPS
 * @throws Exception
 */
@SuppressWarnings("InfiniteLoopStatement")
public static void targetQPSQueryRunner(PerfBenchmarkDriverConf conf, String queryFile, int numThreads,
        double startQPS, double deltaQPS) throws Exception {
    final long randomSeed = 123456789L;
    final Random random = new Random(randomSeed);
    final int timePerTargetQPSMillis = 60000;
    final int queueLengthThreshold = Math.max(20, (int) startQPS);

    final List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }
    final int numQueries = queries.size();

    final PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    final AtomicInteger counter = new AtomicInteger(0);
    final AtomicLong totalResponseTime = new AtomicLong(0L);
    final ExecutorService executorService = Executors.newFixedThreadPool(numThreads);

    final ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
    double currentQPS = startQPS;
    int intervalMillis = (int) (MILLIS_PER_SECOND / currentQPS);

    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Runnable() {
            @Override
            public void run() {
                while (true) {
                    String query = queryQueue.poll();
                    if (query == null) {
                        try {
                            Thread.sleep(1);
                            continue;
                        } catch (InterruptedException e) {
                            LOGGER.error("Interrupted.", e);
                            return;
                        }
                    }
                    long startTime = System.currentTimeMillis();
                    try {
                        driver.postQuery(query);
                        counter.getAndIncrement();
                        totalResponseTime.getAndAdd(System.currentTimeMillis() - startTime);
                    } catch (Exception e) {
                        LOGGER.error("Caught exception while running query: {}", query, e);
                        return;
                    }
                }
            }
        });
    }

    LOGGER.info("Start with QPS: {}, delta QPS: {}", startQPS, deltaQPS);
    while (true) {
        long startTime = System.currentTimeMillis();
        while (System.currentTimeMillis() - startTime <= timePerTargetQPSMillis) {
            if (queryQueue.size() > queueLengthThreshold) {
                executorService.shutdownNow();
                throw new RuntimeException("Cannot achieve target QPS of: " + currentQPS);
            }
            queryQueue.add(queries.get(random.nextInt(numQueries)));
            Thread.sleep(intervalMillis);
        }
        double timePassedSeconds = ((double) (System.currentTimeMillis() - startTime)) / MILLIS_PER_SECOND;
        int count = counter.getAndSet(0);
        double avgResponseTime = ((double) totalResponseTime.getAndSet(0)) / count;
        LOGGER.info("Target QPS: {}, Interval: {}ms, Actual QPS: {}, Avg Response Time: {}ms", currentQPS,
                intervalMillis, count / timePassedSeconds, avgResponseTime);

        // Find a new interval
        int newIntervalMillis;
        do {
            currentQPS += deltaQPS;
            newIntervalMillis = (int) (MILLIS_PER_SECOND / currentQPS);
        } while (newIntervalMillis == intervalMillis);
        intervalMillis = newIntervalMillis;
    }
}
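
The runner above aggregates latency across worker threads with an AtomicInteger and an AtomicLong, then snapshots both with getAndSet(0) at the end of each reporting interval so one interval's numbers do not bleed into the next. A condensed sketch of that snapshot-and-reset idea (class and method names are illustrative):

import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;

class IntervalStats {
    private final AtomicInteger count = new AtomicInteger(0);
    private final AtomicLong totalResponseTimeMillis = new AtomicLong(0L);

    // Called by worker threads after each query completes.
    void record(long responseTimeMillis) {
        count.getAndIncrement();
        totalResponseTimeMillis.getAndAdd(responseTimeMillis);
    }

    // Called by the reporting thread once per interval; each getAndSet atomically
    // resets its own counter. As in the runner above, the two resets are not one
    // combined atomic snapshot, which is acceptable for coarse periodic reporting.
    double snapshotAverageMillis() {
        int n = count.getAndSet(0);
        long total = totalResponseTimeMillis.getAndSet(0L);
        return n == 0 ? 0.0 : (double) total / n;
    }
}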