Example usage for java.util.concurrent.atomic AtomicInteger get

List of usage examples for java.util.concurrent.atomic AtomicInteger get

Introduction

On this page you can find example usage of java.util.concurrent.atomic.AtomicInteger.get().

Prototype

public final int get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
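
Before the real-world usages below, here is a minimal self-contained sketch of get() in a multithreaded setting (the class and variable names are our own illustration, not taken from any of the projects cited below):

import java.util.concurrent.atomic.AtomicInteger;

public class AtomicIntegerGetExample {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger counter = new AtomicInteger(0);

        // Increment the counter concurrently from several threads.
        Thread[] workers = new Thread[4];
        for (int i = 0; i < workers.length; i++) {
            workers[i] = new Thread(() -> {
                for (int j = 0; j < 1000; j++) {
                    counter.incrementAndGet();
                }
            });
            workers[i].start();
        }
        for (Thread worker : workers) {
            worker.join();
        }

        // get() performs a volatile read; after join() the main thread is
        // guaranteed to observe all 4000 increments.
        System.out.println(counter.get()); // prints 4000
    }
}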

Usage

From source file:org.apache.hadoop.hbase.client.TestAdmin.java

void splitTest(byte[] splitPoint, byte[][] familyNames, int[] rowCounts, int numVersions, int blockSize)
        throws Exception {
    TableName tableName = TableName.valueOf("testForceSplit");
    StringBuilder sb = new StringBuilder();
    // Add tail to String so can see better in logs where a test is running.
    for (int i = 0; i < rowCounts.length; i++) {
        sb.append("_").append(Integer.toString(rowCounts[i]));
    }
    assertFalse(admin.tableExists(tableName));
    final HTable table = TEST_UTIL.createTable(tableName, familyNames, numVersions, blockSize);

    int rowCount = 0;
    byte[] q = new byte[0];

    // Insert rows into the column families. The number of rows that have values
    // in a specific column family is determined by rowCounts[familyIndex].
    for (int index = 0; index < familyNames.length; index++) {
        ArrayList<Put> puts = new ArrayList<Put>(rowCounts[index]);
        for (int i = 0; i < rowCounts[index]; i++) {
            byte[] k = Bytes.toBytes(i);
            Put put = new Put(k);
            put.add(familyNames[index], q, k);
            puts.add(put);
        }
        table.put(puts);

        if (rowCount < rowCounts[index]) {
            rowCount = rowCounts[index];
        }
    }

    // get the initial layout (should just be one region)
    Map<HRegionInfo, ServerName> m = table.getRegionLocations();
    LOG.info("Initial regions (" + m.size() + "): " + m);
    assertTrue(m.size() == 1);

    // Verify row count
    Scan scan = new Scan();
    ResultScanner scanner = table.getScanner(scan);
    int rows = 0;
    for (@SuppressWarnings("unused") Result result : scanner) {
        rows++;
    }
    scanner.close();
    assertEquals(rowCount, rows);

    // Have an outstanding scan going on to make sure we can scan over splits.
    scan = new Scan();
    scanner = table.getScanner(scan);
    // Scan first row so we are into first region before split happens.
    scanner.next();

    // Split the table
    this.admin.split(tableName.getName(), splitPoint);

    final AtomicInteger count = new AtomicInteger(0);
    Thread t = new Thread("CheckForSplit") {
        public void run() {
            for (int i = 0; i < 45; i++) {
                try {
                    sleep(1000);
                } catch (InterruptedException e) {
                    continue;
                }
                // Check the region layout again.
                Map<HRegionInfo, ServerName> regions = null;
                try {
                    regions = table.getRegionLocations();
                } catch (IOException e) {
                    e.printStackTrace();
                }
                if (regions == null)
                    continue;
                count.set(regions.size());
                if (count.get() >= 2) {
                    LOG.info("Found: " + regions);
                    break;
                }
                LOG.debug("Cycle waiting on split");
            }
            LOG.debug("CheckForSplit thread exited, current region count: " + count.get());
        }
    };
    t.setPriority(Thread.NORM_PRIORITY - 2);
    t.start();
    t.join();

    // Verify row count
    rows = 1; // We counted one row above.
    for (@SuppressWarnings("unused") Result result : scanner) {
        rows++;
        if (rows > rowCount) {
            scanner.close();
            assertTrue("Scanned more than expected (" + rowCount + ")", false);
        }
    }
    scanner.close();
    assertEquals(rowCount, rows);

    Map<HRegionInfo, ServerName> regions = null;
    try {
        regions = table.getRegionLocations();
    } catch (IOException e) {
        e.printStackTrace();
    }
    assertEquals(2, regions.size());
    Set<HRegionInfo> hRegionInfos = regions.keySet();
    HRegionInfo[] r = hRegionInfos.toArray(new HRegionInfo[hRegionInfos.size()]);
    if (splitPoint != null) {
        // make sure the split point matches our explicit configuration
        assertEquals(Bytes.toString(splitPoint), Bytes.toString(r[0].getEndKey()));
        assertEquals(Bytes.toString(splitPoint), Bytes.toString(r[1].getStartKey()));
        LOG.debug("Properly split on " + Bytes.toString(splitPoint));
    } else {
        if (familyNames.length > 1) {
            int splitKey = Bytes.toInt(r[0].getEndKey());
            // check if splitKey is based on the largest column family
            // in terms of its store size
            int deltaForLargestFamily = Math.abs(rowCount / 2 - splitKey);
            LOG.debug(
                    "SplitKey=" + splitKey + "&deltaForLargestFamily=" + deltaForLargestFamily + ", r=" + r[0]);
            for (int index = 0; index < familyNames.length; index++) {
                int delta = Math.abs(rowCounts[index] / 2 - splitKey);
                if (delta < deltaForLargestFamily) {
                    assertTrue(
                            "Delta " + delta + " for family " + index
                                    + " should be at least deltaForLargestFamily " + deltaForLargestFamily,
                            false);
                }
            }
        }
    }
    TEST_UTIL.deleteTable(tableName);
    table.close();
}

From source file:com.spectralogic.ds3client.integration.GetJobManagement_Test.java

@Test
public void testStartReadAllJobUsingStreamedBehavior() throws IOException {
    final String tempPathPrefix = null;
    final Path tempDirectory = Files.createTempDirectory(Paths.get("."), tempPathPrefix);

    try {
        final AtomicInteger numFailuresRecorded = new AtomicInteger(0);

        final FailureEventListener failureEventListener = new FailureEventListener() {
            @Override
            public void onFailure(final FailureEvent failureEvent) {
                numFailuresRecorded.incrementAndGet();
                assertEquals(FailureEvent.FailureActivity.GettingObject, failureEvent.doingWhat());
            }
        };

        final Ds3ClientHelpers.Job readJob = HELPERS.startReadAllJobUsingStreamedBehavior(BUCKET_NAME);
        readJob.attachFailureEventListener(failureEventListener);
        readJob.transfer(new FileObjectGetter(tempDirectory));

        final Collection<File> filesInTempDirectory = FileUtils.listFiles(tempDirectory.toFile(), null, false);

        final List<String> filesWeExpectToBeInTempDirectory = Arrays.asList("beowulf.txt", "lesmis.txt",
                "lesmis-copies.txt", "GreatExpectations.txt");

        for (final File fileInTempDirectory : filesInTempDirectory) {
            assertTrue(filesWeExpectToBeInTempDirectory.contains(fileInTempDirectory.getName()));
        }

        assertEquals(0, numFailuresRecorded.get());
    } finally {
        FileUtils.deleteDirectory(tempDirectory.toFile());
    }
}

From source file:com.spectralogic.ds3client.integration.GetJobManagement_Test.java

@Test
public void testStartReadAllJobUsingRandomAccessBehavior() throws IOException {
    final String tempPathPrefix = null;
    final Path tempDirectory = Files.createTempDirectory(Paths.get("."), tempPathPrefix);

    try {
        final AtomicInteger numFailuresRecorded = new AtomicInteger(0);

        final FailureEventListener failureEventListener = new FailureEventListener() {
            @Override
            public void onFailure(final FailureEvent failureEvent) {
                numFailuresRecorded.incrementAndGet();
                assertEquals(FailureEvent.FailureActivity.GettingObject, failureEvent.doingWhat());
            }
        };

        final Ds3ClientHelpers.Job readJob = HELPERS.startReadAllJobUsingRandomAccessBehavior(BUCKET_NAME);
        readJob.attachFailureEventListener(failureEventListener);
        readJob.transfer(new FileObjectGetter(tempDirectory));

        final Collection<File> filesInTempDirectory = FileUtils.listFiles(tempDirectory.toFile(), null, false);

        final List<String> filesWeExpectToBeInTempDirectory = Arrays.asList("beowulf.txt", "lesmis.txt",
                "lesmis-copies.txt", "GreatExpectations.txt");

        for (final File fileInTempDirectory : filesInTempDirectory) {
            assertTrue(filesWeExpectToBeInTempDirectory.contains(fileInTempDirectory.getName()));
        }

        assertEquals(0, numFailuresRecorded.get());
    } finally {
        FileUtils.deleteDirectory(tempDirectory.toFile());
    }
}

From source file:languages.TabFile.java

/**
 * Returns the value that has the highest {@link #stringCorrelation} with
 * the given {@link String}.
 *
 * @param column       The column to look for values.
 * @param value        The {@link String} to be compared. Only values of the same
 *                     length as {@code value} are returned due to the way
 *                     {@link #stringCorrelation} works.
 * @param ignoredWords Words to be filtered out before doing the comparison.
 * @return The value in the specified column that has the highest
 * correlation.
 */
public String getValueWithHighestCorrelation(int column, String value, List<String> ignoredWords) {
    ArrayList<Thread> threads = new ArrayList<>();
    AtomicInteger currentIndex = new AtomicInteger(0);
    AtomicInteger maxIndex = new AtomicInteger(-1);
    AtomicDouble maxCorr = new AtomicDouble(-1);

    List<String> ignoredWordsCopy = new ArrayList<>(ignoredWords);

    // split all entries up that contain a space
    List<String> stringsToSplit = new ArrayList<>();

    // Find words to split
    for (String word : ignoredWordsCopy) {
        if (word.contains(" ")) {
            stringsToSplit.add(word);
        }
    }

    // Actually do the splitting
    for (String word : stringsToSplit) {
        ignoredWordsCopy.remove(word);
        ignoredWordsCopy.addAll(Arrays.asList(word.split(" ")));
    }

    for (int i = 0; i < AppConfig.getParallelThreadCount(); i++) {
        threads.add(new Thread(() -> {
            int index = currentIndex.getAndIncrement();
            while (index < getRowCount()) {
                if (value.length() == getValueAt(index, column).length()
                        && !ignoredWordsCopy.contains(getValueAt(index, column))
                        && !HangmanSolver.currentWordContainsWrongChar(getValueAt(index, column))) {
                    double corr = stringCorrelation(value, getValueAt(index, column));

                    if (corr > maxCorr.get()) {
                        maxCorr.set(corr);
                        maxIndex.set(index);
                    }
                }

                // Grab the next index
                index = currentIndex.getAndIncrement();
            }
        }));
        threads.get(i).start();
    }

    // Wait for threads
    for (int i = 0; i < AppConfig.getParallelThreadCount(); i++) {
        try {
            threads.get(i).join();
        } catch (InterruptedException e) {
            FOKLogger.log(TabFile.class.getName(), Level.SEVERE, "An error occurred", e);
        }
    }

    return getValueAt(maxIndex.get(), column);
}
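
A hypothetical caller for the method above (this helper is our own sketch and assumes only the signature shown in this snippet, not the rest of the TabFile API):

import java.util.List;

// Hypothetical helper: find the dictionary word in column 0 that best
// matches 'guess', skipping words that were already tried.
static String bestMatch(TabFile words, String guess, List<String> alreadyTried) {
    // Per the javadoc above, only values of the same length as 'guess'
    // are candidates, because of how stringCorrelation works.
    return words.getValueWithHighestCorrelation(0, guess, alreadyTried);
}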

From source file:com.spectralogic.ds3client.integration.GetJobManagement_Test.java

@Test
public void testGetJobUserSuppliedTransferRetryDecorator() throws IOException, InterruptedException {
    final String tempPathPrefix = null;
    final Path tempDirectory = Files.createTempDirectory(Paths.get("."), tempPathPrefix);
    final String fileName = "beowulf.txt";

    try {
        final List<Ds3Object> objects = Lists.newArrayList(new Ds3Object(fileName));

        final GetBulkJobSpectraS3Request getBulkJobSpectraS3Request = new GetBulkJobSpectraS3Request(
                BUCKET_NAME, objects);

        final GetBulkJobSpectraS3Response getBulkJobSpectraS3Response = client
                .getBulkJobSpectraS3(getBulkJobSpectraS3Request);

        final MasterObjectList masterObjectList = getBulkJobSpectraS3Response.getMasterObjectList();

        final AtomicInteger numTimesTransferCalled = new AtomicInteger(0);

        final TransferStrategyBuilder transferStrategyBuilder = new TransferStrategyBuilder()
                .withDs3Client(client).withMasterObjectList(masterObjectList)
                .withChannelBuilder(new FileObjectGetter(tempDirectory))
                .withRangesForBlobs(PartialObjectHelpers.mapRangesToBlob(masterObjectList.getObjects(),
                        PartialObjectHelpers.getPartialObjectsRanges(objects)))
                .withTransferRetryDecorator(new UserSuppliedTransferRetryDecorator(new Monitorable() {
                    @Override
                    public void monitor() {
                        numTimesTransferCalled.getAndIncrement();
                    }
                }));

        final TransferStrategy transferStrategy = transferStrategyBuilder.makeGetTransferStrategy();

        transferStrategy.transfer();

        final Collection<File> filesInTempDirectory = FileUtils.listFiles(tempDirectory.toFile(), null, false);

        for (final File file : filesInTempDirectory) {
            assertEquals(fileName, file.getName());
        }

        assertEquals(1, numTimesTransferCalled.get());
    } finally {
        FileUtils.deleteDirectory(tempDirectory.toFile());
    }
}

From source file:org.apache.bookkeeper.bookie.CreateNewLogTest.java

@Test
public void testLockConsistency() throws Exception {
    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();

    conf.setLedgerDirNames(ledgerDirs);
    conf.setEntryLogFilePreAllocationEnabled(false);
    conf.setEntryLogPerLedgerEnabled(true);
    conf.setMaximumNumberOfActiveEntryLogs(5);

    CountDownLatch latch = new CountDownLatch(1);
    AtomicInteger count = new AtomicInteger(0);

    /*
     * Inject a wait operation into the 'getWritableLedgerDirsForNewLog' method
     * of ledgerDirsManager. getWritableLedgerDirsForNewLog will be called when
     * entryLogManager.createNewLog is called.
     */
    LedgerDirsManager ledgerDirsManager = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold())) {
        /*
         * When getWritableLedgerDirsForNewLog is called for the first time, it
         * will await on 'latch' before calling the super implementation of
         * getWritableLedgerDirsForNewLog.
         */
        @Override
        public List<File> getWritableLedgerDirsForNewLog() throws NoWritableLedgerDirException {
            if (count.incrementAndGet() == 1) {
                try {
                    latch.await();
                } catch (InterruptedException e) {
                    LOG.error("Got InterruptedException while awaiting for latch countdown", e);
                }
            }
            return super.getWritableLedgerDirsForNewLog();
        }
    };

    EntryLogger el = new EntryLogger(conf, ledgerDirsManager);
    EntryLogManagerForEntryLogPerLedger entryLogManager = (EntryLogManagerForEntryLogPerLedger) el
            .getEntryLogManager();

    long firstLedgerId = 100L;
    AtomicBoolean newLogCreated = new AtomicBoolean(false);

    Assert.assertFalse("EntryLogManager cacheMap should not contain entry for firstLedgerId",
            entryLogManager.getCacheAsMap().containsKey(firstLedgerId));
    Assert.assertEquals("Value of the count should be 0", 0, count.get());
    /*
     * In a new thread, create a new log for 'firstLedgerId' and then set
     * 'newLogCreated' to true. Since this is the first createNewLog call,
     * it is going to be blocked until the latch is counted down to 0.
     */
    new Thread() {
        @Override
        public void run() {
            try {
                entryLogManager.createNewLog(firstLedgerId);
                newLogCreated.set(true);
            } catch (IOException e) {
                LOG.error("Got IOException while creating new log", e);
            }
        }
    }.start();

    /*
     * Wait until the entry for 'firstLedgerId' is created in the cacheMap. It
     * will be created because createNewLog is called in the other thread.
     */
    while (!entryLogManager.getCacheAsMap().containsKey(firstLedgerId)) {
        Thread.sleep(200);
    }
    Lock firstLedgersLock = entryLogManager.getLock(firstLedgerId);

    /*
     * Since the latch has not been counted down, the new log should not be
     * created even after waiting for 2 seconds.
     */
    Thread.sleep(2000);
    Assert.assertFalse("New log shouldn't have created", newLogCreated.get());

    /*
     * Create MaximumNumberOfActiveEntryLogs entry logs and run cache cleanup,
     * so that the earliest entry will be removed from the cache.
     */
    for (int i = 1; i <= conf.getMaximumNumberOfActiveEntryLogs(); i++) {
        entryLogManager.createNewLog(firstLedgerId + i);
    }
    entryLogManager.doEntryLogMapCleanup();
    Assert.assertFalse("Entry for that ledger shouldn't be there",
            entryLogManager.getCacheAsMap().containsKey(firstLedgerId));

    /*
     * Now count down the latch so that the other thread can make progress
     * with createNewLog. Since this entry has been evicted from the cache,
     * the newly created entry log will be added to the rotated entry logs.
     */
    latch.countDown();
    while (!newLogCreated.get()) {
        Thread.sleep(200);
    }
    while (entryLogManager.getRotatedLogChannels().size() < 1) {
        Thread.sleep(200);
    }

    /*
     * The entry for 'firstLedgerId' has been removed from the cache, but even
     * in this case the lock we get for 'firstLedgerId' should be the same one
     * we got earlier.
     */
    Lock lockForThatLedgerAfterRemoval = entryLogManager.getLock(firstLedgerId);
    Assert.assertEquals("For a given ledger lock should be the same before and after removal", firstLedgersLock,
            lockForThatLedgerAfterRemoval);
}

From source file:com.indeed.lsmtree.core.TestImmutableBTreeIndex.java

public void testRandom() throws Exception {
    final int[] ints = createTree();
    final ImmutableBTreeIndex.Reader<Integer, Long> reader = new ImmutableBTreeIndex.Reader(tmpDir,
            new IntSerializer(), new LongSerializer(), false);
    final int max = ints[ints.length - 1];
    final AtomicInteger done = new AtomicInteger(8);
    for (int i = 0; i < 8; i++) {
        final int index = i;
        new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    final Random r = new Random(index);
                    for (int i = 0; i < treeSize; i++) {
                        int rand = r.nextInt(max + 1);
                        int insertionindex = Arrays.binarySearch(ints, rand);
                        final Iterator<Generation.Entry<Integer, Long>> iterator = reader.iterator(rand, true);
                        try {
                            assertTrue(iterator.hasNext());
                        } catch (Throwable t) {
                            System.err.println("rand: " + rand);
                            throw Throwables.propagate(t);
                        }
                        Generation.Entry<Integer, Long> entry = iterator.next();
                        assertTrue("entry: " + entry + " rand: " + rand, entry.getKey() >= rand);
                        assertTrue(entry.getKey().longValue() == entry.getValue());
                        if (insertionindex >= 0) {
                            assertTrue(rand == ints[insertionindex]);
                            assertTrue(entry.getKey() == rand);
                            Generation.Entry<Integer, Long> result = reader.get(rand);
                            assertTrue(result.getValue() == rand);
                        } else {
                            if (insertionindex != -1)
                                assertTrue(ints[(~insertionindex) - 1] < rand);
                            assertTrue(
                                    "insertionindex: " + insertionindex + " entry: " + entry
                                            + " ints[!insertionindex]" + ints[~insertionindex],
                                    ints[~insertionindex] == entry.getKey());
                            Generation.Entry<Integer, Long> result = reader.get(rand);
                            assertTrue(result == null);
                        }
                    }
                } finally {
                    done.decrementAndGet();
                }
            }
        }).start();
    }
    while (done.get() > 0) {
        Thread.yield();
    }
    reader.close();
}

From source file:com.splout.db.integration.TestMultiThreadedQueryAndDeploy.java

@Test
@Ignore // Causes some non-deterministic problems, to be analyzed
public void test() throws Throwable {
    FileUtils.deleteDirectory(new File(TMP_FOLDER));
    new File(TMP_FOLDER).mkdirs();

    createSploutEnsemble(N_QNODES, N_DNODES);
    String[] qNodeAddresses = new String[N_QNODES];
    for (int i = 0; i < N_QNODES; i++) {
        qNodeAddresses[i] = getqNodes().get(i).getAddress();
    }

    final SploutClient client = new SploutClient(qNodeAddresses);
    final Tablespace testTablespace = createTestTablespace(N_DNODES);
    final Random random = new Random(SEED);
    final AtomicBoolean failed = new AtomicBoolean(false);
    final AtomicInteger iteration = new AtomicInteger(0);
    final Set<Integer> iterationsSeen = new HashSet<Integer>();

    deployIteration(0, random, client, testTablespace);

    for (QNode qnode : getqNodes()) {
        // Make sure all QNodes are aware of the first deploy.
        // There might be some delay as they have to receive notifications via Hazelcast etc.
        long waitedSoFar = 0;
        QueryStatus status = null;
        SploutClient perQNodeClient = new SploutClient(qnode.getAddress());
        do {
            status = perQNodeClient.query(TABLESPACE, "0", "SELECT * FROM " + TABLE + ";", null);
            Thread.sleep(100);
            waitedSoFar += 100;
            if (waitedSoFar > 5000) {
                throw new AssertionError("Waiting too much on a test condition");
            }
        } while (status == null || status.getError() != null);
        log.info("QNode [" + qnode.getAddress() + "] is ready to serve deploy 0.");
    }

    try {
        // Business logic here
        ExecutorService service = Executors.newFixedThreadPool(N_THREADS);

        // These threads will continuously perform queries and check that the results are consistent.
        // They will also count how many deploys have happened since the beginning.
        for (int i = 0; i < N_THREADS; i++) {
            service.submit(new Runnable() {
                @Override
                public void run() {
                    try {
                        while (true) {
                            int randomDNode = Math.abs(random.nextInt()) % N_DNODES;
                            QueryStatus status = client.query(TABLESPACE, (randomDNode * 10) + "",
                                    "SELECT * FROM " + TABLE + ";", null);
                            log.info("Query status -> " + status);
                            assertEquals(1, status.getResult().size());
                            Map<String, Object> jsonResult = (Map<String, Object>) status.getResult().get(0);
                            Integer seenIteration = (Integer) jsonResult.get("iteration");
                            synchronized (iterationsSeen) {
                                iterationsSeen.add(seenIteration);
                            }
                            assertTrue(seenIteration <= iteration.get());
                            assertEquals(randomDNode, jsonResult.get("dnode"));
                            Thread.sleep(100);
                        }
                    } catch (InterruptedException ie) {
                        // Bye bye
                        log.info("Bye bye!");
                    } catch (Throwable e) {
                        e.printStackTrace();
                        failed.set(true);
                    }
                }
            });
        }

        final SploutConfiguration config = SploutConfiguration.getTestConfig();
        final int iterationsToPerform = config.getInt(QNodeProperties.VERSIONS_PER_TABLESPACE) + 5;
        for (int i = 0; i < iterationsToPerform; i++) {
            iteration.incrementAndGet();
            log.info("Deploy iteration: " + iteration.get());
            deployIteration(iteration.get(), random, client, testTablespace);

            new TestUtils.NotWaitingForeverCondition() {
                @Override
                public boolean endCondition() {
                    synchronized (iterationsSeen) {
                        return iterationsSeen.size() == (iteration.get() + 1);
                    }
                }
            }.waitAtMost(5000);
        }

        assertEquals(false, failed.get());

        service.shutdownNow(); // will interrupt all threads
        while (!service.isTerminated()) {
            Thread.sleep(100);
        }

        CoordinationStructures coord = TestUtils.getCoordinationStructures(config);
        assertNotNull(coord.getCopyVersionsBeingServed().get(TABLESPACE));

        // Assert that there are only MAX_VERSIONS versions of the tablespace (due to old version cleanup)
        new TestUtils.NotWaitingForeverCondition() {

            @Override
            public boolean endCondition() {
                QNodeHandler handler = (QNodeHandler) qNodes.get(0).getHandler();
                int seenVersions = 0;
                for (Map.Entry<TablespaceVersion, Tablespace> tablespaceVersion : handler.getContext()
                        .getTablespaceVersionsMap().entrySet()) {
                    if (tablespaceVersion.getKey().getTablespace().equals(TABLESPACE)) {
                        seenVersions++;
                    }
                }
                return seenVersions <= config.getInt(QNodeProperties.VERSIONS_PER_TABLESPACE);
            }
        }.waitAtMost(5000);
    } finally {
        closeSploutEnsemble();
        FileUtils.deleteDirectory(new File(TMP_FOLDER));
    }
}

From source file:com.twitter.distributedlog.BKLogHandler.java

/**
 * Get a list of all segments in the journal.
 */
protected List<LogSegmentMetadata> forceGetLedgerList(final Comparator<LogSegmentMetadata> comparator,
        final LogSegmentFilter segmentFilter, boolean throwOnEmpty) throws IOException {
    final List<LogSegmentMetadata> ledgers = new ArrayList<LogSegmentMetadata>();
    final AtomicInteger result = new AtomicInteger(-1);
    final CountDownLatch latch = new CountDownLatch(1);
    Stopwatch stopwatch = Stopwatch.createStarted();
    asyncGetLedgerListInternal(comparator, segmentFilter, null,
            new GenericCallback<List<LogSegmentMetadata>>() {
                @Override
                public void operationComplete(int rc, List<LogSegmentMetadata> logSegmentMetadatas) {
                    result.set(rc);
                    if (KeeperException.Code.OK.intValue() == rc) {
                        ledgers.addAll(logSegmentMetadatas);
                    } else {
                        LOG.error("Failed to get ledger list for {} : with error {}", getFullyQualifiedName(),
                                rc);
                    }
                    latch.countDown();
                }
            }, new AtomicInteger(conf.getZKNumRetries()), new AtomicLong(conf.getZKRetryBackoffStartMillis()));
    try {
        latch.await();
    } catch (InterruptedException e) {
        forceGetListStat.registerFailedEvent(stopwatch.stop().elapsed(TimeUnit.MICROSECONDS));
        throw new DLInterruptedException(
                "Interrupted on reading ledger list from zkfor " + getFullyQualifiedName(), e);
    }
    long elapsedMicros = stopwatch.stop().elapsed(TimeUnit.MICROSECONDS);

    KeeperException.Code rc = KeeperException.Code.get(result.get());
    if (rc == KeeperException.Code.OK) {
        forceGetListStat.registerSuccessfulEvent(elapsedMicros);
    } else {
        forceGetListStat.registerFailedEvent(elapsedMicros);
        if (KeeperException.Code.NONODE == rc) {
            throw new LogNotFoundException("Log " + getFullyQualifiedName() + " is not found");
        } else {
            throw new IOException("ZK Exception " + rc + " reading ledger list for " + getFullyQualifiedName());
        }
    }

    if (throwOnEmpty && ledgers.isEmpty()) {
        throw new LogEmptyException("Log " + getFullyQualifiedName() + " is empty");
    }
    return ledgers;
}
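
The example above uses an AtomicInteger together with a CountDownLatch to carry a result code out of an asynchronous callback into the blocked caller. Here is a generic self-contained sketch of that pattern (all names below are our own, not DistributedLog API):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;

public class CallbackResultExample {

    interface Callback {
        void complete(int rc);
    }

    // Pretend asynchronous operation that reports a result code on another thread.
    static void asyncOp(Callback cb) {
        new Thread(() -> cb.complete(0)).start();
    }

    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger result = new AtomicInteger(-1);
        final CountDownLatch latch = new CountDownLatch(1);

        asyncOp(rc -> {
            result.set(rc);    // publish the result code...
            latch.countDown(); // ...then release the waiting thread
        });

        latch.await();
        // set() happens-before countDown(), and await() returns only after the
        // countdown, so this volatile get() is guaranteed to see the result code.
        System.out.println("rc = " + result.get());
    }
}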