Example usage for com.google.common.collect Iterators peekingIterator

Introduction

On this page you can find example usages for com.google.common.collect Iterators peekingIterator.

Prototype

public static <T> PeekingIterator<T> peekingIterator(Iterator<? extends T> iterator)

Document

Returns a PeekingIterator backed by the given iterator. (A deprecated overload that takes a PeekingIterator simply returns its argument; all of the examples below use the plain-Iterator overload shown above.)
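Before the project examples, here is a minimal self-contained sketch (not taken from any of the projects below) of the contract: peek() returns the next element without advancing the iterator, and wrapping something that is already a PeekingIterator just returns it, as the deprecated overload's documentation states.

import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;

import java.util.Arrays;
import java.util.Iterator;

public class PeekingIteratorDemo {
    public static void main(String[] args) {
        Iterator<String> source = Arrays.asList("a", "b", "c").iterator();
        PeekingIterator<String> it = Iterators.peekingIterator(source);

        System.out.println(it.peek()); // "a", does not advance
        System.out.println(it.peek()); // still "a"
        System.out.println(it.next()); // "a", now the iterator advances
        System.out.println(it.next()); // "b"

        // The deprecated overload simply returns its argument:
        PeekingIterator<String> same = Iterators.peekingIterator(it);
        System.out.println(same == it); // true
    }
}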

Usage

From source file:org.geogit.api.plumbing.diff.DiffTreeVisitor.java

/**
 * Traverse and compare the {@link RevTree#children() children} nodes of two leaf trees, calling
 * {@link #node(Consumer, Node, Node)} for each diff.
 */
private void traverseLeafLeaf(Consumer consumer, Iterator<Node> leftc, Iterator<Node> rightc) {
    PeekingIterator<Node> li = Iterators.peekingIterator(leftc);
    PeekingIterator<Node> ri = Iterators.peekingIterator(rightc);

    while (li.hasNext() && ri.hasNext()) {
        Node lpeek = li.peek();
        Node rpeek = ri.peek();
        int order = ORDER.compare(lpeek, rpeek);
        if (order < 0) {
            node(consumer, li.next(), null);// removal
        } else if (order == 0) {// change
            // same feature at both sides of the traversal; consume both and check
            // whether the node changed
            Node l = li.next();
            Node r = ri.next();
            if (!l.equals(r)) {
                node(consumer, l, r);
            }
        } else {
            node(consumer, null, ri.next());// addition
        }
    }

    checkState(!li.hasNext() || !ri.hasNext(),
            "either the left or the right iterator should have been fully consumed");

    // right fully consumed, any remaining node in left is a removal
    while (li.hasNext()) {
        node(consumer, li.next(), null);
    }

    // left fully consumed, any remaining node in right is an add
    while (ri.hasNext()) {
        node(consumer, null, ri.next());
    }
}
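This is the classic two-pointer merge over two sorted streams: peek() lets the loop compare the heads of both iterators and decide whether the pair represents a removal, a change, or an addition before consuming anything, so no element is lost when only one side advances. The same shape reappears in the Cassandra examples further down.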

From source file:org.kiji.schema.impl.cassandra.CassandraKijiPartition.java

/**
 * Convert a set of (start-token, host) pairs into a set of (token-range, host) pairs.
 *
 * Package private for testing.
 *
 * @param startTokens The set of start tokens with hosts.
 * @return The corresponding token ranges.
 */
static Map<Range<Long>, InetAddress> getTokenRanges(final SortedMap<Long, InetAddress> startTokens) {

    ImmutableMap.Builder<Range<Long>, InetAddress> tokenRangesBldr = ImmutableMap.builder();

    final PeekingIterator<Entry<Long, InetAddress>> startTokensItr = Iterators
            .peekingIterator(startTokens.entrySet().iterator());

    // Add a range for [-, firstStartToken) owned by the final key (the wrap-around range).
    // For more information on Cassandra VNode token ranges:
    //    http://www.datastax.com/dev/blog/virtual-nodes-in-cassandra-1-2
    tokenRangesBldr.put(Range.lessThan(startTokens.firstKey()), startTokens.get(startTokens.lastKey()));

    while (startTokensItr.hasNext()) {
        Entry<Long, InetAddress> startToken = startTokensItr.next();
        if (!startTokensItr.hasNext()) {
            // The final start token
            // Add a range for [lastStartToken, )
            tokenRangesBldr.put(Range.atLeast(startToken.getKey()), startToken.getValue());
        } else {
            // Add a range for [thisStartToken, nextStartToken)
            tokenRangesBldr.put(Range.closedOpen(startToken.getKey(), startTokensItr.peek().getKey()),
                    startToken.getValue());
        }
    }

    final Map<Range<Long>, InetAddress> tokenRanges = tokenRangesBldr.build();

    // Check that the returned ranges are coherent; most importantly that all possible tokens fall
    // within the returned range set.

    if (startTokens.size() + 1 != tokenRanges.size()) {
        throw new InternalKijiError(
                String.format("Unexpected number of token ranges. start-tokens: %s, token-ranges: %s.",
                        startTokens.size(), tokenRanges.size()));
    }

    final RangeSet<Long> ranges = TreeRangeSet.create();
    for (Range<Long> tokenRange : tokenRanges.keySet()) {
        ranges.add(tokenRange);
    }

    if (!ranges.encloses(Range.closed(Long.MIN_VALUE, Long.MAX_VALUE))) {
        throw new InternalKijiError("Token range does not include all possible tokens.");
    }

    return tokenRanges;
}
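The range arithmetic above is easier to see in isolation. The following sketch uses made-up start tokens (10 and 20) to show how Range.lessThan, Range.closedOpen, and Range.atLeast tile the whole long token space, and how the coherence check at the end of the method verifies that:

import com.google.common.collect.Range;
import com.google.common.collect.RangeSet;
import com.google.common.collect.TreeRangeSet;

public class TokenRangeDemo {
    public static void main(String[] args) {
        RangeSet<Long> ranges = TreeRangeSet.create();
        ranges.add(Range.lessThan(10L));        // wrap-around range below the first start token
        ranges.add(Range.closedOpen(10L, 20L)); // [10, 20)
        ranges.add(Range.atLeast(20L));         // [20, +inf)

        // The same coherence check as above: every possible token is covered.
        System.out.println(ranges.encloses(Range.closed(Long.MIN_VALUE, Long.MAX_VALUE))); // true
    }
}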

From source file:com.linkedin.pinot.core.query.aggregation.function.quantile.digest.QuantileDigest.java

/**
 * Gets the values at the specified quantiles +/- maxError. The list of quantiles must be sorted
 * in increasing order, and each value must be in the range [0, 1]
 */
public List<Long> getQuantiles(List<Double> quantiles) {
    checkArgument(Ordering.natural().isOrdered(quantiles), "quantiles must be sorted in increasing order");
    for (double quantile : quantiles) {
        checkArgument(quantile >= 0 && quantile <= 1, "quantile must be between [0,1]");
    }

    final ImmutableList.Builder<Long> builder = ImmutableList.builder();
    final PeekingIterator<Double> iterator = Iterators.peekingIterator(quantiles.iterator());

    postOrderTraversal(root, new Callback() {
        private double sum = 0;

        @Override
        public boolean process(Node node) {
            sum += node.weightedCount;

            while (iterator.hasNext() && sum > iterator.peek() * weightedCount) {
                iterator.next();

                // we know the max value ever seen, so cap the percentile to provide better error
                // bounds in this case
                long value = Math.min(node.getUpperBound(), max);

                builder.add(value);
            }

            return iterator.hasNext();
        }
    });

    // we finished the traversal without consuming all quantiles. This means the remaining quantiles
    // correspond to the max known value
    while (iterator.hasNext()) {
        builder.add(max);
        iterator.next();
    }

    return builder.build();
}
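The core trick is that the sorted quantiles are consumed lazily: peek() tests the next cutoff against the running weighted sum as many times as needed, and next() is only called once the sum has crossed it. A stripped-down sketch of that pattern, with hypothetical weights in place of the digest traversal:

import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;

import java.util.Arrays;
import java.util.List;

public class ThresholdDemo {
    public static void main(String[] args) {
        List<Double> cutoffs = Arrays.asList(0.25, 0.5, 0.9); // must be sorted
        int[] weights = {10, 20, 30, 40};
        double total = 100;
        PeekingIterator<Double> it = Iterators.peekingIterator(cutoffs.iterator());

        double sum = 0;
        for (int w : weights) {
            sum += w;
            // peek() tests the next cutoff without losing it if it isn't crossed yet
            while (it.hasNext() && sum > it.peek() * total) {
                System.out.println("crossed " + it.next() + " at sum " + sum);
            }
        }
    }
}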

From source file:com.cloudera.crash.Main.java

@Override
public int run(String[] args) throws Exception {
    // generic options parsing...
    HashMultimap<String, String> options = HashMultimap.create();
    List<String> targets = Lists.newArrayList();
    PeekingIterator<String> strings = Iterators.peekingIterator(Iterators.forArray(args));
    while (strings.hasNext()) {
        final String arg = strings.next();
        if (arg.startsWith("--")) {
            final String option = arg.substring(2);
            // guard with hasNext(): peek() on an exhausted iterator would throw NoSuchElementException
            if (FLAGS.contains(option) || !strings.hasNext() || strings.peek().startsWith("--")) {
                options.put(option, "true");
            } else {
                options.putAll(option, Splitter.on(',').split(strings.next()));
            }
        } else {
            targets.add(arg);
        }
    }

    // add directories to the distributed cache
    // -libjars doesn't seem to work with vendor/
    if (options.containsKey("vendor")) {
        for (String path : options.get("vendor")) {
            File file = new File(path);
            if (file.isDirectory()) {
                DistCache.addJarDirToDistributedCache(getConf(), file);
            } else if (file.isFile()) {
                DistCache.addJarToDistributedCache(getConf(), file);
            }
        }
    }

    if (targets.isEmpty()) {
        // TODO: usage
        System.err.println("No script provided!");
        return 1;
    }
    final String scriptName = targets.get(0);

    Script script = new Script(scriptName, Files.toByteArray(new File(scriptName)));
    script.setPipeline(getPipeline());
    script.getAnalytic();

    PipelineResult result = this.run();

    return result.succeeded() ? 0 : 1;
}
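Note the lookahead in the option loop: after a "--" flag, peek() decides whether the following token is a value or another flag without consuming it. Since peek() throws NoSuchElementException on an exhausted iterator, the hasNext() guard added to the condition above matters when a flag is the last argument.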

From source file:com.google.cloud.genomics.localrepo.vcf.VCFReader.java

/**
 * Read the VCF data and pass the data to the provided {@code Callback}.
 */
public <X> X read(Callback<X> callback) throws Exception {
    final PeekingIterator<String> iterator = Iterators.peekingIterator(new AbstractIterator<String>() {
        @Override
        protected String computeNext() {
            try {
                String line = in.readLine();
                return null == line ? endOfData() : line;
            } catch (IOException e) {
                throw Throwables.propagate(e);
            }
        }
    });
    return callback.readVcf(parseMetaInfo(iterator), parseHeader(iterator), new FluentIterable<VCFRecord>() {
        @Override
        public Iterator<VCFRecord> iterator() {
            return Iterators.transform(Iterators.filter(iterator, IS_NOT_EMPTY), PARSE_RECORD);
        }
    });
}
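The combination here is worth isolating: AbstractIterator adapts a reader to an Iterator, and peekingIterator lets the meta-info and header parsers stop at the first line that isn't theirs without consuming it. A self-contained sketch with made-up "##"-prefixed header lines:

import com.google.common.collect.AbstractIterator;
import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.StringReader;
import java.io.UncheckedIOException;

public class LineParserDemo {
    public static void main(String[] args) {
        BufferedReader in = new BufferedReader(new StringReader("##meta1\n##meta2\nrecord1\nrecord2\n"));
        PeekingIterator<String> lines = Iterators.peekingIterator(new AbstractIterator<String>() {
            @Override
            protected String computeNext() {
                try {
                    String line = in.readLine();
                    return line == null ? endOfData() : line;
                } catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            }
        });

        // Consume header lines only; the first record is peeked at, not eaten.
        while (lines.hasNext() && lines.peek().startsWith("##")) {
            System.out.println("header: " + lines.next());
        }
        while (lines.hasNext()) {
            System.out.println("record: " + lines.next());
        }
    }
}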

From source file:org.kiji.schema.impl.cassandra.CassandraQualifierPager.java

/**
 * Initialize the row iterator.
 */
private void initializeRowIterator() {
    // Issue a paged SELECT statement to get all of the qualifiers for this map family from C*.
    // Get the Cassandra table name for this column family
    String cassandraTableName = KijiManagedCassandraTableName
            .getKijiTableName(mTable.getURI(), mTable.getName()).toString();

    // Get the translated name for the column family.
    String translatedLocalityGroup = null;
    String translatedFamily = null;
    try {
        translatedLocalityGroup = mColumnNameTranslator.toCassandraLocalityGroup(mFamily);
        translatedFamily = mColumnNameTranslator.toCassandraColumnFamily(mFamily);
    } catch (NoSuchColumnException nsce) {
        // TODO: Do something here!
        assert (false);
        return;
    }
    BoundStatement boundStatement;
    // Need to get versions here so that we can filter out versions that don't match the data
    // request.  Sadly, there is no way to put the version range restriction into this query, since
    // we aren't restricting the qualifiers at all.
    String queryString = String.format("SELECT %s, %s from %s WHERE %s=? AND %s=? AND %s=?",
            CQLUtils.QUALIFIER_COL, CQLUtils.VERSION_COL, cassandraTableName, CQLUtils.RAW_KEY_COL,
            CQLUtils.LOCALITY_GROUP_COL, CQLUtils.FAMILY_COL);

    // TODO: Make this code more robust for different kinds of filters.
    KijiColumnFilter columnFilter = mColumnRequest.getFilter();

    CassandraAdmin admin = mTable.getAdmin();

    if (null == columnFilter) {

        PreparedStatement preparedStatement = admin.getPreparedStatement(queryString);
        boundStatement = preparedStatement.bind(CassandraByteUtil.bytesToByteBuffer(mEntityId.getHBaseRowKey()),
                translatedLocalityGroup, translatedFamily);
    } else if (columnFilter instanceof KijiColumnRangeFilter) {
        KijiColumnRangeFilter rangeFilter = (KijiColumnRangeFilter) columnFilter;
        boundStatement = createBoundStatementForFilter(admin, rangeFilter, queryString, translatedLocalityGroup,
                translatedFamily);

    } else {
        throw new UnsupportedOperationException(
                "CassandraQualifierPager supports only column ranger filters, not " + columnFilter.getClass());
    }
    boundStatement.setFetchSize(mColumnRequest.getPageSize());
    mRowIterator = Iterators.peekingIterator(admin.execute(boundStatement).iterator());
}

From source file:com.palantir.atlasdb.sweep.SweepTaskRunnerImpl.java

@Override
public SweepResults run(String tableName, int batchSize, @Nullable byte[] startRow) {
    Preconditions.checkNotNull(tableName);
    Preconditions.checkState(!AtlasDbConstants.hiddenTables.contains(tableName));

    if (tableName.startsWith(AtlasDbConstants.NAMESPACE_PREFIX)) {
        // this happens sometimes; I think it's because some places in the code can
        // start this sweeper without doing the full normally ordered KVSModule startup.
        // I did check and sweep.stats did contain the FQ table name for all of the tables,
        // so it is at least broken in some way that still allows namespaced tables to eventually be swept.
        log.warn("The sweeper should not be run on tables passed through namespace mapping.");
        return SweepResults.EMPTY_SWEEP;
    }
    if (keyValueService.getMetadataForTable(tableName).length == 0) {
        log.warn("The sweeper tried to sweep table '{}', but the table does not exist. Skipping table.",
                tableName);
        return SweepResults.EMPTY_SWEEP;
    }

    // Earliest start timestamp of any currently open transaction, with two caveats:
    // (1) unreadableTimestamps are calculated via wall-clock time, and so may not be correct
    //     under pathological clock conditions
    // (2) immutableTimestamps do not account for transactions whose locks have timed out after
    //     checking their locks; such a transaction may have a start timestamp less than the
    //     immutableTimestamp, and it could still get successfully committed (its commit timestamp
    //     may or may not be less than the immutableTimestamp).
    // Note that this is fine, because we'll either
    // (1) force old readers to abort (if they read a garbage collection sentinel), or
    // (2) force old writers to retry (note that we must roll back any uncommitted transactions that
    //     we encounter).
    SweepStrategy sweepStrategy = sweepStrategyManager.get().get(tableName);
    if (sweepStrategy == null) {
        sweepStrategy = SweepStrategy.CONSERVATIVE;
    } else if (sweepStrategy == SweepStrategy.NOTHING) {
        return SweepResults.EMPTY_SWEEP;
    }
    if (startRow == null) {
        startRow = new byte[0];
    }
    RangeRequest rangeRequest = RangeRequest.builder().startRowInclusive(startRow).batchHint(batchSize).build();

    long sweepTimestamp = getSweepTimestamp(sweepStrategy);
    ClosableIterator<RowResult<Value>> valueResults;
    if (sweepStrategy == SweepStrategy.CONSERVATIVE) {
        valueResults = ClosableIterators.wrap(ImmutableList.<RowResult<Value>>of().iterator());
    } else {
        valueResults = keyValueService.getRange(tableName, rangeRequest, sweepTimestamp);
    }

    ClosableIterator<RowResult<Set<Long>>> rowResults = keyValueService.getRangeOfTimestamps(tableName,
            rangeRequest, sweepTimestamp);

    try {
        List<RowResult<Set<Long>>> rowResultTimestamps = ImmutableList
                .copyOf(Iterators.limit(rowResults, batchSize));
        PeekingIterator<RowResult<Value>> peekingValues = Iterators.peekingIterator(valueResults);
        Set<Cell> sentinelsToAdd = Sets.newHashSet();
        Multimap<Cell, Long> rowTimestamps = getTimestampsFromRowResults(rowResultTimestamps, sweepStrategy);
        Multimap<Cell, Long> cellTsPairsToSweep = getCellTsPairsToSweep(rowTimestamps, peekingValues,
                sweepTimestamp, sweepStrategy, sentinelsToAdd);
        sweepCells(tableName, cellTsPairsToSweep, sentinelsToAdd);
        byte[] nextRow = rowResultTimestamps.size() < batchSize ? null
                : RangeRequests.getNextStartRow(false, Iterables.getLast(rowResultTimestamps).getRowName());
        return new SweepResults(nextRow, rowResultTimestamps.size(), cellTsPairsToSweep.size());
    } finally {
        rowResults.close();
        valueResults.close();
    }
}

From source file:org.apache.cassandra.db.view.TableViews.java

/**
 * Given some updates on the base table of this object and the existing values for the rows affected by those
 * updates, generates the mutations to be applied to the provided views.
 *
 * @param views the views potentially affected by {@code updates}.
 * @param updates the base table updates being applied.
 * @param existings the existing values for the rows affected by {@code updates}. This is used to decide if a view is
 * obsoleted by the update and should be removed, gather the values for columns that may not be part of the update if
 * a new view entry needs to be created, and compute the minimal updates to be applied if the view entry isn't changed
 * but has simply some updated values. This will be empty for view building as we want to assume anything we'll pass
 * to {@code updates} is new.
 * @param nowInSec the current time in seconds.
 * @return the mutations to apply to the {@code views}. This can be empty.
 */
public Collection<Mutation> generateViewUpdates(Collection<View> views, UnfilteredRowIterator updates,
        UnfilteredRowIterator existings, int nowInSec) {
    assert updates.metadata().cfId.equals(baseTableMetadata.cfId);

    List<ViewUpdateGenerator> generators = new ArrayList<>(views.size());
    for (View view : views)
        generators.add(new ViewUpdateGenerator(view, updates.partitionKey(), nowInSec));

    DeletionTracker existingsDeletion = new DeletionTracker(existings.partitionLevelDeletion());
    DeletionTracker updatesDeletion = new DeletionTracker(updates.partitionLevelDeletion());

    /*
     * We iterate through the updates and the existing rows in parallel. This allows us to know the consequence
     * on the view of each update.
     */
    PeekingIterator<Unfiltered> existingsIter = Iterators.peekingIterator(existings);
    PeekingIterator<Unfiltered> updatesIter = Iterators.peekingIterator(updates);

    while (existingsIter.hasNext() && updatesIter.hasNext()) {
        Unfiltered existing = existingsIter.peek();
        Unfiltered update = updatesIter.peek();

        Row existingRow;
        Row updateRow;
        int cmp = baseTableMetadata.comparator.compare(update, existing);
        if (cmp < 0) {
            // We have an update where there was nothing before
            if (update.isRangeTombstoneMarker()) {
                updatesDeletion.update(updatesIter.next());
                continue;
            }

            updateRow = ((Row) updatesIter.next()).withRowDeletion(updatesDeletion.currentDeletion());
            existingRow = emptyRow(updateRow.clustering(), existingsDeletion.currentDeletion());
        } else if (cmp > 0) {
            // We have something existing but no update (which will happen either because it's a range tombstone marker in
            // existing, or because we've fetched the existing row due to some partition/range deletion in the updates)
            if (existing.isRangeTombstoneMarker()) {
                existingsDeletion.update(existingsIter.next());
                continue;
            }

            existingRow = ((Row) existingsIter.next()).withRowDeletion(existingsDeletion.currentDeletion());
            updateRow = emptyRow(existingRow.clustering(), updatesDeletion.currentDeletion());

            // The way we build the read command used for existing rows, we should always have updatesDeletion.currentDeletion()
            // that is not live, since we wouldn't have read the existing row otherwise. We could assert that, but if we ever
            // change the read method so that it can slightly over-read in some cases, that would be an easy bug to miss,
            // so we just handle the case.
            if (updateRow == null)
                continue;
        } else {
            // We're updating a row that had pre-existing data
            if (update.isRangeTombstoneMarker()) {
                assert existing.isRangeTombstoneMarker();
                updatesDeletion.update(updatesIter.next());
                existingsDeletion.update(existingsIter.next());
                continue;
            }

            assert !existing.isRangeTombstoneMarker();
            existingRow = ((Row) existingsIter.next()).withRowDeletion(existingsDeletion.currentDeletion());
            updateRow = ((Row) updatesIter.next()).withRowDeletion(updatesDeletion.currentDeletion());
        }

        addToViewUpdateGenerators(existingRow, updateRow, generators, nowInSec);
    }

    // We only care about more existing rows if the update deletion isn't live, i.e. if we had a partition deletion
    if (!updatesDeletion.currentDeletion().isLive()) {
        while (existingsIter.hasNext()) {
            Unfiltered existing = existingsIter.next();
            // If it's a range tombstone, we don't care, we're only looking for existing entry that gets deleted by
            // the new partition deletion
            if (existing.isRangeTombstoneMarker())
                continue;

            Row existingRow = (Row) existing;
            addToViewUpdateGenerators(existingRow,
                    emptyRow(existingRow.clustering(), updatesDeletion.currentDeletion()), generators,
                    nowInSec);
        }
    }
    while (updatesIter.hasNext()) {
        Unfiltered update = updatesIter.next();
        // If it's a range tombstone, it removes nothing pre-existing, so we can ignore it for view updates
        if (update.isRangeTombstoneMarker())
            continue;

        Row updateRow = (Row) update;
        addToViewUpdateGenerators(emptyRow(updateRow.clustering(), DeletionTime.LIVE), updateRow, generators,
                nowInSec);
    }

    return buildMutations(baseTableMetadata, generators);
}
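This is the head-comparison merge from the GeoGit example again, generalized: the comparator on the peeked elements decides which side advances, next() is deferred until the branch knows which iterator it is consuming, and range tombstone markers are folded into the deletion trackers instead of producing rows.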

From source file:org.sosy_lab.cpachecker.core.algorithm.RestartAlgorithm.java

@Override
public boolean run(ReachedSet pReached) throws CPAException, InterruptedException {
    checkArgument(pReached instanceof ForwardingReachedSet, "RestartAlgorithm needs ForwardingReachedSet");
    checkArgument(pReached.size() <= 1,
            "RestartAlgorithm does not support being called several times with the same reached set");
    checkArgument(!pReached.isEmpty(), "RestartAlgorithm needs non-empty reached set");

    ForwardingReachedSet reached = (ForwardingReachedSet) pReached;

    CFANode mainFunction = AbstractStates.extractLocation(pReached.getFirstState());
    assert mainFunction != null : "Location information needed";

    PeekingIterator<Path> configFilesIterator = Iterators.peekingIterator(configFiles.iterator());

    while (configFilesIterator.hasNext()) {
        stats.totalTime.start();
        @Nullable
        ConfigurableProgramAnalysis currentCpa = null;
        ReachedSet currentReached;
        ShutdownNotifier singleShutdownNotifier = ShutdownNotifier.createWithParent(shutdownNotifier);

        boolean lastAnalysisInterrupted = false;
        boolean lastAnalysisFailed = false;
        boolean lastAnalysisTerminated = false;
        boolean recursionFound = false;

        try {
            Path singleConfigFileName = configFilesIterator.next();
            // extract first part out of file name
            singleConfigFileName = Paths.get(
                    CONFIG_FILE_CONDITION_SPLITTER.split(singleConfigFileName.toString()).iterator().next());

            try {
                Triple<Algorithm, ConfigurableProgramAnalysis, ReachedSet> currentAlg = createNextAlgorithm(
                        singleConfigFileName, mainFunction, singleShutdownNotifier);
                currentAlgorithm = currentAlg.getFirst();
                currentCpa = currentAlg.getSecond();
                currentReached = currentAlg.getThird();
            } catch (InvalidConfigurationException e) {
                logger.logUserException(Level.WARNING, e,
                        "Skipping one analysis because the configuration file "
                                + singleConfigFileName.toString() + " is invalid");
                continue;
            } catch (IOException e) {
                logger.logUserException(Level.WARNING, e,
                        "Skipping one analysis because the configuration file "
                                + singleConfigFileName.toString() + " could not be read");
                continue;
            }

            reached.setDelegate(currentReached);

            if (currentAlgorithm instanceof StatisticsProvider) {
                ((StatisticsProvider) currentAlgorithm).collectStatistics(stats.getSubStatistics());
            }
            shutdownNotifier.shutdownIfNecessary();

            stats.noOfAlgorithmsUsed++;

            // run algorithm
            try {
                boolean sound = currentAlgorithm.run(currentReached);

                if (from(currentReached).anyMatch(IS_TARGET_STATE)) {
                    return sound;
                }

                if (!sound) {
                    // if the analysis is not sound and we can proceed with
                    // another algorithm, continue with the next algorithm
                    logger.log(Level.INFO, "Analysis result was unsound.");

                } else if (currentReached.hasWaitingState()) {
                    // if there are still states in the waitlist, the result is unknown
                    // continue with the next algorithm
                    logger.log(Level.INFO, "Analysis not completed: There are still states to be processed.");

                } else {
                    // sound analysis and completely finished, terminate
                    return true;
                }
                lastAnalysisTerminated = true;

            } catch (CPAException e) {
                lastAnalysisFailed = true;
                if (configFilesIterator.hasNext()) {
                    logger.logUserException(Level.WARNING, e, "Analysis not completed");
                    if (e.getMessage().contains("Unsupported C feature (recursion)")) {
                        recursionFound = true;
                    }
                } else {
                    throw e;
                }
            } catch (InterruptedException e) {
                lastAnalysisInterrupted = true;
                shutdownNotifier.shutdownIfNecessary(); // check if we should also stop
                logger.logUserException(Level.WARNING, e, "Analysis " + stats.noOfAlgorithmsUsed + " stopped");
            }
        } finally {
            singleShutdownNotifier.requestShutdown("Analysis terminated"); // shutdown any remaining components
            stats.totalTime.stop();
        }

        shutdownNotifier.shutdownIfNecessary();

        if (configFilesIterator.hasNext()) {
            // Check if the next config file has a condition,
            // and if it has a condition, check if it matches.
            boolean foundConfig;
            do {
                foundConfig = true;
                String nextConfigFile = configFilesIterator.peek().toString();
                List<String> parts = CONFIG_FILE_CONDITION_SPLITTER.splitToList(nextConfigFile);
                if (parts.size() == 2) {
                    String condition = parts.get(1);
                    switch (condition) {
                    case "if-interrupted":
                        foundConfig = lastAnalysisInterrupted;
                        break;
                    case "if-failed":
                        foundConfig = lastAnalysisFailed;
                        break;
                    case "if-terminated":
                        foundConfig = lastAnalysisTerminated;
                        break;
                    case "if-recursive":
                        foundConfig = recursionFound;
                        break;
                    default:
                        logger.logf(Level.WARNING, "Ignoring invalid restart condition '%s'.", condition);
                        foundConfig = true;
                    }
                    if (!foundConfig) {
                        logger.logf(Level.INFO,
                                "Ignoring restart configuration '%s' because condition %s did not match.",
                                parts.get(0), condition);
                        configFilesIterator.next();
                        stats.noOfAlgorithmsUsed++;
                    }
                }
            } while (!foundConfig && configFilesIterator.hasNext());
        }

        if (configFilesIterator.hasNext()) {
            stats.printIntermediateStatistics(System.out, Result.UNKNOWN, currentReached);
            stats.resetSubStatistics();

            if (currentCpa != null) {
                CPAs.closeCpaIfPossible(currentCpa, logger);
            }
            CPAs.closeIfPossible(currentAlgorithm, logger);

            logger.log(Level.INFO, "RestartAlgorithm switches to the next configuration...");
        }
    }

    // no further configuration available, and analysis has not finished
    logger.log(Level.INFO, "No further configuration available.");
    return false;
}
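The condition-matching loop relies on the same peek-then-maybe-consume idiom: peek() evaluates the next configuration's condition, and next() is only called to discard it when the condition fails, so a matching configuration is still in the iterator for the next outer iteration. A minimal sketch with made-up "name::condition" entries:

import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;

import java.util.Arrays;

public class ConditionalSkipDemo {
    static boolean conditionHolds(String entry, boolean lastFailed, boolean lastTerminated) {
        if (!entry.contains("::")) {
            return true; // unconditional entry
        }
        switch (entry.substring(entry.indexOf("::") + 2)) {
        case "if-failed":
            return lastFailed;
        case "if-terminated":
            return lastTerminated;
        default:
            return true; // unknown conditions are ignored, as in the code above
        }
    }

    public static void main(String[] args) {
        PeekingIterator<String> configs = Iterators
                .peekingIterator(Arrays.asList("a::if-failed", "b::if-terminated", "c").iterator());
        boolean lastFailed = false;
        boolean lastTerminated = true;

        while (configs.hasNext()) {
            if (!conditionHolds(configs.peek(), lastFailed, lastTerminated)) {
                System.out.println("skipping " + configs.next()); // consumed only to discard
                continue;
            }
            System.out.println("running " + configs.next());
        }
    }
}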

From source file:org.apache.cassandra.db.rows.Rows.java

/**
 * Given the result ({@code merged}) of merging multiple {@code inputs}, signals the difference between
 * each input and {@code merged} to {@code diffListener}.
 * <p>
 * Note that this method doesn't only emit cells etc where there's a difference. The listener is informed
 * of every corresponding entity between the merged and input rows, including those that are equal.
 *
 * @param diffListener the listener to which to signal the differences between the inputs and the merged result.
 * @param merged the result of merging {@code inputs}.
 * @param inputs the inputs whose merge yielded {@code merged}.
 */
public static void diff(RowDiffListener diffListener, Row merged, Row... inputs) {
    Clustering clustering = merged.clustering();
    LivenessInfo mergedInfo = merged.primaryKeyLivenessInfo().isEmpty() ? null
            : merged.primaryKeyLivenessInfo();
    Row.Deletion mergedDeletion = merged.deletion().isLive() ? null : merged.deletion();
    for (int i = 0; i < inputs.length; i++) {
        Row input = inputs[i];
        LivenessInfo inputInfo = input == null || input.primaryKeyLivenessInfo().isEmpty() ? null
                : input.primaryKeyLivenessInfo();
        Row.Deletion inputDeletion = input == null || input.deletion().isLive() ? null : input.deletion();

        if (mergedInfo != null || inputInfo != null)
            diffListener.onPrimaryKeyLivenessInfo(i, clustering, mergedInfo, inputInfo);
        if (mergedDeletion != null || inputDeletion != null)
            diffListener.onDeletion(i, clustering, mergedDeletion, inputDeletion);
    }

    List<Iterator<ColumnData>> inputIterators = new ArrayList<>(1 + inputs.length);
    inputIterators.add(merged.iterator());
    for (Row row : inputs)
        inputIterators.add(row == null ? Collections.emptyIterator() : row.iterator());

    Iterator<?> iter = MergeIterator.get(inputIterators, ColumnData.comparator,
            new MergeIterator.Reducer<ColumnData, Object>() {
                ColumnData mergedData;
                ColumnData[] inputDatas = new ColumnData[inputs.length];

                public void reduce(int idx, ColumnData current) {
                    if (idx == 0)
                        mergedData = current;
                    else
                        inputDatas[idx - 1] = current;
                }

                protected Object getReduced() {
                    for (int i = 0; i != inputDatas.length; i++) {
                        ColumnData input = inputDatas[i];
                        if (mergedData != null || input != null) {
                            ColumnDefinition column = (mergedData != null ? mergedData : input).column;
                            if (column.isSimple()) {
                                diffListener.onCell(i, clustering, (Cell) mergedData, (Cell) input);
                            } else {
                                ComplexColumnData mergedData = (ComplexColumnData) this.mergedData;
                                ComplexColumnData inputData = (ComplexColumnData) input;
                                if (mergedData == null) {
                                    // Everything in inputData has been shadowed
                                    if (!inputData.complexDeletion().isLive())
                                        diffListener.onComplexDeletion(i, clustering, column, null,
                                                inputData.complexDeletion());
                                    for (Cell inputCell : inputData)
                                        diffListener.onCell(i, clustering, null, inputCell);
                                } else if (inputData == null) {
                                    // Everything in mergedData is new
                                    if (!mergedData.complexDeletion().isLive())
                                        diffListener.onComplexDeletion(i, clustering, column,
                                                mergedData.complexDeletion(), null);
                                    for (Cell mergedCell : mergedData)
                                        diffListener.onCell(i, clustering, mergedCell, null);
                                } else {

                                    if (!mergedData.complexDeletion().isLive()
                                            || !inputData.complexDeletion().isLive())
                                        diffListener.onComplexDeletion(i, clustering, column,
                                                mergedData.complexDeletion(), inputData.complexDeletion());

                                    PeekingIterator<Cell> mergedCells = Iterators
                                            .peekingIterator(mergedData.iterator());
                                    PeekingIterator<Cell> inputCells = Iterators
                                            .peekingIterator(inputData.iterator());
                                    while (mergedCells.hasNext() && inputCells.hasNext()) {
                                        int cmp = column.cellPathComparator().compare(mergedCells.peek().path(),
                                                inputCells.peek().path());
                                        if (cmp == 0)
                                            diffListener.onCell(i, clustering, mergedCells.next(),
                                                    inputCells.next());
                                        else if (cmp < 0)
                                            diffListener.onCell(i, clustering, mergedCells.next(), null);
                                        else // cmp > 0
                                            diffListener.onCell(i, clustering, null, inputCells.next());
                                    }
                                    while (mergedCells.hasNext())
                                        diffListener.onCell(i, clustering, mergedCells.next(), null);
                                    while (inputCells.hasNext())
                                        diffListener.onCell(i, clustering, null, inputCells.next());
                                }
                            }
                        }

                    }
                    return null;
                }

                protected void onKeyChange() {
                    mergedData = null;
                    Arrays.fill(inputDatas, null);
                }
            });

    while (iter.hasNext())
        iter.next();
}
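The innermost cell loop is once more the two-iterator, head-comparison merge: compare the peeked cell paths, emit the pair on a tie, advance only the smaller side otherwise, and finally drain whichever iterator still has cells.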