Example usage for com.google.common.collect Iterators limit

List of usage examples for com.google.common.collect Iterators limit

Introduction

On this page you can find example usage for com.google.common.collect Iterators limit.

Prototype

public static <T> Iterator<T> limit(final Iterator<T> iterator, final int limitSize) 

Source Link

Document

Creates an iterator returning the first limitSize elements of the given iterator.
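
Before the project examples below, here is a minimal, self-contained sketch of the behavior described above (the class name and data are invented for illustration):

import com.google.common.collect.Iterators;

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class LimitDemo {
    public static void main(String[] args) {
        List<String> names = Arrays.asList("alpha", "beta", "gamma", "delta");
        // Wrap the source iterator; only the first two elements are returned.
        Iterator<String> firstTwo = Iterators.limit(names.iterator(), 2);
        while (firstTwo.hasNext()) {
            System.out.println(firstTwo.next()); // prints "alpha", then "beta"
        }
    }
}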

Usage

From source file:org.janusgraph.graphdb.tinkerpop.optimize.JanusGraphPropertiesStep.java

@Override
protected Iterator<E> flatMap(final Traverser.Admin<Element> traverser) {
    if (useMultiQuery) { //it is guaranteed that all elements are vertices
        assert multiQueryResults != null;
        return convertIterator(multiQueryResults.get(traverser.get()));
    } else if (traverser.get() instanceof JanusGraphVertex || traverser.get() instanceof WrappedVertex) {
        JanusGraphVertexQuery query = makeQuery(
                (JanusGraphTraversalUtil.getJanusGraphVertex(traverser)).query());
        return convertIterator(query.properties());
    } else {
        //It is some other element (edge or vertex property)
        Iterator<E> iter;
        if (getReturnType().forValues()) {
            assert orders.isEmpty() && hasContainers.isEmpty();
            iter = traverser.get().values(getPropertyKeys());
        } else {
            //this asks for properties
            assert orders.isEmpty();
            //HasContainers don't apply => empty result set
            if (!hasContainers.isEmpty())
                return Collections.emptyIterator();
            iter = (Iterator<E>) traverser.get().properties(getPropertyKeys());
        }
        if (limit != Query.NO_LIMIT)
            iter = Iterators.limit(iter, limit);
        return iter;
    }
}

From source file:com.thinkbiganalytics.metadata.core.dataset.InMemoryDatasourceProvider.java

@Override
public List<Datasource> getDatasources(DatasourceCriteria criteria) {
    // TODO replace cast with copy method
    DatasetCriteriaImpl critImpl = (DatasetCriteriaImpl) criteria;
    Iterator<Datasource> filtered = Iterators.filter(this.datasets.values().iterator(), critImpl);
    Iterator<Datasource> limited = Iterators.limit(filtered, critImpl.getLimit());
    List<Datasource> list = Lists.newArrayList(limited);

    Collections.sort(list, critImpl);
    return list;
}
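
The filter-then-limit composition above is a general idiom: filtering stays lazy, so limiting afterwards bounds how many matches are ever materialized. A standalone sketch (the data and predicate are invented):

import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;

import java.util.Iterator;
import java.util.List;

public class FilterThenLimitDemo {
    public static void main(String[] args) {
        List<Integer> numbers = Lists.newArrayList(1, 2, 3, 4, 5, 6, 7, 8);
        // Lazily keep even numbers, then cap the view at three elements.
        Iterator<Integer> evens = Iterators.filter(numbers.iterator(), n -> n % 2 == 0);
        List<Integer> firstThreeEvens = Lists.newArrayList(Iterators.limit(evens, 3));
        System.out.println(firstThreeEvens); // [2, 4, 6]
    }
}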

From source file:org.fcrepo.kernel.api.utils.iterators.RdfStream.java

/**
 * As {@link Iterators#limit(Iterator, int)} while maintaining context.
 *
 * @param limit the limit
 * @return RDFStream
 */
public RdfStream limit(final Integer limit) {
    return (limit == -1) ? this : withThisContext(Iterators.limit(this, limit));
}

From source file:org.springframework.data.cassandra.core.QueryUtils.java

/**
 * Read a {@link Slice} of data from the {@link ResultSet} for a {@link Pageable}.
 *
 * @param resultSet must not be {@literal null}.
 * @param mapper must not be {@literal null}.
 * @param page the page number
 * @param pageSize the page size
 * @return the resulting {@link Slice}.
 */
static <T> Slice<T> readSlice(ResultSet resultSet, RowMapper<T> mapper, int page, int pageSize) {

    int toRead = resultSet.getAvailableWithoutFetching();

    return readSlice(() -> Iterators.limit(resultSet.iterator(), toRead),
            resultSet.getExecutionInfo().getPagingState(), mapper, page, pageSize);
}

From source file:com.palantir.atlasdb.sweep.SweepTaskRunner.java

public SweepResults run(String tableName, @Nullable byte[] startRow) {
    Preconditions.checkNotNull(tableName);
    Preconditions.checkState(!tableName.startsWith(AtlasDbConstants.NAMESPACE_PREFIX),
            "The sweeper should not be run on tables passed through namespace mapping.");
    Preconditions.checkState(!AtlasDbConstants.hiddenTables.contains(tableName));

    // Earliest start timestamp of any currently open transaction, with two caveats:
    // (1) unreadableTimestamps are calculated via wall-clock time, and so may not be correct
    //     under pathological clock conditions
    // (2) immutableTimestamps do not account for transactions whose locks have timed out after
    //     they checked their locks; such a transaction may have a start timestamp less than the
    //     immutableTimestamp, and it could still get successfully committed (its commit timestamp
    //     may or may not be less than the immutableTimestamp)
    // Note that this is fine, because we'll either
    // (1) force old readers to abort (if they read a garbage collection sentinel), or
    // (2) force old writers to retry (note that we must roll back any uncommitted transactions
    //     that we encounter)
    SweepStrategy sweepStrategy = sweepStrategyManager.get().get(tableName);
    if (sweepStrategy == null) {
        sweepStrategy = SweepStrategy.CONSERVATIVE;
    } else if (sweepStrategy == SweepStrategy.NOTHING) {
        return new SweepResults(null, 0, 0);
    }
    if (startRow == null) {
        startRow = new byte[0];
    }
    int batchSize = batchSizeSupplier.get();
    RangeRequest rangeRequest = RangeRequest.builder().startRowInclusive(startRow).batchHint(batchSize).build();

    long sweepTimestamp = getSweepTimestamp(tableName);
    ClosableIterator<RowResult<Value>> valueResults;
    if (sweepStrategy == SweepStrategy.CONSERVATIVE) {
        valueResults = ClosableIterators.wrap(ImmutableList.<RowResult<Value>>of().iterator());
    } else {
        valueResults = keyValueService.getRange(tableName, rangeRequest, sweepTimestamp);
    }

    ClosableIterator<RowResult<Set<Long>>> rowResults = keyValueService.getRangeOfTimestamps(tableName,
            rangeRequest, sweepTimestamp);

    try {
        List<RowResult<Set<Long>>> rowResultTimestamps = ImmutableList
                .copyOf(Iterators.limit(rowResults, batchSize));
        PeekingIterator<RowResult<Value>> peekingValues = Iterators.peekingIterator(valueResults);
        Set<Cell> sentinelsToAdd = Sets.newHashSet();
        Multimap<Cell, Long> rowTimestamps = getTimestampsFromRowResults(rowResultTimestamps, sweepStrategy);
        Multimap<Cell, Long> cellTsPairsToSweep = getCellTsPairsToSweep(rowTimestamps, peekingValues,
                sweepTimestamp, sweepStrategy, sentinelsToAdd);
        sweepCells(tableName, cellTsPairsToSweep, sentinelsToAdd);
        byte[] nextRow = rowResultTimestamps.size() < batchSize ? null
                : RangeRequests.getNextStartRow(false, Iterables.getLast(rowResultTimestamps).getRowName());
        return new SweepResults(nextRow, rowResultTimestamps.size(), cellTsPairsToSweep.size());
    } finally {
        rowResults.close();
        valueResults.close();
    }
}

From source file:co.cask.cdap.api.dataset.lib.TimeseriesTable.java

/**
 * Reads entries for a given time range and returns an {@code Iterator<Entry>}.
 * Provides the same functionality as {@link #read(byte[], long, long, byte[][]) read(byte[], long, long, byte[]...)} 
 * but accepts additional parameters for pagination purposes.
 * NOTE: A limit is placed on the max number of time intervals to be scanned during a read, as defined by
 * {@link #MAX_ROWS_TO_SCAN_PER_READ}.
 *
 * @param key key of the entries to read
 * @param startTime defines start of the time range to read, inclusive
 * @param endTime defines end of the time range to read, inclusive
 * @param offset the number of initial entries to ignore and not add to the results
 * @param limit upper limit on the number of results returned. If the limit is exceeded, the first
 *              {@code limit} results are returned
 * @param tags a set of tags which entries returned must contain. Tags for entries are defined at write-time and an
 *             entry is only returned if it contains all of these tags.
 *
 * @return an iterator over entries that satisfy provided conditions
 * @throws IllegalArgumentException when a provided condition is invalid
 */
public final Iterator<Entry> read(byte[] key, long startTime, long endTime, int offset, int limit,
        byte[]... tags) {
    Iterator<Entry> iterator = read(key, startTime, endTime, tags);
    iterator = Iterators.limit(iterator, limit + offset);
    Iterators.advance(iterator, offset);
    return iterator;
}
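
The limit-plus-advance combination above is a compact pagination idiom: cap the iterator at offset + limit elements, then skip the first offset. A minimal sketch with invented data:

import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;

import java.util.Iterator;
import java.util.List;

public class PaginationDemo {
    public static void main(String[] args) {
        List<String> rows = Lists.newArrayList("r0", "r1", "r2", "r3", "r4");
        int offset = 2;
        int limit = 2;
        // Cap at offset + limit elements, then skip past the offset.
        Iterator<String> page = Iterators.limit(rows.iterator(), offset + limit);
        Iterators.advance(page, offset);
        System.out.println(Lists.newArrayList(page)); // [r2, r3]
    }
}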

From source file:com.palantir.atlasdb.sweep.SweepTaskRunnerImpl.java

@Override
public SweepResults run(String tableName, int batchSize, @Nullable byte[] startRow) {
    Preconditions.checkNotNull(tableName);
    Preconditions.checkState(!AtlasDbConstants.hiddenTables.contains(tableName));

    if (tableName.startsWith(AtlasDbConstants.NAMESPACE_PREFIX)) {
        // this happens sometimes; I think it's because some places in the code can
        // start this sweeper without doing the full normally ordered KVSModule startup.
        // I did check and sweep.stats did contain the FQ table name for all of the tables,
        // so it is at least broken in some way that still allows namespaced tables to eventually be swept.
        log.warn("The sweeper should not be run on tables passed through namespace mapping.");
        return SweepResults.EMPTY_SWEEP;
    }
    if (keyValueService.getMetadataForTable(tableName).length == 0) {
        log.warn("The sweeper tried to sweep table '{}', but the table does not exist. Skipping table.",
                tableName);
        return SweepResults.EMPTY_SWEEP;
    }

    // Earliest start timestamp of any currently open transaction, with two caveats:
    // (1) unreadableTimestamps are calculated via wall-clock time, and so may not be correct
    //     under pathological clock conditions
    // (2) immutableTimestamps do not account for transactions whose locks have timed out after
    //     they checked their locks; such a transaction may have a start timestamp less than the
    //     immutableTimestamp, and it could still get successfully committed (its commit timestamp
    //     may or may not be less than the immutableTimestamp)
    // Note that this is fine, because we'll either
    // (1) force old readers to abort (if they read a garbage collection sentinel), or
    // (2) force old writers to retry (note that we must roll back any uncommitted transactions
    //     that we encounter)
    SweepStrategy sweepStrategy = sweepStrategyManager.get().get(tableName);
    if (sweepStrategy == null) {
        sweepStrategy = SweepStrategy.CONSERVATIVE;
    } else if (sweepStrategy == SweepStrategy.NOTHING) {
        return SweepResults.EMPTY_SWEEP;
    }
    if (startRow == null) {
        startRow = new byte[0];
    }
    RangeRequest rangeRequest = RangeRequest.builder().startRowInclusive(startRow).batchHint(batchSize).build();

    long sweepTimestamp = getSweepTimestamp(sweepStrategy);
    ClosableIterator<RowResult<Value>> valueResults;
    if (sweepStrategy == SweepStrategy.CONSERVATIVE) {
        valueResults = ClosableIterators.wrap(ImmutableList.<RowResult<Value>>of().iterator());
    } else {
        valueResults = keyValueService.getRange(tableName, rangeRequest, sweepTimestamp);
    }

    ClosableIterator<RowResult<Set<Long>>> rowResults = keyValueService.getRangeOfTimestamps(tableName,
            rangeRequest, sweepTimestamp);

    try {
        List<RowResult<Set<Long>>> rowResultTimestamps = ImmutableList
                .copyOf(Iterators.limit(rowResults, batchSize));
        PeekingIterator<RowResult<Value>> peekingValues = Iterators.peekingIterator(valueResults);
        Set<Cell> sentinelsToAdd = Sets.newHashSet();
        Multimap<Cell, Long> rowTimestamps = getTimestampsFromRowResults(rowResultTimestamps, sweepStrategy);
        Multimap<Cell, Long> cellTsPairsToSweep = getCellTsPairsToSweep(rowTimestamps, peekingValues,
                sweepTimestamp, sweepStrategy, sentinelsToAdd);
        sweepCells(tableName, cellTsPairsToSweep, sentinelsToAdd);
        byte[] nextRow = rowResultTimestamps.size() < batchSize ? null
                : RangeRequests.getNextStartRow(false, Iterables.getLast(rowResultTimestamps).getRowName());
        return new SweepResults(nextRow, rowResultTimestamps.size(), cellTsPairsToSweep.size());
    } finally {
        rowResults.close();
        valueResults.close();
    }
}

From source file:com.yandex.yoctodb.v1.immutable.V1CompositeDatabase.java

@Override
public int executeAndUnlimitedCount(@NotNull final Query query, @NotNull final DocumentProcessor processor) {
    int result = 0;
    final Iterator<ScoredDocument<?>> iterator;

    // Doing merging iff there is sorting
    if (query.hasSorting()) {
        final List<Iterator<? extends ScoredDocument<?>>> results = new ArrayList<>(databases.size());
        for (IndexedDatabase db : databases) {
            final BitSet docs = query.filteredUnlimited(db, bitSetPool);
            if (docs != null) {
                assert !docs.isEmpty();

                final int dbSize = db.getDocumentCount();
                final int count = docs.cardinality();
                final BitSet filter;
                if (count == dbSize) {
                    filter = new ReadOnlyOneBitSet(dbSize);
                } else {
                    filter = docs;
                }
                results.add(query.sortedUnlimited(filter, db, bitSetPool));
                result += count;
            }
        }

        if (results.isEmpty()) {
            return 0;
        }

        iterator = Iterators.mergeSorted(results, SCORED_DOCUMENT_COMPARATOR);
    } else {
        final List<QueryContext> results = new ArrayList<>(databases.size());
        for (IndexedDatabase db : databases) {
            final BitSet docs = query.filteredUnlimited(db, bitSetPool);
            if (docs != null) {
                assert !docs.isEmpty();

                final int dbSize = db.getDocumentCount();
                final int count = docs.cardinality();
                final BitSet filter;
                if (count == dbSize) {
                    filter = new ReadOnlyOneBitSet(dbSize);
                } else {
                    filter = docs;
                }
                results.add(new QueryContext(filter, db, bitSetPool));
                result += count;
            }
        }

        if (results.isEmpty()) {
            return 0;
        }

        iterator = Iterators.concat(new SortResultIterator(query, results.iterator()));
    }

    // Skipping values
    if (query.getSkip() != 0) {
        Iterators.advance(iterator, query.getSkip());
    }

    // Limited
    final Iterator<ScoredDocument<?>> limitedIterator;
    if (query.getLimit() == Integer.MAX_VALUE) {
        limitedIterator = iterator;
    } else {
        limitedIterator = Iterators.limit(iterator, query.getLimit());
    }

    while (limitedIterator.hasNext()) {
        final ScoredDocument<?> document = limitedIterator.next();
        if (!processor.process(document.getDocument(), document.getDatabase())) {
            return result;
        }
    }

    return result;
}

From source file:com.palantir.atlasdb.cleaner.KeyValueServiceScrubberStore.java

@Override
public int getNumberRemainingScrubCells(int maxCellsToScan) {
    ClosableIterator<RowResult<Value>> iterator = getIteratorToScrub(maxCellsToScan, Long.MAX_VALUE, null,
            null);
    try {
        return Iterators.size(Iterators.limit(iterator, maxCellsToScan));
    } finally {
        iterator.close();
    }
}
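
Combining Iterators.size with Iterators.limit, as above, turns an unbounded count into a bounded scan: size consumes the iterator, but the limit wrapper caps how far it can go. A standalone sketch with an invented source:

import com.google.common.collect.Iterators;

import java.util.Iterator;
import java.util.stream.IntStream;

public class BoundedCountDemo {
    public static void main(String[] args) {
        Iterator<Integer> source = IntStream.range(0, 1_000_000).iterator();
        int maxToScan = 500;
        // size() consumes at most maxToScan elements thanks to the limit wrapper.
        int scanned = Iterators.size(Iterators.limit(source, maxToScan));
        System.out.println(scanned); // 500
    }
}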

From source file:org.geogit.api.porcelain.LogOp.java

/**
 * Executes the log operation.
 * 
 * @return the list of commits that satisfy the query criteria, most recent first.
 * @see org.geogit.api.AbstractGeoGitOp#call()
 */
@Override
public Iterator<RevCommit> call() {

    ObjectId newestCommitId;
    ObjectId oldestCommitId;
    {
        if (this.until == null) {
            newestCommitId = command(RevParse.class).setRefSpec(Ref.HEAD).call().get();
        } else {
            if (!repository.commitExists(this.until)) {
                throw new IllegalArgumentException(
                        "Provided 'until' commit id does not exist: " + until.toString());
            }
            newestCommitId = this.until;
        }
        if (this.since == null) {
            oldestCommitId = ObjectId.NULL;
        } else {
            if (!repository.commitExists(this.since)) {
                throw new IllegalArgumentException(
                        "Provided 'since' commit id does not exist: " + since.toString());
            }
            oldestCommitId = this.since;
        }
    }

    Iterator<RevCommit> history;
    if (firstParent) {
        history = new LinearHistoryIterator(newestCommitId, repository);
    } else {
        if (commits.isEmpty()) {
            commits.add(newestCommitId);
        }
        if (topo) {
            history = new TopologicalHistoryIterator(commits, repository, graphDb);
        } else {
            history = new ChronologicalHistoryIterator(commits, repository);
        }
    }
    LogFilter filter = new LogFilter(oldestCommitId, timeRange, paths, author, commiter);
    Iterator<RevCommit> filteredCommits = Iterators.filter(history, filter);
    if (skip != null) {
        Iterators.advance(filteredCommits, skip.intValue());
    }
    if (limit != null) {
        filteredCommits = Iterators.limit(filteredCommits, limit.intValue());
    }
    return filteredCommits;
}
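
Several of the examples above (JanusGraphPropertiesStep, V1CompositeDatabase, LogOp) guard the call the same way: the iterator is only wrapped when a real limit was requested, avoiding an unnecessary wrapper in the unlimited case. A generic sketch of that idiom; the NO_LIMIT sentinel here is hypothetical (the projects use Query.NO_LIMIT, Integer.MAX_VALUE, and a null limit, respectively):

import com.google.common.collect.Iterators;

import java.util.Iterator;

public final class ConditionalLimit {
    // Hypothetical sentinel meaning "return everything".
    static final int NO_LIMIT = -1;

    static <T> Iterator<T> applyLimit(Iterator<T> source, int limit) {
        // Skip the wrapper entirely when no limit was requested.
        return (limit == NO_LIMIT) ? source : Iterators.limit(source, limit);
    }

    private ConditionalLimit() {
    }
}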