Example usage for com.google.common.collect Iterators limit

List of usage examples for com.google.common.collect Iterators limit

Introduction

In this page you can find the example usage for com.google.common.collect Iterators limit.

Prototype

public static <T> Iterator<T> limit(final Iterator<T> iterator, final int limitSize) 

Source Link

Document

Creates an iterator returning the first limitSize elements of the given iterator; if the underlying iterator holds fewer than limitSize elements, all of its elements are returned.

Usage

From source file:com.yandex.yoctodb.v1.immutable.V1Database.java

@Override
public int executeAndUnlimitedCount(@NotNull final Query query, @NotNull final DocumentProcessor processor) {
    // Resolve the full (unlimited) set of matching documents first.
    final BitSet matching = query.filteredUnlimited(this, bitSetPool);
    if (matching == null) {
        return 0;
    }

    assert !matching.isEmpty();

    // Total match count is returned regardless of skip/limit applied below.
    final int totalCount = matching.cardinality();

    final Iterator<? extends ScoredDocument<?>> sorted;
    if (totalCount == getDocumentCount()) {
        // Every document matched: use the constant all-ones bit set instead of the mask.
        sorted = query.sortedUnlimited(new ReadOnlyOneBitSet(getDocumentCount()), this, bitSetPool);
    } else {
        sorted = query.sortedUnlimited(matching, this, bitSetPool);
    }

    final int skip = query.getSkip();
    if (skip != 0) {
        Iterators.advance(sorted, skip);
    }

    // MAX_VALUE is the "no limit" sentinel; avoid wrapping the iterator in that case.
    final int limit = query.getLimit();
    final Iterator<? extends ScoredDocument<?>> bounded = limit == Integer.MAX_VALUE ? sorted
            : Iterators.limit(sorted, limit);

    // Feed documents to the processor until it asks to stop or the window is exhausted.
    while (bounded.hasNext()) {
        if (!processor.process(bounded.next().getDocument(), this)) {
            break;
        }
    }

    return totalCount;
}

From source file:co.cask.cdap.api.dataset.lib.CounterTimeseriesTable.java

/**
 * Reads entries for a given time range and returns an <code>Iterator<Counter></code>.
 * Provides the same functionality as {@link #read(byte[], long, long, byte[][]) read(byte[], long, long, byte[]...)}
 * but accepts additional parameters for pagination purposes.
 *
 * @param counter name of the counter to read
 * @param startTime defines start of the time range to read, inclusive
 * @param endTime defines end of the time range to read, inclusive
 * @param offset the number of initial entries to ignore and not add to the results
 * @param limit upper limit on number of results returned. If limit is exceeded, the first <code>limit</code> results
 *              are returned./*www  .j  a  v  a 2  s  .  c  o  m*/
 * @param tags a set of tags which entries returned must contain. Tags for entries are defined at write-time and an
 *             entry is only returned if it contains all of these tags.
 * @return an iterator over entries that satisfy provided conditions
 */
public Iterator<Counter> read(byte[] counter, long startTime, long endTime, int offset, int limit,
        byte[]... tags) {
    Iterator<Counter> iterator = read(counter, startTime, endTime, tags);
    iterator = Iterators.limit(iterator, limit + offset);
    Iterators.advance(iterator, offset);
    return iterator;
}

From source file:com.palantir.common.base.BatchingVisitables.java

public static <T> boolean isEqual(BatchingVisitable<T> v, final Iterator<T> it) {
    // Walk the visitable batch-by-batch, matching each batch against the same
    // number of elements pulled from the iterator. Visiting aborts on the first
    // batch that does not match.
    final boolean batchesMatched = v.batchAccept(DEFAULT_BATCH_SIZE,
            new AbortingVisitor<List<T>, RuntimeException>() {
                @Override
                public boolean visit(List<T> batch) {
                    final Iterator<T> prefix = Iterators.limit(it, batch.size());
                    return Iterators.elementsEqual(prefix, batch.iterator());
                }
            });
    // The iterator must be fully consumed too; leftover elements mean inequality.
    final boolean iteratorExhausted = !it.hasNext();
    return batchesMatched && iteratorExhausted;
}

From source file:io.druid.extendedset.intset.ImmutableConciseSet.java

public static ImmutableConciseSet union(Iterator<ImmutableConciseSet> sets) {
    // Fold the input in CHUNK_SIZE-sized slices so each doUnion call stays bounded.
    ImmutableConciseSet accumulated = doUnion(Iterators.limit(sets, CHUNK_SIZE));
    while (sets.hasNext()) {
        // Feed the running result back in ahead of the next chunk of inputs.
        accumulated = doUnion(Iterators.concat(Iterators.singletonIterator(accumulated),
                Iterators.limit(sets, CHUNK_SIZE)));
    }
    return accumulated;
}

From source file:org.apache.druid.extendedset.intset.ImmutableConciseSet.java

public static ImmutableConciseSet union(Iterator<ImmutableConciseSet> sets) {
    // Reduce the inputs chunk-by-chunk so no single doUnion call sees more
    // than CHUNK_SIZE + 1 sets at once.
    ImmutableConciseSet accumulated = doUnion(Iterators.limit(sets, CHUNK_SIZE));
    while (sets.hasNext()) {
        // Prepend the running result to the next chunk and fold again.
        accumulated = doUnion(Iterators.concat(Iterators.singletonIterator(accumulated),
                Iterators.limit(sets, CHUNK_SIZE)));
    }
    return accumulated;
}

From source file:org.geogit.api.LogOp.java

/**
 * Resolves the requested commit range, walks the linear history, applies the
 * configured filters, and caps the result when a limit was set.
 *
 * @return the list of commits that satisfy the query criteria, most recent first.
 * @see org.geogit.api.AbstractGeoGitOp#call()
 */
@Override
public Iterator<RevCommit> call() throws Exception {
    final Repository repository = getRepository();

    // Newest end of the range: explicit 'until' commit, or the current HEAD.
    final ObjectId newestCommitId;
    if (this.until == null) {
        newestCommitId = repository.getRef(Ref.HEAD).getObjectId();
    } else {
        if (!repository.commitExists(this.until)) {
            throw new IllegalStateException(
                    "Provided 'until' commit id does not exist: " + until.toString());
        }
        newestCommitId = this.until;
    }

    // Oldest end of the range: explicit 'since' commit, or the null id (walk to the root).
    final ObjectId oldestCommitId;
    if (this.since == null) {
        oldestCommitId = ObjectId.NULL;
    } else {
        if (!repository.commitExists(this.since)) {
            throw new IllegalStateException(
                    "Provided 'since' commit id does not exist: " + since.toString());
        }
        oldestCommitId = this.since;
    }

    final Iterator<RevCommit> history = new LinearHistoryIterator(newestCommitId, repository);
    final LogFilter logFilter = new LogFilter(repository, oldestCommitId, timeRange, paths);
    Iterator<RevCommit> commits = Iterators.filter(history, logFilter);
    if (limit != null) {
        commits = Iterators.limit(commits, limit.intValue());
    }
    return commits;
}

From source file:org.commoncrawl.mapred.ec2.parser.EC2ParserTask.java

// Scans S3 for crawl-log segments that have not been parsed yet and queues them
// for processing, batching LOGS_PER_ITERATION paths per parse job.
public EC2ParserTask(Configuration conf) throws Exception {

    super(conf);

    if (!conf.getBoolean(CONF_PARAM_TEST_MODE, false)) {
        // Production mode: real segment/log/checkpoint paths, and a semaphore
        // initialized so that MAX_SIMULTANEOUS_JOBS acquires are possible.
        conf.set(VALID_SEGMENTS_PATH_PROPERTY, VALID_SEGMENTS_PATH);
        conf.set(SEGMENT_PATH_PROPERTY, SEGMENTS_PATH);
        conf.set(JOB_LOGS_PATH_PROPERTY, JOB_LOGS_PATH);
        conf.set(CHECKPOIINTS_PATH_PROPERTY, CHECKPOINTS_PATH);

        jobThreadSemaphore = new Semaphore(-(MAX_SIMULTANEOUS_JOBS - 1));

    } else {
        // Test mode: separate paths and exactly one job at a time.
        conf.set(VALID_SEGMENTS_PATH_PROPERTY, TEST_VALID_SEGMENTS_PATH);
        conf.set(SEGMENT_PATH_PROPERTY, TEST_SEGMENTS_PATH);
        conf.set(JOB_LOGS_PATH_PROPERTY, TEST_JOB_LOGS_PATH);

        jobThreadSemaphore = new Semaphore(0);
        maxSimultaneousJobs = 1;
    }

    FileSystem fs = FileSystem.get(new URI("s3n://aws-publicdatasets"), conf);
    LOG.info(
            "FileSystem is:" + fs.getUri() + " Scanning for candidates at path:" + CRAWL_LOG_INTERMEDIATE_PATH);
    TreeSet<Path> candidateSet = buildCandidateList(fs, new Path(CRAWL_LOG_INTERMEDIATE_PATH));
    LOG.info("Scanning for completed segments");
    List<Path> processedLogs = scanForCompletedSegments(fs, conf);
    LOG.info("Found " + processedLogs.size() + " processed logs");
    // remove processed from candidate set ... 
    candidateSet.removeAll(processedLogs);
    // ok we are ready to go .. 
    LOG.info("There are: " + candidateSet.size() + " logs in need of parsing");
    while (candidateSet.size() != 0) {
        ImmutableList.Builder<Path> pathBuilder = new ImmutableList.Builder<Path>();
        // Take up to LOGS_PER_ITERATION candidates per job; remove() on the
        // limited view also removes the path from candidateSet, so the outer
        // loop makes progress each iteration.
        Iterator<Path> iterator = Iterators.limit(candidateSet.iterator(), LOGS_PER_ITERATION);
        while (iterator.hasNext()) {
            pathBuilder.add(iterator.next());
            iterator.remove();
        }
        LOG.info("Queueing Parse");
        queue(fs, conf, pathBuilder.build());
        LOG.info("Queued Parse");

        // in test mode, queue only a single segment's worth of data 
        if (conf.getBoolean(CONF_PARAM_TEST_MODE, false)) {
            LOG.info("Test Mode - Queueing only a single Item");
            break;
        }
    }

    // queue shutdown items 
    // One sentinel per worker so every consumer thread sees a shutdown signal.
    for (int i = 0; i < maxSimultaneousJobs; ++i) {
        _queue.put(new QueueItem());
    }
}

From source file:org.alfresco.jive.community.ws.AlfrescoServiceImpl.java

protected Collection<SpaceEntity> getSubSpaces(Community parentSpace, int offset, int limit) {
    // communityManager.getCommunities(parentSpace, offset, limit) does not work with
    // hidden spaces, so paginate manually over the unpaged iterator instead.
    Iterator<Community> communities = communityManager.getCommunities(parentSpace);
    Iterators.skip(communities, offset);
    communities = Iterators.limit(communities, limit);

    final List<SpaceEntity> entities = new ArrayList<SpaceEntity>();
    while (communities.hasNext()) {
        entities.add(createEntity(communities.next()));
    }

    return ImmutableList.copyOf(entities);
}

From source file:io.druid.extendedset.intset.ImmutableConciseSet.java

public static ImmutableConciseSet intersection(Iterator<ImmutableConciseSet> sets) {
    // Fold the input in CHUNK_SIZE-sized slices so each doIntersection call stays bounded.
    ImmutableConciseSet accumulated = doIntersection(Iterators.limit(sets, CHUNK_SIZE));
    while (sets.hasNext()) {
        // Append the running result after the next chunk and intersect again.
        accumulated = doIntersection(Iterators.concat(Iterators.limit(sets, CHUNK_SIZE),
                Iterators.singletonIterator(accumulated)));
    }
    return accumulated;
}

From source file:org.apache.druid.extendedset.intset.ImmutableConciseSet.java

public static ImmutableConciseSet intersection(Iterator<ImmutableConciseSet> sets) {
    // Reduce the inputs chunk-by-chunk so no single doIntersection call sees
    // more than CHUNK_SIZE + 1 sets at once.
    ImmutableConciseSet accumulated = doIntersection(Iterators.limit(sets, CHUNK_SIZE));
    while (sets.hasNext()) {
        // Intersect the next chunk followed by the running result.
        accumulated = doIntersection(Iterators.concat(Iterators.limit(sets, CHUNK_SIZE),
                Iterators.singletonIterator(accumulated)));
    }
    return accumulated;
}