Example usage for com.google.common.collect PeekingIterator next

List of usage examples for com.google.common.collect PeekingIterator next

Introduction

On this page you can find example usages for com.google.common.collect PeekingIterator next.

Prototype

@Override
E next();

Document

The objects returned by consecutive calls to #peek() then #next() are guaranteed to be equal to each other.
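
A minimal, self-contained sketch of that contract (the list contents are illustrative):

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;

public class PeekNextContract {
    public static void main(String[] args) {
        PeekingIterator<String> it = Iterators.peekingIterator(ImmutableList.of("a", "b").iterator());
        String peeked = it.peek(); // inspect the next element without advancing
        String next = it.next();   // advance; guaranteed equal to the peeked element
        System.out.println(peeked.equals(next)); // true
    }
}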

Usage

From source file:org.apache.accumulo.gc.GarbageCollectionAlgorithm.java

protected void confirmDeletesFromReplication(Iterator<Entry<String, Status>> replicationNeededIterator,
        Iterator<Entry<String, String>> candidateMapIterator) {
    PeekingIterator<Entry<String, Status>> pendingReplication = Iterators
            .peekingIterator(replicationNeededIterator);
    PeekingIterator<Entry<String, String>> candidates = Iterators.peekingIterator(candidateMapIterator);
    while (pendingReplication.hasNext() && candidates.hasNext()) {
        Entry<String, Status> pendingReplica = pendingReplication.peek();
        Entry<String, String> candidate = candidates.peek();

        String filePendingReplication = pendingReplica.getKey();
        String fullPathCandidate = candidate.getValue();

        int comparison = filePendingReplication.compareTo(fullPathCandidate);
        if (comparison < 0) {
            pendingReplication.next();
        } else if (comparison > 0) {
            candidates.next();
        } else {
            // We want to advance both, and try to delete the candidate if we can
            candidates.next();
            pendingReplication.next();

            // We cannot delete a file if it is still needed for replication
            if (!StatusUtil.isSafeForRemoval(pendingReplica.getValue())) {
                // If it must be replicated, we must remove it from the candidate set to prevent deletion
                candidates.remove();
            }
        }
    }
}
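
The method above is an instance of the classic sorted-merge walk: peek at both sides, advance the side with the smaller key, and handle a match when the keys are equal. A minimal, generic sketch of the same pattern, assuming both source iterators yield keys in ascending order (all names are illustrative):

import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;
import java.util.Iterator;

public class MergeWalk {
    static <K extends Comparable<K>> void walk(Iterator<K> leftSource, Iterator<K> rightSource) {
        PeekingIterator<K> left = Iterators.peekingIterator(leftSource);
        PeekingIterator<K> right = Iterators.peekingIterator(rightSource);
        while (left.hasNext() && right.hasNext()) {
            int cmp = left.peek().compareTo(right.peek());
            if (cmp < 0) {
                left.next();  // key present only on the left
            } else if (cmp > 0) {
                right.next(); // key present only on the right
            } else {
                left.next();  // key present on both sides:
                right.next(); // process the match, then advance both
            }
        }
    }
}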

From source file:org.apache.cassandra.db.view.TableViews.java

/**
 * Given some updates on the base table of this object and the existing values for the rows affected by that update, generates the
 * mutation to be applied to the provided views.
 *
 * @param views the views potentially affected by {@code updates}.
 * @param updates the base table updates being applied.
 * @param existings the existing values for the rows affected by {@code updates}. This is used to decide if a view is
 * obsoleted by the update and should be removed, gather the values for columns that may not be part of the update if
 * a new view entry needs to be created, and compute the minimal updates to be applied if the view entry isn't changed
 * but has simply some updated values. This will be empty for view building as we want to assume anything we'll pass
 * to {@code updates} is new.
 * @param nowInSec the current time in seconds.
 * @return the mutations to apply to the {@code views}. This can be empty.
 */
public Collection<Mutation> generateViewUpdates(Collection<View> views, UnfilteredRowIterator updates,
        UnfilteredRowIterator existings, int nowInSec) {
    assert updates.metadata().cfId.equals(baseTableMetadata.cfId);

    List<ViewUpdateGenerator> generators = new ArrayList<>(views.size());
    for (View view : views)
        generators.add(new ViewUpdateGenerator(view, updates.partitionKey(), nowInSec));

    DeletionTracker existingsDeletion = new DeletionTracker(existings.partitionLevelDeletion());
    DeletionTracker updatesDeletion = new DeletionTracker(updates.partitionLevelDeletion());

    /*
     * We iterate through the updates and the existing rows in parallel. This allows us to know the consequence
     * on the view of each update.
     */
    PeekingIterator<Unfiltered> existingsIter = Iterators.peekingIterator(existings);
    PeekingIterator<Unfiltered> updatesIter = Iterators.peekingIterator(updates);

    while (existingsIter.hasNext() && updatesIter.hasNext()) {
        Unfiltered existing = existingsIter.peek();
        Unfiltered update = updatesIter.peek();

        Row existingRow;
        Row updateRow;
        int cmp = baseTableMetadata.comparator.compare(update, existing);
        if (cmp < 0) {
            // We have an update where there was nothing before
            if (update.isRangeTombstoneMarker()) {
                updatesDeletion.update(updatesIter.next());
                continue;
            }

            updateRow = ((Row) updatesIter.next()).withRowDeletion(updatesDeletion.currentDeletion());
            existingRow = emptyRow(updateRow.clustering(), existingsDeletion.currentDeletion());
        } else if (cmp > 0) {
            // We have something existing but no update (which will happen either because it's a range tombstone marker in
            // existing, or because we've fetched the existing row due to some partition/range deletion in the updates)
            if (existing.isRangeTombstoneMarker()) {
                existingsDeletion.update(existingsIter.next());
                continue;
            }

            existingRow = ((Row) existingsIter.next()).withRowDeletion(existingsDeletion.currentDeletion());
            updateRow = emptyRow(existingRow.clustering(), updatesDeletion.currentDeletion());

            // Given the way we build the read command used for existing rows, updatesDeletion.currentDeletion()
            // should always be non-live here, since we wouldn't have read the existing row otherwise. We could
            // assert that, but if the read method ever changes to slightly over-read in some cases, that would
            // be an easily avoidable bug lurking, so we just handle the case.
            if (updateRow == null)
                continue;
        } else {
            // We're updating a row that had pre-existing data
            if (update.isRangeTombstoneMarker()) {
                assert existing.isRangeTombstoneMarker();
                updatesDeletion.update(updatesIter.next());
                existingsDeletion.update(existingsIter.next());
                continue;
            }

            assert !existing.isRangeTombstoneMarker();
            existingRow = ((Row) existingsIter.next()).withRowDeletion(existingsDeletion.currentDeletion());
            updateRow = ((Row) updatesIter.next()).withRowDeletion(updatesDeletion.currentDeletion());
        }

        addToViewUpdateGenerators(existingRow, updateRow, generators, nowInSec);
    }

    // We only care about more existing rows if the update deletion isn't live, i.e. if we had a partition deletion
    if (!updatesDeletion.currentDeletion().isLive()) {
        while (existingsIter.hasNext()) {
            Unfiltered existing = existingsIter.next();
            // If it's a range tombstone, we don't care; we're only looking for existing entries that get
            // deleted by the new partition deletion
            if (existing.isRangeTombstoneMarker())
                continue;

            Row existingRow = (Row) existing;
            addToViewUpdateGenerators(existingRow,
                    emptyRow(existingRow.clustering(), updatesDeletion.currentDeletion()), generators,
                    nowInSec);
        }
    }
    while (updatesIter.hasNext()) {
        Unfiltered update = updatesIter.next();
        // If it's a range tombstone, it removes nothing pre-existing, so we can ignore it for view updates
        if (update.isRangeTombstoneMarker())
            continue;

        Row updateRow = (Row) update;
        addToViewUpdateGenerators(emptyRow(updateRow.clustering(), DeletionTime.LIVE), updateRow, generators,
                nowInSec);
    }

    return buildMutations(baseTableMetadata, generators);
}

From source file:org.jasig.portal.portlets.statistics.BaseStatisticsReportController.java

/**
 * Build the aggregation {@link DataTable}
 */
protected final DataTable buildAggregationReport(F form) throws TypeMismatchException {
    //Pull data out of form for per-group fetching
    final AggregationInterval interval = form.getInterval();
    final DateMidnight start = form.getStart();
    final DateMidnight end = form.getEnd();

    final DateTime startDateTime = start.toDateTime();
    //Use a query end time of 23:59:59 on the end date
    final DateTime endDateTime = end.plusDays(1).toDateTime().minusSeconds(1);

    //Get the list of DateTimes used on the X axis in the report
    final List<DateTime> reportTimes = this.intervalHelper.getIntervalStartDateTimesBetween(interval,
            startDateTime, endDateTime, maxIntervals);

    final Map<D, SortedSet<T>> groupedAggregations = createColumnDiscriminatorMap(form);

    //Determine the ValueType of the date/time column. Use the most specific column type possible
    final ValueType dateTimeColumnType;
    if (interval.isHasTimePart()) {
        //If start/end are the same day just display the time
        if (startDateTime.toDateMidnight().equals(endDateTime.toDateMidnight())) {
            dateTimeColumnType = ValueType.TIMEOFDAY;
        }
        //interval has time data and start/end are on different days, show full date time
        else {
            dateTimeColumnType = ValueType.DATETIME;
        }
    }
    //interval is date only
    else {
        dateTimeColumnType = ValueType.DATE;
    }

    //Set up the date/time column description
    final ColumnDescription dateTimeColumn;
    switch (dateTimeColumnType) {
    case TIMEOFDAY: {
        dateTimeColumn = new ColumnDescription("time", dateTimeColumnType, "Time");
        break;
    }
    default: {
        dateTimeColumn = new ColumnDescription("date", dateTimeColumnType, "Date");
    }
    }

    final DataTable table = new JsonDataTable();
    table.addColumn(dateTimeColumn);

    //Set up the columns in the DataTable
    final Set<D> columnGroups = groupedAggregations.keySet();
    for (final D columnMapping : columnGroups) {
        final Collection<ColumnDescription> columnDescriptions = this.getColumnDescriptions(columnMapping,
                form);
        table.addColumns(columnDescriptions);
    }

    //Query for all aggregation data in the time range for all groups.  Only the
    //interval and discriminator data is used from the keys.
    final Set<K> keys = createAggregationsQueryKeyset(columnGroups, form);
    final BaseAggregationDao<T, K> baseAggregationDao = this.getBaseAggregationDao();
    final Collection<T> aggregations = baseAggregationDao.getAggregations(startDateTime, endDateTime, keys,
            extractGroupsArray(columnGroups));

    //Organize the results by group and sort them chronologically by adding them to the sorted set
    for (final T aggregation : aggregations) {
        final D discriminator = aggregation.getAggregationDiscriminator();
        final SortedSet<T> results = groupedAggregations.get(discriminator);
        results.add(aggregation);
    }

    //Build Map from discriminator column mapping to result iterator to allow putting results into
    //the correct column AND the correct time slot in the column
    Comparator<? super D> comparator = getDiscriminatorComparator();
    final Map<D, PeekingIterator<T>> groupedAggregationIterators = new TreeMap<D, PeekingIterator<T>>(
            comparator);
    for (final Entry<D, SortedSet<T>> groupedAggregationEntry : groupedAggregations.entrySet()) {
        groupedAggregationIterators.put(groupedAggregationEntry.getKey(),
                Iterators.peekingIterator(groupedAggregationEntry.getValue().iterator()));
    }

    /*
     * Populate the data, filling in blank spots. The full list of interval DateTimes is used to create
     * every row in the query range. Each column's iterator is then consulted per row: if its next
     * aggregation falls in the row's time slot it is consumed, otherwise the row is filled in with a
     * null aggregation.
     */
    for (final DateTime rowTime : reportTimes) {
        // create the row
        final TableRow row = new TableRow();

        // add the date to the first cell
        final Value dateTimeValue;
        switch (dateTimeColumnType) {
        case DATE: {
            dateTimeValue = new DateValue(rowTime.getYear(), rowTime.getMonthOfYear() - 1,
                    rowTime.getDayOfMonth());
            break;
        }
        case TIMEOFDAY: {
            dateTimeValue = new TimeOfDayValue(rowTime.getHourOfDay(), rowTime.getMinuteOfHour(), 0);
            break;
        }
        default: {
            dateTimeValue = new DateTimeValue(rowTime.getYear(), rowTime.getMonthOfYear() - 1,
                    rowTime.getDayOfMonth(), rowTime.getHourOfDay(), rowTime.getMinuteOfHour(), 0, 0);
            break;
        }
        }
        row.addCell(new TableCell(dateTimeValue));

        for (final PeekingIterator<T> groupedAggregationIteratorEntry : groupedAggregationIterators.values()) {
            List<Value> values = null;

            if (groupedAggregationIteratorEntry.hasNext()) {
                final T aggr = groupedAggregationIteratorEntry.peek();
                if (rowTime.equals(aggr.getDateTime())) {
                    //Data is for the correct time slot, advance the iterator
                    groupedAggregationIteratorEntry.next();

                    values = createRowValues(aggr, form);
                }
            }

            //Gap in the data, fill it in using a null aggregation
            if (values == null) {
                values = createRowValues(null, form);
            }

            //Add the values to the row
            for (final Value value : values) {
                row.addCell(value);
            }
        }

        table.addRow(row);
    }

    return table;
}
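
The per-column iterators above implement a simple gap-filling walk: a sample is consumed only when it matches the current time slot, and missing slots get a placeholder. A minimal sketch of that idea, assuming a chronologically sorted series and a complete list of expected slots (all names and values are illustrative):

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSortedMap;
import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;
import java.util.List;
import java.util.Map;

public class GapFill {
    public static void main(String[] args) {
        List<Long> slots = ImmutableList.of(0L, 60L, 120L, 180L);
        PeekingIterator<Map.Entry<Long, Integer>> samples = Iterators
                .peekingIterator(ImmutableSortedMap.of(0L, 5, 120L, 7).entrySet().iterator());
        for (long slot : slots) {
            Integer value = null;
            if (samples.hasNext() && samples.peek().getKey() == slot) {
                value = samples.next().getValue(); // sample matches this slot; consume it
            }
            System.out.println(slot + " -> " + (value == null ? 0 : value)); // 0 fills the gap
        }
    }
}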

From source file:org.eclipse.xtext.ide.editor.contentassist.IndentationAwareCompletionPrefixProvider.java

protected INode findBestEndToken(INode root, INode candidate, int completionColumn,
        boolean candidateIsEndToken) {
    LinkedList<ILeafNode> sameGrammarElement = Lists.newLinkedList();
    PeekingIterator<ILeafNode> iterator = createReversedLeafIterator(root, candidate, sameGrammarElement);
    if (!iterator.hasNext()) {
        return candidate;
    }
    // collect all candidates that belong to the same offset
    LinkedList<ILeafNode> sameOffset = candidateIsEndToken
            ? collectLeafsWithSameOffset((ILeafNode) candidate, iterator)
            : Lists.newLinkedList();
    // continue until we find a paired leaf with length 0 that is at the correct offset
    EObject grammarElement = tryGetGrammarElementAsRule(
            candidateIsEndToken || sameGrammarElement.isEmpty() ? candidate : sameGrammarElement.getLast());
    ILeafNode result = candidateIsEndToken ? null : (ILeafNode) candidate;
    int sameOffsetSize = sameOffset.size();
    while (iterator.hasNext()) {
        ILeafNode next = iterator.next();
        if (result == null || result.isHidden()) {
            result = next;
        }
        if (next.getTotalLength() == 0) {
            // potential indentation token
            EObject rule = tryGetGrammarElementAsRule(next);
            if (rule != grammarElement) {
                LineAndColumn lineAndColumn = NodeModelUtils.getLineAndColumn(root, next.getTotalOffset());
                if (lineAndColumn.getColumn() <= completionColumn) {
                    return result;
                } else {
                    if (sameOffset.isEmpty()) {
                        if (sameGrammarElement.isEmpty()) {
                            result = null;
                        } else {
                            result = sameGrammarElement.removeLast();
                        }

                    } else {
                        if (sameOffsetSize >= sameOffset.size()) {
                            result = sameOffset.removeLast();
                        } else {
                            sameOffset.removeLast();
                        }
                    }
                }
            } else {
                sameOffset.add(next);
            }
        }
    }
    return candidate;
}

From source file:com.facebook.stats.QuantileDigest.java

/**
 * Gets the values at the specified quantiles +/- maxError. The list of quantiles must be sorted
 * in increasing order, and each value must be in the range [0, 1]
 */
public synchronized List<Long> getQuantiles(List<Double> quantiles) {
    checkArgument(Ordering.natural().isOrdered(quantiles), "quantiles must be sorted in increasing order");
    for (double quantile : quantiles) {
        checkArgument(quantile >= 0 && quantile <= 1, "quantile must be in the range [0, 1]");
    }

    final ImmutableList.Builder<Long> builder = ImmutableList.builder();
    final PeekingIterator<Double> iterator = Iterators.peekingIterator(quantiles.iterator());

    postOrderTraversal(root, new Callback() {
        private double sum = 0;

        public boolean process(Node node) {
            sum += node.weightedCount;

            while (iterator.hasNext() && sum > iterator.peek() * weightedCount) {
                iterator.next();

                // we know the max value ever seen, so cap the percentile to provide better error
                // bounds in this case
                long value = Math.min(node.getUpperBound(), max);

                builder.add(value);
            }

            return iterator.hasNext();
        }
    });

    // we finished the traversal without consuming all quantiles. This means the remaining quantiles
    // correspond to the max known value
    while (iterator.hasNext()) {
        builder.add(max);
        iterator.next();
    }

    return builder.build();
}
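
A hypothetical call to the method above, assuming digest is a populated com.facebook.stats.QuantileDigest; the input list must be sorted ascending and each value must lie in [0, 1]:

List<Long> values = digest.getQuantiles(ImmutableList.of(0.5, 0.95, 0.99));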

From source file:com.techcavern.pircbotz.InputParser.java

/**
 * Called when the mode of a channel is set. We process this in
 * order to call the appropriate onOp, onDeop, etc method before
 * finally calling the override-able onMode method.
 * <p>
 * Note that this method is not intended to appear in the
 * generated javadoc documentation.
 *
 * @param user The user that set the mode.
 * @param target The channel or nick that the mode operation applies to.
 * @param mode The mode that has been set.
 */
public void processMode(User user, String target, String mode) {
    if (configuration.getChannelPrefixes().indexOf(target.charAt(0)) >= 0) {
        // The mode of a channel is being changed.
        Channel channel = bot.getUserChannelDao().getChannel(target);
        channel.parseMode(mode);
        ImmutableList<String> modeParsed = ImmutableList.copyOf(StringUtils.split(mode, ' '));
        PeekingIterator<String> params = Iterators.peekingIterator(modeParsed.iterator());

        //Process modes letter by letter, grabbing parameters as needed
        boolean adding = true;
        String modeLetters = params.next();
        for (int i = 0; i < modeLetters.length(); i++) {
            char curModeChar = modeLetters.charAt(i);
            if (curModeChar == '+')
                adding = true;
            else if (curModeChar == '-')
                adding = false;
            else {
                ChannelModeHandler modeHandler = configuration.getChannelModeHandlers().get(curModeChar);
                if (modeHandler != null)
                    modeHandler.handleMode(bot, channel, user, params, adding, true);
            }
        }
        configuration.getListenerManager()
                .dispatchEvent(new ModeEvent<PircBotZ>(bot, channel, user, mode, modeParsed));
    } else
        // The mode of a user is being changed.
        configuration.getListenerManager().dispatchEvent(
                new UserModeEvent<PircBotZ>(bot, user, bot.getUserChannelDao().getUser(target), mode));
}
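
The key trick above is sharing one PeekingIterator of parameters across all mode handlers, so each handler consumes exactly the arguments it needs. A minimal, library-independent sketch of the same parsing loop (the mode string and nicks are illustrative):

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;

public class ModeParse {
    public static void main(String[] args) {
        PeekingIterator<String> params = Iterators
                .peekingIterator(ImmutableList.of("+ov", "alice", "bob").iterator());
        String modeLetters = params.next(); // the mode letters come first
        boolean adding = true;
        for (char c : modeLetters.toCharArray()) {
            if (c == '+') {
                adding = true;
            } else if (c == '-') {
                adding = false;
            } else {
                // each mode letter that takes an argument pulls it from the shared iterator
                System.out.printf("%s %c for %s%n", adding ? "set" : "unset", c, params.next());
            }
        }
    }
}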

From source file:com.google.gerrit.server.mail.receive.TextParser.java

/**
 * Parses comments from plaintext email.
 *
 * @param email MailMessage as received from the email service.
 * @param comments Comments previously persisted on the change that caused the original
 *     notification email to be sent out. Ordering must be the same as in the outbound email
 * @param changeUrl Canonical change url that points to the change on this Gerrit instance.
 *     Example: https://go-review.googlesource.com/#/c/91570
 * @return List of MailComments parsed from the plaintext part of the email.
 */
public static List<MailComment> parse(MailMessage email, Collection<Comment> comments, String changeUrl) {
    String body = email.textContent();
    // Replace CR-LF by \n
    body = body.replace("\r\n", "\n");

    List<MailComment> parsedComments = new ArrayList<>();

    // Some email clients (like GMail) use >> for enquoting text when there are
    // inline comments that the users typed. These will then be enquoted by a
    // single >. We sanitize this by unifying it into >. Inline comments typed
    // by the user will not be enquoted.
    //
    // Example:
    // Some comment
    // >> Quoted Text
    // >> Quoted Text
    // > A comment typed in the email directly
    String singleQuotePattern = "\n> ";
    String doubleQuotePattern = "\n>> ";
    if (countOccurrences(body, doubleQuotePattern) > countOccurrences(body, singleQuotePattern)) {
        body = body.replace(doubleQuotePattern, singleQuotePattern);
    }

    PeekingIterator<Comment> iter = Iterators.peekingIterator(comments.iterator());

    String[] lines = body.split("\n");
    MailComment currentComment = null;
    String lastEncounteredFileName = null;
    Comment lastEncounteredComment = null;
    for (String line : lines) {
        if (line.equals(">")) {
            // Skip empty lines
            continue;
        }
        if (line.startsWith("> ")) {
            line = line.substring("> ".length()).trim();
            // This is not a comment, try to advance the file/comment pointers and
            // add previous comment to list if applicable
            if (currentComment != null) {
                if (currentComment.type == MailComment.CommentType.CHANGE_MESSAGE) {
                    currentComment.message = ParserUtil.trimQuotation(currentComment.message);
                }
                if (!Strings.isNullOrEmpty(currentComment.message)) {
                    parsedComments.add(currentComment);
                }
                currentComment = null;
            }

            if (!iter.hasNext()) {
                continue;
            }
            Comment perspectiveComment = iter.peek();
            if (line.equals(ParserUtil.filePath(changeUrl, perspectiveComment))) {
                if (lastEncounteredFileName == null
                        || !lastEncounteredFileName.equals(perspectiveComment.key.filename)) {
                    // This is the annotation of a file
                    lastEncounteredFileName = perspectiveComment.key.filename;
                    lastEncounteredComment = null;
                } else if (perspectiveComment.lineNbr == 0) {
                    // This was originally a file-level comment
                    lastEncounteredComment = perspectiveComment;
                    iter.next();
                }
            } else if (ParserUtil.isCommentUrl(line, changeUrl, perspectiveComment)) {
                lastEncounteredComment = perspectiveComment;
                iter.next();
            }
        } else {
            // This is a comment. Try to append to previous comment if applicable or
            // create a new comment.
            if (currentComment == null) {
                // Start new comment
                currentComment = new MailComment();
                currentComment.message = line;
                if (lastEncounteredComment == null) {
                    if (lastEncounteredFileName == null) {
                        // Change message
                        currentComment.type = MailComment.CommentType.CHANGE_MESSAGE;
                    } else {
                        // File comment not sent in reply to another comment
                        currentComment.type = MailComment.CommentType.FILE_COMMENT;
                        currentComment.fileName = lastEncounteredFileName;
                    }
                } else {
                    // Comment sent in reply to another comment
                    currentComment.inReplyTo = lastEncounteredComment;
                    currentComment.type = MailComment.CommentType.INLINE_COMMENT;
                }
            } else {
                // Attach to previous comment
                currentComment.message += "\n" + line;
            }
        }
    }
    // There is no need to attach the currentComment after this loop as all
    // emails have footers and other enquoted text after the last comment
    // appeared and the last comment will have already been added to the list
    // at this point.

    return parsedComments;
}

From source file:org.zanata.rest.service.TMXStreamingOutput.java

/**
 * Goes through the translation units returned by this object's iterator
 * (see {@link #TMXStreamingOutput(Iterator, TMXExportStrategy)} and writes
 * each one to the OutputStream in TMX form.
 * <p>
 * Any resources associated with the iterator will be closed before this
 * method exits.
 */
@Override
public void write(OutputStream output) throws IOException, WebApplicationException {
    int tuCount = 0;
    try {
        log.info("streaming output started for: {}", jobName);
        PeekingIterator<T> iter = Iterators.peekingIterator(tuIter);
        // Fetch the first result, so that we can fail fast, before
        // writing any output. This should enable RESTEasy to return an
        // error instead of simply aborting the output stream.
        if (iter.hasNext())
            iter.peek();
        StreamSerializer stream = new StreamSerializer(output);
        stream.writeXMLDeclaration();
        stream.write(new DocType("tmx", "http://www.lisa.org/tmx/tmx14.dtd"));
        stream.writeNewLine();
        Element tmx = new Element("tmx");
        tmx.addAttribute(new Attribute("version", "1.4"));
        startElem(stream, tmx);
        indent(stream);
        writeElem(stream, exportStrategy.buildHeader());
        indent(stream);
        Element body = new Element("body");
        startElem(stream, body);
        while (iter.hasNext()) {
            T tu = iter.next();
            writeIfComplete(stream, tu);
            ++tuCount;
        }
        indent(stream);
        endElem(stream, body);
        endElem(stream, tmx);
        stream.flush();
    } finally {
        close();
        log.info("streaming output stopped for: {}, TU count={}", jobName, tuCount);
    }
}
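
The peek() before any output is the useful pattern here: it forces the first (possibly failing) fetch while an error response can still be returned, instead of aborting a half-written stream. A minimal sketch of the pattern, with a deliberately failing source iterator for illustration:

import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;
import java.util.Iterator;

public class FailFastPeek {
    public static void main(String[] args) {
        Iterator<String> lazySource = new Iterator<String>() {
            public boolean hasNext() { return true; }
            public String next() { throw new IllegalStateException("backend unavailable"); }
        };
        PeekingIterator<String> iter = Iterators.peekingIterator(lazySource);
        try {
            if (iter.hasNext()) {
                iter.peek(); // fails here, before any output has been written
            }
            // ...only now start streaming; iter.next() would return the peeked element
        } catch (IllegalStateException e) {
            System.out.println("reported cleanly: " + e.getMessage());
        }
    }
}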

From source file:co.cask.cdap.metrics.query.MetricsRequestExecutor.java

private void computeProcessBusyness(MetricsRequest metricsRequest, TimeSeriesResponse.Builder builder)
        throws OperationException {
    MetricsScanQuery scanQuery = new MetricsScanQueryBuilder().setContext(metricsRequest.getContextPrefix())
            .setMetric("process.tuples.read").build(metricsRequest.getStartTime(), metricsRequest.getEndTime());
    MetricsScope scope = metricsRequest.getScope();

    PeekingIterator<TimeValue> tuplesReadItor = Iterators
            .peekingIterator(queryTimeSeries(scope, scanQuery, metricsRequest.getInterpolator()));

    scanQuery = new MetricsScanQueryBuilder().setContext(metricsRequest.getContextPrefix())
            .setMetric("process.events.processed")
            .build(metricsRequest.getStartTime(), metricsRequest.getEndTime());

    PeekingIterator<TimeValue> eventsProcessedItor = Iterators
            .peekingIterator(queryTimeSeries(scope, scanQuery, metricsRequest.getInterpolator()));

    for (int i = 0; i < metricsRequest.getCount(); i++) {
        long resultTime = metricsRequest.getStartTime() + i;
        int tupleRead = 0;
        int eventProcessed = 0;
        if (tuplesReadItor.hasNext() && tuplesReadItor.peek().getTime() == resultTime) {
            tupleRead = tuplesReadItor.next().getValue();
        }
        if (eventsProcessedItor.hasNext() && eventsProcessedItor.peek().getTime() == resultTime) {
            eventProcessed = eventsProcessedItor.next().getValue();
        }
        if (eventProcessed != 0) {
            int busyness = (int) ((float) tupleRead / eventProcessed * 100);
            builder.addData(resultTime, busyness > 100 ? 100 : busyness);
        } else {
            builder.addData(resultTime, 0);
        }
    }
}

From source file:edu.si.sidora.tabularmetadata.TabularMetadataGenerator.java

/**
 * The main entry point to application workflow.
 *
 * @param dataUrl Where to find some tabular data.
 * @param withHeaders whether this tabular data has a header row
 * @return The results of metadata extraction.
 * @throws IOException if the tabular data cannot be read.
 */
public TabularMetadata getMetadata(final URL dataUrl, final Boolean withHeaders) throws IOException {
    try (final CSVParser csvParser = parse(dataUrl, CHARACTER_ENCODING, format)) {
        final PeekingIterator<CSVRecord> parser = peekingIterator(csvParser.iterator());
        // TODO allow a HeaderHeuristic to use more information than the
        // first line of data
        final CSVRecord firstLine = parser.peek();
        final boolean hasHeaders;
        if (withHeaders == null) {
            log.debug("Checking for the existence of headers.");
            for (final String field : firstLine) {
                headerStrategy.accept(field);
            }
            hasHeaders = headerStrategy.results();
            headerStrategy.reset();
        } else {
            hasHeaders = withHeaders;
            log.debug("Accepted information that headers is {}.", hasHeaders);
        }
        final List<String> headerNames;
        if (hasHeaders) {
            headerNames = newArrayList(firstLine);
            log.debug("Found headers: {}", headerNames);
            if (parser.hasNext())
                parser.next();
        } else {
            headerNames = emptyHeaders(firstLine.size());
            log.debug("Found no headers.");
        }
        // scan values up to the limit
        final TabularScanner scanner = new TabularScanner(parser, typeStrategy, rangeStrategy, enumStrategy);
        scanner.scan(scanLimit);

        final List<TypeDeterminingHeuristic<?>> typeStrategies = scanner.getTypeStrategies();
        final List<RangeDeterminingHeuristic<?>> rangeStrategies = scanner.getRangeStrategies();
        final List<EnumeratedValuesHeuristic<?>> enumStrategies = scanner.getEnumStrategies();

        // extract the results for each field
        final List<DataType> columnTypes = typeStrategies.stream().map(Heuristic::results).collect(toList());
        final List<Map<DataType, Range<?>>> minMaxes = rangeStrategies.stream().map(Heuristic::results)
                .collect(toList());
        final List<Map<DataType, Set<String>>> enumValues = enumStrategies.stream().map(Heuristic::results)
                .collect(toList());
        final List<Ratio> valuesSeen = typeStrategies.stream()
                .map(h -> new Ratio(h.valuesSeen() - h.parseableValuesSeen(), h.valuesSeen()))
                .collect(toList());
        return new TabularMetadata(headerNames, valuesSeen, columnTypes, minMaxes, enumValues);
    } catch (final NoSuchElementException e) {
        throw new EmptyDataFileException(dataUrl + " has no data in it!");
    }
}
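
The same peek-the-first-record idea can be shown in a few lines with Apache Commons CSV; the input and the header heuristic below are illustrative, not the detection logic of the class above:

import static com.google.common.collect.Iterators.peekingIterator;

import com.google.common.collect.PeekingIterator;
import java.io.IOException;
import java.io.StringReader;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class HeaderSniff {
    public static void main(String[] args) throws IOException {
        try (CSVParser csv = CSVFormat.DEFAULT.parse(new StringReader("name,age\nalice,30\n"))) {
            PeekingIterator<CSVRecord> records = peekingIterator(csv.iterator());
            CSVRecord first = records.peek();                   // inspect without consuming
            boolean hasHeaders = !first.get(1).matches("\\d+"); // crude heuristic, for illustration
            if (hasHeaders) {
                records.next();                                 // skip the header row
            }
            while (records.hasNext()) {
                System.out.println(records.next().get(0));      // data rows only: prints "alice"
            }
        }
    }
}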