Example usage for com.google.common.collect Iterables consumingIterable

Introduction

On this page you can find example usages of com.google.common.collect.Iterables.consumingIterable, collected from open-source projects.

Prototype

public static <T> Iterable<T> consumingIterable(final Iterable<T> iterable) 

Document

Returns a view of the supplied iterable that wraps each generated Iterator through Iterators#consumingIterator(Iterator). In practice, every element is removed from the underlying collection as it is returned, which is why the examples below use it to drain queues and buffers while processing their contents.
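
A minimal sketch (not taken from any of the projects below) illustrating this draining behavior on a plain ArrayDeque:

import java.util.ArrayDeque;
import java.util.Queue;

import com.google.common.collect.Iterables;

public class ConsumingIterableDemo {
    public static void main(String[] args) {
        Queue<String> queue = new ArrayDeque<>();
        queue.add("a");
        queue.add("b");
        queue.add("c");

        // Each iteration removes the element it returns from the backing queue.
        for (String element : Iterables.consumingIterable(queue)) {
            System.out.println(element);
        }

        System.out.println(queue.isEmpty()); // prints "true"
    }
}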

Usage

From source file: org.apache.aurora.scheduler.async.GatingDelayExecutor.java

private synchronized void flush() {
    for (Runnable work : Iterables.consumingIterable(queue)) {
        work.run();
    }
}

From source file: com.palantir.common.collect.IterableView.java

public IterableView<T> consumingIterable() {
    return of(Iterables.consumingIterable(delegate()));
}

From source file: com.b2international.snowowl.core.validation.ValidateRequest.java

private ValidationResult doValidate(BranchContext context, Writer index) throws IOException {
    final String branchPath = context.branchPath();

    ValidationRuleSearchRequestBuilder req = ValidationRequests.rules().prepareSearch();

    if (!CompareUtils.isEmpty(ruleIds)) {
        req.filterByIds(ruleIds);
    }

    final ValidationRules rules = req.all().build().execute(context);

    final ValidationThreadPool pool = context.service(ValidationThreadPool.class);
    final BlockingQueue<IssuesToPersist> issuesToPersistQueue = Queues.newLinkedBlockingDeque();
    final List<Promise<Object>> validationPromises = Lists.newArrayList();
    // evaluate selected rules
    for (ValidationRule rule : rules) {
        checkArgument(rule.getCheckType() != null, "CheckType is missing for rule " + rule.getId());
        final ValidationRuleEvaluator evaluator = ValidationRuleEvaluator.Registry.get(rule.getType());
        if (evaluator != null) {
            validationPromises.add(pool.submit(rule.getCheckType(), () -> {
                Stopwatch w = Stopwatch.createStarted();

                try {
                    LOG.info("Executing rule '{}'...", rule.getId());
                    final List<ComponentIdentifier> componentIdentifiers = evaluator.eval(context, rule,
                            ruleParameters);
                    issuesToPersistQueue.offer(new IssuesToPersist(rule.getId(), componentIdentifiers));
                    LOG.info("Execution of rule '{}' successfully completed in '{}'.", rule.getId(), w);
                    // TODO report successfully executed validation rule
                } catch (Exception e) {
                    // TODO report failed validation rule
                    LOG.info("Execution of rule '{}' failed after '{}'.", rule.getId(), w, e);
                }
            }));
        }
    }

    final Set<String> ruleIds = rules.stream().map(ValidationRule::getId).collect(Collectors.toSet());
    final Multimap<String, ComponentIdentifier> whiteListedEntries = fetchWhiteListEntries(context, ruleIds);

    final Promise<List<Object>> promise = Promise.all(validationPromises);

    while (!promise.isDone() || !issuesToPersistQueue.isEmpty()) {
        if (!issuesToPersistQueue.isEmpty()) {
            final Collection<IssuesToPersist> issuesToPersist = newArrayList();
            issuesToPersistQueue.drainTo(issuesToPersist);
            if (!issuesToPersist.isEmpty()) {
                final List<String> rulesToPersist = issuesToPersist.stream().map(itp -> itp.ruleId)
                        .collect(Collectors.toList());
                LOG.info("Persisting issues generated by rules '{}'...", rulesToPersist);
                // persist new issues generated by rules so far, extending them using the Issue Extension API
                int persistedIssues = 0;

                final Multimap<String, ValidationIssue> issuesToExtendWithDetailsByToolingId = HashMultimap
                        .create();
                for (IssuesToPersist ruleIssues : Iterables.consumingIterable(issuesToPersist)) {
                    final String ruleId = ruleIssues.ruleId;
                    final List<ValidationIssue> existingRuleIssues = ValidationRequests.issues().prepareSearch()
                            .all().filterByBranchPath(branchPath).filterByRule(ruleId).build().execute(context)
                            .getItems();

                    final Set<String> issueIdsToDelete = Sets.newHashSet();

                    final Map<ComponentIdentifier, ValidationIssue> existingIsssuesByComponentIdentifier = new HashMap<>();

                    for (ValidationIssue issue : existingRuleIssues) {
                        if (existingIsssuesByComponentIdentifier.containsKey(issue.getAffectedComponent())) {
                            issueIdsToDelete.add(issue.getId());
                        } else {
                            existingIsssuesByComponentIdentifier.put(issue.getAffectedComponent(), issue);
                        }
                    }

                    // remove all processed whitelist entries 
                    final Collection<ComponentIdentifier> ruleWhiteListEntries = whiteListedEntries
                            .removeAll(ruleId);
                    final String toolingId = rules.stream().filter(rule -> ruleId.equals(rule.getId()))
                            .findFirst().get().getToolingId();
                    for (ComponentIdentifier componentIdentifier : ruleIssues.affectedComponentIds) {

                        if (!existingIsssuesByComponentIdentifier.containsKey(componentIdentifier)) {
                            final ValidationIssue validationIssue = new ValidationIssue(
                                    UUID.randomUUID().toString(), ruleId, branchPath, componentIdentifier,
                                    ruleWhiteListEntries.contains(componentIdentifier));

                            issuesToExtendWithDetailsByToolingId.put(toolingId, validationIssue);
                            persistedIssues++;
                        } else {
                            final ValidationIssue issueToCopy = existingIsssuesByComponentIdentifier
                                    .get(componentIdentifier);

                            final ValidationIssue validationIssue = new ValidationIssue(issueToCopy.getId(),
                                    issueToCopy.getRuleId(), issueToCopy.getBranchPath(),
                                    issueToCopy.getAffectedComponent(),
                                    ruleWhiteListEntries.contains(issueToCopy.getAffectedComponent()));
                            validationIssue.setDetails(Maps.newHashMap());

                            issuesToExtendWithDetailsByToolingId.put(toolingId, validationIssue);
                            persistedIssues++;
                            existingIsssuesByComponentIdentifier.remove(componentIdentifier);
                        }
                    }

                    existingRuleIssues.stream()
                            .filter(issue -> existingIsssuesByComponentIdentifier
                                    .containsKey(issue.getAffectedComponent()))
                            .forEach(issue -> issueIdsToDelete.add(issue.getId()));

                    if (!issueIdsToDelete.isEmpty()) {
                        index.removeAll(Collections.singletonMap(ValidationIssue.class, issueIdsToDelete));
                    }

                }

                for (String toolingId : issuesToExtendWithDetailsByToolingId.keySet()) {
                    final ValidationIssueDetailExtension extensions = ValidationIssueDetailExtensionProvider.INSTANCE
                            .getExtensions(toolingId);
                    final Collection<ValidationIssue> issues = issuesToExtendWithDetailsByToolingId
                            .removeAll(toolingId);
                    extensions.extendIssues(context, issues);
                    for (ValidationIssue issue : issues) {
                        index.put(issue.getId(), issue);
                    }
                }
                index.commit();
                LOG.info("Persisted '{}' issues generated by rules '{}'.", persistedIssues, rulesToPersist);
            }
        }

        try {
            Thread.sleep(1000L);
        } catch (InterruptedException e) {
            throw new SnowowlRuntimeException(e);
        }
    }

    // TODO return ValidationResult object with status and new issue IDs as set
    return new ValidationResult(context.id(), context.branchPath());
}

From source file: com.b2international.snowowl.snomed.importer.rf2.refset.RefSetMemberLookup.java

public void registerNewMemberStorageKeys() {
    // Consume each element while it is being registered
    if (newMembers != null) {
        for (final SnomedRefSetMember newMember : Iterables.consumingIterable(newMembers.values())) {
            registerMemberStorageKey(newMember.getUuid(), CDOIDUtil.getLong(newMember.cdoID()));
        }
        newMembers = null;
    }
}

From source file: com.continuuity.weave.internal.logging.KafkaAppender.java

private ListenableFuture<Integer> publishLogs() {
    // If the publisher is not available, simply returns a completed future.
    PreparePublish publisher = KafkaAppender.this.publisher.get();
    if (publisher == null) {
        return Futures.immediateFuture(0);
    }

    int count = 0;
    for (String json : Iterables.consumingIterable(buffer)) {
        publisher.add(Charsets.UTF_8.encode(json), 0);
        count++;
    }
    // Nothing to publish, simply returns a completed future.
    if (count == 0) {
        return Futures.immediateFuture(0);
    }

    bufferedSize.set(0);
    final int finalCount = count;
    return Futures.transform(publisher.publish(), new Function<Object, Integer>() {
        @Override
        public Integer apply(Object input) {
            return finalCount;
        }
    });
}

From source file: co.cask.cdap.test.internal.DefaultApplicationManager.java

@Override
public void stopAll() {
    try {
        for (Id.Program programId : Iterables.consumingIterable(runningProcesses)) {
            // Have to check first, since MapReduce jobs could stop by themselves earlier, and appFabricServer.stop will
            // throw an error when you stop something that is not running.
            if (isRunning(programId)) {
                appFabricClient.stopProgram(application.getNamespaceId(), programId.getApplicationId(),
                        programId.getId(), programId.getType());
            }
        }
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}

From source file: org.pircbotx.ReplayServer.java

public static void replay(Configuration.Builder config, InputStream input, String title) throws Exception {
    log.info("---Replaying {}---", title);
    StopWatch timer = new StopWatch();
    timer.start();

    //Wrap listener manager with ours that siphons off events
    final Queue<Event> eventQueue = Lists.newLinkedList();
    WrapperListenerManager newManager = new WrapperListenerManager(config.getListenerManager(), eventQueue);
    config.setListenerManager(newManager);
    config.addListener(new ReplayListener());

    final LinkedList<String> outputQueue = Lists.newLinkedList();
    ReplayPircBotX bot = new ReplayPircBotX(config.buildConfiguration(), outputQueue);

    BufferedReader fileInput = new BufferedReader(new InputStreamReader(input));
    boolean skippedHeader = false;
    while (true) {
        String lineRaw = fileInput.readLine();
        if (bot.isClosed() && StringUtils.isNotBlank(lineRaw)) {
            throw new RuntimeException("bot is closed but file still has line " + lineRaw);
        } else if (!bot.isClosed() && StringUtils.isBlank(lineRaw)) {
            throw new RuntimeException("bot is not closed but file doesn't have any more lines");
        } else if (bot.isClosed() && StringUtils.isBlank(lineRaw)) {
            log.debug("(done) Bot is closed and file doesn't have any more lines");
            break;
        }

        log.debug("(line) " + lineRaw);
        String[] lineParts = StringUtils.split(lineRaw, " ", 2);
        String command = lineParts[0];
        String line = lineParts[1];

        //For now skip the info lines PircBotX is supposed to send on connect
        //They are only sent when connect() is called which requires multithreading
        if (!skippedHeader) {
            if (command.equals("pircbotx.output"))
                continue;
            else if (command.equals("pircbotx.input")) {
                log.debug("Finished skipping header");
                skippedHeader = true;
            } else
                throw new RuntimeException("Unknown line " + lineRaw);
        }

        if (command.equals("pircbotx.input")) {
            bot.getInputParser().handleLine(line);
        } else if (command.equals("pircbotx.output")) {
            String lastOutput = outputQueue.isEmpty() ? null : outputQueue.pop();
            if (StringUtils.startsWith(line, "JOIN")) {
                log.debug("Skipping JOIN output, server should send its own JOIN");
            } else if (StringUtils.startsWith(line, "QUIT")) {
                log.debug("Skipping QUIT output, server should send its own QUIT");
            } else if (!line.equals(lastOutput)) {
                log.error("Expected last output: " + line);
                log.error("Given last output: " + lastOutput);
                for (String curOutput : outputQueue) {
                    log.error("Queued output: " + curOutput);
                }
                throw new RuntimeException("Failed to verify output (see log)");
            }
        } else {
            throw new RuntimeException("Unknown line " + lineRaw);
        }

        for (Event curEvent : Iterables.consumingIterable(eventQueue))
            log.debug("(events) " + curEvent);

        log.debug("");
    }

    timer.stop();
    log.debug("---Replay successful in {}---",
            DurationFormatUtils.formatDuration(timer.getTime(), "mm'min'ss'sec'SSS'ms'"));
}

From source file: org.apache.twill.internal.logging.KafkaAppender.java

/**
 * Publishes buffered logs to Kafka, within the given timeout.
 *
 * @return Number of logs published.
 * @throws TimeoutException If timeout reached before publish completed.
 */
private int publishLogs(long timeout, TimeUnit timeoutUnit) throws TimeoutException {
    List<ByteBuffer> logs = Lists.newArrayListWithExpectedSize(bufferedSize.get());

    for (String json : Iterables.consumingIterable(buffer)) {
        logs.add(Charsets.UTF_8.encode(json));
    }

    long backOffTime = timeoutUnit.toNanos(timeout) / 10;
    if (backOffTime <= 0) {
        backOffTime = 1;
    }

    try {
        Stopwatch stopwatch = new Stopwatch();
        stopwatch.start();
        long publishTimeout = timeout;

        do {
            try {
                int published = doPublishLogs(logs).get(publishTimeout, timeoutUnit);
                bufferedSize.addAndGet(-published);
                return published;
            } catch (ExecutionException e) {
                addError("Failed to publish logs to Kafka.", e);
                TimeUnit.NANOSECONDS.sleep(backOffTime);
                publishTimeout -= stopwatch.elapsedTime(timeoutUnit);
                stopwatch.reset();
                stopwatch.start();
            }
        } while (publishTimeout > 0);
    } catch (InterruptedException e) {
        addWarn("Logs publish to Kafka interrupted.", e);
    }
    return 0;
}

From source file: com.b2international.index.es.EsDocumentWriter.java

@Override
public void commit() throws IOException {
    if (indexOperations.isEmpty() && deleteOperations.isEmpty() && updateOperations.isEmpty()) {
        return;
    }

    final Set<DocumentMapping> mappingsToRefresh = Collections.synchronizedSet(newHashSet());
    final EsClient client = admin.client();
    // apply bulk updates first
    final ListeningExecutorService executor;
    if (updateOperations.size() > 1) {
        executor = MoreExecutors
                .listeningDecorator(Executors.newFixedThreadPool(Math.min(4, updateOperations.size())));
    } else {
        executor = MoreExecutors.newDirectExecutorService();
    }
    final List<ListenableFuture<?>> updateFutures = newArrayList();
    for (BulkUpdate<?> update : updateOperations) {
        updateFutures.add(executor.submit(() -> bulkUpdate(client, update, mappingsToRefresh)));
    }
    try {
        executor.shutdown();
        Futures.allAsList(updateFutures).get();
        executor.awaitTermination(10, TimeUnit.SECONDS);
    } catch (InterruptedException | ExecutionException e) {
        throw new IndexException("Couldn't execute bulk updates", e);
    }

    // then bulk indexes/deletes
    if (!indexOperations.isEmpty() || !deleteOperations.isEmpty()) {
        final BulkProcessor processor = client.bulk(new BulkProcessor.Listener() {
            @Override
            public void beforeBulk(long executionId, BulkRequest request) {
                admin.log().debug("Sending bulk request {}", request.numberOfActions());
            }

            @Override
            public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
                admin.log().error("Failed bulk request", failure);
            }

            @Override
            public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
                admin.log().debug("Successfully processed bulk request ({}) in {}.", request.numberOfActions(),
                        response.getTook());
                if (response.hasFailures()) {
                    for (BulkItemResponse itemResponse : response.getItems()) {
                        checkState(!itemResponse.isFailed(), "Failed to commit bulk request in index '%s', %s",
                                admin.name(), itemResponse.getFailureMessage());
                    }
                }
            }
        }).setConcurrentRequests(getConcurrencyLevel()).setBulkActions(10_000)
                .setBulkSize(new ByteSizeValue(10L, ByteSizeUnit.MB)).build();

        for (Class<?> type : ImmutableSet.copyOf(indexOperations.rowKeySet())) {
            final Map<String, Object> indexOperationsForType = indexOperations.row(type);

            final DocumentMapping mapping = admin.mappings().getMapping(type);
            final String typeString = mapping.typeAsString();
            final String typeIndex = admin.getTypeIndex(mapping);

            mappingsToRefresh.add(mapping);

            for (Entry<String, Object> entry : Iterables.consumingIterable(indexOperationsForType.entrySet())) {
                final String id = entry.getKey();
                if (!deleteOperations.containsValue(id)) {
                    final Object obj = entry.getValue();
                    final Set<String> hashedFields = mapping.getHashedFields();
                    final byte[] _source;

                    if (!hashedFields.isEmpty()) {
                        final ObjectNode objNode = mapper.valueToTree(obj);
                        final ObjectNode hashedNode = mapper.createObjectNode();

                        // Preserve property order, share references with objNode
                        for (String hashedField : hashedFields) {
                            JsonNode value = objNode.get(hashedField);
                            if (value != null && !value.isNull()) {
                                hashedNode.set(hashedField, value);
                            }
                        }

                        final byte[] hashedBytes = mapper.writeValueAsBytes(hashedNode);
                        final HashCode hashCode = Hashing.sha1().hashBytes(hashedBytes);

                        // Inject the result as an extra field into the to-be-indexed JSON content
                        objNode.put(DocumentMapping._HASH, hashCode.toString());
                        _source = mapper.writeValueAsBytes(objNode);

                    } else {
                        _source = mapper.writeValueAsBytes(obj);
                    }

                    processor.add(new IndexRequest(typeIndex, typeString, id).opType(OpType.INDEX)
                            .source(_source, XContentType.JSON));
                }
            }

            for (String id : deleteOperations.removeAll(type)) {
                processor.add(new DeleteRequest(typeIndex, typeString, id));
            }

            // Flush processor between index boundaries
            processor.flush();
        }

        // Remaining delete operations can be executed on their own
        for (Class<?> type : ImmutableSet.copyOf(deleteOperations.keySet())) {
            final DocumentMapping mapping = admin.mappings().getMapping(type);
            final String typeString = mapping.typeAsString();
            final String typeIndex = admin.getTypeIndex(mapping);

            mappingsToRefresh.add(mapping);

            for (String id : deleteOperations.removeAll(type)) {
                processor.add(new DeleteRequest(typeIndex, typeString, id));
            }

            // Flush processor between index boundaries
            processor.flush();
        }

        try {
            processor.awaitClose(5, TimeUnit.MINUTES);
        } catch (InterruptedException e) {
            throw new IndexException("Interrupted bulk processing part of the commit", e);
        }
    }

    // refresh the index if there were only updates
    admin.refresh(mappingsToRefresh);
}

From source file: com.twitter.aurora.scheduler.state.StateManagerImpl.java

private static Map<String, Integer> getNameMappedPorts(Set<String> portNames, Set<Integer> allocatedPorts) {

    Preconditions.checkNotNull(portNames);

    // Expand ports.
    Map<String, Integer> ports = Maps.newHashMap();
    Set<Integer> portsRemaining = Sets.newHashSet(allocatedPorts);
    Iterator<Integer> portConsumer = Iterables.consumingIterable(portsRemaining).iterator();

    for (String portName : portNames) {
        Preconditions.checkArgument(portConsumer.hasNext(),
                "Allocated ports %s were not sufficient to expand task.", allocatedPorts);
        int portNumber = portConsumer.next();
        ports.put(portName, portNumber);
    }

    if (!portsRemaining.isEmpty()) {
        LOG.warning("Not all allocated ports were used to map ports!");
    }

    return ports;
}