Example usage for com.google.common.collect Iterators all

List of usage examples for com.google.common.collect Iterators all

Introduction

On this page you can find example usage for com.google.common.collect.Iterators.all.

Prototype

public static <T> boolean all(Iterator<T> iterator, Predicate<? super T> predicate) 

Document

Returns true if every element returned by iterator satisfies the given predicate.
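
The check is short-circuiting: it stops at the first element that fails the predicate, and it is vacuously true for an empty iterator. Below is a minimal, self-contained sketch of a call; the class name IteratorsAllExample and the sample data are illustrative only, not taken from the projects listed further down.

import com.google.common.base.Predicates;
import com.google.common.collect.Iterators;

import java.util.Arrays;
import java.util.List;

public class IteratorsAllExample {
    public static void main(String[] args) {
        List<String> names = Arrays.asList("alpha", "beta", "gamma");

        // Every element is non-empty, so this evaluates to true.
        boolean allNonEmpty = Iterators.all(names.iterator(), s -> !s.isEmpty());

        // Predicates.instanceOf is a frequent companion, as in the usage examples below.
        boolean allStrings = Iterators.all(names.iterator(), Predicates.instanceOf(String.class));

        // all(...) returns true for an empty iterator (vacuous truth).
        System.out.println(allNonEmpty + " " + allStrings); // prints "true true"
    }
}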

Usage

From source file:org.eclipse.sirius.diagram.ui.business.api.query.ConnectionQuery.java

/**
 * Return the constraint of the connection as a list of AbsoluteBendpoint only
 * if the constraint is a list of absolute bendpoints.
 *
 * @return an optional list of {@link AbsoluteBendpoint}
 */
public Option<List<AbsoluteBendpoint>> getAbsoluteBendpointsConstraint() {

    Object cons = connection.getRoutingConstraint();
    if (cons instanceof List) {
        List<?> constraintsList = (List<?>) cons;
        if (Iterators.all(constraintsList.iterator(), Predicates.instanceOf(AbsoluteBendpoint.class))) {
            List<AbsoluteBendpoint> result = Lists.newLinkedList();
            for (Object object : constraintsList) {
                result.add((AbsoluteBendpoint) object);
            }
            return Options.newSome(result);
        }
    }
    return Options.newNone();
}

From source file:org.eclipse.sirius.diagram.ui.business.api.query.ConnectionQuery.java

/**
 * Return the constraint of the connection as a list of RelativeBendpoint only
 * if the constraint is a list of relative bendpoints.
 *
 * @return an optional list of {@link RelativeBendpoint}
 */
public Option<List<RelativeBendpoint>> getRelativeBendpointsConstraint() {

    Object cons = connection.getRoutingConstraint();
    if (cons instanceof List) {
        List<?> constraintsList = (List<?>) cons;
        if (Iterators.all(constraintsList.iterator(), Predicates.instanceOf(RelativeBendpoint.class))) {
            List<RelativeBendpoint> result = Lists.newLinkedList();
            for (Object object : constraintsList) {
                result.add((RelativeBendpoint) object);
            }
            return Options.newSome(result);
        }
    }
    return Options.newNone();
}

From source file:org.apache.cassandra.io.sstable.format.SSTableReader.java

/**
 * Calculate approximate key count.
 * If a cardinality estimator is available on all of the given sstables, this method uses it to
 * estimate the key count.
 * If not, it falls back to the index summaries.
 *
 * @param sstables SSTables to calculate the key count for
 * @return estimated key count
 */
public static long getApproximateKeyCount(Collection<SSTableReader> sstables) {
    long count = -1;

    // check if cardinality estimator is available for all SSTables
    boolean cardinalityAvailable = !sstables.isEmpty()
            && Iterators.all(sstables.iterator(), new Predicate<SSTableReader>() {
                public boolean apply(SSTableReader sstable) {
                    return sstable.descriptor.version.hasNewStatsFile();
                }
            });

    // if it is, load them to estimate key count
    if (cardinalityAvailable) {
        boolean failed = false;
        ICardinality cardinality = null;
        for (SSTableReader sstable : sstables) {
            if (sstable.openReason == OpenReason.EARLY)
                continue;

            try {
                CompactionMetadata metadata = (CompactionMetadata) sstable.descriptor.getMetadataSerializer()
                        .deserialize(sstable.descriptor, MetadataType.COMPACTION);
                // If we can't load the CompactionMetadata, we are forced to estimate the keys using the index
                // summary. (CASSANDRA-10676)
                if (metadata == null) {
                    logger.warn("Reading cardinality from Statistics.db failed for {}", sstable.getFilename());
                    failed = true;
                    break;
                }

                if (cardinality == null)
                    cardinality = metadata.cardinalityEstimator;
                else
                    cardinality = cardinality.merge(metadata.cardinalityEstimator);
            } catch (IOException e) {
                logger.warn("Reading cardinality from Statistics.db failed.", e);
                failed = true;
                break;
            } catch (CardinalityMergeException e) {
                logger.warn("Cardinality merge failed.", e);
                failed = true;
                break;
            }
        }
        if (cardinality != null && !failed)
            count = cardinality.cardinality();
    }

    // if something went wrong above or cardinality is not available, calculate using index summary
    if (count < 0) {
        for (SSTableReader sstable : sstables)
            count += sstable.estimatedKeys();
    }
    return count;
}

From source file:org.eclipse.viatra.query.patternlanguage.emf.validation.PatternLanguageValidator.java

/**
 * Checks if an aggregator expression has the correct number (0 or 1) of aggregate variables.
 *
 * @param expression
 *            the aggregator expression
 * @since 1.4
 */
@Check
public void checkAggregatorExpression(AggregatedValue expression) {
    JvmDeclaredType aggregator = expression.getAggregator();
    final Class<IAggregatorFactory> clazz = IAggregatorFactory.class;
    if (aggregator != null && !aggregator.eIsProxy()) {
        if (typeReferences.is(aggregator, clazz)) {
            return;
        }
        Iterator<JvmTypeReference> it = aggregator.getSuperTypes().iterator();
        if (Iterators.all(it, input -> input == null || input.eIsProxy() || !typeReferences.is(input, clazz))) {
            error(String.format("%s is not an aggregator definition.", aggregator.getSimpleName()),
                    PatternLanguagePackage.Literals.AGGREGATED_VALUE__AGGREGATOR,
                    IssueCodes.INVALID_AGGREGATOR);
            return;
        }
        List<VariableReference> references = AggregatorUtil.getAllAggregatorVariables(expression);
        if (AggregatorUtil.mustHaveAggregatorVariables(expression)) {
            if (references.isEmpty()) {
                error(String.format(MISSING_AGGREGATE_MESSAGE, aggregator.getSimpleName()), expression,
                        PatternLanguagePackage.Literals.AGGREGATED_VALUE__CALL,
                        IssueCodes.INVALID_AGGREGATOR_PARAMETER);
            }
            if (references.size() > 1) {
                for (VariableReference reference : references) {
                    error(String.format(EXACTLY_ONE_AGGREGATE_MESSAGE, aggregator.getSimpleName()), reference,
                            null, IssueCodes.INVALID_AGGREGATOR_PARAMETER);
                }
            }
        } else {
            for (VariableReference reference : references) {
                error(String.format(UNEXPECTED_AGGREGATE_MESSAGE, reference.getVar(),
                        aggregator.getSimpleName()), reference, null, IssueCodes.INVALID_AGGREGATOR_PARAMETER);
            }
        }

    }
}

From source file:edu.ucsb.eucalyptus.cloud.entities.VolumeToken.java

/**
 * Invalidate the export for this token for the given ip and iqn.
 * Does not remove any info, just marks the matching export record as inactive (and the token as
 * invalid if no records remain active).
 * @param ip
 * @param iqn
 */
public void invalidateExport(final String ip, final String iqn) throws EucalyptusCloudException {
    Function<VolumeToken, VolumeToken> deactivateExport = new Function<VolumeToken, VolumeToken>() {
        @Override
        public VolumeToken apply(VolumeToken tok) {
            VolumeToken tokenEntity = Entities.merge(tok);
            try {
                for (VolumeExportRecord rec : tokenEntity.getExportRecords()) {
                    if (rec.getIsActive() && rec.getHostIp().equals(ip) && rec.getHostIqn().equals(iqn)) {
                        rec.setIsActive(Boolean.FALSE);
                        break;
                    }
                }

                Predicate<VolumeExportRecord> notActive = new Predicate<VolumeExportRecord>() {
                    @Override
                    public boolean apply(VolumeExportRecord record) {
                        return !record.getIsActive();
                    }
                };

                //If no records are active, then invalidate the token
                if (Iterators.all(tokenEntity.getExportRecords().iterator(), notActive)) {
                    //Invalidate the token as well.
                    tok.setIsValid(Boolean.FALSE);
                }
                Entities.flush(tokenEntity);
                return tokenEntity;
            } catch (Exception e) {
                LOG.error("Could not invalidate export record for volume " + tok.getVolume().getVolumeId()
                        + " token " + tok.getToken() + " ip " + ip + " iqn " + iqn, e);
            }
            return null;
        }
    };

    try {
        if (Entities.asTransaction(VolumeExportRecord.class, deactivateExport).apply(this) == null) {
            throw new Exception("Failed to invalidate export, got null result from deactivation");
        }
    } catch (Exception e) {
        LOG.error("Failed to invalidate export: " + e.getMessage(), e);
        throw new EucalyptusCloudException("Failed to invalidate export");
    }
}

From source file:com.datatorrent.contrib.kafka.AbstractPartitionableKafkaInputOperator.java

/**
 * Check whether all the statistics within the windows break the upper-bound hard limit in msgs/s or bytes/s.
 *
 * @param kmss the KafkaMeterStats collected within the windows
 * @return true if all the statistics within the windows break the upper-bound hard limit in msgs/s or bytes/s
 */
private boolean breakHardConstraint(List<KafkaMeterStats> kmss) {
    // Only care about the KafkaMeterStats

    // if there is no kafka meter stats at all, don't repartition
    if (kmss == null || kmss.size() == 0) {
        return false;
    }
    // if all the stats within the window have msgs/s above the upper bound threshold (hard limit)
    boolean needRP = Iterators.all(kmss.iterator(), new Predicate<KafkaMeterStats>() {
        @Override
        public boolean apply(KafkaMeterStats kms) {
            // If there are more than 1 kafka partition and the total msg/s reach the limit
            return kms.partitionStats.size() > 1 && kms.totalMsgPerSec > msgRateUpperBound;
        }
    });

    // or all the stats within the window have bytes/s above the upper bound threshold (hard limit)
    needRP = needRP || Iterators.all(kmss.iterator(), new Predicate<KafkaMeterStats>() {
        @Override
        public boolean apply(KafkaMeterStats kms) {
            //If there are more than 1 kafka partition and the total bytes/s reach the limit
            return kms.partitionStats.size() > 1 && kms.totalBytesPerSec > byteRateUpperBound;
        }
    });

    return needRP;

}

From source file:org.eclipse.emf.compare.ide.ui.internal.structuremergeviewer.EMFCompareStructureMergeViewer.java

private boolean hasOnlyPseudoConflicts(List<Diff> differences) {
    return Iterators.all(differences.iterator(), EMFComparePredicates.hasConflict(ConflictKind.PSEUDO));
}