Example usage for org.apache.commons.collections PredicateUtils notNullPredicate

Introduction

This page lists example usages of org.apache.commons.collections.PredicateUtils.notNullPredicate().

Prototype

public static Predicate notNullPredicate() 

Document

Gets a Predicate that checks if the input object passed in is not null.
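
For quick orientation, here is a minimal, self-contained sketch of the documented behavior; the class name NotNullPredicateExample is illustrative and not taken from any of the projects below.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections.Predicate;
import org.apache.commons.collections.PredicateUtils;

public class NotNullPredicateExample {

    public static void main(String[] args) {
        // The predicate evaluates to true only for non-null input.
        Predicate notNull = PredicateUtils.notNullPredicate();
        System.out.println(notNull.evaluate("value")); // true
        System.out.println(notNull.evaluate(null));    // false

        // Common pattern in the usages below: strip null entries from a collection in place.
        List<String> names = new ArrayList<String>(Arrays.asList("alice", null, "bob", null));
        CollectionUtils.filter(names, PredicateUtils.notNullPredicate());
        System.out.println(names); // [alice, bob]
    }
}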

Usage

From source file:com.jxt.web.service.AgentInfoServiceImpl.java

@Override
public Set<AgentInfo> getAgentsByApplicationNameWithoutStatus(String applicationName, long timestamp) {
    if (applicationName == null) {
        throw new NullPointerException("applicationName must not be null");
    }
    if (timestamp < 0) {
        throw new IllegalArgumentException("timestamp must not be less than 0");
    }

    List<String> agentIds = this.applicationIndexDao.selectAgentIds(applicationName);
    List<AgentInfo> agentInfos = this.agentInfoDao.getAgentInfos(agentIds, timestamp);
    CollectionUtils.filter(agentInfos, PredicateUtils.notNullPredicate());
    if (CollectionUtils.isEmpty(agentInfos)) {
        return Collections.emptySet();
    }
    return new HashSet<>(agentInfos);
}

From source file:io.wcm.config.core.persistence.impl.ToolsConfigPagePersistenceProvider.java

@SuppressWarnings("unchecked")
private Iterator<Resource> getResourceInheritanceChainInternal(final String configName,
        final Iterator<String> paths, final ResourceResolver resourceResolver) {

    // find all matching items among all configured paths
    Iterator<Resource> matchingResources = IteratorUtils.transformedIterator(paths, new Transformer() {
        @Override
        public Object transform(Object input) {
            String configPath = buildResourcePath((String) input, configName);
            Resource resource = resourceResolver.getResource(configPath);
            if (resource != null) {
                log.trace("+ Found matching config resource for inheritance chain: {}", configPath);
            } else {
                log.trace("- No matching config resource for inheritance chain: {}", configPath);
            }
            return resource;
        }
    });
    Iterator<Resource> result = IteratorUtils.filteredIterator(matchingResources,
            PredicateUtils.notNullPredicate());
    if (result.hasNext()) {
        return result;
    } else {
        return null;
    }
}

From source file:eu.europa.ec.fisheries.uvms.rules.service.business.AbstractFact.java

/**
 * Checks whether any of the String... array elements matches the schemeId of an entry in the idTypes list.
 *
 * @param idTypes the id types whose schemeIds are inspected (null entries are filtered out first)
 * @param values  the schemeId values to look for
 * @return false if any value matches a schemeId in the list, true otherwise
 */
public boolean schemeIdContainsAny(List<IdType> idTypes, String... values) {
    if (values == null || values.length == 0 || CollectionUtils.isEmpty(idTypes)) {
        return true;
    }

    idTypes = new ArrayList<>(idTypes);
    CollectionUtils.filter(idTypes, PredicateUtils.notNullPredicate());

    for (String val : values) {
        for (IdType idType : idTypes) {
            if (val.equals(idType.getSchemeId())) {
                return false;
            }
        }
    }
    return true;
}

From source file:eu.europa.ec.fisheries.uvms.rules.service.business.AbstractFact.java

public boolean isAllSchemeIdsPresent(List<IdType> idTypes) {
    if (CollectionUtils.isEmpty(idTypes)) {
        return false;
    }

    idTypes = new ArrayList<>(idTypes);
    CollectionUtils.filter(idTypes, PredicateUtils.notNullPredicate());

    for (IdType idType : idTypes) {
        if (!isSchemeIdPresent(idType)) {
            return true;
        }
    }

    return false;
}

From source file:io.wcm.config.core.persistence.impl.ToolsConfigPagePersistenceProvider.java

@SuppressWarnings("unchecked")
private Iterator<String> findConfigRefs(Resource startResource) {
    // collect all context path resources without config ref, and expand to config page path
    Iterator<ContextResource> contextResources = contextPathStrategy.findContextResources(startResource);
    return new FilterIterator(new TransformIterator(contextResources, new Transformer() {
        @Override
        public Object transform(Object input) {
            ContextResource contextResource = (ContextResource) input;
            if (contextResource.getConfigRef() == null) {
                String configPath = getConfigPagePath(contextResource.getResource().getPath());
                log.trace("+ Found reference for context path {}: {}", contextResource.getResource().getPath(),
                        configPath);
                return configPath;
            }
            return null;
        }
    }), PredicateUtils.notNullPredicate());
}

From source file:io.wcm.caconfig.extensions.persistence.impl.ToolsConfigPagePersistenceStrategy.java

/**
 * Searches the resource hierarchy upwards for all config references and returns them.
 */
@SuppressWarnings("unchecked")
private Iterator<String> findConfigRefs(@NotNull final Resource startResource,
        @NotNull final Collection<String> bucketNames) {

    // collect all context path resources (but filter out those without config reference)
    final Iterator<ContextResource> contextResources = new FilterIterator(
            contextPathStrategy.findContextResources(startResource), new Predicate() {
                @Override
                public boolean evaluate(Object object) {
                    ContextResource contextResource = (ContextResource) object;
                    return StringUtils.isNotBlank(contextResource.getConfigRef());
                }
            });

    // get config resource path for each context resource, filter out items where no reference could be resolved
    final Iterator<String> configPaths = new TransformIterator(contextResources, new Transformer() {
        @Override
        public Object transform(Object input) {
            final ContextResource contextResource = (ContextResource) input;
            String val = checkPath(contextResource, contextResource.getConfigRef(), bucketNames);
            if (val != null) {
                log.trace("+ Found reference for context path {}: {}", contextResource.getResource().getPath(),
                        val);
            }
            return val;
        }
    });
    return new FilterIterator(configPaths, PredicateUtils.notNullPredicate());
}

From source file:com.bigdata.dastor.db.CompactionManager.java

/**
 * For internal use and testing only.  The rest of the system should go through the submit* methods,
 * which are properly serialized.
 */
int doCompaction(ColumnFamilyStore cfs, Collection<SSTableReader> sstables, int gcBefore) throws IOException {
    // The collection of sstables passed may be empty (but not null); even if
    // it is not empty, it may compact down to nothing if all rows are deleted.
    Table table = cfs.getTable();
    if (DatabaseDescriptor.isSnapshotBeforeCompaction())
        table.snapshot("compact-" + cfs.columnFamily_);
    logger.info("Compacting [" + StringUtils.join(sstables, ",") + "]");
    String compactionFileLocation = table.getDataFileLocation(cfs.getExpectedCompactedFileSize(sstables));
    // If the compaction file path is null that means we have no space left for this compaction.
    // try again w/o the largest one.
    List<SSTableReader> smallerSSTables = new ArrayList<SSTableReader>(sstables);
    while (compactionFileLocation == null && smallerSSTables.size() > 1) {
        logger.warn(
                "insufficient space to compact all requested files " + StringUtils.join(smallerSSTables, ", "));
        smallerSSTables.remove(cfs.getMaxSizeFile(smallerSSTables));
        compactionFileLocation = table.getDataFileLocation(cfs.getExpectedCompactedFileSize(smallerSSTables));
    }
    if (compactionFileLocation == null) {
        logger.error("insufficient space to compact even the two smallest files, aborting");
        return 0;
    }
    sstables = smallerSSTables;

    // new sstables from flush can be added during a compaction, but only the compaction can remove them,
    // so in our single-threaded compaction world this is a valid way of determining if we're compacting
    // all the sstables (that existed when we started)
    boolean major = cfs.isCompleteSSTables(sstables);

    long startTime = System.currentTimeMillis();
    long totalkeysWritten = 0;

    // TODO the int cast here is potentially buggy
    int expectedBloomFilterSize = Math.max(DatabaseDescriptor.getIndexInterval(),
            (int) SSTableReader.getApproximateKeyCount(sstables));
    if (logger.isDebugEnabled())
        logger.debug("Expected bloom filter size : " + expectedBloomFilterSize);

    SSTableWriter writer;
    CompactionIterator ci = new CompactionIterator(cfs, sstables, gcBefore, major); // retain a handle so we can call close()
    Iterator<CompactionIterator.CompactedRow> nni = new FilterIterator(ci, PredicateUtils.notNullPredicate());
    getExecutor(cfs).beginCompaction(cfs, ci);

    try {
        if (!nni.hasNext()) {
            // don't mark compacted in the finally block, since if there _is_ nondeleted data,
            // we need to sync it (via closeAndOpen) first, so there is no period during which
            // a crash could cause data loss.
            cfs.markCompacted(sstables);
            return 0;
        }

        String newFilename = new File(compactionFileLocation, cfs.getTempSSTableFileName()).getAbsolutePath();
        writer = new SSTableWriter(newFilename, expectedBloomFilterSize, StorageService.getPartitioner());
        while (nni.hasNext()) {
            CompactionIterator.CompactedRow row = nni.next();
            long prevpos = writer.getFilePointer();

            writer.append(row.key, row.headerBuffer, row.buffer);
            totalkeysWritten++;

            long rowsize = writer.getFilePointer() - prevpos;
            if (rowsize > DatabaseDescriptor.getRowWarningThreshold())
                logger.warn("Large row " + row.key.key + " in " + cfs.getColumnFamilyName() + " " + rowsize
                        + " bytes");
            cfs.addToCompactedRowStats(rowsize);
        }
    } finally {
        ci.close();
    }

    SSTableReader ssTable = writer.closeAndOpenReader();
    cfs.replaceCompactedSSTables(sstables, Arrays.asList(ssTable));
    submitMinorIfNeeded(cfs);

    String format = "Compacted to %s.  %d/%d bytes for %d keys.  Time: %dms.";
    long dTime = System.currentTimeMillis() - startTime;
    logger.info(String.format(format, writer.getFilename(), SSTable.getTotalBytes(sstables), ssTable.length(),
            totalkeysWritten, dTime));
    return sstables.size();
}

From source file:io.wcm.caconfig.extensions.persistence.impl.ToolsConfigPagePersistenceStrategy.java

@SuppressWarnings("unchecked")
private Iterator<Resource> getResourceInheritanceChainInternal(final Collection<String> bucketNames,
        final String configName, final Iterator<String> paths, final ResourceResolver resourceResolver) {

    // find all matching items among all configured paths
    Iterator<Resource> matchingResources = IteratorUtils.transformedIterator(paths, new Transformer() {

        @Override
        public Object transform(Object input) {
            String path = (String) input;
            for (String bucketName : bucketNames) {
                final String name = bucketName + "/" + configName;
                final String configPath = buildResourcePath(path, name);
                Resource resource = resourceResolver.getResource(configPath);
                if (resource != null) {
                    log.trace("+ Found matching config resource for inheritance chain: {}", configPath);
                    return resource;
                } else {
                    log.trace("- No matching config resource for inheritance chain: {}", configPath);
                }
            }
            return null;
        }
    });
    Iterator<Resource> result = IteratorUtils.filteredIterator(matchingResources,
            PredicateUtils.notNullPredicate());
    if (result.hasNext()) {
        return result;
    }
    return null;
}

From source file:com.bigdata.dastor.db.CompactionManager.java

/**
 * Performs the anti-compaction process: it writes out the file containing the keys that belong to a given range.
 * If the target is not specified, it writes out the file as a compacted file with the unnecessary ranges wiped out.
 *
 * @param cfs
 * @param sstables
 * @param ranges
 * @param target
 * @return
 * @throws java.io.IOException
 */
private List<SSTableReader> doAntiCompaction(ColumnFamilyStore cfs, Collection<SSTableReader> sstables,
        Collection<Range> ranges, InetAddress target) throws IOException {
    Table table = cfs.getTable();
    logger.info("AntiCompacting [" + StringUtils.join(sstables, ",") + "]");
    // Calculate the expected compacted filesize
    long expectedRangeFileSize = cfs.getExpectedCompactedFileSize(sstables) / 2;
    String compactionFileLocation = table.getDataFileLocation(expectedRangeFileSize);
    if (compactionFileLocation == null) {
        throw new UnsupportedOperationException("disk full");
    }
    if (target != null) {
        // compacting for streaming: send to subdirectory
        compactionFileLocation = compactionFileLocation + File.separator + DatabaseDescriptor.STREAMING_SUBDIR;
    }
    List<SSTableReader> results = new ArrayList<SSTableReader>();

    long startTime = System.currentTimeMillis();
    long totalkeysWritten = 0;

    int expectedBloomFilterSize = Math.max(DatabaseDescriptor.getIndexInterval(),
            (int) (SSTableReader.getApproximateKeyCount(sstables) / 2));
    if (logger.isDebugEnabled())
        logger.debug("Expected bloom filter size : " + expectedBloomFilterSize);

    SSTableWriter writer = null;
    CompactionIterator ci = new AntiCompactionIterator(cfs, sstables, ranges, getDefaultGCBefore(),
            cfs.isCompleteSSTables(sstables));
    Iterator<CompactionIterator.CompactedRow> nni = new FilterIterator(ci, PredicateUtils.notNullPredicate());
    getExecutor(cfs).beginCompaction(cfs, ci);

    try {
        if (!nni.hasNext()) {
            return results;
        }

        while (nni.hasNext()) {
            CompactionIterator.CompactedRow row = nni.next();
            if (writer == null) {
                FileUtils.createDirectory(compactionFileLocation);
                String newFilename = new File(compactionFileLocation, cfs.getTempSSTableFileName())
                        .getAbsolutePath();
                writer = new SSTableWriter(newFilename, expectedBloomFilterSize,
                        StorageService.getPartitioner());
            }
            writer.append(row.key, row.headerBuffer, row.buffer);
            totalkeysWritten++;
        }
    } finally {
        ci.close();
    }

    if (writer != null) {
        results.add(writer.closeAndOpenReader());
        String format = "AntiCompacted to %s.  %d/%d bytes for %d keys.  Time: %dms.";
        long dTime = System.currentTimeMillis() - startTime;
        logger.info(String.format(format, writer.getFilename(), SSTable.getTotalBytes(sstables),
                results.get(0).length(), totalkeysWritten, dTime));
    }

    return results;
}

From source file:com.bigdata.dastor.db.CompactionManager.java

/**
 * Performs a readonly "compaction" of all sstables in order to validate complete rows,
 * but without writing the merge result.
 */
private void doValidationCompaction(ColumnFamilyStore cfs, AntiEntropyService.Validator validator)
        throws IOException {
    Collection<SSTableReader> sstables = cfs.getSSTables();
    CompactionIterator ci = new CompactionIterator(cfs, sstables, getDefaultGCBefore(), true);
    getExecutor(cfs).beginCompaction(cfs, ci);
    try {
        Iterator<CompactionIterator.CompactedRow> nni = new FilterIterator(ci,
                PredicateUtils.notNullPredicate());

        // validate the CF as we iterate over it
        validator.prepare(cfs);
        while (nni.hasNext()) {
            CompactionIterator.CompactedRow row = nni.next();
            validator.add(row);
        }
        validator.complete();
    } finally {
        ci.close();
    }
}