Usage examples for org.apache.commons.collections.PredicateUtils.notNullPredicate()
public static Predicate notNullPredicate()
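notNullPredicate() returns a Predicate whose evaluate(Object) method is true for any non-null input and false for null. The examples below all use it in one of two ways: wrapping an Iterator in a FilterIterator so nulls are skipped lazily, or removing nulls from a Collection in place via CollectionUtils.filter. As a starting point, a minimal self-contained sketch (the class name NotNullPredicateDemo is ours, for illustration):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections.Predicate;
import org.apache.commons.collections.PredicateUtils;

public class NotNullPredicateDemo {
    public static void main(String[] args) {
        Predicate notNull = PredicateUtils.notNullPredicate();

        // The predicate is true for any non-null input, false for null.
        System.out.println(notNull.evaluate("value")); // true
        System.out.println(notNull.evaluate(null));    // false

        // In-place style: strip nulls from a mutable collection.
        List<String> values = new ArrayList<String>(Arrays.asList("a", null, "b"));
        CollectionUtils.filter(values, notNull);
        System.out.println(values); // [a, b]
    }
}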
From source file: org.apache.cassandra.db.compaction.CompactionManager.java
/**
 * For internal use and testing only. The rest of the system should go through the submit*
 * methods, which are properly serialized.
 */
int doCompactionWithoutSizeEstimation(ColumnFamilyStore cfs, Collection<SSTableReader> sstables,
        int gcBefore, String compactionFileLocation, boolean forceDeserialize) throws IOException {
    // The collection of sstables passed may be empty (but not null); even if
    // it is not empty, it may compact down to nothing if all rows are deleted.
    assert sstables != null;

    Table table = cfs.table;
    if (DatabaseDescriptor.isSnapshotBeforeCompaction())
        table.snapshot(System.currentTimeMillis() + "-" + "compact-" + cfs.columnFamily);

    // sanity check: all sstables must belong to the same cfs
    for (SSTableReader sstable : sstables)
        assert sstable.descriptor.cfname.equals(cfs.columnFamily);

    CompactionController controller = new CompactionController(cfs, sstables, gcBefore, forceDeserialize);
    // new sstables from flush can be added during a compaction, but only the compaction can remove them,
    // so in our single-threaded compaction world this is a valid way of determining if we're compacting
    // all the sstables (that existed when we started)
    CompactionType type = controller.isMajor() ? CompactionType.MAJOR : CompactionType.MINOR;
    logger.info("Compacting {}: {}", type, sstables);

    long startTime = System.currentTimeMillis();
    long totalkeysWritten = 0;

    // TODO the int cast here is potentially buggy
    int expectedBloomFilterSize = Math.max(DatabaseDescriptor.getIndexInterval(),
            (int) SSTableReader.getApproximateKeyCount(sstables));
    if (logger.isDebugEnabled())
        logger.debug("Expected bloom filter size : " + expectedBloomFilterSize);

    SSTableWriter writer;
    CompactionIterator ci = new CompactionIterator(type, sstables, controller); // retain a handle so we can call close()
    Iterator<AbstractCompactedRow> nni = new FilterIterator(ci, PredicateUtils.notNullPredicate());
    Map<DecoratedKey, Long> cachedKeys = new HashMap<DecoratedKey, Long>();

    executor.beginCompaction(ci);
    try {
        if (!nni.hasNext()) {
            // don't mark compacted in the finally block, since if there _is_ nondeleted data,
            // we need to sync it (via closeAndOpen) first, so there is no period during which
            // a crash could cause data loss.
            cfs.markCompacted(sstables);
            return 0;
        }

        writer = cfs.createCompactionWriter(expectedBloomFilterSize, compactionFileLocation, sstables);
        while (nni.hasNext()) {
            AbstractCompactedRow row = nni.next();
            if (row.isEmpty())
                continue;

            long position = writer.append(row);
            totalkeysWritten++;

            if (DatabaseDescriptor.getPreheatKeyCache()) {
                for (SSTableReader sstable : sstables) {
                    if (sstable.getCachedPosition(row.key) != null) {
                        cachedKeys.put(row.key, position);
                        break;
                    }
                }
            }
        }
    } finally {
        ci.close();
        executor.finishCompaction(ci);
    }

    SSTableReader ssTable = writer.closeAndOpenReader(getMaxDataAge(sstables));
    cfs.replaceCompactedSSTables(sstables, Arrays.asList(ssTable));
    for (Entry<DecoratedKey, Long> entry : cachedKeys.entrySet()) // empty if preheat is off
        ssTable.cacheKey(entry.getKey(), entry.getValue());
    submitMinorIfNeeded(cfs);

    long dTime = System.currentTimeMillis() - startTime;
    long startsize = SSTable.getTotalBytes(sstables);
    long endsize = ssTable.length();
    double ratio = (double) endsize / (double) startsize;
    logger.info(String.format("Compacted to %s. %,d to %,d (~%d%% of original) bytes for %,d keys. Time: %,dms.",
            writer.getFilename(), startsize, endsize, (int) (ratio * 100), totalkeysWritten, dTime));
    return sstables.size();
}
From source file: org.apache.cassandra.db.compaction.CompactionManager.java
/**
 * Performs a readonly "compaction" of all sstables in order to validate complete rows,
 * but without writing the merge result
 */
private void doValidationCompaction(ColumnFamilyStore cfs, AntiEntropyService.Validator validator)
        throws IOException {
    // flush first so everyone is validating data that is as similar as possible
    try {
        StorageService.instance.forceTableFlush(cfs.table.name, cfs.getColumnFamilyName());
    } catch (ExecutionException e) {
        throw new IOException(e);
    } catch (InterruptedException e) {
        throw new AssertionError(e);
    }

    CompactionIterator ci = new ValidationCompactionIterator(cfs, validator.request.range);
    validationExecutor.beginCompaction(ci);
    try {
        Iterator<AbstractCompactedRow> nni = new FilterIterator(ci, PredicateUtils.notNullPredicate());

        // validate the CF as we iterate over it
        validator.prepare(cfs);
        while (nni.hasNext()) {
            AbstractCompactedRow row = nni.next();
            validator.add(row);
        }
        validator.complete();
    } finally {
        ci.close();
        validationExecutor.finishCompaction(ci);
    }
}
From source file: org.apache.cassandra.db.CompactionManager.java
/**
 * For internal use and testing only. The rest of the system should go through the submit*
 * methods, which are properly serialized.
 */
int doCompactionWithoutSizeEstimation(ColumnFamilyStore cfs, Collection<SSTableReader> sstables,
        int gcBefore, String compactionFileLocation) throws IOException {
    // The collection of sstables passed may be empty (but not null); even if
    // it is not empty, it may compact down to nothing if all rows are deleted.
    assert sstables != null;

    Table table = cfs.table;
    if (DatabaseDescriptor.isSnapshotBeforeCompaction())
        table.snapshot(System.currentTimeMillis() + "-" + "compact-" + cfs.columnFamily);

    // sanity check: all sstables must belong to the same cfs
    for (SSTableReader sstable : sstables)
        assert sstable.descriptor.cfname.equals(cfs.columnFamily);

    // new sstables from flush can be added during a compaction, but only the compaction can remove them,
    // so in our single-threaded compaction world this is a valid way of determining if we're compacting
    // all the sstables (that existed when we started)
    boolean major = cfs.isCompleteSSTables(sstables);
    CompactionType type = major ? CompactionType.MAJOR : CompactionType.MINOR;
    logger.info("Compacting {}: {}", type, sstables);

    long startTime = System.currentTimeMillis();
    long totalkeysWritten = 0;

    // TODO the int cast here is potentially buggy
    int expectedBloomFilterSize = Math.max(DatabaseDescriptor.getIndexInterval(),
            (int) SSTableReader.getApproximateKeyCount(sstables));
    if (logger.isDebugEnabled())
        logger.debug("Expected bloom filter size : " + expectedBloomFilterSize);

    SSTableWriter writer;
    CompactionController controller = new CompactionController(cfs, sstables, major, gcBefore, false);
    CompactionIterator ci = new CompactionIterator(type, sstables, controller); // retain a handle so we can call close()
    Iterator<AbstractCompactedRow> nni = new FilterIterator(ci, PredicateUtils.notNullPredicate());
    Map<DecoratedKey, Long> cachedKeys = new HashMap<DecoratedKey, Long>();

    executor.beginCompaction(ci);
    try {
        if (!nni.hasNext()) {
            // don't mark compacted in the finally block, since if there _is_ nondeleted data,
            // we need to sync it (via closeAndOpen) first, so there is no period during which
            // a crash could cause data loss.
            cfs.markCompacted(sstables);
            return 0;
        }

        writer = cfs.createCompactionWriter(expectedBloomFilterSize, compactionFileLocation, sstables);
        while (nni.hasNext()) {
            AbstractCompactedRow row = nni.next();
            long position = writer.append(row);
            totalkeysWritten++;

            if (DatabaseDescriptor.getPreheatKeyCache()) {
                for (SSTableReader sstable : sstables) {
                    if (sstable.getCachedPosition(row.key) != null) {
                        cachedKeys.put(row.key, position);
                        break;
                    }
                }
            }
        }
    } finally {
        ci.close();
        executor.finishCompaction(ci);
    }

    SSTableReader ssTable = writer.closeAndOpenReader(getMaxDataAge(sstables));
    cfs.replaceCompactedSSTables(sstables, Arrays.asList(ssTable));
    for (Entry<DecoratedKey, Long> entry : cachedKeys.entrySet()) // empty if preheat is off
        ssTable.cacheKey(entry.getKey(), entry.getValue());
    submitMinorIfNeeded(cfs);

    long dTime = System.currentTimeMillis() - startTime;
    long startsize = SSTable.getTotalBytes(sstables);
    long endsize = ssTable.length();
    double ratio = (double) endsize / (double) startsize;
    logger.info(String.format("Compacted to %s. %,d to %,d (~%d%% of original) bytes for %,d keys. Time: %,dms.",
            writer.getFilename(), startsize, endsize, (int) (ratio * 100), totalkeysWritten, dTime));
    return sstables.size();
}
From source file: org.apache.cassandra.db.CompactionManager.java
/**
 * Performs a readonly "compaction" of all sstables in order to validate complete rows,
 * but without writing the merge result
 */
private void doValidationCompaction(ColumnFamilyStore cfs, AntiEntropyService.Validator validator)
        throws IOException {
    // flush first so everyone is validating data that is as similar as possible
    try {
        StorageService.instance.forceTableFlush(cfs.table.name, cfs.getColumnFamilyName());
    } catch (ExecutionException e) {
        throw new IOException(e);
    } catch (InterruptedException e) {
        throw new AssertionError(e);
    }

    CompactionIterator ci = new ValidationCompactionIterator(cfs, validator.request.range);
    executor.beginCompaction(ci);
    try {
        Iterator<AbstractCompactedRow> nni = new FilterIterator(ci, PredicateUtils.notNullPredicate());

        // validate the CF as we iterate over it
        validator.prepare(cfs);
        while (nni.hasNext()) {
            AbstractCompactedRow row = nni.next();
            validator.add(row);
        }
        validator.complete();
    } finally {
        ci.close();
        executor.finishCompaction(ci);
    }
}
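All four Cassandra snippets above use the same idiom: the compaction iterator may hand back null for rows that compacted away, so it is wrapped in a FilterIterator with notNullPredicate() and the loop body never has to test for null. The same wrapping in isolation, with a plain list standing in for the compaction source (the class name FilterIteratorDemo is ours, for illustration):

import java.util.Arrays;
import java.util.Iterator;

import org.apache.commons.collections.PredicateUtils;
import org.apache.commons.collections.iterators.FilterIterator;

public class FilterIteratorDemo {
    public static void main(String[] args) {
        // A source iterator that may yield nulls (a fixed list here for illustration).
        Iterator<String> source = Arrays.asList("row1", null, "row2").iterator();

        // Wrap it so hasNext()/next() silently skip the nulls.
        Iterator<String> nonNull = new FilterIterator(source, PredicateUtils.notNullPredicate());
        while (nonNull.hasNext()) {
            System.out.println(nonNull.next()); // prints row1, then row2
        }
    }
}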
From source file: org.apache.sling.contextaware.config.resource.impl.def.DefaultConfigurationResourceResolvingStrategy.java
/**
 * Searches the resource hierarchy upwards for all config references and returns them.
 * @param startResource Resource to start searching
 */
@SuppressWarnings("unchecked")
private Iterator<String> findConfigRefs(final Resource startResource) {
    Iterator<Resource> contextResources = contextPathStrategy.findContextResources(startResource);
    // get the config resource path for each context resource, filtering out items where no reference could be resolved
    return new FilterIterator(new TransformIterator(contextResources, new Transformer() {
        @Override
        public Object transform(Object input) {
            return getReference((Resource) input);
        }
    }), PredicateUtils.notNullPredicate());
}
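This example composes two iterator decorators: TransformIterator maps each context Resource to its config reference (which may be null), and FilterIterator with notNullPredicate() then drops the unresolved entries. The same composition in isolation, with a hypothetical string-based transformer standing in for getReference() (class and path names are ours, for illustration):

import java.util.Arrays;
import java.util.Iterator;

import org.apache.commons.collections.PredicateUtils;
import org.apache.commons.collections.Transformer;
import org.apache.commons.collections.iterators.FilterIterator;
import org.apache.commons.collections.iterators.TransformIterator;

public class TransformThenFilterDemo {
    public static void main(String[] args) {
        Iterator<String> source = Arrays.asList("config", "", "resource").iterator();

        // Map each element; return null when nothing can be resolved.
        Transformer resolve = new Transformer() {
            @Override
            public Object transform(Object input) {
                String s = (String) input;
                return s.isEmpty() ? null : "/conf/" + s; // hypothetical mapping
            }
        };

        // Transform first, then drop the nulls the transformer produced.
        Iterator<?> refs = new FilterIterator(new TransformIterator(source, resolve),
                PredicateUtils.notNullPredicate());
        while (refs.hasNext()) {
            System.out.println(refs.next()); // /conf/config, /conf/resource
        }
    }
}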
From source file: org.brushingbits.jnap.common.bean.cloning.BeanCloner.java
private Collection<?> cloneCollection(Collection<?> collection, Class<?> type) {
    Collection<Object> collectionCopy = (Collection<Object>) BeanUtils.instantiate(type);
    for (Object item : collection) {
        collectionCopy.add(clone(item));
    }
    CollectionUtils.filter(collectionCopy, PredicateUtils.notNullPredicate());
    if (collectionCopy.isEmpty()) {
        collectionCopy = null;
    }
    return collectionCopy;
}
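Here the predicate drives CollectionUtils.filter, which removes rejected elements in place through the collection's own iterator. That only works on fully mutable collections, which is why cloneCollection first builds a fresh copy via BeanUtils.instantiate before filtering. A short sketch of the failure mode on a fixed-size list (the class name is ours, for illustration):

import java.util.Arrays;
import java.util.List;

import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections.PredicateUtils;

public class InPlaceFilterCaveat {
    public static void main(String[] args) {
        // CollectionUtils.filter removes elements via Iterator.remove(), so the
        // target must support removal. A fixed-size view does not:
        List<String> fixed = Arrays.asList("a", null, "b");
        try {
            CollectionUtils.filter(fixed, PredicateUtils.notNullPredicate());
        } catch (UnsupportedOperationException e) {
            System.out.println("cannot filter a fixed-size list in place");
        }
    }
}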