List of usage examples for com.google.common.collect.Iterators.asEnumeration
public static <T> Enumeration<T> asEnumeration(final Iterator<T> iterator)
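Before the real-world examples below, a minimal self-contained sketch of the basic usage pattern may help; it assumes only Guava's Iterators and the JDK classes shown, and the class and variable names are illustrative only:

import com.google.common.collect.Iterators;

import java.util.Arrays;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.List;

public class AsEnumerationExample {
    public static void main(String[] args) {
        // Any Iterator can be adapted to the legacy Enumeration interface.
        List<String> names = Arrays.asList("alpha", "beta", "gamma");
        Iterator<String> iterator = names.iterator();
        Enumeration<String> enumeration = Iterators.asEnumeration(iterator);

        // The Enumeration is a view over the Iterator: advancing one advances the other.
        while (enumeration.hasMoreElements()) {
            System.out.println(enumeration.nextElement());
        }
    }
}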
From source file:org.liveSense.core.service.DynamicClassLoader.java
/**
 * @see java.lang.ClassLoader#getResources(java.lang.String)
 */
@Override
@SuppressWarnings("unchecked")
public Enumeration<URL> getResources(final String name) throws IOException {
    return Iterators.asEnumeration(Iterators.concat(
            new EnumerationIterator<URL>(super.getResources(name)),
            new EnumerationIterator<URL>(getResourcesImpl(name))));
}
From source file:ezbake.common.http.request.Request.java
@Override
public Enumeration getAttributeNames() {
    return Iterators.asEnumeration(attributes.keySet().iterator());
}
From source file:com.facebook.presto.server.PluginClassLoader.java
@Override
public Enumeration<URL> getResources(String name) throws IOException {
    List<Iterator<URL>> resources = new ArrayList<>();

    // If this is not a parent first resource, add resources from local urls first
    if (!isParentFirstResource(name)) {
        Iterator<URL> myResources = Iterators.forEnumeration(findResources(name));
        resources.add(myResources);
    }

    // Add parent resources
    if (!isHiddenResource(name)) {
        Iterator<URL> parentResources = Iterators.forEnumeration(getParent().getResources(name));
        resources.add(parentResources);
    }

    // If this is a parent first resource, now add resources from local urls
    if (isParentFirstResource(name)) {
        Iterator<URL> myResources = Iterators.forEnumeration(findResources(name));
        resources.add(myResources);
    }

    return Iterators.asEnumeration(Iterators.concat(resources.iterator()));
}
From source file:com.nesscomputing.tracking.MockedHttpServletRequest.java
@Override
public Enumeration<String> getHeaders(String name) {
    final List<String> headerValues = headers.get(name);
    return headerValues == null ? null : Iterators.asEnumeration(headerValues.iterator());
}
From source file:com.nesscomputing.tracking.MockedHttpServletRequest.java
@Override
public Enumeration<String> getHeaderNames() {
    return Iterators.asEnumeration(headers.keySet().iterator());
}
From source file:ezbake.common.http.request.Request.java
@Override
public Enumeration getParameterNames() {
    return Iterators.asEnumeration(parameters.keySet().iterator());
}
From source file:com.eucalyptus.auth.GroupEntity.java
@Override
public Enumeration<? extends Principal> members() {
    return Iterators.asEnumeration(this.userList.iterator());
}
From source file:io.pravega.service.server.logs.OperationLogTestBase.java
/**
 * Given a list of Log Operations, generates an InputStream for each encountered StreamSegment that contains the final
 * contents of that StreamSegment. Only considers operations of type StreamSegmentAppendOperation and MergeTransactionOperation.
 */
private AbstractMap<Long, InputStream> getExpectedContents(Collection<OperationWithCompletion> operations) {
    HashMap<Long, List<ByteArrayInputStream>> partialContents = new HashMap<>();
    for (OperationWithCompletion o : operations) {
        Assert.assertTrue("Operation is not completed.", o.completion.isDone());
        if (o.completion.isCompletedExceptionally()) {
            // This is a failed operation; ignore it.
            continue;
        }

        if (o.operation instanceof StreamSegmentAppendOperation) {
            StreamSegmentAppendOperation appendOperation = (StreamSegmentAppendOperation) o.operation;
            List<ByteArrayInputStream> segmentContents = partialContents.get(appendOperation.getStreamSegmentId());
            if (segmentContents == null) {
                segmentContents = new ArrayList<>();
                partialContents.put(appendOperation.getStreamSegmentId(), segmentContents);
            }

            segmentContents.add(new ByteArrayInputStream(appendOperation.getData()));
        } else if (o.operation instanceof MergeTransactionOperation) {
            MergeTransactionOperation mergeOperation = (MergeTransactionOperation) o.operation;
            List<ByteArrayInputStream> targetSegmentContents = partialContents.get(mergeOperation.getStreamSegmentId());
            if (targetSegmentContents == null) {
                targetSegmentContents = new ArrayList<>();
                partialContents.put(mergeOperation.getStreamSegmentId(), targetSegmentContents);
            }

            List<ByteArrayInputStream> sourceSegmentContents = partialContents.get(mergeOperation.getTransactionSegmentId());
            targetSegmentContents.addAll(sourceSegmentContents);
            partialContents.remove(mergeOperation.getTransactionSegmentId());
        }
    }

    // Construct final result.
    HashMap<Long, InputStream> result = new HashMap<>();
    for (Map.Entry<Long, List<ByteArrayInputStream>> e : partialContents.entrySet()) {
        result.put(e.getKey(), new SequenceInputStream(Iterators.asEnumeration(e.getValue().iterator())));
    }

    return result;
}
From source file:io.pravega.service.server.reading.StreamSegmentReadIndex.java
/**
 * Reads a contiguous sequence of bytes of the given length starting at the given offset. Every byte in the range
 * must meet the following conditions:
 * <ul>
 * <li> It must exist in this segment. This excludes bytes from merged transactions and future reads.
 * <li> It must be part of data that is not yet committed to Storage (tail part) - as such, it must be fully in the cache.
 * </ul>
 * Note: This method will not cause cache statistics to be updated. As such, Cache entry generations will not be
 * updated for those entries that are touched.
 *
 * @param startOffset The offset in the StreamSegment where to start reading.
 * @param length      The number of bytes to read.
 * @return An InputStream containing the requested data, or null if all of the conditions of this read cannot be met.
 * @throws IllegalStateException    If the read index is in recovery mode.
 * @throws IllegalArgumentException If the parameters are invalid (offset, length or offset+length are not in the Segment's range).
 */
InputStream readDirect(long startOffset, int length) {
    Preconditions.checkArgument(length >= 0, "length must be a non-negative number");
    Preconditions.checkArgument(startOffset >= this.metadata.getStorageLength(),
            "startOffset must refer to an offset beyond the Segment's StorageLength offset.");
    Preconditions.checkArgument(startOffset + length <= this.metadata.getDurableLogLength(),
            "startOffset+length must be less than the length of the Segment.");

    // Get the first entry. This one is trickier because the requested start offset may not fall on an entry boundary.
    CompletableReadResultEntry nextEntry;
    synchronized (this.lock) {
        ReadIndexEntry indexEntry = this.indexEntries.getFloor(startOffset);
        if (indexEntry == null || startOffset > indexEntry.getLastStreamSegmentOffset() || !indexEntry.isDataEntry()) {
            // Data not available or data exist in a partially merged transaction.
            return null;
        } else {
            // Fetch data from the cache for the first entry, but do not update the cache hit stats.
            nextEntry = createMemoryRead(indexEntry, startOffset, length, false);
        }
    }

    // Collect the contents of congruent Index Entries into a list, as long as we still encounter data in the cache.
    // Since we know all entries should be in the cache and are contiguous, there is no need
    assert FutureHelpers.isSuccessful(nextEntry.getContent())
            : "Found CacheReadResultEntry that is not completed yet: " + nextEntry;
    val entryContents = nextEntry.getContent().join();

    ArrayList<InputStream> contents = new ArrayList<>();
    contents.add(entryContents.getData());
    int readLength = entryContents.getLength();
    while (readLength < length) {
        // No need to search the index; from now on, we know each offset we are looking for is at the beginning of a cache entry.
        // Also, no need to acquire the lock there. The cache itself is thread safe, and if the entry we are about to fetch
        // has just been evicted, we'll just get null back and stop reading (which is acceptable).
        byte[] entryData = this.cache.get(new CacheKey(this.metadata.getId(), startOffset + readLength));
        if (entryData == null) {
            // Could not find the 'next' cache entry: this means the requested range is not fully cached.
            return null;
        }

        int entryReadLength = Math.min(entryData.length, length - readLength);
        assert entryReadLength > 0 : "about to have fetched zero bytes from a cache entry";
        contents.add(new ByteArrayInputStream(entryData, 0, entryReadLength));
        readLength += entryReadLength;
    }

    // Coalesce the results into a single InputStream and return the result.
    return new SequenceInputStream(Iterators.asEnumeration(contents.iterator()));
}
From source file:io.pravega.segmentstore.server.reading.StreamSegmentReadIndex.java
/**
 * Reads a contiguous sequence of bytes of the given length starting at the given offset. Every byte in the range
 * must meet the following conditions:
 * <ul>
 * <li> It must exist in this segment. This excludes bytes from merged transactions and future reads.
 * <li> It must be part of data that is not yet committed to Storage (tail part) - as such, it must be fully in the cache.
 * </ul>
 * Note: This method will not cause cache statistics to be updated. As such, Cache entry generations will not be
 * updated for those entries that are touched.
 *
 * @param startOffset The offset in the StreamSegment where to start reading.
 * @param length      The number of bytes to read.
 * @return An InputStream containing the requested data, or null if all of the conditions of this read cannot be met.
 * @throws IllegalStateException    If the read index is in recovery mode.
 * @throws IllegalArgumentException If the parameters are invalid (offset, length or offset+length are not in the Segment's range).
 */
InputStream readDirect(long startOffset, int length) {
    Exceptions.checkNotClosed(this.closed, this);
    Preconditions.checkState(!this.recoveryMode, "StreamSegmentReadIndex is in Recovery Mode.");
    Preconditions.checkArgument(length >= 0, "length must be a non-negative number");
    Preconditions.checkArgument(startOffset >= this.metadata.getStorageLength(),
            "startOffset must refer to an offset beyond the Segment's StorageLength offset.");
    Preconditions.checkArgument(startOffset + length <= this.metadata.getDurableLogLength(),
            "startOffset+length must be less than the length of the Segment.");

    // Get the first entry. This one is trickier because the requested start offset may not fall on an entry boundary.
    CompletableReadResultEntry nextEntry;
    synchronized (this.lock) {
        ReadIndexEntry indexEntry = this.indexEntries.getFloor(startOffset);
        if (indexEntry == null || startOffset > indexEntry.getLastStreamSegmentOffset() || !indexEntry.isDataEntry()) {
            // Data not available or data exist in a partially merged transaction.
            return null;
        } else {
            // Fetch data from the cache for the first entry, but do not update the cache hit stats.
            nextEntry = createMemoryRead(indexEntry, startOffset, length, false);
        }
    }

    // Collect the contents of congruent Index Entries into a list, as long as we still encounter data in the cache.
    // Since we know all entries should be in the cache and are contiguous, there is no need
    assert FutureHelpers.isSuccessful(nextEntry.getContent())
            : "Found CacheReadResultEntry that is not completed yet: " + nextEntry;
    val entryContents = nextEntry.getContent().join();

    ArrayList<InputStream> contents = new ArrayList<>();
    contents.add(entryContents.getData());
    int readLength = entryContents.getLength();
    while (readLength < length) {
        // No need to search the index; from now on, we know each offset we are looking for is at the beginning of a cache entry.
        // Also, no need to acquire the lock there. The cache itself is thread safe, and if the entry we are about to fetch
        // has just been evicted, we'll just get null back and stop reading (which is acceptable).
        byte[] entryData = this.cache.get(new CacheKey(this.metadata.getId(), startOffset + readLength));
        if (entryData == null) {
            // Could not find the 'next' cache entry: this means the requested range is not fully cached.
            return null;
        }

        int entryReadLength = Math.min(entryData.length, length - readLength);
        assert entryReadLength > 0 : "about to have fetched zero bytes from a cache entry";
        contents.add(new ByteArrayInputStream(entryData, 0, entryReadLength));
        readLength += entryReadLength;
    }

    // Coalesce the results into a single InputStream and return the result.
    return new SequenceInputStream(Iterators.asEnumeration(contents.iterator()));
}
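The Pravega examples above repeatedly pair asEnumeration with java.io.SequenceInputStream, whose constructor accepts an Enumeration of streams, to stitch several InputStreams into one. A minimal, self-contained sketch of that idiom follows; the class name and the hypothetical string contents are illustrative only, not taken from any of the source files above:

import com.google.common.collect.Iterators;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.SequenceInputStream;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

public class CoalesceStreamsExample {
    public static void main(String[] args) throws IOException {
        // Hypothetical partial contents, standing in for cache entries or segment fragments.
        List<InputStream> contents = new ArrayList<>();
        contents.add(new ByteArrayInputStream("first ".getBytes(StandardCharsets.UTF_8)));
        contents.add(new ByteArrayInputStream("second ".getBytes(StandardCharsets.UTF_8)));
        contents.add(new ByteArrayInputStream("third".getBytes(StandardCharsets.UTF_8)));

        // SequenceInputStream wants an Enumeration<? extends InputStream>,
        // so asEnumeration bridges the List's Iterator to it.
        try (InputStream combined = new SequenceInputStream(Iterators.asEnumeration(contents.iterator()))) {
            int b;
            while ((b = combined.read()) != -1) {
                System.out.print((char) b);
            }
        }
    }
}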