List of usage examples for com.google.common.collect Iterators unmodifiableIterator
public static <T> UnmodifiableIterator<T> unmodifiableIterator(Iterator<? extends T> iterator)

@Deprecated public static <T> UnmodifiableIterator<T> unmodifiableIterator(UnmodifiableIterator<T> iterator) (deprecated overload: the argument is already unmodifiable and is returned unchanged; the examples below all use the first form)
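Before the source-file examples, here is a minimal, self-contained sketch of the call (the class name and sample data are illustrative, not taken from the sources below): the wrapper delegates hasNext()/next() to the backing iterator, while remove() throws UnsupportedOperationException.

import com.google.common.collect.Iterators;
import com.google.common.collect.UnmodifiableIterator;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class UnmodifiableIteratorDemo {
    public static void main(String[] args) {
        // A mutable backing list; the wrapper restricts only the iterator, not the list.
        List<String> names = new ArrayList<>(Arrays.asList("alpha", "beta", "gamma"));

        // Traversal works exactly as with the wrapped iterator.
        UnmodifiableIterator<String> it = Iterators.unmodifiableIterator(names.iterator());
        while (it.hasNext()) {
            System.out.println(it.next());
        }

        // remove() is disabled on the wrapper and fails at runtime.
        UnmodifiableIterator<String> it2 = Iterators.unmodifiableIterator(names.iterator());
        it2.next();
        try {
            it2.remove();
        } catch (UnsupportedOperationException expected) {
            System.out.println("remove() rejected: " + expected);
        }
    }
}

As the examples below show, the typical use is to return a read-only view of an internal collection's iterator from an accessor, so callers cannot remove elements through it.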
From source file:org.apache.beam.runners.spark.stateful.StateSpecFunctions.java
/**
 * A {@link org.apache.spark.streaming.StateSpec} function to support reading from
 * an {@link UnboundedSource}.
 *
 * <p>This StateSpec function expects the following:
 * <ul>
 * <li>Key: The (partitioned) Source to read from.</li>
 * <li>Value: An optional {@link UnboundedSource.CheckpointMark} to start from.</li>
 * <li>State: A byte representation of the (previously) persisted CheckpointMark.</li>
 * </ul>
 * And returns an iterator over all read values (for the micro-batch).
 *
 * <p>This stateful operation could be described as a flatMap over a single-element stream, which
 * outputs all the elements read from the {@link UnboundedSource} for this micro-batch.
 * Since micro-batches are bounded, the provided UnboundedSource is wrapped by a
 * {@link MicrobatchSource} that applies bounds in the form of duration and max records
 * (per micro-batch).
 *
 * <p>In order to avoid using Spark Guava's classes which pollute the
 * classpath, we use the {@link StateSpec#function(scala.Function3)} signature which employs
 * scala's native {@link scala.Option}, instead of the
 * {@link StateSpec#function(org.apache.spark.api.java.function.Function3)} signature,
 * which employs Guava's {@link com.google.common.base.Optional}.
 *
 * <p>See also <a href="https://issues.apache.org/jira/browse/SPARK-4819">SPARK-4819</a>.</p>
 *
 * @param runtimeContext A serializable {@link SparkRuntimeContext}.
 * @param <T> The type of the input stream elements.
 * @param <CheckpointMarkT> The type of the {@link UnboundedSource.CheckpointMark}.
 * @return The appropriate {@link org.apache.spark.streaming.StateSpec} function.
 */
public static <T, CheckpointMarkT extends UnboundedSource.CheckpointMark>
        scala.Function3<Source<T>, scala.Option<CheckpointMarkT>, State<Tuple2<byte[], Instant>>,
            Tuple2<Iterable<byte[]>, Metadata>> mapSourceFunction(
                final SparkRuntimeContext runtimeContext, final String stepName) {

    return new SerializableFunction3<Source<T>, Option<CheckpointMarkT>,
            State<Tuple2<byte[], Instant>>, Tuple2<Iterable<byte[]>, Metadata>>() {

        @Override
        public Tuple2<Iterable<byte[]>, Metadata> apply(Source<T> source,
                scala.Option<CheckpointMarkT> startCheckpointMark,
                State<Tuple2<byte[], Instant>> state) {

            MetricsContainerStepMap metricsContainers = new MetricsContainerStepMap();
            MetricsContainer metricsContainer = metricsContainers.getContainer(stepName);

            // Add metrics container to the scope of org.apache.beam.sdk.io.Source.Reader methods
            // since they may report metrics.
            try (Closeable ignored = MetricsEnvironment.scopedMetricsContainer(metricsContainer)) {
                // source as MicrobatchSource
                MicrobatchSource<T, CheckpointMarkT> microbatchSource =
                        (MicrobatchSource<T, CheckpointMarkT>) source;

                // Initial high/low watermarks.
                Instant lowWatermark = BoundedWindow.TIMESTAMP_MIN_VALUE;
                final Instant highWatermark;

                // If state exists, use it; otherwise it's the first time, so use the
                // startCheckpointMark. startCheckpointMark may be EmptyCheckpointMark (the Spark
                // Java API tries to apply Optional(null)), which is handled by the UnboundedSource
                // implementation.
                Coder<CheckpointMarkT> checkpointCoder = microbatchSource.getCheckpointMarkCoder();
                CheckpointMarkT checkpointMark;
                if (state.exists()) {
                    // Previous (output) watermark is now the low watermark.
                    lowWatermark = state.get()._2();
                    checkpointMark = CoderHelpers.fromByteArray(state.get()._1(), checkpointCoder);
                    LOG.info("Continue reading from an existing CheckpointMark.");
                } else if (startCheckpointMark.isDefined()
                        && !startCheckpointMark.get().equals(EmptyCheckpointMark.get())) {
                    checkpointMark = startCheckpointMark.get();
                    LOG.info("Start reading from a provided CheckpointMark.");
                } else {
                    checkpointMark = null;
                    LOG.info("No CheckpointMark provided, start reading from default.");
                }

                // Create reader.
                final MicrobatchSource.Reader/*<T>*/ microbatchReader;
                final Stopwatch stopwatch = Stopwatch.createStarted();
                long readDurationMillis = 0;
                try {
                    microbatchReader = (MicrobatchSource.Reader) microbatchSource
                            .getOrCreateReader(runtimeContext.getPipelineOptions(), checkpointMark);
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }

                // Read micro-batch as a serialized collection.
                final List<byte[]> readValues = new ArrayList<>();
                WindowedValue.FullWindowedValueCoder<T> coder = WindowedValue.FullWindowedValueCoder
                        .of(source.getDefaultOutputCoder(), GlobalWindow.Coder.INSTANCE);
                try {
                    // Measure how long a read takes per-partition.
                    boolean finished = !microbatchReader.start();
                    while (!finished) {
                        final WindowedValue<T> wv = WindowedValue.of((T) microbatchReader.getCurrent(),
                                microbatchReader.getCurrentTimestamp(), GlobalWindow.INSTANCE,
                                PaneInfo.NO_FIRING);
                        readValues.add(CoderHelpers.toByteArray(wv, coder));
                        finished = !microbatchReader.advance();
                    }

                    // End-of-read watermark is the high watermark, but don't allow decrease.
                    final Instant sourceWatermark = microbatchReader.getWatermark();
                    highWatermark = sourceWatermark.isAfter(lowWatermark) ? sourceWatermark : lowWatermark;

                    readDurationMillis = stopwatch.stop().elapsed(TimeUnit.MILLISECONDS);
                    LOG.info("Source id {} spent {} millis on reading.", microbatchSource.getId(),
                            readDurationMillis);

                    // If the Source does not supply a CheckpointMark, skip updating the state.
                    @SuppressWarnings("unchecked")
                    final CheckpointMarkT finishedReadCheckpointMark =
                            (CheckpointMarkT) microbatchReader.getCheckpointMark();
                    byte[] codedCheckpoint = new byte[0];
                    if (finishedReadCheckpointMark != null) {
                        codedCheckpoint = CoderHelpers.toByteArray(finishedReadCheckpointMark, checkpointCoder);
                    } else {
                        LOG.info("Skipping checkpoint marking because the reader failed to supply one.");
                    }

                    // Persist the end-of-read (high) watermark for the following read, where it
                    // will become the next low watermark.
                    state.update(new Tuple2<>(codedCheckpoint, highWatermark));
                } catch (IOException e) {
                    throw new RuntimeException("Failed to read from reader.", e);
                }

                final ArrayList<byte[]> payload =
                        Lists.newArrayList(Iterators.unmodifiableIterator(readValues.iterator()));
                return new Tuple2<>((Iterable<byte[]>) payload, new Metadata(readValues.size(),
                        lowWatermark, highWatermark, readDurationMillis, metricsContainers));

            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    };
}
From source file:edu.harvard.med.screensaver.model.meta.RelationshipPath.java
public Iterator<PropertyNameAndValue> restrictionIterator() {
    return Iterators.unmodifiableIterator(_restrictions.iterator());
}
From source file:eu.numberfour.n4js.internal.FileBasedWorkspace.java
@Override
public Iterator<URI> getFolderIterator(URI folderLocation) {
    final File sourceContainerDirectory = new File(java.net.URI.create(folderLocation.toString()));
    if (sourceContainerDirectory.isDirectory()) {
        AbstractTreeIterator<File> treeIterator = new AbstractTreeIterator<File>(sourceContainerDirectory,
                false) {
            @Override
            protected Iterator<? extends File> getChildren(Object root) {
                if (root instanceof File && ((File) root).isDirectory()) {
                    return Arrays.asList(((File) root).listFiles()).iterator();
                }
                return Iterators.emptyIterator();
            }
        };
        return Iterators.unmodifiableIterator(
                Iterators.transform(Iterators.filter(treeIterator, new Predicate<File>() {
                    @Override
                    public boolean apply(File input) {
                        return !input.isDirectory();
                    }
                }), new Function<File, URI>() {
                    @Override
                    public URI apply(File input) {
                        return URI.createURI(input.toURI().toString());
                    }
                }));
    }
    return Iterators.emptyIterator();
}
From source file:com.b2international.snowowl.datastore.cdo.CDOContainer.java
@Override
protected void doDeactivate() throws Exception {
    LOGGER.info("Deactivating " + this.getClass().getSimpleName() + "...");
    if (null != uuidToItems) {
        for (final Iterator<T> itr = Iterators.unmodifiableIterator(uuidToItems.values().iterator()); itr
                .hasNext(); /* nothing */) {
            itr.next().deactivate();
        }
    }
    if (null != nsUriToUuidMap) {
        nsUriToUuidMap.clear();
        nsUriToUuidMap = null;
    }
    if (null != eclassToNsUriMap) {
        eclassToNsUriMap.cleanUp();
        eclassToNsUriMap = null;
    }
    if (null != namespaceToUuidMap) {
        namespaceToUuidMap.clear();
        namespaceToUuidMap = null;
    }
    if (null != nsUriToItems) {
        nsUriToItems.clear();
        nsUriToItems = null;
    }
    super.deactivate();
    LOGGER.info(this.getClass().getSimpleName() + " has been successfully deactivated.");
}
From source file:com.facebook.stats.AbstractCompositeCounter.java
/**
 * @return Unmodifiable iterator across windowed event counters in ascending
 *         (oldest first) order
 */
protected Iterator<C> eventCounterIterator() {
    return Iterators.unmodifiableIterator(eventCounters.iterator());
}
From source file:org.elasticsearch.index.mapper.MapperService.java
@Override
public UnmodifiableIterator<DocumentMapper> iterator() {
    return Iterators.unmodifiableIterator(mappers.values().iterator());
}
From source file:com.b2international.snowowl.datastore.server.session.ApplicationSessionManager.java
private RpcSession getByUserId(final String userId) {
    Preconditions.checkNotNull(userId, "User ID argument cannot be null.");
    for (final Iterator<RpcSession> itr = Iterators.unmodifiableIterator(
            new CopyOnWriteArrayList<RpcSession>(knownSessions.values()).iterator()); itr.hasNext(); /* */) {
        final RpcSession rpcSession = itr.next();
        if (userId.equals(rpcSession.get(KEY_USER_ID))) {
            return rpcSession;
        }
    }
    return null;
}
From source file:com.b2international.snowowl.datastore.cdo.CDOContainer.java
@Override
protected void doAfterActivate() throws Exception {
    Preconditions.checkNotNull(uuidToItems, "UUID to repository cache is not initialized yet.");
    Preconditions.checkNotNull(nsUriToUuidMap, "Namespace URI to repository UUID cache is not initialized yet.");
    eclassToNsUriMap = CacheBuilder.newBuilder().build(new CacheLoader<EClass, NsUri>() {
        @Override
        public NsUri load(final EClass eClass) throws Exception {
            Preconditions.checkNotNull(eClass, "EClass argument cannot be null.");
            final EPackage ePackage = eClass.getEPackage();
            Preconditions.checkNotNull(ePackage, "Cannot specify package for EClass. EClass: '" + eClass + "'.");
            final String nsURI = ePackage.getNsURI();
            Preconditions.checkState(!StringUtils.isEmpty(nsURI),
                    "Namespace URI is not specified for EPackage. EPackage: '" + ePackage + "'.");
            final NsUri candidateNsUri = getOrCreateNsUri(nsURI);
            for (final Iterator<NsUri> itr = Iterators
                    .unmodifiableIterator(nsUriToUuidMap.keySet().iterator()); itr.hasNext(); /* nothing */) {
                final NsUri $ = itr.next();
                if ($.equals(candidateNsUri)) {
                    return $;
                }
            }
            LOGGER.warn("Unsupported EClass type: '" + eClass + "'.");
            return NsUri.NULL_IMPL;
        }
    });
    super.doAfterActivate();
}
From source file:com.b2international.snowowl.datastore.cdo.CDOContainer.java
/**
 * (non-API)
 *
 * Returns an iterator over the managed items. Requires inactive state.
 *
 * @return
 */
public final Iterator<T> _iterator() {
    checkInactive();
    return Iterators.unmodifiableIterator(nsUriToItems.values().iterator());
}
From source file:org.elasticsearch.action.get.MultiGetRequest.java
@Override
public Iterator<Item> iterator() {
    return Iterators.unmodifiableIterator(items.iterator());
}