List of usage examples for com.google.common.collect.Iterators#emptyIterator()
@Deprecated public static <T> UnmodifiableIterator<T> emptyIterator() — deprecated in Guava; the documented replacement is java.util.Collections.emptyIterator().
From source file: org.summer.dsl.builder.trace.AbstractTrace.java
/**
 * Returns the leaf trace regions overlapping the given local text region, in
 * document order, starting at the leaf whose offset equals the left boundary.
 *
 * @param localRegion the local text region to query; its offset and length
 *            bound the scan
 * @return the matching leaf regions; empty when no region starts at the left
 *         offset or there is no root trace region
 */
protected Iterable<AbstractTraceRegion> getAllTraceRegions(final ITextRegion localRegion) {
    final AbstractTraceRegion left = findTraceRegionAtLeftOffset(localRegion.getOffset());
    final int end = localRegion.getOffset() + localRegion.getLength();
    if (left == null) {
        return Collections.emptyList();
    }
    return new Iterable<AbstractTraceRegion>() {
        public Iterator<AbstractTraceRegion> iterator() {
            AbstractTraceRegion root = getRootTraceRegion();
            if (root == null)
                // Guava's Iterators.emptyIterator() is deprecated; the JDK
                // equivalent is its documented replacement.
                return Collections.emptyIterator();
            final Iterator<AbstractTraceRegion> allLeafs = root.leafIterator();
            Iterator<AbstractTraceRegion> result = new AbstractIterator<AbstractTraceRegion>() {
                // First leaf to return: the one starting exactly at 'left'.
                AbstractTraceRegion first;
                {
                    // Skip leaves until we reach the one aligned with 'left'.
                    while (first == null && allLeafs.hasNext()) {
                        AbstractTraceRegion next = allLeafs.next();
                        if (next.getMyOffset() == left.getMyOffset()) {
                            this.first = next;
                            break;
                        }
                    }
                }

                @Override
                protected AbstractTraceRegion computeNext() {
                    if (first != null) {
                        AbstractTraceRegion result = first;
                        first = null;
                        return result;
                    }
                    if (!allLeafs.hasNext())
                        return endOfData();
                    AbstractTraceRegion candidate = allLeafs.next();
                    // Stop once past the right boundary of the query region.
                    if (candidate.getMyOffset() >= end) {
                        return endOfData();
                    }
                    return candidate;
                }
            };
            return result;
        }
    };
}
From source file: org.apache.marmotta.commons.sesame.repository.ResourceUtils.java
private static Iterable<Resource> listSubjectsInternal(final RepositoryConnection con, final URI property, final Value value, final URI context) { final Resource[] contexts; if (context != null) { contexts = new Resource[] { context }; } else {//from w w w .j av a 2 s . c o m contexts = new Resource[0]; } return new Iterable<Resource>() { @Override public Iterator<Resource> iterator() { try { return Iterators.filter(Iterators.transform( ResultUtils.unwrap(con.getStatements(null, property, value, true, contexts)), new Function<Statement, Resource>() { @Override public Resource apply(Statement input) { return input.getSubject(); } }), new Predicate<Resource>() { // filter duplicates by remembering hash codes of visited resources private HashSet<Integer> visited = new HashSet<Integer>(); @Override public boolean apply(Resource input) { if (!visited.contains(input.hashCode())) { visited.add(input.hashCode()); return true; } else { return false; } } }); } catch (RepositoryException e) { ExceptionUtils.handleRepositoryException(e, ResourceUtils.class); return Iterators.emptyIterator(); } } }; }
From source file: org.summer.dsl.builder.trace.AbstractTrace.java
protected Iterable<AbstractTraceRegion> getAllTraceRegions() { return new Iterable<AbstractTraceRegion>() { public Iterator<AbstractTraceRegion> iterator() { AbstractTraceRegion root = getRootTraceRegion(); if (root == null) return Iterators.emptyIterator(); final Iterator<AbstractTraceRegion> result = root.leafIterator(); return result; }//from w w w. j a v a 2 s . c om }; }
From source file: org.locationtech.geogig.api.plumbing.WriteTree2.java
private TreeDifference computeTreeDifference() { final String rightTreeish = Ref.STAGE_HEAD; final ObjectId rootTreeId = resolveRootTreeId(); final ObjectId stageRootId = index().getTree().getId(); final Supplier<Iterator<NodeRef>> leftTreeRefs; final Supplier<Iterator<NodeRef>> rightTreeRefs; if (rootTreeId.isNull()) { Iterator<NodeRef> empty = Iterators.emptyIterator(); leftTreeRefs = Suppliers.ofInstance(empty); } else {//from ww w. ja v a2s. c om leftTreeRefs = command(LsTreeOp.class).setReference(rootTreeId.toString()) .setStrategy(Strategy.DEPTHFIRST_ONLY_TREES); } rightTreeRefs = command(LsTreeOp.class).setReference(rightTreeish) .setStrategy(Strategy.DEPTHFIRST_ONLY_TREES); MutableTree leftTree = MutableTree.createFromRefs(rootTreeId, leftTreeRefs); MutableTree rightTree = MutableTree.createFromRefs(stageRootId, rightTreeRefs); TreeDifference treeDifference = TreeDifference.create(leftTree, rightTree); return treeDifference; }
From source file: com.turn.ttorrent.client.peer.PeerHandler.java
/** * Run one step of the SharingPeer finite state machine. * * <p>/*from w ww.j ava 2s .co m*/ * Re-fill the pipeline to get download the next blocks from the peer. * </p> */ // TODO: Do we want to make sure only one person enters this FSM at a time? public void run() throws IOException { // LOG.trace("Step function in " + this); Channel c = channel; boolean flush = false; try { // This locking could be more fine-grained. synchronized (lock) { BITFIELD: { if (!c.isWritable()) { LOG.debug("{}: Peer {} channel {} not writable for bitfield.", new Object[] { provider.getLocalPeerName(), this, c }); return; } if (!bitfieldSent) { flush = true; send(new PeerMessage.BitfieldMessage(provider.getCompletedPieces()), false); bitfieldSent = true; } } BitSet interesting = getAvailablePieces(); provider.andNotCompletedPieces(interesting); INTERESTING: { if (interesting.isEmpty()) notInteresting(); else interesting(); } // Expires dead requests, and marks live ones uninteresting. EXPIRE: { long then = System.currentTimeMillis() - 16000; List<PieceHandler.AnswerableRequestMessage> requestsExpired = new ArrayList<PieceHandler.AnswerableRequestMessage>(); Iterator<PieceHandler.AnswerableRequestMessage> it = requestsSent.iterator(); while (it.hasNext()) { PieceHandler.AnswerableRequestMessage requestSent = it.next(); if (requestSent.getRequestTime() < then) { if (LOG.isTraceEnabled()) LOG.trace("{}: Peer {} request {} timed out.", new Object[] { provider.getLocalPeerName(), this, requestSent }); requestsExpired.add(requestSent); requestsSentLimit = Math.max((int) (requestsSentLimit * 0.8), MIN_REQUESTS_SENT); it.remove(); } else { interesting.clear(requestSent.getPiece()); } } rejectRequests(requestsExpired, "requests expired"); } // Makes new requests. REQUEST: { while (requestsSent.size() < requestsSentLimit) { // A choke message can come in while we are iterating. 
if (isChoking()) { if (LOG.isTraceEnabled()) LOG.trace("{}: {}: Not sending requests because they are choking us.", new Object[] { provider.getLocalPeerName(), this }); break REQUEST; } if (!c.isWritable()) { if (LOG.isDebugEnabled()) LOG.debug("{}: Peer {} channel {} not writable for request; sent {}.", new Object[] { provider.getLocalPeerName(), this, c, requestsSent.size() }); return; } // Search for a block we can request. Ideally, this iterates 0 or 1 times. while (!requestsSource.hasNext()) { // This calls a significant piece of infrastructure elsewhere, // and needs a proof against deadlock. Iterable<PieceHandler.AnswerableRequestMessage> piece = provider .getNextPieceHandler(this, interesting); if (piece == null) { if (LOG.isTraceEnabled()) LOG.trace("{}: Peer {} has no request source; breaking request loop.", new Object[] { provider.getLocalPeerName(), this }); requestsSource = Iterators.emptyIterator(); // Allow GC. break REQUEST; } requestsSource = piece.iterator(); } PieceHandler.AnswerableRequestMessage request = requestsSource.next(); if (LOG.isTraceEnabled()) LOG.trace("{}: Adding {} from {}, queue={}/{}", new Object[] { provider.getLocalPeerName(), request, requestsSource, requestsSent.size(), requestsSentLimit }); interesting.clear(request.getPiece()); // Don't pick up the same piece on the next iteration. request.setRequestTime(); requestsSent.add(request); flush = true; send(request, false); } } } // This loop does I/O so we shouldn't hold the lock fully outside it. 
RESPONSE: while (c.isWritable()) { PeerMessage.RequestMessage request = requestsReceived.poll(); request = provider.getInstrumentation().instrumentBlockRequest(this, provider, request); if (request == null) break; if (!provider.isCompletedPiece(request.getPiece())) { LOG.warn("{}: Peer {} requested invalid piece {}, terminating exchange.", new Object[] { provider.getLocalPeerName(), this, request.getPiece() }); close(); break; } // At this point we agree to send the requested piece block to // the remote peer, so let's queue a message with that block ByteBuffer block = ByteBuffer.allocate(request.getLength()); provider.readBlock(block, request.getPiece(), request.getOffset()); block.flip(); // ByteBuffer block = piece.read(request.getOffset(), request.getLength()); PeerMessage.PieceMessage response = new PeerMessage.PieceMessage(request.getPiece(), request.getOffset(), block); // response = provider.getInstrumentation(). flush = true; send(response, false); upload.update(request.getLength()); activityListener.handleBlockSent(this, request.getPiece(), request.getOffset(), request.getLength()); } } finally { if (flush) channel.flush(); if (LOG.isTraceEnabled()) LOG.trace("After run: requestsSent={}", requestsSent); } }
From source file: com.chaschev.itext.ColumnTextBuilder.java
public Iterator<AtomicIncreaseResult> newAtomicIteratorFor(final RectangleBuilder modifiableRectangle, final GrowStrategy growStrategy, RectangleBuilder originalRectangle) { final int status = go(true); if (!ColumnText.hasMoreText(status)) { return Iterators.emptyIterator(); }//ww w . j ava 2 s . co m return new AtomicIterator(modifiableRectangle, growStrategy, originalRectangle); }
From source file: fr.letroll.ttorrentandroid.client.peer.PeerHandler.java
/** * Run one step of the SharingPeer finite state machine. * * <p>/*from ww w .j a v a 2 s .c o m*/ * Re-fill the pipeline to get download the next blocks from the peer. * </p> */ public void run(String reason) throws IOException { if (LOG.isTraceEnabled()) LOG.trace("{}: Step function in {}: {}", new Object[] { provider.getLocalPeerName(), this, reason }); Channel c = channel; boolean flush = false; try { // This locking could be more fine-grained. synchronized (lock) { BITFIELD: { if (!bitfieldSent) { if (!c.isWritable()) { LOG.debug("{}: Peer {} channel {} not writable for bitfield.", new Object[] { provider.getLocalPeerName(), this, c }); return; } flush = true; send(new PeerMessage.BitfieldMessage(provider.getCompletedPieces()), false); bitfieldSent = true; } } BitSet interesting = getAvailablePieces(); provider.andNotCompletedPieces(interesting); INTERESTING: { if (interesting.isEmpty()) notInteresting(); else interesting(); } // Expires dead requests, and marks live ones uninteresting. EXPIRE: { long now = System.currentTimeMillis(); if (requestsExpiredAt < now - MAX_REQUESTS_TIME >> 2) { long then = now - MAX_REQUESTS_TIME; List<PieceHandler.AnswerableRequestMessage> requestsExpired = new ArrayList<PieceHandler.AnswerableRequestMessage>(); Iterator<PieceHandler.AnswerableRequestMessage> it = requestsSent.iterator(); while (it.hasNext()) { PieceHandler.AnswerableRequestMessage requestSent = it.next(); if (requestSent.getRequestTime() < then) { if (LOG.isTraceEnabled()) LOG.trace("{}: Peer {} request {} timed out.", new Object[] { provider.getLocalPeerName(), this, requestSent }); requestsExpired.add(requestSent); requestsSentLimit = Math.max((int) (requestsSentLimit * 0.8), MIN_REQUESTS_SENT); it.remove(); } else { interesting.clear(requestSent.getPiece()); } } rejectRequests(requestsExpired, "requests expired"); requestsExpiredAt = now; } } // Makes new requests. 
REQUEST: { while (requestsSent.size() < requestsSentLimit) { // A choke message can come in while we are iterating. if (isChoking()) { if (LOG.isTraceEnabled()) LOG.trace("{}: {}: Not sending requests because they are choking us.", new Object[] { provider.getLocalPeerName(), this }); break REQUEST; } if (!c.isWritable()) { if (LOG.isDebugEnabled()) LOG.debug("{}: Peer {} channel {} not writable for request; sent {}.", new Object[] { provider.getLocalPeerName(), this, c, requestsSent.size() }); return; } // Search for a block we can request. Ideally, this iterates 0 or 1 times. while (!requestsSource.hasNext()) { // This calls a significant piece of infrastructure elsewhere, // and needs a proof against deadlock. Iterable<PieceHandler.AnswerableRequestMessage> piece = provider .getNextPieceHandler(this, interesting); if (piece == null) { if (LOG.isTraceEnabled()) LOG.trace("{}: Peer {} has no request source; breaking request loop.", new Object[] { provider.getLocalPeerName(), this }); requestsSource = Iterators.emptyIterator(); // Allow GC. break REQUEST; } requestsSource = piece.iterator(); } PieceHandler.AnswerableRequestMessage request = requestsSource.next(); if (LOG.isTraceEnabled()) LOG.trace("{}: Adding {} from {}, queue={}/{}", new Object[] { provider.getLocalPeerName(), request, requestsSource, requestsSent.size(), requestsSentLimit }); interesting.clear(request.getPiece()); // Don't pick up the same piece on the next iteration. request.setRequestTime(); requestsSent.add(request); flush = true; send(request, false); } } } // This loop does I/O so we shouldn't hold the lock fully outside it. 
RESPONSE: while (c.isWritable()) { PeerMessage.RequestMessage request = requestsReceived.poll(); request = provider.getInstrumentation().instrumentBlockRequest(this, provider, request); if (request == null) break; if (!provider.isCompletedPiece(request.getPiece())) { LOG.warn("{}: Peer {} requested invalid piece {}, terminating exchange.", new Object[] { provider.getLocalPeerName(), this, request.getPiece() }); close("requested piece we don't have"); break; } // At this point we agree to send the requested piece block to // the remote peer, so let's queue a message with that block ByteBuffer block = ByteBuffer.allocate(request.getLength()); provider.readBlock(block, request.getPiece(), request.getOffset()); block.flip(); // ByteBuffer block = piece.read(request.getOffset(), request.getLength()); PeerMessage.PieceMessage response = new PeerMessage.PieceMessage(request.getPiece(), request.getOffset(), block); // response = provider.getInstrumentation(). flush = true; send(response, false); upload.update(request.getLength()); activityListener.handleBlockSent(this, request.getPiece(), request.getOffset(), request.getLength()); } } finally { if (flush) channel.flush(); if (LOG.isTraceEnabled()) LOG.trace("After run: requestsSent={}", requestsSent); } }
From source file: org.apache.cassandra.db.filter.SliceQueryFilter.java
public Iterator<RangeTombstone> getRangeTombstoneIterator(final ColumnFamily source) { final DeletionInfo delInfo = source.deletionInfo(); if (!delInfo.hasRanges() || slices.length == 0) return Iterators.emptyIterator(); return new AbstractIterator<RangeTombstone>() { private int sliceIdx = 0; private Iterator<RangeTombstone> sliceIter = currentRangeIter(); protected RangeTombstone computeNext() { while (true) { if (sliceIter.hasNext()) return sliceIter.next(); if (!nextSlice()) return endOfData(); sliceIter = currentRangeIter(); }// w w w . ja va2 s . co m } private Iterator<RangeTombstone> currentRangeIter() { ColumnSlice slice = slices[reversed ? (slices.length - 1 - sliceIdx) : sliceIdx]; return reversed ? delInfo.rangeIterator(slice.finish, slice.start) : delInfo.rangeIterator(slice.start, slice.finish); } private boolean nextSlice() { return ++sliceIdx < slices.length; } }; }
From source file: org.geogit.api.plumbing.WriteTree2.java
private TreeDifference computeTreeDifference() { final String rightTreeish = Ref.STAGE_HEAD; final ObjectId rootTreeId = resolveRootTreeId(); final ObjectId stageRootId = getIndex().getTree().getId(); final Supplier<Iterator<NodeRef>> leftTreeRefs; final Supplier<Iterator<NodeRef>> rightTreeRefs; if (rootTreeId.isNull()) { Iterator<NodeRef> empty = Iterators.emptyIterator(); leftTreeRefs = Suppliers.ofInstance(empty); } else {//from ww w . ja v a 2 s .c om leftTreeRefs = command(LsTreeOp.class).setReference(rootTreeId.toString()) .setStrategy(Strategy.DEPTHFIRST_ONLY_TREES); } rightTreeRefs = command(LsTreeOp.class).setReference(rightTreeish) .setStrategy(Strategy.DEPTHFIRST_ONLY_TREES); MutableTree leftTree = MutableTree.createFromRefs(rootTreeId, leftTreeRefs); MutableTree rightTree = MutableTree.createFromRefs(stageRootId, rightTreeRefs); TreeDifference treeDifference = TreeDifference.create(leftTree, rightTree); return treeDifference; }
From source file: org.apache.hadoop.hive.ql.exec.FetchOperator.java
/** * Clear the context, if anything needs to be done. * **///w w w. ja va 2s .com public void clearFetchContext() throws HiveException { try { if (currRecReader != null) { currRecReader.close(); currRecReader = null; } closeOperator(); if (context != null) { context.clear(); context = null; } this.currPath = null; this.iterPath = null; this.iterPartDesc = null; this.iterSplits = Iterators.emptyIterator(); } catch (Exception e) { throw new HiveException("Failed with exception " + e.getMessage() + StringUtils.stringifyException(e)); } }