List of usage examples for com.google.common.collect Iterators addAll
public static <T> boolean addAll(Collection<T> addTo, Iterator<? extends T> iterator)
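Iterators.addAll drains every remaining element of the given iterator into the target collection and returns true if the collection changed as a result. A minimal self-contained sketch (class name and sample values are illustrative):

import com.google.common.collect.Iterators;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class IteratorsAddAllExample {
    public static void main(String[] args) {
        List<String> target = new ArrayList<>(Arrays.asList("a"));
        Iterator<String> source = Arrays.asList("b", "c").iterator();
        // Drains the iterator into the collection; returns true because target changed.
        boolean changed = Iterators.addAll(target, source);
        System.out.println(changed); // true
        System.out.println(target);  // [a, b, c]
    }
}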
From source file:org.apache.ctakes.temporal.data.analysis.CompareFeatureStructures.java
private static List<FeatureStructure> toFeatureStructures(JCas jCas, List<Class<?>> annotationClasses) {
    List<FeatureStructure> fsList = Lists.newArrayList();
    for (Class<?> annotationClass : annotationClasses) {
        Type type = JCasUtil.getType(jCas, annotationClass);
        // Drain all indexed feature structures of this type into the list.
        Iterators.addAll(fsList, jCas.getFSIndexRepository().getAllIndexedFS(type));
    }
    return BY_TYPE_AND_OFFSETS.sortedCopy(fsList);
}
From source file:org.apache.jackrabbit.oak.plugins.document.RevisionVector.java
/**
 * Calculates the parallel maximum of this and the given {@code vector}.
 *
 * @param vector the other vector.
 * @return the parallel maximum of the two.
 */
public RevisionVector pmax(@Nonnull RevisionVector vector) {
    // optimize single revision case
    if (revisions.length == 1 && vector.revisions.length == 1) {
        if (revisions[0].getClusterId() == vector.revisions[0].getClusterId()) {
            return revisions[0].compareRevisionTime(vector.revisions[0]) > 0 ? this : vector;
        } else {
            return new RevisionVector(revisions[0], vector.revisions[0]);
        }
    }
    int capacity = Math.max(revisions.length, vector.revisions.length);
    List<Revision> pmax = newArrayListWithCapacity(capacity);
    PeekingIterator<Revision> it = peekingIterator(vector.iterator());
    for (Revision r : revisions) {
        while (it.hasNext() && it.peek().getClusterId() < r.getClusterId()) {
            pmax.add(it.next());
        }
        Revision other = peekRevision(it, r.getClusterId());
        if (other != null && other.getClusterId() == r.getClusterId()) {
            pmax.add(Utils.max(r, other));
            it.next();
        } else {
            // other does not have a revision with r.clusterId
            pmax.add(r);
        }
    }
    // add remaining
    Iterators.addAll(pmax, it);
    return new RevisionVector(toArray(pmax, Revision.class), false, false);
}
From source file:de.fau.osr.core.vcs.impl.GitBlameOperation.java
/**
 * @throws GitAPIException
 * @throws IOException
 * @return An instance of {@link AnnotatedWords} where every word from
 *         the committed contents of path is mapped to a deduplicated list of annotations.
 */
public AnnotatedWords wordBlame() throws GitAPIException, IOException {
    /*
     * at the moment, just look at the state at HEAD,
     * could be expanded in the future to
     * a) parameterize the used commit
     * b) annotate the working copy instead
     */
    ObjectId rootId = repo.resolve("HEAD");
    RevWalk walker = new RevWalk(repo);
    RevCommit rootCommit = walker.parseCommit(rootId);
    Function<ObjectId, byte[]> readFunction = id -> {
        try {
            return client.readBlob(id);
        } catch (IOException | GitAPIException e) {
            throw new RuntimeException(e);
        }
    };
    FlatSource source;
    try {
        ObjectId idAtHead = client.fileAtRev(walker, path, rootCommit);
        source = FlatSource.flatten(readFunction, idAtHead);
    } catch (Exception e) {
        throw new FileNotFoundException(path);
    }
    @SuppressWarnings("unchecked")
    List<Object>[] currentBlame = new List[source.size()];
    BlameItem topBlame = new BlameItem(rootCommit, new TreeMap<>(), path);
    /*
     * initially, blame all lines on HEAD
     */
    for (int i = 0; i < currentBlame.length; ++i) {
        currentBlame[i] = new ArrayList<>();
        topBlame.words.put(i, i);
    }
    workQueue.add(topBlame);
    while (!workQueue.isEmpty()) {
        BlameItem cur = workQueue.pollFirst();
        walker.parseCommit(cur.accused);
        ObjectId idAfter = client.fileAtRev(walker, cur.path, cur.accused);
        FlatSource after = FlatSource.flatten(readFunction, idAfter);
        /*
         * pull in custom annotations from putBlame on all suspect lines
         */
        if (putBlame != null) {
            String nameOfAccused = cur.accused.name();
            for (Map.Entry<Integer, Integer> entry : cur.words.entrySet()) {
                Iterator<? extends Object> iterator = putBlame.apply(nameOfAccused,
                        after.getLineByWord(entry.getKey()));
                if (iterator != null)
                    Iterators.addAll(currentBlame[entry.getValue()], iterator);
            }
        }
        RevCommit[] parents = cur.accused.getParents();
        /*
         * found indicates if we found an unmodified copy in a parent;
         * if false, foundLines indicates which lines we were able to blame
         * down in history
         */
        boolean found = false;
        HashSet<Integer> foundLines = new HashSet<>();
        for (RevCommit parent : parents) {
            walker.parseCommit(parent);
            TreeWalk treeWalk = TreeWalk.forPath(repo, cur.path, cur.accused.getTree(), parent.getTree());
            if (treeWalk.idEqual(0, 1)) {
                // the file has not changed between parent and accused
                BlameItem nextItem = cur.shallowClone();
                nextItem.accused = parent;
                push(nextItem);
                found = true;
            } else {
                // the file has changed
                ObjectId idBefore = client.fileAtRev(walker, cur.path, parent);
                if (idBefore == null) {
                    /*
                     * the file does not exist at the same path in parent and accused,
                     * so go look for identical files
                     *
                     * could be extended to look for similar files, but watch performance!
                     */
                    treeWalk = new TreeWalk(repo);
                    treeWalk.setRecursive(true);
                    treeWalk.addTree(parent.getTree());
                    while (treeWalk.next()) {
                        if (treeWalk.getObjectId(0).equals(idAfter)) {
                            String pathBefore = treeWalk.getPathString();
                            BlameItem nextItem = cur.shallowClone();
                            nextItem.accused = parent;
                            nextItem.path = pathBefore;
                            push(nextItem);
                            found = true;
                            break;
                        }
                    }
                    continue;
                }
                // the file is at the same location in parent
                byte[] byteBefore = client.readBlob(idBefore);
                EditList diff;
                FlatSource before = FlatSource.flatten(readFunction, idBefore);
                diff = diffAlgorithm.diff(RawTextComparator.DEFAULT, before, after);
                /*
                 * The documentation does not state if diff is sorted,
                 * just that the Edits do not overlap, but it is much
                 * more convenient to have it sorted.
                 */
                final Comparator<Edit> compEdit = (d1, d2) -> d1.getBeginB() - d2.getBeginB();
                diff.sort(compEdit);
                BlameItem passedBlame = new BlameItem(parent, new TreeMap<>(), cur.path);
                Iterator<Map.Entry<Integer, Integer>> entryIterator = cur.words.entrySet().iterator();
                Iterator<Edit> editIterator = diff.iterator();
                Map.Entry<Integer, Integer> curEntry = null;
                if (entryIterator.hasNext())
                    curEntry = entryIterator.next();
                Edit curEdit = null;
                if (editIterator.hasNext())
                    curEdit = editIterator.next();
                int offset = 0;
                /*
                 * traverse diff and words simultaneously
                 */
                while (curEntry != null || entryIterator.hasNext()) {
                    if (curEntry == null) {
                        curEntry = entryIterator.next();
                    } else if (curEdit == null && editIterator.hasNext()) {
                        curEdit = editIterator.next();
                    } else if (curEdit != null && curEdit.getBeginB() <= curEntry.getKey()) {
                        if (curEdit.getEndB() > curEntry.getKey()) {
                            /*
                             * curEntry was erased by curEdit
                             */
                            curEntry = null;
                        } else {
                            /*
                             * curEdit introduced an offset before curEntry
                             */
                            offset += (curEdit.getEndA() - curEdit.getBeginA())
                                    - (curEdit.getEndB() - curEdit.getBeginB());
                            curEdit = null;
                        }
                    } else {
                        /*
                         * push curEntry with key corrected by offset
                         */
                        foundLines.add(curEntry.getKey());
                        passedBlame.words.put(curEntry.getKey() + offset, curEntry.getValue());
                        curEntry = null;
                    }
                }
                /*
                 * push the lines we found in parent back to queue
                 */
                push(passedBlame);
            }
        }
        /*
         * If there is no identical parent file, we have to take
         * responsibility for all lines not found in some parent.
         */
        if (!found) {
            for (Map.Entry<Integer, Integer> entry : cur.words.entrySet()) {
                if (!foundLines.contains(entry.getKey()))
                    currentBlame[entry.getValue()].add(cur.accused.getId());
            }
        }
    }
    /*
     * duplicate objects take up unneeded space, clean them up
     */
    for (int i = 0; i < currentBlame.length; ++i)
        currentBlame[i] = new ArrayList<>(new HashSet<>(currentBlame[i]));
    return new AnnotatedWords(source, currentBlame);
}
From source file:org.geoserver.importer.Importer.java
public Iterator<ImportContext> getAllContextsByUpdated() {
    try {
        return contextStore.iterator("updated");
    } catch (UnsupportedOperationException e) {
        // fallback: sort in memory, most recently updated first
        TreeSet<ImportContext> sorted = new TreeSet<ImportContext>(new Comparator<ImportContext>() {
            @Override
            public int compare(ImportContext o1, ImportContext o2) {
                Date d1 = o1.getUpdated();
                Date d2 = o2.getUpdated();
                return -1 * d1.compareTo(d2);
            }
        });
        Iterators.addAll(sorted, contextStore.iterator());
        return sorted.iterator();
    }
}
From source file:com.analog.lyric.dimple.model.core.Node.java
/**
 * Returns the closest common ancestor graph containing both this node and {@code other},
 * or null if there isn't one.
 *
 * @param other is another node with which to compare.
 * @param uncommonAncestors if non-null, then any ancestors that are not in common will
 *        be added to this list in order from top to bottom.
 *
 * @see #getCommonAncestor(Node)
 */
public @Nullable FactorGraph getCommonAncestor(Node other, @Nullable List<FactorGraph> uncommonAncestors) {
    // First try some common special cases to avoid computation of full path to the root.
    FactorGraph thisParent = getParentGraph();
    FactorGraph otherParent = other.getParentGraph();
    if (thisParent == otherParent) {
        return thisParent;
    }
    if (thisParent == null || otherParent == null) {
        return null;
    }
    if (this == otherParent) {
        return otherParent;
    }
    if (other == thisParent) {
        return thisParent;
    }
    Iterator<FactorGraph> theseAncestors = getAncestors().iterator();
    Iterator<FactorGraph> otherAncestors = other.getAncestors().iterator();
    FactorGraph ancestor = null;
    while (theseAncestors.hasNext() && otherAncestors.hasNext()) {
        FactorGraph thisAncestor = theseAncestors.next();
        FactorGraph otherAncestor = otherAncestors.next();
        if (thisAncestor == otherAncestor) {
            ancestor = thisAncestor;
        } else {
            if (uncommonAncestors != null) {
                // Add the remaining ancestors to the list, if provided
                uncommonAncestors.add(thisAncestor);
                Iterators.addAll(uncommonAncestors, theseAncestors);
                uncommonAncestors.add(otherAncestor);
                Iterators.addAll(uncommonAncestors, otherAncestors);
            }
            break;
        }
    }
    return ancestor;
}
From source file:org.apache.hadoop.hive.ql.exec.tez.DagUtils.java
private void addCredentials(MapWork mapWork, DAG dag) {
    Set<Path> paths = mapWork.getPathToAliases().keySet();
    if (!paths.isEmpty()) {
        Iterator<URI> pathIterator = Iterators.transform(paths.iterator(), new Function<Path, URI>() {
            @Override
            public URI apply(Path path) {
                return path.toUri();
            }
        });
        Set<URI> uris = new HashSet<URI>();
        Iterators.addAll(uris, pathIterator);
        if (LOG.isDebugEnabled()) {
            for (URI uri : uris) {
                LOG.debug("Marking URI as needing credentials: " + uri);
            }
        }
        dag.addURIsForCredentials(uris);
    }
}
From source file:gov.nih.nci.caarray.domain.sample.AbstractBioMaterial.java
/**
 * Return the characteristics with the given category in this biomaterial.
 *
 * @param category category
 * @return the characteristics with the given category.
 */
public Set<AbstractCharacteristic> getCharacteristics(final Category category) {
    Set<AbstractCharacteristic> chars = new HashSet<AbstractCharacteristic>();
    Iterators.addAll(chars,
            Iterators.filter(characteristics.iterator(), new Predicate<AbstractCharacteristic>() {
                public boolean apply(AbstractCharacteristic input) {
                    return category.equals(input.getCategory());
                }
            }));
    chars.addAll(getBuiltInCharacteristics(category));
    addUserDefinedCharacteristic(chars, category);
    return chars;
}
From source file:org.eclipse.xtext.generator.parser.packrat.PackratParserGenUtil.java
private static List<String> getConflictingKeywordsImpl(final Grammar grammar, TerminalRule rule) {
    final Iterator<Keyword> conflictingKeywords = getConflictingKeywords(rule,
            Iterators.filter(EcoreUtil.getAllContents(grammar, true), Keyword.class));
    Set<String> res = Sets.newLinkedHashSet();
    Iterators.addAll(res, Iterators.transform(conflictingKeywords, new Function<Keyword, String>() {
        @Override
        public String apply(Keyword param) {
            return param.getValue();
        }
    }));
    return Lists.newArrayList(res);
}
From source file:de.learnlib.algorithms.ttt.base.BaseTTTLearner.java
protected Set<TTTState<I, D>> getNondetSuccessors(Collection<? extends TTTState<I, D>> states, I sym) {
    Set<TTTState<I, D>> result = new HashSet<>();
    int symIdx = alphabet.getSymbolIndex(sym);
    for (TTTState<I, D> state : states) {
        TTTTransition<I, D> trans = state.transitions[symIdx];
        if (trans.isTree()) {
            result.add(trans.getTreeTarget());
        } else {
            // non-tree transition: collect every state in the target subtree
            DTNode<I, D> tgtNode = trans.getNonTreeTarget();
            Iterators.addAll(result, tgtNode.subtreeStatesIterator());
        }
    }
    return result;
}
From source file:de.tum.in.python.bluetooth.milling.machine.BluetoothMillingMachine.java
/**
 * Extracts the provided configuration
 */
private void extractConfiguration() {
    final String millingMachines = (String) this.m_properties.get(MILLING_MACHINES);
    final String DEVICE_SPLITTER = "#";
    Iterators.addAll(this.m_millingMachines,
            Splitter.on(DEVICE_SPLITTER).split(millingMachines).iterator());
}