Usage example for org.eclipse.jgit.treewalk.TreeWalk#forPath
public static TreeWalk forPath(final @Nullable Repository repo, final ObjectReader reader, final String path, final AnyObjectId... trees) throws MissingObjectException, IncorrectObjectTypeException, CorruptObjectException, IOException
From source file:de.fau.osr.core.vcs.impl.GitBlameOperation.java
License:Open Source License
/** * @throws GitAPIException//from w w w. jav a 2 s . c om * @throws IOException * @return An instance of {@link AnnotatedWords} where every word from * the committed contents of path is mapped to a deduplicated list of annotations. */ public AnnotatedWords wordBlame() throws GitAPIException, IOException { /* * at the moment, just look at the state at HEAD, * could be expanded in the future to * a) parameterize the used commit * b) annotate the working copy instead */ ObjectId rootId = repo.resolve("HEAD"); RevWalk walker = new RevWalk(repo); RevCommit rootCommit = walker.parseCommit(rootId); Function<ObjectId, byte[]> readFunction = id -> { try { return client.readBlob(id); } catch (IOException | GitAPIException e) { throw new RuntimeException(e); } }; FlatSource source; try { ObjectId idAtHead = client.fileAtRev(walker, path, rootCommit); source = FlatSource.flatten(readFunction, idAtHead); } catch (Exception e) { throw new FileNotFoundException(path); } @SuppressWarnings("unchecked") List<Object>[] currentBlame = new List[source.size()]; BlameItem topBlame = new BlameItem(rootCommit, new TreeMap<>(), path); /* * initially, blame all lines on HEAD */ for (int i = 0; i < currentBlame.length; ++i) { currentBlame[i] = new ArrayList<>(); topBlame.words.put(i, i); } workQueue.add(topBlame); while (!workQueue.isEmpty()) { BlameItem cur = workQueue.pollFirst(); walker.parseCommit(cur.accused); ObjectId idAfter = client.fileAtRev(walker, cur.path, cur.accused); FlatSource after = FlatSource.flatten(readFunction, idAfter); /* * pull in custom annotations from putBlame on all suspect lines */ if (putBlame != null) { String nameOfAccused = cur.accused.name(); for (Map.Entry<Integer, Integer> entry : cur.words.entrySet()) { Iterator<? 
extends Object> iterator = putBlame.apply(nameOfAccused, after.getLineByWord(entry.getKey())); if (iterator != null) Iterators.addAll(currentBlame[entry.getValue()], iterator); } } RevCommit[] parents = cur.accused.getParents(); /* * found indicates if we found an unmodified copy in a parent, * if false, foundLines indicates which lines we were able to blame * down in history */ boolean found = false; HashSet<Integer> foundLines = new HashSet<>(); for (RevCommit parent : parents) { walker.parseCommit(parent); TreeWalk treeWalk = TreeWalk.forPath(repo, cur.path, cur.accused.getTree(), parent.getTree()); if (treeWalk.idEqual(0, 1)) { //the file has not changed between parent and accused BlameItem nextItem = cur.shallowClone(); nextItem.accused = parent; push(nextItem); found = true; } else { //the file has changed ObjectId idBefore = client.fileAtRev(walker, cur.path, parent); if (idBefore == null) { /* * the file does not exist at the same path in parent and accused, * so go look for identical files * * could be extended to look for similar files, but watch performance! */ treeWalk = new TreeWalk(repo); treeWalk.setRecursive(true); treeWalk.addTree(parent.getTree()); while (treeWalk.next()) { if (treeWalk.getObjectId(0).equals(idAfter)) { String pathBefore = treeWalk.getPathString(); BlameItem nextItem = cur.shallowClone(); nextItem.accused = parent; nextItem.path = pathBefore; push(nextItem); found = true; break; } } continue; } //the file is at the same location in parent byte[] byteBefore = client.readBlob(idBefore); EditList diff; FlatSource before = FlatSource.flatten(readFunction, idBefore); diff = diffAlgorithm.diff(RawTextComparator.DEFAULT, before, after); /* * The documentation does not state if diff is sorted, * just that the Edits do not overlap, but it is much * more convenient to have it sorted. 
*/ final Comparator<Edit> compEdit = (d1, d2) -> d1.getBeginB() - d2.getBeginB(); diff.sort(compEdit); BlameItem passedBlame = new BlameItem(parent, new TreeMap<>(), cur.path); Iterator<Map.Entry<Integer, Integer>> entryIterator = cur.words.entrySet().iterator(); Iterator<Edit> editIterator = diff.iterator(); Map.Entry<Integer, Integer> curEntry = null; if (entryIterator.hasNext()) curEntry = entryIterator.next(); Edit curEdit = null; if (editIterator.hasNext()) curEdit = editIterator.next(); int offset = 0; /* * traverse diff and words simultaneously */ while (curEntry != null || entryIterator.hasNext()) { if (curEntry == null) { curEntry = entryIterator.next(); } else if (curEdit == null && editIterator.hasNext()) { curEdit = editIterator.next(); } else if (curEdit != null && curEdit.getBeginB() <= curEntry.getKey()) { if (curEdit.getEndB() > curEntry.getKey()) { /* * curEntry was erased by curEdit */ curEntry = null; } else { /* * curEdit introduced an offset before curEntry */ offset += (curEdit.getEndA() - curEdit.getBeginA()) - (curEdit.getEndB() - curEdit.getBeginB()); curEdit = null; } } else { /* * push curEntry with key corrected by offset */ foundLines.add(curEntry.getKey()); passedBlame.words.put(curEntry.getKey() + offset, curEntry.getValue()); curEntry = null; } } /* * push the lines we found in parent back to queue */ push(passedBlame); } } /* * If there is not identical parent file, we have to take * responsibility for all lines not found in some parent. */ if (!found) { for (Map.Entry<Integer, Integer> entry : cur.words.entrySet()) { if (!foundLines.contains(entry.getKey())) currentBlame[entry.getValue()].add(cur.accused.getId()); } } } /* * duplicate objects take up unneeded space, clean them up */ for (int i = 0; i < currentBlame.length; ++i) currentBlame[i] = new ArrayList<>(new HashSet<>(currentBlame[i])); return new AnnotatedWords(source, currentBlame); }