List of usage examples for com.google.common.collect Range lowerEndpoint
public C lowerEndpoint()
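The signature above is from Guava's Range class: lowerEndpoint() returns the value of the lower endpoint regardless of whether the bound is open or closed, and throws IllegalStateException when the range has no lower bound. Before the project examples below, here is a minimal self-contained sketch of that contract (class and variable names are ours, not taken from any of the projects):

import com.google.common.collect.BoundType;
import com.google.common.collect.Range;

public class LowerEndpointDemo {
    public static void main(String[] args) {
        Range<Integer> closed = Range.closed(3, 9);   // [3..9]
        Range<Integer> open = Range.open(3, 9);       // (3..9)
        Range<Integer> unbounded = Range.atMost(9);   // (-inf..9]

        // lowerEndpoint() returns the endpoint value regardless of bound type;
        // use lowerBoundType() to distinguish CLOSED from OPEN.
        System.out.println(closed.lowerEndpoint());                   // 3
        System.out.println(open.lowerEndpoint());                     // 3
        System.out.println(open.lowerBoundType() == BoundType.OPEN);  // true

        // Ranges without a lower bound throw IllegalStateException from
        // lowerEndpoint(), so guard with hasLowerBound() first.
        if (unbounded.hasLowerBound()) {
            System.out.println(unbounded.lowerEndpoint());
        } else {
            System.out.println("no lower bound");
        }
    }
}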
From source file:edu.mit.streamjit.impl.compiler2.SubsetBiasAverageAllocationStrategy.java
@Override
public void allocateGroup(ActorGroup group, Range<Integer> iterations, List<Core> cores, Configuration config) {
    int numCores = 0, biasCount = 0;
    List<ImmutableList<? extends Integer>> coreOrders = new ArrayList<>();
    float bias = 0;
    for (Actor a : group.actors()) {
        int id = a.id();
        numCores += config.getParameter("Group" + id + "CoreCount", Configuration.IntParameter.class).getValue();
        Configuration.PermutationParameter<Integer> coreOrderParam = config.getParameter(
                "Group" + id + "CoreOrder", Configuration.PermutationParameter.class, Integer.class);
        coreOrders.add(coreOrderParam.getUniverse());
        int ourBiasCount = config.getParameter("Group" + id + "BiasCount", Configuration.IntParameter.class)
                .getValue();
        biasCount += Math.min(ourBiasCount, numCores - 1);
        bias += config.getParameter("Group" + id + "Bias", Configuration.FloatParameter.class).getValue();
    }
    numCores = IntMath.divide(numCores, group.actors().size(), RoundingMode.CEILING);
    biasCount = IntMath.divide(biasCount, group.actors().size(), RoundingMode.FLOOR);
    bias /= group.actors().size();

    // Transpose coreOrders.
    List<Integer> coreOrder = new ArrayList<>();
    for (int i = 0; i < coreOrders.get(0).size(); ++i)
        for (int j = 0; j < coreOrders.size(); ++j)
            coreOrder.add(coreOrders.get(j).get(i));
    // Remove duplicates preserving order.
    coreOrder = new ArrayList<>(new LinkedHashSet<>(coreOrder));

    List<Core> subset = new ArrayList<>(numCores);
    for (int i = 0; i < coreOrder.size() && subset.size() < numCores; ++i)
        if (coreOrder.get(i) < cores.size())
            subset.add(cores.get(coreOrder.get(i)));
    List<Core> biasSubset = new ArrayList<>(biasCount);
    while (biasSubset.size() < biasCount)
        biasSubset.add(subset.remove(0));

    float deficitFraction = biasCount * (1 - bias) / numCores, surplusFraction = 1 - deficitFraction;
    assert deficitFraction >= 0 && surplusFraction >= 0 : String.format("%d %d %f -> %f %f", numCores, biasCount,
            bias, deficitFraction, surplusFraction);

    iterations = iterations.canonical(DiscreteDomain.integers());
    int totalIterations = iterations.upperEndpoint() - iterations.lowerEndpoint();
    int biasIterations = (int) (totalIterations * deficitFraction);
    // We pass a null config to ensure we don't interfere with the other strategy.
    if (biasCount > 0)
        new FullDataParallelAllocationStrategy(biasCount).allocateGroup(group,
                Range.closedOpen(iterations.lowerEndpoint(), iterations.lowerEndpoint() + biasIterations),
                biasSubset, null);
    if (numCores - biasCount > 0)
        new FullDataParallelAllocationStrategy(numCores - biasCount).allocateGroup(group,
                Range.closedOpen(iterations.lowerEndpoint() + biasIterations, iterations.upperEndpoint()),
                subset, null);
}
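The idiom worth extracting from this allocator: canonicalizing an integer range with canonical(DiscreteDomain.integers()) rewrites it in [lower..upper) form, so upperEndpoint() - lowerEndpoint() counts its elements, and the range can then be split at lowerEndpoint() + k. A minimal sketch of just that idiom, under our own illustrative names and numbers:

import com.google.common.collect.DiscreteDomain;
import com.google.common.collect.Range;

public class SplitRangeDemo {
    public static void main(String[] args) {
        Range<Integer> iterations = Range.closed(10, 19);

        // Canonical form over the integers is [lower..upper), so the element
        // count is a simple endpoint subtraction.
        iterations = iterations.canonical(DiscreteDomain.integers());
        int total = iterations.upperEndpoint() - iterations.lowerEndpoint(); // 10

        // Split the range at a fraction of its size, as the allocator above does.
        int firstPart = (int) (total * 0.3);
        Range<Integer> head = Range.closedOpen(iterations.lowerEndpoint(),
                iterations.lowerEndpoint() + firstPart);
        Range<Integer> tail = Range.closedOpen(iterations.lowerEndpoint() + firstPart,
                iterations.upperEndpoint());
        System.out.println(head + " + " + tail); // [10..13) + [13..20)
    }
}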
From source file:org.apache.bookkeeper.mledger.impl.ManagedCursorImpl.java
@Override
public void asyncDelete(Position pos, final AsyncCallbacks.DeleteCallback callback, Object ctx) {
    checkArgument(pos instanceof PositionImpl);

    if (STATE_UPDATER.get(this) == State.Closed) {
        callback.deleteFailed(new ManagedLedgerException("Cursor was already closed"), ctx);
        return;
    }

    PositionImpl position = (PositionImpl) pos;
    PositionImpl previousPosition = ledger.getPreviousPosition(position);
    PositionImpl newMarkDeletePosition = null;

    lock.writeLock().lock();
    try {
        if (log.isDebugEnabled()) {
            log.debug(
                    "[{}] [{}] Deleting single message at {}. Current status: {} - md-position: {} - previous-position: {}",
                    ledger.getName(), name, pos, individualDeletedMessages, markDeletePosition, previousPosition);
        }

        if (individualDeletedMessages.contains(position) || position.compareTo(markDeletePosition) <= 0) {
            if (log.isDebugEnabled()) {
                log.debug("[{}] [{}] Position was already deleted {}", ledger.getName(), name, position);
            }
            callback.deleteComplete(ctx);
            return;
        }

        if (previousPosition.compareTo(markDeletePosition) == 0 && individualDeletedMessages.isEmpty()) {
            if (log.isDebugEnabled()) {
                log.debug("[{}][{}] Immediately mark-delete to position {}", ledger.getName(), name, position);
            }
            newMarkDeletePosition = position;
        } else {
            // Add a range (prev, pos] to the set. Adding the previous entry as an open limit to the range
            // will make the RangeSet recognize the "continuity" between adjacent Positions
            individualDeletedMessages.add(Range.openClosed(previousPosition, position));
            ++messagesConsumedCounter;

            if (log.isDebugEnabled()) {
                log.debug("[{}] [{}] Individually deleted messages: {}", ledger.getName(), name,
                        individualDeletedMessages);
            }

            // If the lower bound of the range set is the current mark delete position, then we can trigger
            // a new mark delete to the upper bound of the first range segment
            Range<PositionImpl> range = individualDeletedMessages.asRanges().iterator().next();

            // Bug:7062188 - markDeletePosition can sometimes be stuck at the beginning of an empty ledger.
            // If the lowerBound is ahead of MarkDelete, verify if there are any entries in-between
            if (range.lowerEndpoint().compareTo(markDeletePosition) <= 0 || ledger
                    .getNumberOfEntries(Range.openClosed(markDeletePosition, range.lowerEndpoint())) <= 0) {
                if (log.isDebugEnabled()) {
                    log.debug("[{}] Found a position range to mark delete for cursor {}: {} ", ledger.getName(),
                            name, range);
                }
                newMarkDeletePosition = range.upperEndpoint();
            }
        }

        if (newMarkDeletePosition != null) {
            newMarkDeletePosition = setAcknowledgedPosition(newMarkDeletePosition);
        } else {
            newMarkDeletePosition = markDeletePosition;
        }
    } catch (Exception e) {
        log.warn("[{}] [{}] Error while updating individualDeletedMessages [{}]", ledger.getName(), name,
                e.getMessage(), e);
        callback.deleteFailed(new ManagedLedgerException(e), ctx);
        return;
    } finally {
        lock.writeLock().unlock();
    }

    // Apply rate limiting to mark-delete operations
    if (markDeleteLimiter != null && !markDeleteLimiter.tryAcquire()) {
        callback.deleteComplete(ctx);
        return;
    }

    try {
        internalAsyncMarkDelete(newMarkDeletePosition, new MarkDeleteCallback() {
            @Override
            public void markDeleteComplete(Object ctx) {
                callback.deleteComplete(ctx);
            }

            @Override
            public void markDeleteFailed(ManagedLedgerException exception, Object ctx) {
                callback.deleteFailed(exception, ctx);
            }
        }, ctx);
    } catch (Exception e) {
        log.warn("[{}] [{}] Error doing asyncDelete [{}]", ledger.getName(), name, e.getMessage(), e);
        if (log.isDebugEnabled()) {
            log.debug("[{}] Consumer {} cursor asyncDelete error, counters: consumed {} mdPos {} rdPos {}",
                    ledger.getName(), name, messagesConsumedCounter, markDeletePosition, readPosition);
        }
        callback.deleteFailed(new ManagedLedgerException(e), ctx);
    }
}
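The cursor above reads the lowest range of a RangeSet via asRanges().iterator().next() and compares its lowerEndpoint() against a watermark. A minimal sketch of that idiom with integers standing in for Positions (all names and values here are illustrative, not from the Pulsar/BookKeeper code):

import com.google.common.collect.Range;
import com.google.common.collect.RangeSet;
import com.google.common.collect.TreeRangeSet;

public class FirstRangeDemo {
    public static void main(String[] args) {
        RangeSet<Integer> acked = TreeRangeSet.create();
        // Adjacent openClosed ranges coalesce, mirroring how the cursor adds
        // (previousPosition, position] entries to get "continuity".
        acked.add(Range.openClosed(5, 7));
        acked.add(Range.openClosed(7, 9));   // merges into (5..9]
        acked.add(Range.openClosed(20, 22)); // stays a disjoint segment

        // asRanges() iterates in ascending order, so the first range is the lowest.
        Range<Integer> first = acked.asRanges().iterator().next();
        System.out.println(first);                  // (5..9]
        System.out.println(first.lowerEndpoint());  // 5

        int markDelete = 5;
        // If the first segment starts at or below the watermark, the watermark
        // can advance to the segment's upper endpoint, as asyncDelete() does.
        if (first.lowerEndpoint() <= markDelete) {
            markDelete = first.upperEndpoint();     // 9
        }
        System.out.println(markDelete);
    }
}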
From source file:io.github.msdk.features.ransacaligner.RansacAlignerMethod.java
private Hashtable<FeatureTableRow, FeatureTableRow> getAlignmentMap(FeatureTable featureTable) {

    // Create a table of mappings for best scores
    Hashtable<FeatureTableRow, FeatureTableRow> alignmentMapping = new Hashtable<FeatureTableRow, FeatureTableRow>();

    // Create a sorted set of matching scores
    TreeSet<RowVsRowScore> scoreSet = new TreeSet<RowVsRowScore>();

    // RANSAC algorithm
    List<AlignStructMol> list = ransacPeakLists(result, featureTable);
    PolynomialFunction function = this.getPolynomialFunction(list);

    List<FeatureTableRow> allRows = featureTable.getRows();

    for (FeatureTableRow row : allRows) {
        // Calculate limits for a row with which the row can be aligned
        Range<Double> mzRange = mzTolerance.getToleranceRange(row.getMz());

        double rt;
        try {
            rt = function.value(row.getChromatographyInfo().getRetentionTime());
        } catch (NullPointerException e) {
            rt = row.getChromatographyInfo().getRetentionTime();
        }
        if (Double.isNaN(rt) || rt == -1) {
            rt = row.getChromatographyInfo().getRetentionTime();
        }
        Range<Double> rtRange = rtToleranceAfterCorrection.getToleranceRange(rt);

        // Get all rows of the aligned feature table within the m/z and RT limits
        List<FeatureTableRow> candidateRows = result.getRowsInsideRange(rtRange, mzRange);

        for (FeatureTableRow candidateRow : candidateRows) {
            RowVsRowScore score;

            // Check charge
            if (requireSameCharge) {
                FeatureTableColumn<Integer> chargeColumn1 = featureTable.getColumn(ColumnName.CHARGE, null);
                FeatureTableColumn<Integer> chargeColumn2 = result.getColumn(ColumnName.CHARGE, null);
                Integer charge1 = row.getData(chargeColumn1);
                Integer charge2 = candidateRow.getData(chargeColumn2);
                if (!charge1.equals(charge2))
                    continue;
            }

            // Check ion annotation
            if (requireSameAnnotation) {
                FeatureTableColumn<List<IonAnnotation>> ionAnnotationColumn1 = featureTable
                        .getColumn(ColumnName.IONANNOTATION, null);
                FeatureTableColumn<List<IonAnnotation>> ionAnnotationColumn2 = result
                        .getColumn(ColumnName.IONANNOTATION, null);
                List<IonAnnotation> ionAnnotations1 = row.getData(ionAnnotationColumn1);
                List<IonAnnotation> ionAnnotations2 = candidateRow.getData(ionAnnotationColumn2);

                // Check that all ion annotations in the first row are in the candidate row
                boolean equalIons = false;
                if (ionAnnotations1 != null && ionAnnotations2 != null) {
                    for (IonAnnotation ionAnnotation : ionAnnotations1) {
                        for (IonAnnotation targetIonAnnotation : ionAnnotations2) {
                            if (targetIonAnnotation.compareTo(ionAnnotation) == 0)
                                equalIons = true;
                        }
                    }
                }
                if (!equalIons)
                    continue;
            }

            try {
                double mzLength = mzRange.upperEndpoint() - mzRange.lowerEndpoint();
                double rtLength = rtRange.upperEndpoint() - rtRange.lowerEndpoint();
                score = new RowVsRowScore(row, candidateRow, mzLength, rtLength, new Float(rt));
                scoreSet.add(score);
            } catch (Exception e) {
                return null;
            }
        }
    }

    // Iterate scores by descending order
    Iterator<RowVsRowScore> scoreIterator = scoreSet.iterator();
    while (scoreIterator.hasNext()) {
        RowVsRowScore score = scoreIterator.next();

        // Check if the row is already mapped
        if (alignmentMapping.containsKey(score.getFeatureTableRow())) {
            continue;
        }

        // Check if the aligned row is already filled
        if (alignmentMapping.containsValue(score.getAlignedRow())) {
            continue;
        }

        alignmentMapping.put(score.getFeatureTableRow(), score.getAlignedRow());
    }

    return alignmentMapping;
}
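Both this aligner and the join aligner below compute a window width by subtracting a tolerance range's endpoints. A sketch of that pattern, with a hypothetical helper standing in for mzTolerance.getToleranceRange() (the helper and its values are our assumptions, not MSDK's API):

import com.google.common.collect.Range;

public class ToleranceRangeDemo {
    // Hypothetical stand-in for a tolerance object: a symmetric window of
    // +/- tol around a center value.
    static Range<Double> toleranceRange(double center, double tol) {
        return Range.closed(center - tol, center + tol);
    }

    public static void main(String[] args) {
        Range<Double> mzRange = toleranceRange(500.25, 0.01);

        // The window width is just the endpoint difference; for a symmetric
        // window, half of it recovers the tolerance itself.
        double mzLength = mzRange.upperEndpoint() - mzRange.lowerEndpoint();
        System.out.println(mzLength);        // ~0.02
        System.out.println(mzLength / 2.0);  // ~0.01, the half-width the join aligner scores with

        // Membership checks against the same window:
        System.out.println(mzRange.contains(500.2549)); // true
        System.out.println(mzRange.contains(500.2601)); // false
    }
}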
From source file:io.github.msdk.features.joinaligner.JoinAlignerMethod.java
/** {@inheritDoc} */
@Override
public FeatureTable execute() throws MSDKException {

    // Calculate the number of features to process. Each feature will be
    // processed twice: first for score calculation and then for actual alignment.
    for (FeatureTable featureTable : featureTables) {
        totalFeatures += featureTable.getRows().size() * 2;
    }

    // Iterate through all feature tables
    Boolean firstFeatureTable = true;
    for (FeatureTable featureTable : featureTables) {

        // Add columns from the original feature table to the result table
        for (FeatureTableColumn<?> column : featureTable.getColumns()) {
            if (firstFeatureTable)
                result.addColumn(column);
            else if (column.getSample() != null)
                result.addColumn(column);
        }
        firstFeatureTable = false;

        // Create a sorted array of matching scores between two rows
        List<RowVsRowScore> scoreSet = new ArrayList<RowVsRowScore>();

        // Calculate scores for all possible alignments of this row
        for (FeatureTableRow row : featureTable.getRows()) {

            final Double mz = row.getMz();
            if (mz == null)
                continue;

            // Calculate the m/z range limit for the current row
            Range<Double> mzRange = mzTolerance.getToleranceRange(mz);

            // Continue if no chromatography info is available
            ChromatographyInfo chromatographyInfo = row.getChromatographyInfo();
            if (chromatographyInfo == null)
                continue;

            // Calculate the RT range limit for the current row
            Range<Double> rtRange = rtTolerance.getToleranceRange(chromatographyInfo.getRetentionTime());

            // Get all rows of the aligned feature table within the m/z and RT limits
            List<FeatureTableRow> candidateRows = result.getRowsInsideRange(rtRange, mzRange);

            // Calculate scores and store them
            for (FeatureTableRow candidateRow : candidateRows) {

                // Check charge
                if (requireSameCharge) {
                    FeatureTableColumn<Integer> chargeColumn1 = featureTable.getColumn(ColumnName.CHARGE, null);
                    FeatureTableColumn<Integer> chargeColumn2 = result.getColumn(ColumnName.CHARGE, null);
                    Integer charge1 = row.getData(chargeColumn1);
                    Integer charge2 = candidateRow.getData(chargeColumn2);
                    if (!charge1.equals(charge2))
                        continue;
                }

                // Check ion annotation
                if (requireSameAnnotation) {
                    FeatureTableColumn<List<IonAnnotation>> ionAnnotationColumn1 = featureTable
                            .getColumn(ColumnName.IONANNOTATION, null);
                    FeatureTableColumn<List<IonAnnotation>> ionAnnotationColumn2 = result
                            .getColumn(ColumnName.IONANNOTATION, null);
                    List<IonAnnotation> ionAnnotations1 = row.getData(ionAnnotationColumn1);
                    List<IonAnnotation> ionAnnotations2 = candidateRow.getData(ionAnnotationColumn2);

                    // Check that all ion annotations in the first row are in the candidate row
                    boolean equalIons = false;
                    if (ionAnnotations1 != null && ionAnnotations2 != null) {
                        for (IonAnnotation ionAnnotation : ionAnnotations1) {
                            for (IonAnnotation targetIonAnnotation : ionAnnotations2) {
                                if (targetIonAnnotation.compareTo(ionAnnotation) == 0)
                                    equalIons = true;
                            }
                        }
                    }
                    if (!equalIons)
                        continue;
                }

                // Calculate score
                double mzLength = mzRange.upperEndpoint() - mzRange.lowerEndpoint();
                double rtLength = rtRange.upperEndpoint() - rtRange.lowerEndpoint();
                RowVsRowScore score = new RowVsRowScore(row, candidateRow, mzLength / 2.0, mzWeight,
                        rtLength / 2.0, rtWeight);

                // Add the score to the array
                scoreSet.add(score);
            }

            processedFeatures++;
            if (canceled)
                return null;
        }

        // Create a table of mappings for best scores
        Hashtable<FeatureTableRow, FeatureTableRow> alignmentMapping = new Hashtable<FeatureTableRow, FeatureTableRow>();

        // Iterate scores by descending order
        Iterator<RowVsRowScore> scoreIterator = scoreSet.iterator();
        while (scoreIterator.hasNext()) {
            RowVsRowScore score = scoreIterator.next();

            // Check if the row is already mapped
            if (alignmentMapping.containsKey(score.getFeatureTableRow()))
                continue;

            // Check if the aligned row is already filled
            if (alignmentMapping.containsValue(score.getAlignedRow()))
                continue;

            alignmentMapping.put(score.getFeatureTableRow(), score.getAlignedRow());
        }

        // Align all rows using the mapping
        for (FeatureTableRow sourceRow : featureTable.getRows()) {
            FeatureTableRow targetRow = alignmentMapping.get(sourceRow);

            // If we have no mapping for this row, add a new one
            if (targetRow == null) {
                targetRow = MSDKObjectBuilder.getFeatureTableRow(result, newRowID);
                result.addRow(targetRow);
                FeatureTableColumn<Integer> column = result.getColumn(ColumnName.ID, null);
                targetRow.setData(column, newRowID);
                newRowID++;
            }

            // Add all features from the original row to the aligned row
            for (Sample sample : sourceRow.getFeatureTable().getSamples()) {
                FeatureTableUtil.copyFeatureValues(sourceRow, targetRow, sample);
            }

            // Combine common values from the original row with the aligned row
            FeatureTableUtil.copyCommonValues(sourceRow, targetRow, true);

            processedFeatures++;
        }

        // Re-calculate the row averages
        FeatureTableUtil.recalculateAverages(result);

        if (canceled)
            return null;
    }

    // Return the new feature table
    return result;
}
From source file:net.sourceforge.ganttproject.task.algorithm.SchedulerImpl.java
private void schedule(Node node) {
    Logger logger = GPLogger.getLogger(this);
    GPLogger.debug(logger, "Scheduling node %s", node);
    Range<Date> startRange = Range.all();
    Range<Date> endRange = Range.all();
    Range<Date> weakStartRange = Range.all();
    Range<Date> weakEndRange = Range.all();

    List<Date> subtaskRanges = Lists.newArrayList();
    List<DependencyEdge> incoming = node.getIncoming();
    GPLogger.debug(logger, ".. #incoming edges=%d", incoming.size());
    for (DependencyEdge edge : incoming) {
        if (!edge.refresh()) {
            continue;
        }
        if (edge instanceof ImplicitSubSuperTaskDependency) {
            subtaskRanges.add(edge.getStartRange().upperEndpoint());
            subtaskRanges.add(edge.getEndRange().lowerEndpoint());
        } else {
            if (edge.isWeak()) {
                weakStartRange = weakStartRange.intersection(edge.getStartRange());
                weakEndRange = weakEndRange.intersection(edge.getEndRange());
            } else {
                startRange = startRange.intersection(edge.getStartRange());
                endRange = endRange.intersection(edge.getEndRange());
            }
        }
        if (startRange.isEmpty() || endRange.isEmpty()) {
            GPLogger.logToLogger("both start and end ranges were calculated as empty for task="
                    + node.getTask() + ". Skipping it");
        }
    }
    GPLogger.debug(logger, "..Ranges: start=%s end=%s weakStart=%s weakEnd=%s", startRange, endRange,
            weakStartRange, weakEndRange);

    Range<Date> subtasksSpan = subtaskRanges.isEmpty()
            ? Range.closed(node.getTask().getStart().getTime(), node.getTask().getEnd().getTime())
            : Range.encloseAll(subtaskRanges);
    Range<Date> subtreeStartUpwards = subtasksSpan
            .span(Range.downTo(node.getTask().getStart().getTime(), BoundType.CLOSED));
    Range<Date> subtreeEndDownwards = subtasksSpan
            .span(Range.upTo(node.getTask().getEnd().getTime(), BoundType.CLOSED));
    GPLogger.debug(logger, "..Subtasks span=%s", subtasksSpan);

    if (!startRange.equals(Range.all())) {
        startRange = startRange.intersection(weakStartRange);
    } else if (!weakStartRange.equals(Range.all())) {
        startRange = weakStartRange.intersection(subtreeStartUpwards);
    }
    if (!endRange.equals(Range.all())) {
        endRange = endRange.intersection(weakEndRange);
    } else if (!weakEndRange.equals(Range.all())) {
        endRange = weakEndRange.intersection(subtreeEndDownwards);
    }

    if (node.getTask().getThirdDateConstraint() == TaskImpl.EARLIESTBEGIN
            && node.getTask().getThird() != null) {
        startRange = startRange
                .intersection(Range.downTo(node.getTask().getThird().getTime(), BoundType.CLOSED));
        GPLogger.debug(logger, ".. applying earliest start=%s. Now start range=%s", node.getTask().getThird(),
                startRange);
    }
    if (!subtaskRanges.isEmpty()) {
        startRange = startRange.intersection(subtasksSpan);
        endRange = endRange.intersection(subtasksSpan);
    }
    GPLogger.debug(logger, ".. finally, start range=%s", startRange);
    if (startRange.hasLowerBound()) {
        modifyTaskStart(node.getTask(), startRange.lowerEndpoint());
    }
    if (endRange.hasUpperBound()) {
        GPCalendarCalc cal = node.getTask().getManager().getCalendar();
        Date endDate = endRange.upperEndpoint();
        TimeUnit timeUnit = node.getTask().getDuration().getTimeUnit();
        if (DayMask.WORKING == (cal.getDayMask(endDate) & DayMask.WORKING)) {
            // If the calculated end date falls on the first day after holidays (say, on Monday),
            // we want to move it back so that it falls on the start of those holidays.
            // If we don't do this, it will be done automatically the next time task activities are
            // recalculated, and thus the task end date will keep changing.
            Date closestWorkingEndDate = cal.findClosest(endDate, timeUnit,
                    GPCalendarCalc.MoveDirection.BACKWARD, GPCalendar.DayType.WORKING);
            Date closestNonWorkingEndDate = cal.findClosest(endDate, timeUnit,
                    GPCalendarCalc.MoveDirection.BACKWARD, GPCalendar.DayType.NON_WORKING,
                    closestWorkingEndDate);
            // If there is a non-working date between the current task end and the closest working
            // date, then we're really just after holidays
            if (closestNonWorkingEndDate != null && closestWorkingEndDate.before(closestNonWorkingEndDate)) {
                // Adjust the closest working date rightwards so that it points at the very
                // beginning of the holidays interval
                Date nonWorkingPeriodStart = timeUnit.adjustRight(closestWorkingEndDate);
                if (nonWorkingPeriodStart.after(node.getTask().getStart().getTime())) {
                    endDate = nonWorkingPeriodStart;
                }
            }
        }
        modifyTaskEnd(node.getTask(), endDate);
    }
}
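The scheduler narrows Range.all() by repeated intersection() and only reads lowerEndpoint()/upperEndpoint() after checking hasLowerBound()/hasUpperBound(), since those getters throw IllegalStateException on unbounded ranges. A compact sketch of that pattern with integers in place of dates (constraint values are illustrative):

import com.google.common.collect.BoundType;
import com.google.common.collect.Range;

public class ConstraintIntersectionDemo {
    public static void main(String[] args) {
        Range<Integer> startRange = Range.all();

        // Each constraint narrows the feasible window, as each dependency
        // edge does in schedule().
        startRange = startRange.intersection(Range.downTo(10, BoundType.CLOSED)); // [10..+inf)
        startRange = startRange.intersection(Range.atMost(30));                   // [10..30]
        startRange = startRange.intersection(Range.closed(5, 25));                // [10..25]

        if (startRange.isEmpty()) {
            System.out.println("constraints are contradictory");
        } else if (startRange.hasLowerBound()) {
            // Safe: lowerEndpoint() would throw IllegalStateException on a
            // range with no lower bound, e.g. Range.all().
            System.out.println("earliest feasible start: " + startRange.lowerEndpoint()); // 10
        }
    }
}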
From source file:com.google.cloud.genomics.cba.GGAnnotateVariants.java
@org.apache.beam.sdk.transforms.DoFn.ProcessElement
public void processElement(DoFn<StreamVariantsRequest, KV<String, String>>.ProcessContext c) throws Exception {

    Genomics genomics = GenomicsFactory.builder().build().fromOfflineAuth(auth);

    StreamVariantsRequest request = StreamVariantsRequest.newBuilder(c.element()).addAllCallSetIds(callSetIds)
            .build();

    if (canonicalizeRefName(request.getReferenceName()).equals("M") && supportChrM == false) {
        LOG.info("There is no information about Chr M in the provided AnnotationSet!");
        return;
    }

    Iterator<StreamVariantsResponse> streamVariantIter = VariantStreamIterator.enforceShardBoundary(auth,
            request, ShardBoundary.Requirement.STRICT, VARIANT_FIELDS);

    if (!streamVariantIter.hasNext()) {
        LOG.info("region has no variants, skipping");
        return;
    }

    Stopwatch stopwatch = Stopwatch.createStarted();
    int varCount = 0;

    ListMultimap<Range<Long>, Annotation> variantAnnotationSetList = null;
    if (this.variantAnnotationSetIds != null)
        variantAnnotationSetList = retrieveVariantAnnotations(genomics, request);

    IntervalTree<Annotation> transcripts = null;
    if (this.transcriptSetIds != null)
        transcripts = retrieveTranscripts(genomics, request);

    while (streamVariantIter.hasNext()) {
        Iterable<Variant> varIter;
        if (onlySNP)
            varIter = FluentIterable.from(streamVariantIter.next().getVariantsList())
                    .filter(VariantUtils.IS_SNP);
        else
            varIter = FluentIterable.from(streamVariantIter.next().getVariantsList());

        for (Variant variant : varIter) {
            Range<Long> pos = Range.closedOpen(variant.getStart(), variant.getEnd());

            // This variable helps to keep track of alignment
            String VCFOutput = "";

            // Keep track of empty VCF records
            boolean EmptyVCF = false;

            // Variant Annotation Section
            if (variantAnnotationSetList != null) {

                // Sort the list of matched annotations
                SortedSet<String> VariantAnnotationKeys = new TreeSet<String>(VariantColInfo.keySet());

                // Retrieve a list of matched variant annotations
                List<Annotation> listMatchedAnnotations = variantAnnotationSetList.get(pos);

                // Visit overlapped annotations in order, and the matches in order
                // (first convert to VCF format, then add it to VCFOutput)
                int index = 0;
                for (String key : VariantAnnotationKeys) {
                    // The following variable helps to put a semicolon between multiple matches
                    // from the same annotationSet, e.g. allele_freq1;allele_freq2;...;allele_freqn
                    boolean SemiColon = false;
                    for (Annotation match : listMatchedAnnotations) {
                        if (match.getAnnotationSetId().compareTo(key) == 0) {
                            // if (match.getVariant().getAlternateBases() != null
                            //         && variant.getAlternateBasesList() != null) {
                            // Check whether the variant's alternate bases are the same as the
                            // matched annotation's alternate bases
                            if (compareAlternateBases(match.getVariant().getAlternateBases(),
                                    variant.getAlternateBasesList(), variant.getReferenceBases())) {
                                EmptyVCF = true;
                                if (DEBUG)
                                    LOG.info("MATCHED: variant: (" + variant.getStart() + ", Annotation: "
                                            + match.getStart() + ") ");
                                if (!SemiColon) {
                                    VCFOutput += createVCFFormat(variant, match);
                                    SemiColon = true; // Activate it for the next matched element
                                    // TESTING
                                    VCFOutput += "ALT:" + match.getVariant().getAlternateBases() + "\t";
                                } else {
                                    VCFOutput += ";" + createVCFFormat(variant, match);
                                    // TESTING
                                    VCFOutput += "ALT:" + match.getVariant().getAlternateBases() + "\t";
                                }
                            }
                        }
                    }
                    index++;

                    /*
                     * The formatTabs function helps to keep track of alignment in the VCF format
                     * (e.g., if there is no match for variant X in annotation set Y, then add
                     * spaces equal to the number of annotation set Y's columns in the VCF file).
                     */
                    if (VCFOutput.isEmpty()
                            && (VariantAnnotationKeys.size() > index || TranscriptColInfo.size() > 0)) {
                        VCFOutput += formatTabs(VariantColInfo.get(key));
                    }
                } // end of keys
                if (!EmptyVCF)
                    VCFOutput = "";
            } // End of Variant Annotation

            // Transcript Annotation Section
            if (transcripts != null) {
                // Find all the overlapped matches in the interval tree
                Iterator<Node<Annotation>> transcriptIter = transcripts.overlappers(
                        pos.lowerEndpoint().intValue(), pos.upperEndpoint().intValue() - 1); // Inclusive.

                Iterator<Node<Annotation>> StartPoint = transcriptIter;
                if (transcriptIter != null) {
                    // Sort the list of matched annotations
                    SortedSet<String> transcriptKeys = new TreeSet<String>(TranscriptColInfo.keySet());
                    int index = 0;
                    // Check annotations in order, and in the case of a match convert the matches
                    // to VCF format
                    for (String key : transcriptKeys) {
                        transcriptIter = StartPoint;
                        boolean SemiColon = false;
                        while (transcriptIter.hasNext()) {
                            Annotation transcript = transcriptIter.next().getValue();
                            if (transcript.getAnnotationSetId().compareTo(key) == 0) {
                                if (!SemiColon) {
                                    VCFOutput += createVCFFormat(variant, transcript);
                                    SemiColon = true;
                                } else
                                    VCFOutput += ";" + createVCFFormat(variant, transcript);
                            }
                        }
                        index++;
                        if (VCFOutput.isEmpty() && transcriptKeys.size() > index) {
                            VCFOutput += formatTabs(TranscriptColInfo.get(key));
                        }
                    }
                }
            } // End of Transcripts

            String varintALTs = "";
            for (int index = 0; index < variant.getAlternateBasesCount(); index++) {
                if (index > 0)
                    varintALTs += ",";
                varintALTs += variant.getAlternateBases(index);
            }

            // The following section helps to add genotypes
            /*
             * String VariantGenotype = "";
             * List<VariantCall> Genotypes = variant.getCallsList();
             *
             * for (String CId : callSetIds) {
             *     for (VariantCall VC : Genotypes) {
             *         if (VC.getCallSetId().equals(CId)) {
             *             List<Integer> GentotypeList = VC.getGenotypeList();
             *             for (int index = 0; index < GentotypeList.size(); index++) {
             *                 int Genotype = GentotypeList.get(index);
             *                 if (index > 0)
             *                     VariantGenotype += "/";
             *                 VariantGenotype += Genotype;
             *             }
             *         }
             *     }
             *     VariantGenotype += "\t";
             * }
             */

            // Map<String, ListValue> VariantInfoMap = variant.getInfo();
            /*
             * String VariantInfo = "";
             * List<VariantCall> VariantCall = variant.getCallsList();
             * for (Iterator<VariantCall> iter = VariantCall.iterator(); iter.hasNext();) {
             *     VariantCall element = iter.next();
             *     Map<String, ListValue> VariantCallInfo = element.getInfo();
             *     for (Map.Entry<String, ListValue> entry : VariantCallInfo.entrySet()) {
             *         VariantInfo += entry.getKey() + ":"
             *                 + entry.getValue().getValuesList().get(0).getStringValue() + ";";
             *     }
             * }
             *
             * for (Map.Entry<String, ListValue> entry : VariantInfoMap.entrySet()) {
             *     VariantInfo += entry.getKey() + ":" + entry.getValue() + ";";
             * }
             */

            /*
             * Emit the information in the form of <Key, Value>. Print out the variant with or
             * without any matched annotations. Key: (ChromId, Start, End); Value: the variant's
             * <referenceName start end referenceBases alternateBases quality> plus the content of
             * VCFOutput OR the annotation's fields.
             */
            if (this.BigQuery) {
                if (!VCFOutput.isEmpty()) {
                    c.output(KV.of(
                            variant.getReferenceName() + ";" + Long.toString(variant.getStart()) + ";"
                                    + Long.toString(variant.getEnd()),
                            // Value
                            VCFOutput));
                }
            } else {
                if (!VCFOutput.isEmpty()) {
                    c.output(KV.of(
                            variant.getReferenceName() + ";" + Long.toString(variant.getStart()) + ";"
                                    + Long.toString(variant.getEnd()),
                            // Value
                            variant.getReferenceName()
                                    // <-- increment by 1 => convert to 1-based -->
                                    + "\t" + (variant.getStart() + 1) + "\t" + variant.getEnd() + "\t"
                                    + variant.getReferenceBases() + "\t" + varintALTs
                                    // + "\t" + VariantInfo
                                    // + "\t" + variant.getQuality()
                                    // + "\t" + VariantGenotype
                                    + "\t" + VCFOutput));
                }
            }

            varCount++;
            if (varCount % 1e3 == 0) {
                LOG.info(String.format("read %d variants (%.2f / s)", varCount,
                        (double) varCount / stopwatch.elapsed(TimeUnit.SECONDS)));
            }
        }
    }
    LOG.info("finished reading " + varCount + " variants in " + stopwatch);
}
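The annotator stores each variant's half-open coordinates as Range.closedOpen(start, end), then converts them to the inclusive endpoints an interval-tree query expects via lowerEndpoint() and upperEndpoint() - 1. A minimal sketch of just that conversion (coordinates are illustrative):

import com.google.common.collect.Range;

public class HalfOpenToInclusiveDemo {
    public static void main(String[] args) {
        // Half-open genomic coordinates, as the annotator builds them:
        // bases 1000..1004 inclusive.
        Range<Long> pos = Range.closedOpen(1000L, 1005L);

        // An inclusive-interval API wants [first, last], so the upper endpoint
        // is shifted back by one.
        int first = pos.lowerEndpoint().intValue();
        int last = pos.upperEndpoint().intValue() - 1;
        System.out.println(first + ".." + last); // 1000..1004

        // The same range also serves as a multimap key for exact-span lookups,
        // since Range implements equals()/hashCode() over endpoints and bound types.
        System.out.println(pos.equals(Range.closedOpen(1000L, 1005L))); // true
    }
}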