Example usage for com.google.common.collect.Lists.reverse

Introduction

This page collects example usages of com.google.common.collect.Lists.reverse from open-source Java projects.

Prototype

@CheckReturnValue
public static <T> List<T> reverse(List<T> list) 

Document

Returns a reversed view of the specified list.
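
Note that reverse returns a view backed by the original list, not a copy: changes to the backing list are visible through the view, and writes through the view reach the backing list. A minimal, self-contained sketch of this behavior (assuming only that Guava is on the classpath; the class name ListsReverseDemo is ours):

import com.google.common.collect.Lists;
import java.util.List;

public class ListsReverseDemo {
    public static void main(String[] args) {
        List<String> original = Lists.newArrayList("a", "b", "c");
        List<String> reversed = Lists.reverse(original);

        System.out.println(reversed);  // [c, b, a]

        // The view reflects changes to the backing list...
        original.add("d");
        System.out.println(reversed);  // [d, c, b, a]

        // ...and writes through the view modify the backing list.
        reversed.set(0, "z");
        System.out.println(original);  // [a, b, c, z]
    }
}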

Usage

From source file:org.eclipse.elk.alg.layered.intermediate.wrapping.BreakingPointProcessor.java
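
Here Lists.reverse supplies reversed views of the layer and node lists, so the graph can be walked against the sweep direction while shortening edges that were wrapped back.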

/**
 * Improvement step, reducing the length of edges that were wrapped back.
 */
private void improveUnneccesarilyLongEdges(final LGraph graph, final boolean forwards) {

    final Predicate<LNode> check = forwards ? BPInfo::isEnd : BPInfo::isStart;

    boolean didsome = false;
    do {
        didsome = false;

        List<Layer> layers = forwards ? Lists.reverse(graph.getLayers()) : graph.getLayers();
        for (Layer layer : layers) {

            List<LNode> nodes = Lists.newArrayList(layer.getNodes());
            if (!forwards) {
                // reverse() returns a view; the result must be assigned back,
                // otherwise the call has no effect
                nodes = Lists.reverse(nodes);
            }

            for (LNode n : nodes) {
                if (check.test(n)) {
                    LNode bpNode = n;
                    BPInfo bpInfo = n.getProperty(InternalProperties.BREAKING_POINT_INFO);
                    LNode dummy = forwards ? bpInfo.endInLayerDummy : bpInfo.startInLayerDummy;

                    didsome = dropDummies(bpNode, dummy, forwards, false);
                }

            }
        }
    } while (didsome);

}

From source file:org.eclipse.xtend.ide.quickfix.XtendQuickfixProvider.java
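
The quickfix walks the function's annotations through a reversed view, so @Override annotations are deleted back to front and the offsets of nodes not yet visited stay valid.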

@Fix(IssueCodes.MISSING_OVERRIDE)
public void fixMissingOverride(final Issue issue, IssueResolutionAcceptor acceptor) {
    acceptor.accept(issue, "Change 'def' to 'override'", "Marks this function as 'override'", "fix_indent.gif",
            new ISemanticModification() {
                @Override
                public void apply(EObject element, IModificationContext context) throws Exception {
                    replaceKeyword(grammarAccess.getMethodModifierAccess().findKeywords("def").get(0),
                            "override", element, context.getXtextDocument());
                    if (element instanceof XtendFunction) {
                        XtendFunction function = (XtendFunction) element;
                        for (XAnnotation anno : Lists.reverse(function.getAnnotations())) {
                            if (anno != null && anno.getAnnotationType() != null && Override.class.getName()
                                    .equals(anno.getAnnotationType().getIdentifier())) {
                                ICompositeNode node = NodeModelUtils.findActualNodeFor(anno);
                                context.getXtextDocument().replace(node.getOffset(), node.getLength(), "");
                            }
                        }
                    }
                }
            });
}

From source file:org.apache.beam.fn.harness.control.ProcessBundleHandler.java
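
The runner delivers primitive transforms in topological order; reversing the list wires up consumers before their producers. The start functions are then already in the reverse topological order they need, while the finish functions are reversed again to run in topological order.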

public BeamFnApi.InstructionResponse.Builder processBundle(BeamFnApi.InstructionRequest request)
        throws Exception {
    BeamFnApi.InstructionResponse.Builder response = BeamFnApi.InstructionResponse.newBuilder()
            .setProcessBundle(BeamFnApi.ProcessBundleResponse.getDefaultInstance());

    String bundleId = request.getProcessBundle().getProcessBundleDescriptorReference();
    BeamFnApi.ProcessBundleDescriptor bundleDescriptor = (BeamFnApi.ProcessBundleDescriptor) fnApiRegistry
            .apply(bundleId);

    Multimap<BeamFnApi.Target, ThrowingConsumer<WindowedValue<Object>>> outputTargetToConsumer = HashMultimap
            .create();
    List<ThrowingRunnable> startFunctions = new ArrayList<>();
    List<ThrowingRunnable> finishFunctions = new ArrayList<>();
    // We process the primitive transform list in reverse order
    // because we assume that the runner provides it in topological order.
    // This means that all the start/finish functions will be in reverse topological order.
    for (BeamFnApi.PrimitiveTransform primitiveTransform : Lists
            .reverse(bundleDescriptor.getPrimitiveTransformList())) {
        createConsumersForPrimitiveTransform(primitiveTransform, request::getInstructionId,
                outputTargetToConsumer::get, outputTargetToConsumer::put, startFunctions::add,
                finishFunctions::add);
    }

    // Already in reverse order so we don't need to do anything.
    for (ThrowingRunnable startFunction : startFunctions) {
        LOG.debug("Starting function {}", startFunction);
        startFunction.run();
    }

    // Need to reverse this since we want to call finish in topological order.
    for (ThrowingRunnable finishFunction : Lists.reverse(finishFunctions)) {
        LOG.debug("Finishing function {}", finishFunction);
        finishFunction.run();
    }

    return response;
}

From source file:com.google.devtools.cyclefinder.ReferenceGraph.java
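
The cycle is reconstructed by following backlinks from the root, which yields the edges in reverse order; Lists.reverse restores the forward order, and the result is copied into a fresh ArrayList.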

/**
 * Runs a version of Dijkstra's algorithm to find a tight cycle in the given
 * strongly connected component.
 */
private List<Edge> runDijkstras(SetMultimap<String, Edge> graph, String root) {
    Map<String, Edge> backlinks = Maps.newHashMap();
    Set<String> visited = Sets.newHashSet();
    List<String> toVisit = Lists.newArrayList(root);
    outer: while (true) {
        List<String> visitNext = Lists.newArrayList();
        for (String source : toVisit) {
            visited.add(source);
            for (Edge e : graph.get(source)) {
                String target = e.getTarget().getKey();
                if (!visited.contains(target)) {
                    visitNext.add(target);
                    backlinks.put(target, e);
                } else if (target.equals(root)) {
                    backlinks.put(root, e);
                    break outer;
                }
            }
        }
        toVisit = visitNext;
    }
    List<Edge> cycle = Lists.newArrayList();
    String curNode = root;
    while (!curNode.equals(root) || cycle.size() == 0) {
        Edge nextEdge = backlinks.get(curNode);
        cycle.add(nextEdge);
        curNode = nextEdge.getOrigin().getKey();
    }
    return Lists.newArrayList(Lists.reverse(cycle));
}

From source file:specminers.evaluation.MopExtractor.java
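
When a regex alternative has no neighboring elements to mix in, its token sequence is inverted with Lists.reverse while building the complement of a parenthesized group.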

private String getRegexComplementWithParentherizedRegions(String originalRegex) {
    String splitter = "\\([\\w\\s\\|\\+\\*]+\\)[\\*\\+]*";

    Pattern p = Pattern.compile(splitter);
    Matcher m = p.matcher(originalRegex);

    List<String> groupedElements = new LinkedList<>();
    List<Pair<Integer, Integer>> groupingPositions = new LinkedList<>();

    while (m.find()) {
        Pair<Integer, Integer> startEndPositions = Pair.of(m.start(), m.end());
        String matching = originalRegex.substring(startEndPositions.getLeft(), startEndPositions.getRight());
        groupedElements.add(matching);
        groupingPositions.add(startEndPositions);
    }

    List<String> complementedElements = new LinkedList<>();
    int currentGroup = 0;

    for (int i = 0; i < originalRegex.length(); i++) {
        if (currentGroup < groupingPositions.size() && i == groupingPositions.get(currentGroup).getLeft()) {
            complementedElements.add(groupedElements.get(currentGroup));
            i = groupingPositions.get(currentGroup).getRight();
            currentGroup++;
        } else {
            int endOfCurrentToken = originalRegex.indexOf(" ", i);
            if (endOfCurrentToken == -1) {
                endOfCurrentToken = originalRegex.length();
            }

            complementedElements.add(originalRegex.substring(i, endOfCurrentToken));
            i = endOfCurrentToken;
        }
    }

    Map<String, String> groupInversions = new HashMap<>();

    for (String group : groupedElements) {

        int groupIndex = complementedElements.indexOf(group);
        boolean thereAreElementsBeforeGroup = groupIndex > 0;
        boolean thereAreElementsAfterGroup = groupIndex < complementedElements.size() - 1;

        String mixElement;

        if (thereAreElementsBeforeGroup) {
            mixElement = complementedElements.get(groupIndex - 1).replace("*", "");
        } else {
            if (thereAreElementsAfterGroup) {
                mixElement = complementedElements.get(groupIndex + 1).replace("*", "");
            } else {
                mixElement = "";
            }
        }

        List<String> options = Arrays.asList(group.split("\\|"));
        List<String> invertedOptions = new LinkedList<>();

        for (String option : options) {
            List<String> optionComponents = Arrays.asList(option.split(" ")).stream()
                    .filter(s -> s.trim().length() > 0).collect(Collectors.toList());
            List<String> invertedOptionComponents = new LinkedList<>();

            if (thereAreElementsBeforeGroup) {
                String suffix = "";
                if (optionComponents.get(0).contains(")")) {
                    suffix = optionComponents.get(0).substring(optionComponents.get(0).indexOf(")"));
                }
                invertedOptionComponents
                        .add(optionComponents.get(0).replace(")", "").replace("*", "").replace("+", ""));
                invertedOptionComponents.add(mixElement + suffix);
            } else {
                if (thereAreElementsAfterGroup) {
                    String preamble = optionComponents.get(0).contains("(") ? "(" : "";

                    invertedOptionComponents.add(preamble + mixElement);
                    invertedOptionComponents.add(optionComponents.get(0).replace("(", ""));
                }
            }

            if (thereAreElementsAfterGroup || thereAreElementsBeforeGroup) {
                invertedOptionComponents.addAll(optionComponents.subList(1, optionComponents.size()));
                String invertedOption = invertedOptionComponents.stream().collect(Collectors.joining(" "));

                invertedOptions.add(invertedOption);
            } else {
                List<String> inversionSample = optionComponents.stream()
                        .map(c -> c.replace("+", "").replace("*", "")).collect(Collectors.toList());
                inversionSample = Lists.reverse(inversionSample);
                inversionSample.set(0, "(" + inversionSample.get(0).replace(")", ""));
                inversionSample.set(inversionSample.size() - 1,
                        inversionSample.get(inversionSample.size() - 1).replace("(", "") + ")");

                invertedOptionComponents.addAll(inversionSample);

                String invertedOption = invertedOptionComponents.stream().collect(Collectors.joining(" "));

                invertedOptions.add(invertedOption);
            }
        }

        String invertedGroup = invertedOptions.stream().collect(Collectors.joining("|"));

        groupInversions.put(group, invertedGroup);
    }

    List<String> complementedInvertedElements = new LinkedList<>();
    currentGroup = 0;

    for (String complement : complementedElements) {
        if (groupedElements.contains(complement)) {
            complementedInvertedElements.add(groupInversions.get(complement));
        } else {
            complementedInvertedElements.add(complement);
        }
    }

    String result = complementedInvertedElements.stream().collect(Collectors.joining(" "));

    return result;
}

From source file:org.sonar.javascript.cfg.ControlFlowGraphBuilder.java
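
The control flow graph is assembled backwards, so a reversed view lets the expressions be processed from last to first.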

private void buildExpressions(List<? extends Tree> expressions) {
    for (Tree expression : Lists.reverse(expressions)) {
        buildExpression(expression);
    }
}

From source file:org.eclipse.tracecompass.tmf.core.trace.TmfTraceUtils.java
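
Events are read forwards in cache-sized chunks, then each chunk is scanned through a reversed view so the most recent event matching the predicate is found first.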

/**
 * Retrieve from a trace the previous event, from a given rank, matching the
 * given predicate.
 *
 * @param trace
 *            The trace
 * @param startRank
 *            The rank of the event at which to start searching backwards.
 * @param predicate
 *            The predicate to test events against
 * @param monitor Optional progress monitor that can be used to cancel the
 *            operation
 * @return The first event found matching the predicate, or null if the
 *         beginning of the trace was reached and no event was found
 * @since 2.1
 */
public static @Nullable ITmfEvent getPreviousEventMatching(ITmfTrace trace, long startRank,
        Predicate<ITmfEvent> predicate, @Nullable IProgressMonitor monitor) {
    if (monitor != null && monitor.isCanceled()) {
        return null;
    }
    /*
     * We are going to do a series of queries matching the trace's cache
     * size in length (which should minimize on-disk seeks), then iterate on
     * the found events in reverse order until we find a match.
     */
    int step = trace.getCacheSize();

    /*
     * If we are close to the beginning of the trace, make sure we only look
     * for the events before the startRank.
     */
    if (startRank < step) {
        step = (int) startRank;
    }

    long currentRank = startRank;
    try {
        while (currentRank > 0) {
            currentRank = Math.max(currentRank - step, 0);

            List<ITmfEvent> list = new ArrayList<>(step);
            ArrayFillingRequest req = new ArrayFillingRequest(currentRank, step, list);
            trace.sendRequest(req);

            /* Check periodically if the job was cancelled */
            req.waitForStart();
            while (req.isRunning()) {
                Thread.sleep(200);
                if (monitor != null && monitor.isCanceled()) {
                    req.cancel();
                    return null;
                }
            }
            req.waitForCompletion();

            Optional<ITmfEvent> matchingEvent = Lists.reverse(list).stream().filter(predicate).findFirst();

            if (matchingEvent.isPresent()) {
                /* We found an event matching, return it! */
                return matchingEvent.get();
            }
            /* Keep searching, next loop */

        }
    } catch (InterruptedException e) {
        return null;
    }

    /*
     * We searched up to the beginning of the trace and didn't find
     * anything.
     */
    return null;

}

From source file:com.google.gerrit.server.notedb.ChangeNotesParser.java
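
A reversed copy of pastAssignees feeds the LinkedHashSet, so duplicates are dropped while the reversed ordering of the parsed assignee history is preserved.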

private ChangeNotesState buildState() {
    return ChangeNotesState.create(tip.copy(), id, new Change.Key(changeId), createdOn, lastUpdatedOn, ownerId,
            branch, buildCurrentPatchSetId(), subject, topic, originalSubject, submissionId,
            assignee != null ? assignee.orElse(null) : null, status,
            Sets.newLinkedHashSet(Lists.reverse(pastAssignees)), hashtags, patchSets, buildApprovals(),
            ReviewerSet.fromTable(Tables.transpose(reviewers)),
            ReviewerByEmailSet.fromTable(Tables.transpose(reviewersByEmail)), allPastReviewers,
            buildReviewerUpdates(), submitRecords, buildAllMessages(), buildMessagesByPatchSet(), comments,
            readOnlyUntil, isPrivate);
}

From source file:fr.treeptik.cloudunit.service.impl.FileServiceImpl.java
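
The tail output is collected line by line and the list is then reversed so that the most recent log lines come first.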

/**
 * Log display feature.
 *
 * Returns the last nbRows lines of a log file inside a container,
 * most recent line first.
 *
 * @param containerId the (short) id of the target container
 * @param file the name of the log file to read
 * @param nbRows the number of lines to read from the end of the file
 * @return the matching log lines, newest first
 * @throws ServiceException if the Docker interaction fails
 */
public List<LogLine> catFileForNLines(String containerId, String file, Integer nbRows) throws ServiceException {

    List<LogLine> files = new ArrayList<>();
    try {
        DockerClient docker = null;
        if (Boolean.valueOf(isHttpMode)) {
            docker = DefaultDockerClient.builder().uri("http://" + dockerManagerIp).build();
        } else {
            final DockerCertificates certs = new DockerCertificates(Paths.get(certsDirPath));
            docker = DefaultDockerClient.builder().uri("https://" + dockerManagerIp).dockerCertificates(certs)
                    .build();
        }
        List<Container> containers = docker.listContainers();
        containers = containers.stream()
                .filter(container1 -> container1.id().substring(0, 12).equalsIgnoreCase(containerId))
                .collect(Collectors.toList());
        for (Container container : containers) {
            String logDirectory = getLogDirectory(containerId);
            // Exec command inside running container with attached STDOUT
            // and STDERR
            final String[] command = { "bash", "-c", "tail -n " + nbRows + " /cloudunit/appconf/logs/" + file };
            String execId;
            String containerName = container.names().get(0);
            execId = docker.execCreate(containerName, command, DockerClient.ExecParameter.STDOUT,
                    DockerClient.ExecParameter.STDERR);
            final LogStream output = docker.execStart(execId);
            final String execOutput = output.readFully();
            if (execOutput != null && !execOutput.contains("cannot access")) {
                StringTokenizer lignes = new StringTokenizer(execOutput, "\n");
                while (lignes.hasMoreTokens()) {
                    String line = lignes.nextToken();
                    LogLine logLine = new LogLine(file, line);
                    files.add(logLine);
                }
                files = Lists.reverse(files);
                output.close();
            }
        }
    } catch (DockerException | InterruptedException | DockerCertificateException e) {
        throw new ServiceException("Error in listByContainerIdAndPath", e);
    }

    return files;
}

From source file:org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.java
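
When the buckets are sorted in descending key order, Lists.reverse supplies an ascending view for the gap-filling pass; because it is a view, empty buckets inserted through its ListIterator land in the backing bucket list as well.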

@Override
public InternalAggregation reduce(ReduceContext reduceContext) {
    List<InternalAggregation> aggregations = reduceContext.aggregations();
    if (aggregations.size() == 1) {

        InternalHistogram<B> histo = (InternalHistogram<B>) aggregations.get(0);

        if (minDocCount == 1) {
            for (B bucket : histo.buckets) {
                bucket.aggregations.reduce(reduceContext.bigArrays());
            }
            return histo;
        }

        CollectionUtil.introSort(histo.buckets,
                order.asc ? InternalOrder.KEY_ASC.comparator() : InternalOrder.KEY_DESC.comparator());
        List<B> list = order.asc ? histo.buckets : Lists.reverse(histo.buckets);
        B lastBucket = null;
        ListIterator<B> iter = list.listIterator();

        // we need to fill the gaps with empty buckets
        if (minDocCount == 0) {
            ExtendedBounds bounds = emptyBucketInfo.bounds;

            // first adding all the empty buckets *before* the actual data (based on the extended_bounds.min the user requested)
            if (bounds != null) {
                B firstBucket = iter.hasNext() ? list.get(iter.nextIndex()) : null;
                if (firstBucket == null) {
                    if (bounds.min != null && bounds.max != null) {
                        long key = bounds.min;
                        long max = bounds.max;
                        while (key <= max) {
                            iter.add(createBucket(key, 0, emptyBucketInfo.subAggregations, formatter));
                            key = emptyBucketInfo.rounding.nextRoundingValue(key);
                        }
                    }
                } else {
                    if (bounds.min != null) {
                        long key = bounds.min;
                        while (key < firstBucket.key) {
                            iter.add(createBucket(key, 0, emptyBucketInfo.subAggregations, formatter));
                            key = emptyBucketInfo.rounding.nextRoundingValue(key);
                        }
                    }
                }
            }

            // now adding the empty buckets within the actual data,
            // e.g. if the data series is [1,2,3,7] there are 3 empty buckets that will be created for 4,5,6
            while (iter.hasNext()) {
                // look ahead on the next bucket without advancing the iter
                // so we'll be able to insert elements at the right position
                B nextBucket = list.get(iter.nextIndex());
                nextBucket.aggregations.reduce(reduceContext.bigArrays());
                if (lastBucket != null) {
                    long key = emptyBucketInfo.rounding.nextRoundingValue(lastBucket.key);
                    while (key != nextBucket.key) {
                        iter.add(createBucket(key, 0, emptyBucketInfo.subAggregations, formatter));
                        key = emptyBucketInfo.rounding.nextRoundingValue(key);
                    }
                }
                lastBucket = iter.next();
            }

            // finally, adding the empty buckets *after* the actual data (based on the extended_bounds.max requested by the user)
            if (bounds != null && lastBucket != null && bounds.max != null && bounds.max > lastBucket.key) {
                long key = emptyBucketInfo.rounding.nextRoundingValue(lastBucket.key);
                long max = bounds.max;
                while (key <= max) {
                    iter.add(createBucket(key, 0, emptyBucketInfo.subAggregations, formatter));
                    key = emptyBucketInfo.rounding.nextRoundingValue(key);
                }
            }

        } else {
            while (iter.hasNext()) {
                InternalHistogram.Bucket bucket = iter.next();
                if (bucket.getDocCount() < minDocCount) {
                    iter.remove();
                } else {
                    bucket.aggregations.reduce(reduceContext.bigArrays());
                }
            }
        }

        if (order != InternalOrder.KEY_ASC && order != InternalOrder.KEY_DESC) {
            CollectionUtil.introSort(histo.buckets, order.comparator());
        }

        return histo;
    }

    InternalHistogram reduced = (InternalHistogram) aggregations.get(0);

    LongObjectPagedHashMap<List<B>> bucketsByKey = new LongObjectPagedHashMap<>(reduceContext.bigArrays());
    for (InternalAggregation aggregation : aggregations) {
        InternalHistogram<B> histogram = (InternalHistogram) aggregation;
        for (B bucket : histogram.buckets) {
            List<B> bucketList = bucketsByKey.get(bucket.key);
            if (bucketList == null) {
                bucketList = new ArrayList<>(aggregations.size());
                bucketsByKey.put(bucket.key, bucketList);
            }
            bucketList.add(bucket);
        }
    }

    List<B> reducedBuckets = new ArrayList<>((int) bucketsByKey.size());
    for (LongObjectPagedHashMap.Cursor<List<B>> cursor : bucketsByKey) {
        List<B> sameTermBuckets = cursor.value;
        B bucket = sameTermBuckets.get(0).reduce(sameTermBuckets, reduceContext.bigArrays());
        if (bucket.getDocCount() >= minDocCount) {
            reducedBuckets.add(bucket);
        }
    }
    bucketsByKey.close();

    // adding empty buckets if needed
    if (minDocCount == 0) {
        CollectionUtil.introSort(reducedBuckets,
                order.asc ? InternalOrder.KEY_ASC.comparator() : InternalOrder.KEY_DESC.comparator());
        List<B> list = order.asc ? reducedBuckets : Lists.reverse(reducedBuckets);
        B lastBucket = null;
        ExtendedBounds bounds = emptyBucketInfo.bounds;
        ListIterator<B> iter = list.listIterator();

        // first adding all the empty buckets *before* the actual data (based on the extended_bounds.min the user requested)
        if (bounds != null) {
            B firstBucket = iter.hasNext() ? list.get(iter.nextIndex()) : null;
            if (firstBucket == null) {
                if (bounds.min != null && bounds.max != null) {
                    long key = bounds.min;
                    long max = bounds.max;
                    while (key <= max) {
                        iter.add(createBucket(key, 0, emptyBucketInfo.subAggregations, formatter));
                        key = emptyBucketInfo.rounding.nextRoundingValue(key);
                    }
                }
            } else {
                if (bounds.min != null) {
                    long key = bounds.min;
                    if (key < firstBucket.key) {
                        while (key < firstBucket.key) {
                            iter.add(createBucket(key, 0, emptyBucketInfo.subAggregations, formatter));
                            key = emptyBucketInfo.rounding.nextRoundingValue(key);
                        }
                    }
                }
            }
        }

        // now adding the empty buckets within the actual data,
        // e.g. if the data series is [1,2,3,7] there are 3 empty buckets that will be created for 4,5,6
        while (iter.hasNext()) {
            B nextBucket = list.get(iter.nextIndex());
            if (lastBucket != null) {
                long key = emptyBucketInfo.rounding.nextRoundingValue(lastBucket.key);
                while (key != nextBucket.key) {
                    iter.add(createBucket(key, 0, emptyBucketInfo.subAggregations, formatter));
                    key = emptyBucketInfo.rounding.nextRoundingValue(key);
                }
            }
            lastBucket = iter.next();
        }

        // finally, adding the empty buckets *after* the actual data (based on the extended_bounds.max requested by the user)
        if (bounds != null && lastBucket != null && bounds.max != null && bounds.max > lastBucket.key) {
            long key = emptyBucketInfo.rounding.nextRoundingValue(lastBucket.key);
            long max = bounds.max;
            while (key <= max) {
                iter.add(createBucket(key, 0, emptyBucketInfo.subAggregations, formatter));
                key = emptyBucketInfo.rounding.nextRoundingValue(key);
            }
        }

        if (order != InternalOrder.KEY_ASC && order != InternalOrder.KEY_DESC) {
            CollectionUtil.introSort(reducedBuckets, order.comparator());
        }

    } else {
        CollectionUtil.introSort(reducedBuckets, order.comparator());
    }

    reduced.buckets = reducedBuckets;
    return reduced;
}