Example usage for com.google.common.collect Iterators concat

Introduction

This page collects example usages of com.google.common.collect.Iterators.concat from open-source projects.

Prototype

public static <T> Iterator<T> concat(Iterator<? extends T> a, Iterator<? extends T> b) 

Document

Combines two iterators into a single iterator.
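A minimal, self-contained sketch of the two-iterator overload shown in the prototype (class and variable names here are illustrative):

import com.google.common.collect.Iterators;

import java.util.Arrays;
import java.util.Iterator;

public class ConcatDemo {
    public static void main(String[] args) {
        Iterator<String> first = Arrays.asList("a", "b").iterator();
        Iterator<String> second = Arrays.asList("c", "d").iterator();

        // Yields all elements of the first iterator, then all of the second: a, b, c, d.
        // The inputs are only polled as the combined iterator is traversed.
        Iterator<String> combined = Iterators.concat(first, second);
        while (combined.hasNext()) {
            System.out.println(combined.next());
        }
    }
}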

Usage

From source file:io.druid.server.coordinator.HttpLoadQueuePeon.java

private void doSegmentManagement() {
    if (stopped || !mainLoopInProgress.compareAndSet(false, true)) {
        log.debug("[%s]Ignoring tick. Either in-progress already or stopped.", serverId);
        return;
    }

    int batchSize = config.getHttpLoadQueuePeonBatchSize();

    List<DataSegmentChangeRequest> newRequests = new ArrayList<>(batchSize);

    synchronized (lock) {
        Iterator<Map.Entry<DataSegment, SegmentHolder>> iter = Iterators
                .concat(segmentsToDrop.entrySet().iterator(), segmentsToLoad.entrySet().iterator());

        while (batchSize > 0 && iter.hasNext()) {
            batchSize--;
            Map.Entry<DataSegment, SegmentHolder> entry = iter.next();
            if (entry.getValue().hasTimedOut()) {
                entry.getValue().requestFailed("timed out");
                iter.remove();
            } else {
                newRequests.add(entry.getValue().getChangeRequest());
            }
        }
    }

    if (newRequests.size() == 0) {
        log.debug("[%s]Found no load/drop requests. SegmentsToLoad[%d], SegmentsToDrop[%d], batchSize[%d].",
                serverId, segmentsToLoad.size(), segmentsToDrop.size(), config.getHttpLoadQueuePeonBatchSize());
        mainLoopInProgress.set(false);
        return;
    }

    try {
        log.debug("Sending [%d] load/drop requests to Server[%s].", newRequests.size(), serverId);
        BytesAccumulatingResponseHandler responseHandler = new BytesAccumulatingResponseHandler();
        ListenableFuture<InputStream> future = httpClient.go(
                new Request(HttpMethod.POST, changeRequestURL)
                        .addHeader(HttpHeaders.Names.ACCEPT, MediaType.APPLICATION_JSON)
                        .addHeader(HttpHeaders.Names.CONTENT_TYPE, MediaType.APPLICATION_JSON)
                        .setContent(requestBodyWriter.writeValueAsBytes(newRequests)),
                responseHandler, new Duration(config.getHttpLoadQueuePeonHostTimeout().getMillis() + 5000));

        Futures.addCallback(future, new FutureCallback<InputStream>() {
            @Override
            public void onSuccess(InputStream result) {
                boolean scheduleNextRunImmediately = true;
                try {
                    if (responseHandler.status == HttpServletResponse.SC_NO_CONTENT) {
                        log.debug("Received NO CONTENT reseponse from [%s]", serverId);
                    } else if (HttpServletResponse.SC_OK == responseHandler.status) {
                        try {
                            List<SegmentLoadDropHandler.DataSegmentChangeRequestAndStatus> statuses = jsonMapper
                                    .readValue(result, RESPONSE_ENTITY_TYPE_REF);
                            log.debug("Server[%s] returned status response [%s].", serverId, statuses);
                            synchronized (lock) {
                                if (stopped) {
                                    log.debug("Ignoring response from Server[%s]. We are already stopped.",
                                            serverId);
                                    scheduleNextRunImmediately = false;
                                    return;
                                }

                                for (SegmentLoadDropHandler.DataSegmentChangeRequestAndStatus e : statuses) {
                                    switch (e.getStatus().getState()) {
                                    case SUCCESS:
                                    case FAILED:
                                        handleResponseStatus(e.getRequest(), e.getStatus());
                                        break;
                                    case PENDING:
                                        log.info("Request[%s] is still pending on server[%s].", e.getRequest(),
                                                serverId);
                                        break;
                                    default:
                                        scheduleNextRunImmediately = false;
                                        log.error("WTF! Server[%s] returned unknown state in status[%s].",
                                                serverId, e.getStatus());
                                    }
                                }
                            }
                        } catch (Exception ex) {
                            scheduleNextRunImmediately = false;
                            logRequestFailure(ex);
                        }
                    } else {
                        scheduleNextRunImmediately = false;
                        logRequestFailure(new RE("Unexpected Response Status."));
                    }
                } finally {
                    mainLoopInProgress.set(false);

                    if (scheduleNextRunImmediately) {
                        processingExecutor.execute(HttpLoadQueuePeon.this::doSegmentManagement);
                    }
                }
            }

            @Override
            public void onFailure(Throwable t) {
                try {
                    logRequestFailure(t);
                } finally {
                    mainLoopInProgress.set(false);
                }
            }

            private void logRequestFailure(Throwable t) {
                log.error(t, "Request[%s] Failed with code[%s] and status[%s]. Reason[%s].", changeRequestURL,
                        responseHandler.status, responseHandler.description);
            }
        }, processingExecutor);
    } catch (Throwable th) {
        log.error(th, "Error sending load/drop request to [%s].", serverId);
        mainLoopInProgress.set(false);
    }
}
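Note how the loop above calls iter.remove() on the concatenated iterator: per Guava's contract, the iterator returned by concat supports remove() when the input iterator currently being traversed supports it, so the removal lands in whichever backing map (segmentsToDrop or segmentsToLoad) supplied the entry. A minimal sketch of that behavior, with hypothetical maps:

import com.google.common.collect.Iterators;

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

Map<String, Integer> toDrop = new HashMap<>();
Map<String, Integer> toLoad = new HashMap<>();
toDrop.put("segA", 1);
toLoad.put("segB", 2);

Iterator<Map.Entry<String, Integer>> it =
        Iterators.concat(toDrop.entrySet().iterator(), toLoad.entrySet().iterator());
while (it.hasNext()) {
    it.next();
    it.remove(); // removes the entry from whichever map is currently being traversed
}
// Both maps are now empty.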

From source file:org.apache.druid.server.coordinator.HttpLoadQueuePeon.java

private void doSegmentManagement() {
    if (stopped || !mainLoopInProgress.compareAndSet(false, true)) {
        log.debug("[%s]Ignoring tick. Either in-progress already or stopped.", serverId);
        return;
    }

    int batchSize = config.getHttpLoadQueuePeonBatchSize();

    List<DataSegmentChangeRequest> newRequests = new ArrayList<>(batchSize);

    synchronized (lock) {
        Iterator<Map.Entry<DataSegment, SegmentHolder>> iter = Iterators
                .concat(segmentsToDrop.entrySet().iterator(), segmentsToLoad.entrySet().iterator());

        while (batchSize > 0 && iter.hasNext()) {
            batchSize--;
            Map.Entry<DataSegment, SegmentHolder> entry = iter.next();
            if (entry.getValue().hasTimedOut()) {
                entry.getValue().requestFailed("timed out");
                iter.remove();
            } else {
                newRequests.add(entry.getValue().getChangeRequest());
            }
        }
    }

    if (newRequests.size() == 0) {
        log.debug("[%s]Found no load/drop requests. SegmentsToLoad[%d], SegmentsToDrop[%d], batchSize[%d].",
                serverId, segmentsToLoad.size(), segmentsToDrop.size(), config.getHttpLoadQueuePeonBatchSize());
        mainLoopInProgress.set(false);
        return;
    }

    try {
        log.debug("Sending [%d] load/drop requests to Server[%s].", newRequests.size(), serverId);
        BytesAccumulatingResponseHandler responseHandler = new BytesAccumulatingResponseHandler();
        ListenableFuture<InputStream> future = httpClient.go(
                new Request(HttpMethod.POST, changeRequestURL)
                        .addHeader(HttpHeaders.Names.ACCEPT, MediaType.APPLICATION_JSON)
                        .addHeader(HttpHeaders.Names.CONTENT_TYPE, MediaType.APPLICATION_JSON)
                        .setContent(requestBodyWriter.writeValueAsBytes(newRequests)),
                responseHandler, new Duration(config.getHttpLoadQueuePeonHostTimeout().getMillis() + 5000));

        Futures.addCallback(future, new FutureCallback<InputStream>() {
            @Override
            public void onSuccess(InputStream result) {
                boolean scheduleNextRunImmediately = true;
                try {
                    if (responseHandler.status == HttpServletResponse.SC_NO_CONTENT) {
                        log.debug("Received NO CONTENT reseponse from [%s]", serverId);
                    } else if (HttpServletResponse.SC_OK == responseHandler.status) {
                        try {
                            List<SegmentLoadDropHandler.DataSegmentChangeRequestAndStatus> statuses = jsonMapper
                                    .readValue(result, RESPONSE_ENTITY_TYPE_REF);
                            log.debug("Server[%s] returned status response [%s].", serverId, statuses);
                            synchronized (lock) {
                                if (stopped) {
                                    log.debug("Ignoring response from Server[%s]. We are already stopped.",
                                            serverId);
                                    scheduleNextRunImmediately = false;
                                    return;
                                }

                                for (SegmentLoadDropHandler.DataSegmentChangeRequestAndStatus e : statuses) {
                                    switch (e.getStatus().getState()) {
                                    case SUCCESS:
                                    case FAILED:
                                        handleResponseStatus(e.getRequest(), e.getStatus());
                                        break;
                                    case PENDING:
                                        log.info("Request[%s] is still pending on server[%s].", e.getRequest(),
                                                serverId);
                                        break;
                                    default:
                                        scheduleNextRunImmediately = false;
                                        log.error("WTF! Server[%s] returned unknown state in status[%s].",
                                                serverId, e.getStatus());
                                    }
                                }
                            }
                        } catch (Exception ex) {
                            scheduleNextRunImmediately = false;
                            logRequestFailure(ex);
                        }
                    } else {
                        scheduleNextRunImmediately = false;
                        logRequestFailure(new RE("Unexpected Response Status."));
                    }
                } finally {
                    mainLoopInProgress.set(false);

                    if (scheduleNextRunImmediately) {
                        processingExecutor.execute(HttpLoadQueuePeon.this::doSegmentManagement);
                    }
                }
            }

            @Override
            public void onFailure(Throwable t) {
                try {
                    responseHandler.description = t.toString();
                    logRequestFailure(t);
                } finally {
                    mainLoopInProgress.set(false);
                }
            }

            private void logRequestFailure(Throwable t) {
                log.error(t, "Request[%s] Failed with status[%s]. Reason[%s].", changeRequestURL,
                        responseHandler.status, responseHandler.description);
            }
        }, processingExecutor);
    } catch (Throwable th) {
        log.error(th, "Error sending load/drop request to [%s].", serverId);
        mainLoopInProgress.set(false);
    }
}

From source file:org.polarsys.reqcycle.traceability.cache.AbstractCachedTraceabilityEngine.java

@Override
public Iterator<Pair<Link, Reachable>> getTraceability(Request... requests) throws EngineException {
    long timeInNanos = 0;
    boolean debug = logger.isDebug(Activator.OPTIONS_DEBUG, Activator.getDefault());

    if (debug) {
        timeInNanos = System.nanoTime();
    }
    if (requests == null) {
        throw new EngineException("request can not be null");
    }
    boolean checkCache = isCacheCheckNeeded(requests);
    if (checkCache) {
        checkScope(getScope(requests));
    }
    if (debug) {
        if (checkCache) {
            long timeInMsc = (System.nanoTime() - timeInNanos) / 1000000;
            logger.trace(String.format("Cache checked in %d ms", timeInMsc));
        } else {
            logger.trace(String.format("Cache checked disabled via request"));
        }
    }
    Iterator<Pair<Link, Reachable>> result = Iterators.emptyIterator();
    for (Request request : requests) {
        // Scope and Filter are used to validate or invalidate paths so
        // they can be combined
        Predicate<Pair<Link, Reachable>> requestPredicate = new IsInScopePredicate(request.getScope());
        if (request.getFilter() != null) {
            requestPredicate = Predicates.and(new FilterPredicate(request.getFilter()), requestPredicate);
        }
        Iterable<Couple> couples = request.getCouples();
        if (!couples.iterator().hasNext()) {
            if (request.getDepth() == DEPTH.ONE) {
                throw new EngineException(
                        "for a couple with a null source the request depth shall be INFINITE");
            } else {
                result = Iterators.concat(result,
                        doGetAllTraceability(request.getDirection(), requestPredicate));
            }
        }
        // for each couple a traceability iterator is computed
        for (Couple c : couples) {
            // if the source is null, the engine must return all the
            // traceability, and the request depth shall be INFINITE
            if (c.getSource() == null) {
                if (request.getDepth() == DEPTH.ONE) {
                    throw new EngineException(
                            "for a couple with a null source the request depth shall be INFINITE");
                } else {
                    result = Iterators.concat(result,
                            doGetAllTraceability(request.getDirection(), requestPredicate));
                }
            } else {
                // when the stop condition is null it is a prospective analysis
                if (c.getStopCondition() == null) {
                    // for an INFINITE depth the traceability shall be complete
                    if (request.getDepth() == DEPTH.INFINITE) {
                        result = Iterators.concat(result,
                                doGetTraceability(c.getSource(), request.getDirection(), requestPredicate));
                        // otherwise just the first level shall be complete
                    } else if (request.getDepth() == DEPTH.ONE) {
                        result = Iterators.concat(result, doGetOneLevelTraceability(c.getSource(),
                                request.getDirection(), requestPredicate));
                    }
                    // when the stop condition is non-null a search shall be performed
                } else {
                    if (request.getDepth() == DEPTH.INFINITE) {
                        result = Iterators.concat(result, doGetTraceability(c.getSource(), c.getStopCondition(),
                                request.getDirection(), requestPredicate));
                    } else {
                        // except when the depth equals ONE, in which case
                        // it can be computed using a filter
                        result = Iterators.concat(result,
                                doGetTraceability(c.getSource(), c.getStopCondition(), request.getDirection(),
                                        Predicates.and(requestPredicate,
                                                new TargetEqualsPredicate(c.getStopCondition()))));
                    }
                }
            }
        }
    }
    if (debug) {
        timeInNanos = System.nanoTime() - timeInNanos;
        long timeInMsc = timeInNanos / 1000000;
        logger.trace(String.format("Traceability computed in %d ms (including cache check)", timeInMsc));
    }
    return result;
}
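Each pass through the loop above wraps the previous result in a new concat, so the final iterator is a delegation chain as deep as the number of couples. When that number can be large, collecting the per-part iterators first and concatenating once with the iterator-of-iterators overload keeps the wrapper flat; a sketch under that assumption (computePart is a hypothetical stand-in for the doGet* calls):

import com.google.common.collect.Iterators;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

List<Iterator<String>> parts = new ArrayList<>();
for (int i = 0; i < 1000; i++) {
    parts.add(computePart(i)); // hypothetical per-request iterator
}
// One flat concatenation instead of 1000 nested wrappers.
Iterator<String> all = Iterators.concat(parts.iterator());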

From source file:alluxio.master.lineage.DefaultLineageMaster.java

@Override
public synchronized Iterator<JournalEntry> getJournalEntryIterator() {
    return Iterators.concat(mLineageStore.getJournalEntryIterator(),
            CommonUtils.singleElementIterator(mLineageIdGenerator.toJournalEntry()));
}
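Here concat appends a single trailing element after the main sequence; the same shape in isolation, with illustrative values:

import com.google.common.collect.Iterators;

import java.util.Arrays;
import java.util.Iterator;

Iterator<String> entries = Arrays.asList("e1", "e2").iterator();
// Yields e1, e2, then the single summary element.
Iterator<String> withTrailer =
        Iterators.concat(entries, Iterators.singletonIterator("summary"));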

From source file:org.apache.hadoop.mapreduce.counters.AbstractCounters.java

@Override
public Iterator<G> iterator() {
    return Iterators.concat(fgroups.values().iterator(), groups.values().iterator());
}
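Implementing Iterable.iterator() with concat, as above, is a lightweight way to present two backing collections as one; a self-contained sketch with illustrative names:

import com.google.common.collect.Iterators;

import java.util.Iterator;
import java.util.List;

class CombinedView<T> implements Iterable<T> {
    private final List<T> first;
    private final List<T> second;

    CombinedView(List<T> first, List<T> second) {
        this.first = first;
        this.second = second;
    }

    @Override
    public Iterator<T> iterator() {
        // A fresh concatenated view per call, so the Iterable stays reusable.
        return Iterators.concat(first.iterator(), second.iterator());
    }
}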

From source file:org.apache.jackrabbit.oak.security.user.UserPrincipalProvider.java

@Nonnull
@Override
public Iterator<? extends Principal> findPrincipals(final String nameHint, final int searchType) {
    try {
        AuthorizableType type = AuthorizableType.getType(searchType);
        StringBuilder statement = new StringBuilder()
                .append(QueryUtil.getSearchRoot(type, config.getParameters())).append("//element(*,")
                .append(QueryUtil.getNodeTypeName(type)).append(')').append("[jcr:like(@rep:principalName,'")
                .append(buildSearchPattern(nameHint)).append("')]");

        Result result = root.getQueryEngine().executeQuery(statement.toString(), javax.jcr.query.Query.XPATH,
                NO_BINDINGS, namePathMapper.getSessionLocalMappings());

        Iterator<Principal> principals = Iterators.filter(
                Iterators.transform(result.getRows().iterator(), new ResultRowToPrincipal()),
                Predicates.notNull());

        if (matchesEveryone(nameHint, searchType)) {
            principals = Iterators.concat(principals,
                    Iterators.singletonIterator(EveryonePrincipal.getInstance()));
            return Iterators.filter(principals, new EveryonePredicate());
        } else {
            return principals;
        }
    } catch (ParseException e) {
        log.debug(e.getMessage());
        return Iterators.emptyIterator();
    }
}
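Because both concat and filter are lazy, appending a synthetic element and then filtering the combined stream, as above, costs nothing until the result is consumed. A reduced sketch of the same shape, with a plain seen-set standing in for the EveryonePredicate deduplication:

import com.google.common.base.Predicate;
import com.google.common.collect.Iterators;

import java.util.Arrays;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;

Iterator<String> found = Arrays.asList("alice", "everyone").iterator();
// Append the well-known name, then keep only first occurrences.
Iterator<String> withEveryone =
        Iterators.concat(found, Iterators.singletonIterator("everyone"));
final Set<String> seen = new HashSet<>();
Iterator<String> distinct = Iterators.filter(withEveryone, new Predicate<String>() {
    @Override
    public boolean apply(String name) {
        return seen.add(name); // true only the first time a name appears
    }
});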

From source file:com.github.adejanovski.cassandra.jdbc.CassandraResultSet.java

/**
 * Instantiates a new cassandra result set from a com.datastax.driver.core.ResultSet.
 */
CassandraResultSet(CassandraStatement statement, ArrayList<com.datastax.driver.core.ResultSet> resultSets)
        throws SQLException {
    this.statement = statement;
    this.resultSetType = statement.getResultSetType();
    this.fetchDirection = statement.getFetchDirection();
    this.fetchSize = statement.getFetchSize();
    //this.rowsIterators = Lists.newArrayList();

    // We have several result sets, but we will use only the first one for metadata needs
    this.driverResultSet = resultSets.get(0);

    // Now we concatenate iterators of the different result sets into a single one and voilà! ;)

    currentIteratorIndex = 0;

    rowsIterator = driverResultSet.iterator();
    for (int i = 1; i < resultSets.size(); i++) {
        rowsIterator = Iterators.concat(rowsIterator, resultSets.get(i).iterator()); // this can lead to a StackOverflowError when there are too many result sets
        /*if(resultSets.get(i).iterator().hasNext()){
           rowsIterators.add(resultSets.get(i).iterator());
        }
        */
    }

    //colDefinitions = driverResultSet.getColumnDefinitions();

    // Initialize to column values from the first row
    if (hasMoreRows()) {
        populateColumns();
    }

    meta = new CResultSetMetaData();
}
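The hazard flagged in the loop's comment is the same nesting issue noted after the traceability example: each concat call adds a wrapper, and traversing thousands of nested wrappers can overflow the stack. Accumulating the per-result-set iterators in a list and calling Iterators.concat(list.iterator()) once would keep the delegation depth constant regardless of how many result sets there are.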

From source file:org.geogit.api.plumbing.diff.DiffCounter.java

private DiffObjectCount countBucketsChildren(ImmutableSortedMap<Integer, Bucket> buckets,
        Iterator<Node> children, final NodeStorageOrder refOrder, final int depth) {

    final SortedSetMultimap<Integer, Node> treesByBucket;
    final SortedSetMultimap<Integer, Node> featuresByBucket;
    {
        // make sure values are sorted according to refOrder
        treesByBucket = TreeMultimap.create(Ordering.natural(), refOrder);
        featuresByBucket = TreeMultimap.create(Ordering.natural(), refOrder);
        while (children.hasNext()) {
            Node ref = children.next();
            Integer bucket = refOrder.bucket(ref, depth);
            if (ref.getType().equals(TYPE.TREE)) {
                treesByBucket.put(bucket, ref);
            } else {
                featuresByBucket.put(bucket, ref);
            }
        }
    }

    DiffObjectCount count = new DiffObjectCount();

    { // count the full size of all buckets into which no children fall
        final Set<Integer> lonelyBuckets = Sets.difference(buckets.keySet(),
                Sets.union(featuresByBucket.keySet(), treesByBucket.keySet()));

        for (Integer bucket : lonelyBuckets) {
            ObjectId bucketId = buckets.get(bucket).id();
            count.add(sizeOfTree(bucketId));
        }
    }
    { // count the full size of all children whose buckets don't exist in the buckets tree
        for (Integer bucket : Sets.difference(featuresByBucket.keySet(), buckets.keySet())) {
            SortedSet<Node> refs = featuresByBucket.get(bucket);
            count.addFeatures(refs.size());
        }

        for (Integer bucket : Sets.difference(treesByBucket.keySet(), buckets.keySet())) {
            SortedSet<Node> refs = treesByBucket.get(bucket);
            count.add(aggregateSize(refs));
        }
    }

    // find the number of diffs of the intersection
    final Set<Integer> commonBuckets = Sets.intersection(buckets.keySet(),
            Sets.union(featuresByBucket.keySet(), treesByBucket.keySet()));
    for (Integer bucket : commonBuckets) {

        Iterator<Node> refs = Iterators.concat(treesByBucket.get(bucket).iterator(),
                featuresByBucket.get(bucket).iterator());

        final ObjectId bucketId = buckets.get(bucket).id();
        final RevTree bucketTree = getTree(bucketId);

        if (bucketTree.isEmpty()) {
            // unlikely
            count.add(aggregateSize(refs));
        } else if (!bucketTree.buckets().isPresent()) {
            count.add(countChildrenDiffs(bucketTree.children(), refs));
        } else {
            final int deeperBucketsDepth = depth + 1;
            final ImmutableSortedMap<Integer, Bucket> deeperBuckets;
            deeperBuckets = bucketTree.buckets().get();
            count.add(countBucketsChildren(deeperBuckets, refs, refOrder, deeperBucketsDepth));
        }
    }

    return count;
}

From source file:org.geogit.api.plumbing.diff.TreeDiffEntryIterator.java

private Iterator<DiffEntry> addRemoveAll(@Nullable final NodeRef treeRef, final RevTree tree,
        final ChangeType changeType) {
    DepthTreeIterator treeIterator;

    final String path = treeRef == null ? "" : treeRef.path();
    final ObjectId metadataId = treeRef == null ? ObjectId.NULL : treeRef.getMetadataId();

    treeIterator = new DepthTreeIterator(path, metadataId, tree, objectDb, strategy);

    Iterator<DiffEntry> iterator;

    iterator = Iterators.transform(treeIterator, new RefToDiffEntry(changeType));

    if (reportTrees && !NodeRef.ROOT.equals(path)) {
        NodeRef oldTreeRef = ChangeType.ADDED.equals(changeType) ? null : treeRef;
        NodeRef newTreeRef = ChangeType.ADDED.equals(changeType) ? treeRef : null;
        DiffEntry treeEntry = new DiffEntry(oldTreeRef, newTreeRef);
        iterator = Iterators.concat(Iterators.singletonIterator(treeEntry), iterator);
    }
    return iterator;
}

From source file:org.ow2.authzforce.core.pdp.impl.policy.FlattenedPolicyTree.java

/**
 * The first element is the root policy of the tree; then, if any exist (e.g. when the root policy is a XACML PolicySet enclosing Policy(Set)IdReferences), come the policies referenced
 * from it. (This assumes that all policy references have been statically resolved.)
 */
@Override
public Iterator<PrimaryPolicyMetadata> iterator() {
    final Iterator<PrimaryPolicyMetadata> rootPolicyIterator = Iterators
            .singletonIterator(this.immutableRootPolicyMetadata);
    return immutableRootPolicyRefsMetadata.isPresent()
            ? Iterators.concat(rootPolicyIterator,
                    this.immutableRootPolicyRefsMetadata.get().getRefPolicies().iterator())
            : rootPolicyIterator;
}