Example usage for com.google.common.collect Iterators cycle

List of usage examples for com.google.common.collect Iterators cycle

Introduction

On this page you can find example usages of com.google.common.collect.Iterators.cycle.

Prototype

public static <T> Iterator<T> cycle(T... elements)

public static <T> Iterator<T> cycle(Iterable<T> iterable)

Guava provides both overloads; most of the examples below pass a collection and therefore resolve to the Iterable overload.

Document

Returns an iterator that cycles indefinitely over the provided elements.
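
A minimal, hedged sketch of the call in isolation (the class and element values below are invented for illustration). Since the iterator never terminates unless the source is empty, the caller must bound its own loop:

import com.google.common.collect.Iterators;

import java.util.Iterator;

public class CycleDemo {
    public static void main(String[] args) {
        // Cycle indefinitely over three elements; the loop bound comes from the caller.
        Iterator<String> colors = Iterators.cycle("red", "green", "blue");
        StringBuilder out = new StringBuilder();
        for (int i = 0; i < 7; i++) {
            out.append(colors.next()).append(' ');
        }
        System.out.println(out); // red green blue red green blue red
    }
}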

Usage

From source file:gov.nih.nci.firebird.test.nes.ExternalEntityTestDataSource.java

private Iterator<PracticeSite> getPracticeSiteCycler() {
    if (practiceSiteCycler == null) {
        practiceSiteCycler = Iterators.cycle(practiceSites);
    }
    return practiceSiteCycler;
}

From source file:gov.nih.nci.firebird.test.nes.ExternalEntityTestDataSource.java

private Iterator<ClinicalLaboratory> getClinicalLabCycler() {
    if (clinicalLabCycler == null) {
        clinicalLabCycler = Iterators.cycle(clinicalLabs);
    }
    return clinicalLabCycler;
}

From source file:gov.nih.nci.firebird.test.nes.ExternalEntityTestDataSource.java

private Iterator<InstitutionalReviewBoard> getIrbCycler() {
    if (irbCycler == null) {
        irbCycler = Iterators.cycle(irbs);
    }
    return irbCycler;
}
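
The three accessors above share one lazy-initialization idiom: create the cyclic iterator on first use, then reuse it so the round-robin position is preserved across calls. A hedged generalization (the LazyCycler helper is invented; it is not part of the FIREBIRD source, and unlike the originals it synchronizes the getter):

import com.google.common.collect.Iterators;

import java.util.Iterator;
import java.util.List;

// Hypothetical helper generalizing the lazily created cyclers above.
public class LazyCycler<T> {
    private final List<T> elements;
    private Iterator<T> cycler; // created on first use, then reused

    public LazyCycler(List<T> elements) {
        this.elements = elements;
    }

    public synchronized Iterator<T> get() {
        if (cycler == null) {
            cycler = Iterators.cycle(elements);
        }
        return cycler;
    }
}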

From source file:org.apache.samza.container.grouper.task.GroupByContainerIds.java

/**
 * {@inheritDoc}
 *
 * When there are `t` tasks and `p` processors, where t >= p, a fair task distribution should ideally assign
 * (t / p) tasks to each processor. In addition to guaranteeing a fair distribution, this {@link TaskNameGrouper}
 * implementation generates a locationId-aware task assignment, making a best effort to assign
 * tasks to processors with the same locality.
 *
 * Task assignment to processors is accomplished through the following two phases:
 *
 * 1. In the first phase, each task(T) is assigned to a processor(P) that satisfies the following constraints:
 *    A. The processor(P) should have the same locality as the task(T).
 *    B. The number of tasks already assigned to the processor should be less than (number of tasks / number of processors).
 *
 * 2. Each task left unassigned in phase 1 is then mapped to any processor with a task count less than
 * (number of tasks / number of processors). When no such processor exists, the unassigned
 * task is mapped to one of the available processors in a round-robin fashion.
 */
@Override
public Set<ContainerModel> group(Set<TaskModel> taskModels, GrouperMetadata grouperMetadata) {
    Map<TaskName, LocationId> taskLocality = grouperMetadata.getTaskLocality();

    // Validate that the task models are not empty.
    Preconditions.checkArgument(!taskModels.isEmpty(),
            "No tasks found. Likely due to no input partitions. Can't run a job with no tasks.");

    // Invoke the default grouper when the processor locality does not exist.
    if (MapUtils.isEmpty(grouperMetadata.getProcessorLocality())) {
        LOG.info("ProcessorLocality is empty. Generating with the default group method.");
        return group(taskModels, new ArrayList<>());
    }

    Map<String, LocationId> processorLocality = new TreeMap<>(grouperMetadata.getProcessorLocality());
    /**
     * When there are more processors than task models, choose the lexicographically least `x` processors (where x = taskModels.size()).
     */
    if (processorLocality.size() > taskModels.size()) {
        processorLocality = processorLocality.entrySet().stream().limit(taskModels.size())
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    }

    Map<LocationId, List<String>> locationIdToProcessors = new HashMap<>();
    Map<String, TaskGroup> processorIdToTaskGroup = new HashMap<>();

    // Generate the {@see LocationId} to processors mapping and processorId to {@see TaskGroup} mapping.
    processorLocality.forEach((processorId, locationId) -> {
        List<String> processorIds = locationIdToProcessors.getOrDefault(locationId, new ArrayList<>());
        processorIds.add(processorId);
        locationIdToProcessors.put(locationId, processorIds);
        processorIdToTaskGroup.put(processorId, new TaskGroup(processorId, new ArrayList<>()));
    });

    int numTasksPerProcessor = taskModels.size() / processorLocality.size();
    Set<TaskName> assignedTasks = new HashSet<>();

    /**
     * A processor is considered under-assigned when the number of tasks assigned to it is less than
     * (number of tasks / number of processors).
     * Map each task to an under-assigned processor with the same locality.
     */
    for (TaskModel taskModel : taskModels) {
        LocationId taskLocationId = taskLocality.get(taskModel.getTaskName());
        if (taskLocationId != null) {
            List<String> processorIds = locationIdToProcessors.getOrDefault(taskLocationId, new ArrayList<>());
            for (String processorId : processorIds) {
                TaskGroup taskGroup = processorIdToTaskGroup.get(processorId);
                if (taskGroup.size() < numTasksPerProcessor) {
                    taskGroup.addTaskName(taskModel.getTaskName().getTaskName());
                    assignedTasks.add(taskModel.getTaskName());
                    break;
                }
            }
        }
    }

    /**
     * In some scenarios, a task either might not have any previous locality or might not have any
     * processor that maps to its previous locality. This cyclic iterator over processorIds lets us
     * assign processors to such tasks in a round-robin fashion.
     */
    Iterator<String> processorIdsCyclicIterator = Iterators.cycle(processorLocality.keySet());

    // Order the taskGroups to choose a task group in a deterministic fashion for unassigned tasks.
    List<TaskGroup> taskGroups = new ArrayList<>(processorIdToTaskGroup.values());
    taskGroups.sort(Comparator.comparing(TaskGroup::getContainerId));

    /**
     * Map the tasks left over from the previous phase to any under-assigned processor.
     * When no under-assigned processor exists, map them to the available processors
     * in a round-robin manner.
     */
    for (TaskModel taskModel : taskModels) {
        if (!assignedTasks.contains(taskModel.getTaskName())) {
            Optional<TaskGroup> underAssignedTaskGroup = taskGroups.stream()
                    .filter(taskGroup -> taskGroup.size() < numTasksPerProcessor).findFirst();
            if (underAssignedTaskGroup.isPresent()) {
                underAssignedTaskGroup.get().addTaskName(taskModel.getTaskName().getTaskName());
            } else {
                TaskGroup taskGroup = processorIdToTaskGroup.get(processorIdsCyclicIterator.next());
                taskGroup.addTaskName(taskModel.getTaskName().getTaskName());
            }
            assignedTasks.add(taskModel.getTaskName());
        }
    }

    return TaskGroup.buildContainerModels(taskModels, taskGroups);
}
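
The round-robin fallback at the end of group() can be reduced to a small, hedged sketch (the processor and task names below are invented): leftover tasks draw processor ids from the cycle, spreading them evenly.

import com.google.common.collect.Iterators;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class RoundRobinFallbackDemo {
    public static void main(String[] args) {
        List<String> processors = Arrays.asList("p0", "p1", "p2");
        List<String> leftoverTasks = Arrays.asList("t4", "t5", "t6", "t7", "t8");

        // Cycle over processor ids so leftover tasks spread evenly across processors.
        Iterator<String> processorCycle = Iterators.cycle(processors);
        Map<String, List<String>> assignment = new TreeMap<>();
        for (String task : leftoverTasks) {
            assignment.computeIfAbsent(processorCycle.next(), k -> new ArrayList<>()).add(task);
        }
        System.out.println(assignment); // {p0=[t4, t7], p1=[t5, t8], p2=[t6]}
    }
}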

From source file:org.apache.drill.exec.store.schedule.AssignmentCreator.java

/**
 * Groups minor fragments together by corresponding endpoint, and creates an iterator that can be used to
 * distribute work assigned to a given endpoint evenly across all of its minor fragments.
 *
 * @return a map from each endpoint to a wrapped cyclic iterator over that endpoint's minor fragment ids
 */
private Map<DrillbitEndpoint, FragIteratorWrapper> getEndpointIterators() {
    Stopwatch watch = new Stopwatch();
    watch.start();
    Map<DrillbitEndpoint, FragIteratorWrapper> map = Maps.newLinkedHashMap();
    Map<DrillbitEndpoint, List<Integer>> mmap = Maps.newLinkedHashMap();
    for (int i = 0; i < incomingEndpoints.size(); i++) {
        DrillbitEndpoint endpoint = incomingEndpoints.get(i);
        List<Integer> intList = mmap.get(endpoint);
        if (intList == null) {
            intList = Lists.newArrayList();
        }
        intList.add(Integer.valueOf(i));
        mmap.put(endpoint, intList);
    }

    for (DrillbitEndpoint endpoint : mmap.keySet()) {
        FragIteratorWrapper wrapper = new FragIteratorWrapper();
        wrapper.iter = Iterators.cycle(mmap.get(endpoint));
        wrapper.maxCount = maxWork * mmap.get(endpoint).size();
        wrapper.minCount = Math.max(maxWork - 1, 1) * mmap.get(endpoint).size();
        map.put(endpoint, wrapper);
    }
    return map;
}
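
Stripped of the Drill-specific types, the method above builds one cyclic iterator per endpoint over that endpoint's minor-fragment indices. A hedged, framework-free sketch (FragIteratorWrapper is reduced to a bare Iterator and the endpoint names are invented):

import com.google.common.collect.Iterators;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class PerEndpointCycleDemo {
    public static void main(String[] args) {
        // One endpoint per incoming slot; duplicates mean several minor fragments per endpoint.
        List<String> incoming = Arrays.asList("nodeA", "nodeB", "nodeA", "nodeC", "nodeB");

        // Group slot indices by endpoint, preserving first-seen order like the code above.
        Map<String, List<Integer>> byEndpoint = new LinkedHashMap<>();
        for (int i = 0; i < incoming.size(); i++) {
            byEndpoint.computeIfAbsent(incoming.get(i), k -> new ArrayList<>()).add(i);
        }

        // One cyclic iterator per endpoint spreads that endpoint's work across its slots.
        Map<String, Iterator<Integer>> cyclers = new LinkedHashMap<>();
        byEndpoint.forEach((ep, idxs) -> cyclers.put(ep, Iterators.cycle(idxs)));

        Iterator<Integer> a = cyclers.get("nodeA");
        System.out.println(a.next() + " " + a.next() + " " + a.next()); // 0 2 0
    }
}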

From source file:com.amazonaws.services.kinesis.stormspout.state.zookeeper.ZookeeperStateManager.java

private void bootstrapStateFromZookeeper() {
    ImmutableList<String> shardAssignment = getShardAssignment();
    ImmutableList<IShardGetter> getters;

    // Task could not get an assignment (e.g. there are too many tasks for too few shards).
    if (shardAssignment.isEmpty()) {
        shardStates = new HashMap<>();
        getters = ImmutableList.of();
    } else {
        updateLocalState(shardAssignment);
        getters = makeGetters(shardAssignment);
    }

    this.currentGetter = Iterators.cycle(getters);
    LOG.info(this + " got getter assignment. Handling " + getters + ".");
}
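
One detail the snippet above leans on: cycling an empty iterable yields an iterator whose hasNext() is immediately false rather than one that loops forever, so the empty-assignment branch stays safe as long as consumers check hasNext() before next(). A minimal sketch:

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterators;

import java.util.Iterator;

public class EmptyCycleDemo {
    public static void main(String[] args) {
        Iterator<String> getters = Iterators.cycle(ImmutableList.<String>of());
        // An empty source never cycles; calling next() here would throw NoSuchElementException.
        System.out.println(getters.hasNext()); // false
    }
}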

From source file:google.registry.loadtest.LoadTestAction.java

private Function<String, String> listNameReplacer(final String toReplace, List<String> choices) {
    final Iterator<String> iterator = Iterators.cycle(choices);
    return new Function<String, String>() {
        @Override
        public String apply(String xml) {
            return xml.replace(toReplace, iterator.next());
        }
    };
}
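
A hedged usage sketch of the pattern above (the %name% placeholder and the XML snippet are invented): each apply() substitutes the next choice, wrapping around once the choices are exhausted.

import com.google.common.base.Function;
import com.google.common.collect.Iterators;

import java.util.Arrays;
import java.util.Iterator;

public class ListNameReplacerDemo {
    public static void main(String[] args) {
        final Iterator<String> names = Iterators.cycle(Arrays.asList("alpha", "beta"));
        Function<String, String> replacer = new Function<String, String>() {
            @Override
            public String apply(String xml) {
                // Each call consumes the next name from the cycle.
                return xml.replace("%name%", names.next());
            }
        };
        System.out.println(replacer.apply("<list>%name%</list>")); // <list>alpha</list>
        System.out.println(replacer.apply("<list>%name%</list>")); // <list>beta</list>
        System.out.println(replacer.apply("<list>%name%</list>")); // <list>alpha</list>
    }
}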

From source file:org.lttng.scope.lami.ui.viewers.LamiBarChartViewer.java

/**
 * Set the chart series colors according to the selection state. Use light
 * colors when a selection is present.
 */
private void setBarSeriesColors() {
    Iterator<Color> colorsIt;

    if (isSelected()) {
        colorsIt = Iterators.cycle(LIGHT_COLORS);
    } else {
        colorsIt = Iterators.cycle(COLORS);
    }

    for (ISeries series : getChart().getSeriesSet().getSeries()) {
        ((IBarSeries) series).setBarColor(colorsIt.next());
    }
}
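
The wrap-around is what keeps this safe when there are more series than palette entries: colors simply repeat. A framework-free sketch (the hex values and series names are invented):

import com.google.common.collect.Iterators;

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class PaletteCycleDemo {
    public static void main(String[] args) {
        List<String> palette = Arrays.asList("#1f77b4", "#ff7f0e", "#2ca02c");
        List<String> series = Arrays.asList("s1", "s2", "s3", "s4", "s5");

        // Cycle the palette so every series gets a color even past the palette's end.
        Iterator<String> colorsIt = Iterators.cycle(palette);
        for (String s : series) {
            System.out.println(s + " -> " + colorsIt.next()); // s4 wraps back to #1f77b4
        }
    }
}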

From source file:org.apache.drill.exec.planner.fragment.SimpleParallelizer.java

private List<DrillbitEndpoint> findEndpoints(Collection<DrillbitEndpoint> activeEndpoints,
        Map<DrillbitEndpoint, EndpointAffinity> endpointAffinityMap, final int width)
        throws PhysicalOperatorSetupException {

    final List<DrillbitEndpoint> endpoints = Lists.newArrayList();

    if (endpointAffinityMap.size() > 0) {
        // Get EndpointAffinity list sorted in descending order of affinity values
        List<EndpointAffinity> sortedAffinityList = ENDPOINT_AFFINITY_ORDERING
                .immutableSortedCopy(endpointAffinityMap.values());

        // Find the number of mandatory nodes (nodes with +infinity affinity).
        int numRequiredNodes = 0;
        for (EndpointAffinity ep : sortedAffinityList) {
            if (ep.isAssignmentRequired()) {
                numRequiredNodes++;
            } else {
                // As the list is sorted in descending order of affinities, we don't need to go beyond the first occurrence
                // of a non-mandatory node.
                break;
            }
        }

        if (width < numRequiredNodes) {
            throw new PhysicalOperatorSetupException(
                    "Can not parallelize the fragment as the parallelization width (" + width + ") is "
                            + "less than the number of mandatory nodes (" + numRequiredNodes
                            + " nodes with +INFINITE affinity).");
        }

        // Find the maximum number of slots which should go to endpoints with affinity (See DRILL-825 for details)
        int affinedSlots = Math.max(1, (int) (affinityFactor * width / activeEndpoints.size()))
                * sortedAffinityList.size();

        // Make sure affined slots is at least the number of mandatory nodes
        affinedSlots = Math.max(affinedSlots, numRequiredNodes);

        // Cap the affined slots to max parallelization width
        affinedSlots = Math.min(affinedSlots, width);

        Iterator<EndpointAffinity> affinedEPItr = Iterators.cycle(sortedAffinityList);

        // Keep adding until we have selected "affinedSlots" number of endpoints.
        while (endpoints.size() < affinedSlots) {
            EndpointAffinity ea = affinedEPItr.next();
            endpoints.add(ea.getEndpoint());
        }
    }

    // add remaining endpoints if required
    if (endpoints.size() < width) {
        // Get a list of endpoints that are not part of the affinity endpoint list
        List<DrillbitEndpoint> endpointsWithNoAffinity;
        final Set<DrillbitEndpoint> endpointsWithAffinity = endpointAffinityMap.keySet();

        if (endpointAffinityMap.size() > 0) {
            endpointsWithNoAffinity = Lists.newArrayList();
            for (DrillbitEndpoint ep : activeEndpoints) {
                if (!endpointsWithAffinity.contains(ep)) {
                    endpointsWithNoAffinity.add(ep);
                }
            }
        } else {
            // Need to create a mutable copy rather than an immutable one, because the list is
            // shuffled below and Collections.shuffle() doesn't accept an immutable list as input.
            endpointsWithNoAffinity = Lists.newArrayList(activeEndpoints);
        }

        // round robin with random start.
        Collections.shuffle(endpointsWithNoAffinity, ThreadLocalRandom.current());
        Iterator<DrillbitEndpoint> otherEPItr = Iterators
                .cycle(endpointsWithNoAffinity.size() > 0 ? endpointsWithNoAffinity : endpointsWithAffinity);
        while (endpoints.size() < width) {
            endpoints.add(otherEPItr.next());
        }
    }

    return endpoints;
}
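
Both while-loops above use the standard idiom for bounding an otherwise infinite cycle: keep drawing until an external count is met. A reduced sketch of the fill-to-width step (the endpoint names and width are invented):

import com.google.common.collect.Iterators;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class FillToWidthDemo {
    public static void main(String[] args) {
        List<String> candidates = Arrays.asList("ep1", "ep2", "ep3");
        int width = 7;

        // Keep drawing from the cycle until the target width is reached.
        List<String> selected = new ArrayList<>();
        Iterator<String> it = Iterators.cycle(candidates);
        while (selected.size() < width) {
            selected.add(it.next());
        }
        System.out.println(selected); // [ep1, ep2, ep3, ep1, ep2, ep3, ep1]
    }
}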

From source file:org.apache.drill.exec.physical.impl.spill.SpillSet.java

public SpillSet(DrillConfig config, FragmentHandle handle, PhysicalOperator popConfig) {
    String operName;

    // Set the spill options from the configuration
    String spillFs;
    List<String> dirList;

    // Set the operator name (used as part of the spill file name),
    // and set operator-specific options (the config file defaults to the
    // common options; users may override them per operator).
    if (popConfig instanceof Sort) {
        operName = "Sort";
        spillFs = config.getString(ExecConstants.EXTERNAL_SORT_SPILL_FILESYSTEM);
        dirList = config.getStringList(ExecConstants.EXTERNAL_SORT_SPILL_DIRS);
    } else if (popConfig instanceof HashAggregate) {
        operName = "HashAgg";
        spillFs = config.getString(ExecConstants.HASHAGG_SPILL_FILESYSTEM);
        dirList = config.getStringList(ExecConstants.HASHAGG_SPILL_DIRS);
    } else {
        // just use the common ones
        operName = "Unknown";
        spillFs = config.getString(ExecConstants.SPILL_FILESYSTEM);
        dirList = config.getStringList(ExecConstants.SPILL_DIRS);
    }

    dirs = Iterators.cycle(dirList);

    // If more than one directory, semi-randomly choose an offset into
    // the list to avoid overloading the first directory in the list.

    if (dirList.size() > 1) {
        int hash = handle.getQueryId().hashCode() + handle.getMajorFragmentId() + handle.getMinorFragmentId()
                + popConfig.getOperatorId();
        int offset = hash % dirList.size();
        for (int i = 0; i < offset; i++) {
            dirs.next();
        }
    }

    // Use the high-performance local file system if the local file
    // system is selected and impersonation is off. (We use that
    // as a proxy for a non-production Drill setup.)

    boolean impersonationEnabled = config.getBoolean(ExecConstants.IMPERSONATION_ENABLED);
    if (spillFs.startsWith(FileSystem.DEFAULT_FS) && !impersonationEnabled) {
        fileManager = new LocalFileManager(spillFs);
    } else {
        fileManager = new HadoopFileManager(spillFs);
    }

    spillDirName = String.format("%s_%s_%s-%s-%s", QueryIdHelper.getQueryId(handle.getQueryId()), operName,
            handle.getMajorFragmentId(), popConfig.getOperatorId(), handle.getMinorFragmentId());
}
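
The constructor advances the cycle by a hash-derived offset so that concurrent fragments start on different spill directories. A standalone, hedged sketch of that offsetting step (the hash inputs are invented; the original combines query, fragment, and operator ids):

import com.google.common.collect.Iterators;

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class SpillDirOffsetDemo {
    public static void main(String[] args) {
        List<String> dirList = Arrays.asList("/spill/a", "/spill/b", "/spill/c");
        Iterator<String> dirs = Iterators.cycle(dirList);

        // Advance by a deterministic, non-negative offset so different fragments
        // start on different directories instead of all hitting the first one.
        int hash = ("query-1".hashCode() + 2 /* fragment id */) & Integer.MAX_VALUE;
        int offset = hash % dirList.size();
        for (int i = 0; i < offset; i++) {
            dirs.next();
        }
        System.out.println(dirs.next()); // first spill directory for this fragment
    }
}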