Example usage for com.google.common.collect Iterables limit

Introduction

This page collects example usages of com.google.common.collect.Iterables.limit, taken from the open-source files listed under Usage below.

Prototype

public static <T> Iterable<T> limit(final Iterable<T> iterable, final int limitSize) 

Document

Creates a view of the given iterable containing its first limitSize elements; if the source has fewer than limitSize elements, the view contains all of them.
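
Before the project examples below, here is a minimal, self-contained sketch of the call (the class and variable names are illustrative only, not taken from any of the projects):

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;

import java.util.Arrays;
import java.util.List;

public class IterablesLimitExample {
    public static void main(String[] args) {
        List<String> hosts = Arrays.asList("a", "b", "c", "d", "e");

        // Lazy view of at most the first three elements; the source list is not copied.
        Iterable<String> firstThree = Iterables.limit(hosts, 3);

        // A common idiom in the examples below: snapshot the limited view into an immutable list.
        List<String> snapshot = ImmutableList.copyOf(firstThree);
        System.out.println(snapshot); // prints [a, b, c]
    }
}

A limit larger than the source size simply yields every element, which is why several of the examples below pass a generous cap such as 100 without checking the size first.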

Usage

From source file:hudson.model.queue.MappingWorksheet.java

public MappingWorksheet(BuildableItem item, List<? extends ExecutorSlot> offers,
        Collection<? extends LoadPredictor> loadPredictors) {
    this.item = item;

    // group executors by their computers
    Map<Computer, List<ExecutorSlot>> j = new HashMap<Computer, List<ExecutorSlot>>();
    for (ExecutorSlot o : offers) {
        Computer c = o.getExecutor().getOwner();
        List<ExecutorSlot> l = j.get(c);
        if (l == null)
            j.put(c, l = new ArrayList<ExecutorSlot>());
        l.add(o);
    }

    {// take load prediction into account and reduce the available executor pool size accordingly
        long duration = item.task.getEstimatedDuration();
        if (duration > 0) {
            long now = System.currentTimeMillis();
            for (Entry<Computer, List<ExecutorSlot>> e : j.entrySet()) {
                final List<ExecutorSlot> list = e.getValue();
                final int max = e.getKey().countExecutors();

                // build up the prediction model. cut to the chase if we hit the max.
                Timeline timeline = new Timeline();
                int peak = 0;
                OUTER: for (LoadPredictor lp : loadPredictors) {
                    for (FutureLoad fl : Iterables.limit(lp.predict(this, e.getKey(), now, now + duration),
                            100)) {
                        peak = max(peak,
                                timeline.insert(fl.startTime, fl.startTime + fl.duration, fl.numExecutors));
                        if (peak >= max)
                            break OUTER;
                    }
                }

                int minIdle = max - peak; // minimum number of idle nodes during this time period
                // total predicted load could exceed available executors [JENKINS-8882]
                if (minIdle < 0) {
                    // Should we toss a warning/info message?
                    minIdle = 0;
                }
                if (minIdle < list.size())
                    e.setValue(list.subList(0, minIdle));
            }
        }
    }

    // build into the final shape
    List<ExecutorChunk> executors = new ArrayList<ExecutorChunk>();
    for (List<ExecutorSlot> group : j.values()) {
        if (group.isEmpty())
            continue; // evict empty group
        ExecutorChunk ec = new ExecutorChunk(group, executors.size());
        if (ec.node == null)
            continue; // evict out of sync node
        executors.add(ec);
    }
    this.executors = ImmutableList.copyOf(executors);

    // group execution units into chunks. use of LinkedHashMap ensures that the main work comes at the top
    Map<Object, List<SubTask>> m = new LinkedHashMap<Object, List<SubTask>>();
    for (SubTask meu : Tasks.getSubTasksOf(item.task)) {
        Object c = Tasks.getSameNodeConstraintOf(meu);
        if (c == null)
            c = new Object();

        List<SubTask> l = m.get(c);
        if (l == null)
            m.put(c, l = new ArrayList<SubTask>());
        l.add(meu);
    }

    // build into the final shape
    List<WorkChunk> works = new ArrayList<WorkChunk>();
    for (List<SubTask> group : m.values()) {
        works.add(new WorkChunk(group, works.size()));
    }
    this.works = ImmutableList.copyOf(works);
}

From source file:org.jclouds.blobstore.config.LocalBlobStore.java

/**
 * default maxResults is 1000
 */
@Override
public PageSet<? extends StorageMetadata> list(final String containerName, ListContainerOptions options) {
    if (options.getDir() != null && options.getPrefix() != null) {
        throw new IllegalArgumentException("Cannot set both prefix and directory");
    }

    if ((options.getDir() != null || options.isRecursive()) && (options.getDelimiter() != null)) {
        throw new IllegalArgumentException("Cannot set the delimiter if directory or recursive is set");
    }

    // Check if the container exists
    if (!storageStrategy.containerExists(containerName))
        throw cnfe(containerName);

    // Loading blobs from container
    Iterable<String> blobBelongingToContainer = null;
    try {
        blobBelongingToContainer = storageStrategy.getBlobKeysInsideContainer(containerName);
    } catch (IOException e) {
        logger.error(e, "An error occurred loading blobs contained into container %s", containerName);
        propagate(e);
    }

    blobBelongingToContainer = Iterables.filter(blobBelongingToContainer, new Predicate<String>() {
        @Override
        public boolean apply(String key) {
            // ignore folders
            return storageStrategy.blobExists(containerName, key);
        }
    });
    SortedSet<StorageMetadata> contents = newTreeSet(
            FluentIterable.from(blobBelongingToContainer).transform(new Function<String, StorageMetadata>() {
                @Override
                public StorageMetadata apply(String key) {
                    Blob oldBlob = loadBlob(containerName, key);
                    if (oldBlob == null) {
                        return null;
                    }
                    checkState(oldBlob.getMetadata() != null,
                            "blob " + containerName + "/" + key + " has no metadata");
                    MutableBlobMetadata md = BlobStoreUtils.copy(oldBlob.getMetadata());
                    md.setSize(oldBlob.getMetadata().getSize());
                    return md;
                }
            }).filter(Predicates.<StorageMetadata>notNull()));

    String marker = null;
    if (options != null) {
        if (options.getDir() != null && !options.getDir().isEmpty()) {
            contents = filterDirectory(contents, options);
        } else if (options.getPrefix() != null) {
            contents = filterPrefix(contents, options);
        } else if (!options.isRecursive() || (options.getDelimiter() != null)) {
            String delimiter = options.getDelimiter() == null ? storageStrategy.getSeparator()
                    : options.getDelimiter();
            contents = extractCommonPrefixes(contents, delimiter, null);
        }

        if (options.getMarker() != null) {
            final String finalMarker = options.getMarker();
            String delimiter = storageStrategy.getSeparator();
            Optional<StorageMetadata> lastMarkerMetadata = tryFind(contents, new Predicate<StorageMetadata>() {
                public boolean apply(StorageMetadata metadata) {
                    return metadata.getName().compareTo(finalMarker) > 0;
                }
            });
            if (lastMarkerMetadata.isPresent()) {
                contents = contents.tailSet(lastMarkerMetadata.get());
            } else {
                // marker is after last key or container is empty
                contents.clear();
            }
        }

        int maxResults = options.getMaxResults() != null ? options.getMaxResults() : 1000;
        if (!contents.isEmpty()) {
            StorageMetadata lastElement = contents.last();
            contents = newTreeSet(Iterables.limit(contents, maxResults));
            if (maxResults != 0 && !contents.contains(lastElement)) {
                // Partial listing
                lastElement = contents.last();
                marker = lastElement.getName();
            }
        }

        // trim metadata, if the response isn't supposed to be detailed.
        if (!options.isDetailed()) {
            for (StorageMetadata md : contents) {
                md.getUserMetadata().clear();
            }
        }
    }

    return new PageSetImpl<StorageMetadata>(contents, marker);
}

From source file:org.glowroot.agent.live.ClasspathCache.java

private static ImmutableList<String> combineClassNamesWithLimit(Set<String> fullMatchingClassNames,
        Set<String> matchingClassNames, int limit) {
    if (fullMatchingClassNames.size() < limit) {
        int space = limit - fullMatchingClassNames.size();
        int numToAdd = Math.min(space, matchingClassNames.size());
        fullMatchingClassNames.addAll(ImmutableList.copyOf(Iterables.limit(matchingClassNames, numToAdd)));
    }
    return ImmutableList.copyOf(fullMatchingClassNames);
}

From source file:com.jeffjirsa.cassandra.db.compaction.TimeWindowCompactionStrategy.java

/**
 * @param bucket set of sstables
 * @param maxThreshold maximum number of sstables in a single compaction task.
 * @return A bucket trimmed to the maxThreshold newest sstables.
 */
@VisibleForTesting
static List<SSTableReader> trimToThreshold(Set<SSTableReader> bucket, int maxThreshold) {
    List<SSTableReader> ssTableReaders = new ArrayList<>(bucket);

    // Trim the largest sstables off the end to meet the maxThreshold
    Collections.sort(ssTableReaders, new SSTableReader.SizeComparator());

    return ImmutableList.copyOf(Iterables.limit(ssTableReaders, maxThreshold));
}

From source file:com.yahoo.druid.hadoop.HiveDatasourceInputFormat.java

private static String[] getFrequentLocations(Iterable<String> hosts) {

    final CountingMap<String> counter = new CountingMap<>();
    for (String location : hosts) {
        counter.add(location, 1);
    }

    final TreeSet<Pair<Long, String>> sorted = Sets
            .<Pair<Long, String>>newTreeSet(new Comparator<Pair<Long, String>>() {
                @Override
                public int compare(Pair<Long, String> o1, Pair<Long, String> o2) {
                    int compare = o2.lhs.compareTo(o1.lhs); // descending
                    if (compare == 0) {
                        compare = o1.rhs.compareTo(o2.rhs); // ascending
                    }
                    return compare;
                }
            });

    for (Map.Entry<String, AtomicLong> entry : counter.entrySet()) {
        sorted.add(Pair.of(entry.getValue().get(), entry.getKey()));
    }

    // use default replication factor, if possible
    final List<String> locations = Lists.newArrayListWithCapacity(3);
    for (Pair<Long, String> frequent : Iterables.limit(sorted, 3)) {
        locations.add(frequent.rhs);
    }
    return locations.toArray(new String[locations.size()]);
}

From source file:au.com.centrumsystems.hudson.plugin.buildpipeline.BuildPipelineView.java

/**
 * Returns BuildPipelineForm containing the build pipeline to display.
 *
 * @return - Representation of the projects and their related builds making up the build pipeline view
 * @throws URISyntaxException
 *             {@link URISyntaxException}
 */
public BuildPipelineForm getBuildPipelineForm() throws URISyntaxException {
    final int maxNoOfDisplayBuilds = Integer.valueOf(noOfDisplayedBuilds);

    final ProjectGrid project = gridBuilder.build(this);
    if (project.isEmpty()) {
        return null;
    }
    return new BuildPipelineForm(project, Iterables.limit(project.builds(), maxNoOfDisplayBuilds));
}

From source file:org.bin01.db.verifier.Validator.java

public String getResultsComparison() {
    List<List<Object>> controlResults = controlResult.getResults();
    List<List<Object>> testResults = testResult.getResults();

    if (valid() || (controlResults == null) || (testResults == null)) {
        return "";
    }

    Multiset<List<Object>> control = ImmutableSortedMultiset.copyOf(rowComparator(), controlResults);
    Multiset<List<Object>> test = ImmutableSortedMultiset.copyOf(rowComparator(), testResults);

    try {
        Iterable<ChangedRow> diff = ImmutableSortedMultiset.<ChangedRow>naturalOrder()
                .addAll(Iterables.transform(Multisets.difference(control, test),
                        ChangedRow.changedRows(Changed.REMOVED)))
                .addAll(Iterables.transform(Multisets.difference(test, control),
                        ChangedRow.changedRows(Changed.ADDED)))
                .build();
        diff = Iterables.limit(diff, 100);

        StringBuilder sb = new StringBuilder();

        sb.append(format("Control %s rows, Test %s rows%n", control.size(), test.size()));
        if (verboseResultsComparison) {
            Joiner.on("\n").appendTo(sb, diff);
        } else {
            sb.append("RESULTS DO NOT MATCH\n");
        }

        return sb.toString();
    } catch (TypesDoNotMatchException e) {
        return e.getMessage();
    }
}

From source file:org.diqube.execution.steps.ResolveValuesStep.java

@Override
public void execute() {
    rowIdReadWriteLock.writeLock().lock();
    ConcurrentNavigableMap<String, ConcurrentMap<Long, Pair<ExecutionEnvironment, Long>>> activeColsAndRows;
    try {
        activeColsAndRows = inputColsAndRows;
        inputColsAndRows = new ConcurrentSkipListMap<>();

        if (sourcesAreEmpty.get() && activeColsAndRows.isEmpty() && inputColsAndRows.isEmpty()) {
            // there won't be any input at all. Stop processing.
            forEachOutputConsumerOfType(GenericConsumer.class, c -> c.sourceIsDone());
            doneProcessing();
            return;
        }
    } finally {
        rowIdReadWriteLock.writeLock().unlock();
    }

    if (activeColsAndRows.size() > 0) {
        logger.debug("Starting to resolve values...");
        QueryUuidThreadState uuidState = QueryUuid.getCurrentThreadState();
        Map<String, Map<Long, Object>> valuesPerColumn = activeColsAndRows.entrySet().stream() //
                .parallel().flatMap( //
                        new Function<Entry<String, ConcurrentMap<Long, Pair<ExecutionEnvironment, Long>>>, Stream<Triple<String, Long, Object>>>() {
                            @Override
                            public Stream<Triple<String, Long, Object>> apply(
                                    Entry<String, ConcurrentMap<Long, Pair<ExecutionEnvironment, Long>>> e) {
                                QueryUuid.setCurrentThreadState(uuidState);
                                try {
                                    String colName = e.getKey();

                                    List<Triple<String, Long, Object>> res = new ArrayList<>();

                                    // group by ExecutionEnvs and columnValueIds, so we do not have to decompress specific colValueIds
                                    // multiple times
                                    Map<ExecutionEnvironment, SortedMap<Long, List<Long>>> envToColumnValueIdToRowId = new HashMap<>();

                                    for (Entry<Long, Pair<ExecutionEnvironment, Long>> rowIdColValueIdEntry : e
                                            .getValue().entrySet()) {
                                        Long rowId = rowIdColValueIdEntry.getKey();
                                        Long columnValueId = rowIdColValueIdEntry.getValue().getRight();
                                        ExecutionEnvironment env = rowIdColValueIdEntry.getValue().getLeft();

                                        if (!envToColumnValueIdToRowId.containsKey(env))
                                            envToColumnValueIdToRowId.put(env, new TreeMap<>());

                                        if (!envToColumnValueIdToRowId.get(env).containsKey(columnValueId))
                                            envToColumnValueIdToRowId.get(env).put(columnValueId,
                                                    new ArrayList<>());
                                        envToColumnValueIdToRowId.get(env).get(columnValueId).add(rowId);
                                    }

                                    for (ExecutionEnvironment env : envToColumnValueIdToRowId.keySet()) {
                                        SortedMap<Long, List<Long>> columnValueIdToRowId = envToColumnValueIdToRowId
                                                .get(env);
                                        Long[] sortedColumnValueIds = columnValueIdToRowId.keySet()
                                                .toArray(new Long[columnValueIdToRowId.keySet().size()]);

                                        ColumnShard columnShard = env.getColumnShard(colName);
                                        Object[] values = columnShard.getColumnShardDictionary()
                                                .decompressValues(sortedColumnValueIds);

                                        for (int i = 0; i < sortedColumnValueIds.length; i++) {
                                            Long columnValueId = sortedColumnValueIds[i];
                                            Object value = values[i];

                                            for (Long rowId : columnValueIdToRowId.get(columnValueId))
                                                res.add(new Triple<>(colName, rowId, value));
                                        }
                                    }

                                    return res.stream();
                                } finally {
                                    QueryUuid.clearCurrent();
                                }
                            }

                        })
                .collect(() -> new HashMap<String, Map<Long, Object>>(), (map, triple) -> {
                    String colName = triple.getLeft();
                    Long rowId = triple.getMiddle();
                    Object value = triple.getRight();
                    if (!map.containsKey(colName))
                        map.put(colName, new HashMap<>());
                    map.get(colName).put(rowId, value);
                }, (map1, map2) -> {
                    for (String colName : map2.keySet()) {
                        if (!map1.containsKey(colName))
                            map1.put(colName, new HashMap<>());
                        map1.get(colName).putAll(map2.get(colName));
                    }
                });

        QueryUuid.setCurrentThreadState(uuidState);

        for (String colName : valuesPerColumn.keySet()) {
            logger.trace("Resolved values, sending them out now (limit): {}, {}", colName,
                    Iterables.limit(valuesPerColumn.get(colName).entrySet(), 10));
            forEachOutputConsumerOfType(ColumnValueConsumer.class,
                    c -> c.consume(colName, valuesPerColumn.get(colName)));
        }
    }

    if (sourcesAreEmpty.get() && inputColsAndRows.isEmpty()) {
        forEachOutputConsumerOfType(GenericConsumer.class, c -> c.sourceIsDone());
        doneProcessing();
    }
}

From source file:org.diqube.server.queryremote.flatten.FlattenedControlFileFlattenedTableDiskCache.java

Map<Pair<String, String>, Deque<CachedDataInfo>> loadCurrentData() {
    File[] controlFiles = cacheDirectory
            .listFiles(f -> f.isFile() && f.getName().endsWith(FLATTENED_CONTROL_FILE_SUFFIX));

    // evict data from files that have been removed
    Set<String> removedFiles = Sets.difference(controlFileInfo.keySet(),
            Stream.of(controlFiles).map(f -> f.getAbsolutePath()).collect(Collectors.toSet()));
    if (!removedFiles.isEmpty()) {
        for (String removedFile : removedFiles) {
            synchronized (sync) {
                Pair<FileTime, Triple<String, String, Set<Long>>> p = controlFileInfo.remove(removedFile);
                if (p != null) {
                    String tableName = p.getRight().getLeft();
                    String flattenBy = p.getRight().getMiddle();
                    Set<Long> firstRowIds = p.getRight().getRight();

                    logger.info(
                            "Identified removal of {} from flattenedcache. Cache will not provide "
                                    + "flattened tables anymore on following values: {}/{}/{} (last limit)",
                            removedFile, tableName, flattenBy, Iterables.limit(firstRowIds, 100));

                    Deque<CachedDataInfo> deque = curData.remove(new Pair<>(tableName, flattenBy));
                    Iterator<CachedDataInfo> it = deque.iterator();
                    while (it.hasNext()) {
                        CachedDataInfo cur = it.next();
                        if (cur.getOrigFirstRowIds().equals(firstRowIds))
                            it.remove();
                    }
                }
            }
        }
    }

    for (File controlFile : controlFiles) {
        FileTime modifiedTime = modifiedTime(controlFile);
        if (modifiedTime == null)
            continue;

        // check if file is new or changed.
        if (!controlFileInfo.containsKey(controlFile.getAbsolutePath())
                || !controlFileInfo.get(controlFile.getAbsolutePath()).getLeft().equals(modifiedTime)) {
            File dataFile = new File(dataFileName(controlFile));

            if (!dataFile.exists() || !dataFile.isFile()) {
                logger.warn(
                        "Data file for cached flattened table '{}' does not exist or is directory. Ignoring.",
                        dataFile.getAbsolutePath());
                continue;
            }

            synchronized (sync) {
                // re-check if file changed in the meantime.
                modifiedTime = modifiedTime(controlFile);
                if (modifiedTime == null)
                    continue;

                if (!controlFileInfo.containsKey(controlFile.getAbsolutePath())
                        || !controlFileInfo.get(controlFile.getAbsolutePath()).getLeft().equals(modifiedTime)) {

                    Properties control = new Properties();
                    try (FileInputStream fis = new FileInputStream(controlFile)) {
                        control.load(new InputStreamReader(fis, Charset.forName("UTF-8")));
                    } catch (IOException e) {
                        logger.warn(
                                "IOException while trying to access control file in flattenedcache: {}. Ignoring.",
                                controlFile.getAbsolutePath(), e);
                        continue;
                    }

                    String sourceTableName = control.getProperty(FLATTENED_CONTROL_SOURCE_TABLE);
                    String flattenBy = control.getProperty(FLATTENED_CONTROL_FLATTEN_BY);
                    String origFirstRow = control.getProperty(FLATTENED_CONTROL_ORIG_FIRST_ROW);

                    if (sourceTableName == null || flattenBy == null || origFirstRow == null) {
                        logger.warn("Control file of flattenedcache is invalid: {}. Ignoring.",
                                controlFile.getAbsolutePath());
                        continue;
                    }

                    String[] firstRowIds = origFirstRow
                            .split(Pattern.quote(FLATTENED_CONTROL_ORIG_FIRST_ROW_DELIMITER));
                    Set<Long> firstRowIdsSet = new HashSet<>();
                    boolean error = false;
                    for (String firstRowIdString : firstRowIds) {
                        try {
                            firstRowIdsSet.add(Long.parseLong(firstRowIdString));
                        } catch (NumberFormatException e) {
                            logger.warn("Control file of flattenedcache is invalid: {}. Ignoring.",
                                    controlFile.getAbsolutePath(), e);
                            error = true;
                            break;
                        }
                    }
                    if (error)
                        continue;

                    Supplier<FlattenedTable> loader = new Supplier<FlattenedTable>() {
                        @Override
                        public FlattenedTable get() {
                            try (RandomAccessFile f = new RandomAccessFile(dataFile, "r")) {
                                BigByteBuffer buf = new BigByteBuffer(f.getChannel(), MapMode.READ_ONLY,
                                        b -> b.load());

                                DiqubeFileReader fileReader = diqubeFileFactory.createDiqubeFileReader(buf);

                                Collection<DefaultTableShard> shards = fileReader.loadAllTableShards();

                                return flattenDataFactory.createFlattenedTable(
                                        "FLATTENED_LOADED" /* No need to guarantee a specific table name */,
                                        shards.stream().map(s -> (TableShard) s).collect(Collectors.toList()),
                                        firstRowIdsSet);
                            } catch (IOException | DeserializationException e) {
                                logger.error("Could not load disk-cached flattened table from {}",
                                        dataFile.getAbsolutePath(), e);
                                return null;
                            }
                        }
                    };

                    logger.info(
                            "Found new/changed flattenedcache control file '{}'. Cache will provide data on following "
                                    + "values in the future: {}/{}/{} (last limit)",
                            controlFile.getAbsolutePath(), sourceTableName, flattenBy,
                            Iterables.limit(firstRowIdsSet, 100));

                    Pair<String, String> keyPair = new Pair<>(sourceTableName, flattenBy);
                    curData.computeIfAbsent(keyPair, k -> new ConcurrentLinkedDeque<CachedDataInfo>());
                    curData.get(keyPair)
                            .add(new CachedDataInfo(firstRowIdsSet, controlFile.getAbsolutePath(), loader));

                    controlFileInfo.put(controlFile.getAbsolutePath(),
                            new Pair<>(modifiedTime, new Triple<>(sourceTableName, flattenBy, firstRowIdsSet)));
                }
            }
        }
    }

    return curData;
}

From source file:org.diqube.execution.steps.ProjectStep.java

@Override
protected void execute() {
    // Did we fill the output column completely and are we done?
    boolean columnFullyBuilt = false;

    ColumnShard column = null;

    VersionedExecutionEnvironment temporaryEnv;
    Set<Long> curAdjustedRowIds;
    synchronized (newestSync) {
        temporaryEnv = newestTemporaryEnv;
        curAdjustedRowIds = newestAdjustedRowIds;
        if (curAdjustedRowIds != null && !curAdjustedRowIds.isEmpty())
            newestAdjustedRowIds = new HashSet<>();
    }

    if (inputColNames.size() == 0) {
        // we do not have input columns, just literals. The resulting column will likely end up being a column with only
        // one row, a 'constant' row. This is handled accordingly in ResolveColumnDictIdsStep.

        ColumnType inputColType = null;
        if (functionParameters[0].getValue() instanceof Long)
            inputColType = ColumnType.LONG;
        else if (functionParameters[0].getValue() instanceof String)
            inputColType = ColumnType.STRING;
        else if (functionParameters[0].getValue() instanceof Double)
            inputColType = ColumnType.DOUBLE;

        ProjectionFunction<Object, Object> fn = functionFactory.createProjectionFunction(functionNameLowerCase,
                inputColType);

        if (fn == null)
            throw new ExecutablePlanExecutionException("Cannot find function '" + functionNameLowerCase
                    + "' with input data type " + inputColType);

        for (int paramIdx = 0; paramIdx < functionParameters.length; paramIdx++)
            fn.provideConstantParameter(paramIdx, functionParameters[paramIdx].getValue());

        Object[] fnResult = fn.execute();

        switch (fn.getOutputType()) {
        case LONG:
            column = columnShardFactory.createConstantLongColumnShard(outputColName, (Long) fnResult[0],
                    defaultEnv.getFirstRowIdInShard());
            break;
        case STRING:
            column = columnShardFactory.createConstantStringColumnShard(outputColName, (String) fnResult[0],
                    defaultEnv.getFirstRowIdInShard());
            break;
        case DOUBLE:
            column = columnShardFactory.createConstantDoubleColumnShard(outputColName, (Double) fnResult[0],
                    defaultEnv.getFirstRowIdInShard());
            break;
        }
        columnFullyBuilt = true;
        logger.trace("Build constant column {} as there are no column inputs. Value: {}", outputColName,
                fnResult[0]);
    } else if (columnBuiltConsumer.getNumberOfTimesWired() == 0
            || (columnBuiltConsumer.getNumberOfTimesWired() > 0 && allColumnsBuilt.get())) {
        // We waited enough, all our source columns are built fully and are available in the defaultEnv.

        logger.trace("Build standard column {} based on default environment (= last run).", outputColName);
        column = buildColumnBasedProjection(defaultEnv);
        columnFullyBuilt = true;
    } else if (columnBuiltConsumer.getNumberOfTimesWired() > 0 && inputSourcesDone.get()
            && !allColumnsBuilt.get()) {
        // we need to wait for columns to be built, but the columnBuiltConsumer reported to be done, but not all columns
        // have been built. Therefore we cannot execute the projection, but just report "done".
        logger.debug("Projection waited for columns to be built, but some won't be built. Skipping.");
        forEachOutputConsumerOfType(GenericConsumer.class, c -> c.sourceIsDone());
        doneProcessing();
        return;
    } else {
        // not all columns are yet fully available. Let's see if we have enough information to at least project some parts
        // for the time being.

        if (temporaryEnv != null && existsOutputConsumerOfType(ColumnVersionBuiltConsumer.class)) {
            boolean allInputColsAvailable = inputColNames.stream()
                    .allMatch(colName -> temporaryEnv.getColumnShard(colName) != null);

            if (allInputColsAvailable) {
                // we have data for all input columns available, which means that we can start projection at least
                // /something/.

                logger.trace("Build intermediary column {} after following rowIds were adjusted (limit) {}",
                        outputColName, Iterables.limit(curAdjustedRowIds, 100));

                // execute full projection, although we have specific row IDs that have been altered.
                // TODO #8 cache intermediary results and use that to not again apply the projection function to all elements
                // again.
                column = buildColumnBasedProjection(temporaryEnv);
            }
        }
    }

    if (column != null) {
        if (temporaryEnv != null && columnVersionManager != null
                && existsOutputConsumerOfType(ColumnVersionBuiltConsumer.class)) {
            logger.trace("Will store new version of {}", outputColName);
            // inform ColumnVersionBuiltConsumer
            VersionedExecutionEnvironment newEnv = columnVersionManager.createNewVersion(column);

            forEachOutputConsumerOfType(ColumnVersionBuiltConsumer.class,
                    c -> c.columnVersionBuilt(newEnv, outputColName, curAdjustedRowIds));
        }

        // if done, inform other consumers.
        if (columnFullyBuilt) {
            logger.trace("Will store final column {}", outputColName);

            switch (column.getColumnType()) {
            case STRING:
                defaultEnv.storeTemporaryStringColumnShard((StringColumnShard) column);
                break;
            case LONG:
                defaultEnv.storeTemporaryLongColumnShard((LongColumnShard) column);
                break;
            case DOUBLE:
                defaultEnv.storeTemporaryDoubleColumnShard((DoubleColumnShard) column);
                break;
            }
            forEachOutputConsumerOfType(ColumnBuiltConsumer.class, c -> c.columnBuilt(outputColName));
            forEachOutputConsumerOfType(GenericConsumer.class, c -> c.sourceIsDone());
            doneProcessing();
        }
    }
}