Example usage for com.google.common.base Functions compose

List of usage examples for com.google.common.base Functions compose

Introduction

On this page you can find usage examples for com.google.common.base Functions compose.

Prototype

public static <A, B, C> Function<A, C> compose(Function<B, C> g, Function<A, ? extends B> f) 

Document

Returns the composition of two functions: for f: A->B and g: B->C, compose(g, f) yields the function h such that h(a) == g(f(a)) for each a in A. Note the argument order: f is applied first, then g.
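
Below is a minimal, self-contained sketch (not taken from the sources listed under Usage) showing that argument order in practice. The length and isEven functions are made up for illustration.

import com.google.common.base.Function;
import com.google.common.base.Functions;

public class ComposeExample {
    public static void main(String[] args) {
        // f: String -> Integer, the length of a string
        Function<String, Integer> length = new Function<String, Integer>() {
            @Override
            public Integer apply(String input) {
                return input.length();
            }
        };

        // g: Integer -> Boolean, whether a number is even
        Function<Integer, Boolean> isEven = new Function<Integer, Boolean>() {
            @Override
            public Boolean apply(Integer input) {
                return input % 2 == 0;
            }
        };

        // compose(g, f) applies f first, then g: isEven(length(s))
        Function<String, Boolean> hasEvenLength = Functions.compose(isEven, length);
        System.out.println(hasEvenLength.apply("guava"));  // false (5 characters)
        System.out.println(hasEvenLength.apply("banana")); // true  (6 characters)
    }
}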

Usage

From source file:org.apache.aurora.scheduler.preemptor.PendingTaskProcessor.java

private List<TaskGroupKey> fetchIdlePendingGroups(StoreProvider store) {
    Multiset<TaskGroupKey> taskGroupCounts = HashMultiset
            .create(FluentIterable.from(store.getTaskStore().fetchTasks(Query.statusScoped(PENDING)))
                    .filter(Predicates.and(isIdleTask, Predicates.not(hasCachedSlot)))
                    .transform(Functions.compose(ASSIGNED_TO_GROUP_KEY, IScheduledTask::getAssignedTask)));

    return getPreemptionSequence(taskGroupCounts, reservationBatchSize);
}

From source file:net.automatalib.util.automata.copy.AutomatonLowLevelCopy.java

/**
 * Copies a {@link UniversalAutomaton} to a {@link MutableAutomaton} with possibly heterogeneous input alphabets and state and transition
 * properties.
 * 
 * @param <S1> input automaton state type
 * @param <I1> input automaton input symbol type
 * @param <T1> input automaton transition type
 * @param <SP1> input automaton state property type
 * @param <TP1> input automaton transition property type
 * @param <S2> output automaton state type
 * @param <I2> output automaton input symbol type
 * @param <T2> output automaton transition type
 * @param <SP2> output automaton state property type
 * @param <TP2> output automaton transition property type
 * 
 * @param method the copy method to use
 * @param in the input automaton
 * @param inputs the inputs to consider
 * @param out the output automaton
 * @param inputsMapping the transformation for input symbols
 * @param spTransform the transformation for state properties
 * @param tpTransform the transformation for transition properties
 * @param stateFilter the filter predicate for states
 * @param transFilter the filter predicate for transitions
 * @return a mapping from old to new states
 */
public static <S1, I1, T1, SP1, TP1, S2, I2, T2, SP2, TP2> Mapping<S1, S2> copy(AutomatonCopyMethod method,
        UniversalAutomaton<S1, ? super I1, T1, ? extends SP1, ? extends TP1> in,
        Collection<? extends I1> inputs, MutableAutomaton<S2, I2, T2, ? super SP2, ? super TP2> out,
        Function<? super I1, ? extends I2> inputsMapping, Function<? super SP1, ? extends SP2> spTransform,
        Function<? super TP1, ? extends TP2> tpTransform, Predicate<? super S1> stateFilter,
        TransitionPredicate<? super S1, ? super I1, ? super T1> transFilter) {
    Function<? super S1, ? extends SP2> spMapping = (spTransform == null) ? null
            : Functions.compose(spTransform, TS.stateProperties(in));
    Function<? super T1, ? extends TP2> tpMapping = (tpTransform == null) ? null
            : Functions.compose(tpTransform, TS.transitionProperties(in));
    return rawCopy(method, in, inputs, out, inputsMapping, spMapping, tpMapping, stateFilter, transFilter);
}

From source file:com.palantir.atlasdb.keyvalue.impl.SweepStatsKeyValueService.java

private void flushWrites(Multiset<String> writes, Set<String> clears) {
    if (writes.isEmpty() && clears.isEmpty()) {
        log.debug("No writes to flush");
        return;
    }

    log.debug("Flushing stats for {} writes and {} clears", writes.size(), clears.size());
    log.trace("Flushing writes: {}", writes);
    log.trace("Flushing clears: {}", clears);
    try {
        Set<String> tableNames = Sets.difference(writes.elementSet(), clears);
        Iterable<byte[]> rows = Collections2.transform(tableNames, Functions
                .compose(Persistables.persistToBytesFunction(), SweepPriorityRow.fromFullTableNameFun()));
        Map<Cell, Value> oldWriteCounts = delegate().getRows(SWEEP_PRIORITY_TABLE, rows,
                SweepPriorityTable.getColumnSelection(SweepPriorityNamedColumn.WRITE_COUNT), Long.MAX_VALUE);
        Map<Cell, byte[]> newWriteCounts = Maps.newHashMapWithExpectedSize(writes.elementSet().size());
        byte[] col = SweepPriorityNamedColumn.WRITE_COUNT.getShortName();
        for (String tableName : tableNames) {
            Preconditions.checkState(!tableName.startsWith(AtlasDbConstants.NAMESPACE_PREFIX),
                    "The sweep stats kvs should wrap the namespace mapping kvs, not the other way around.");
            byte[] row = SweepPriorityRow.of(tableName).persistToBytes();
            Cell cell = Cell.create(row, col);
            Value oldValue = oldWriteCounts.get(cell);
            long oldCount = oldValue == null || oldValue.getContents().length == 0 ? 0
                    : SweepPriorityTable.WriteCount.BYTES_HYDRATOR.hydrateFromBytes(oldValue.getContents())
                            .getValue();
            long newValue = clears.contains(tableName) ? writes.count(tableName)
                    : oldCount + writes.count(tableName);
            log.debug("Sweep priority for {} has {} writes (was {})", tableName, newValue, oldCount);
            newWriteCounts.put(cell, SweepPriorityTable.WriteCount.of(newValue).persistValue());
        }
        long timestamp = timestampService.getFreshTimestamp();

        // Committing before writing is intentional: we want the start timestamp to
        // show up in the transaction table before we do our writes.
        commit(timestamp);
        delegate().put(SWEEP_PRIORITY_TABLE, newWriteCounts, timestamp);
    } catch (RuntimeException e) {
        Set<String> allTableNames = delegate().getAllTableNames();
        if (!allTableNames.contains(SWEEP_PRIORITY_TABLE)
                || !allTableNames.contains(TransactionConstants.TRANSACTION_TABLE)) {
            // Ignore problems when the sweep or transaction tables don't exist.
            log.warn("Ignoring failed sweep stats flush due to {}", e.getMessage(), e);
            return;
        }
        log.error("Unable to flush sweep stats for writes {} and clears {}: {}", writes, clears, e.getMessage(),
                e);
        throw e;
    }
}

From source file:com.palantir.atlasdb.sweep.BackgroundSweeperImpl.java

@Nullable
private SweepProgressRowResult chooseNextTableToSweep(SweepTransaction t) {
    Set<String> allTables = Sets.difference(kvs.getAllTableNames(), AtlasDbConstants.hiddenTables);
    SweepPriorityTable oldPriorityTable = tableFactory.getSweepPriorityTable(t);
    SweepPriorityTable newPriorityTable = tableFactory.getSweepPriorityTable(t.delegate());

    // We read priorities from the past because we should prioritize based on what the sweeper will
    // actually be able to sweep. We read priorities from the present to make sure we don't repeatedly
    // sweep the same table while waiting for the past to catch up.
    List<SweepPriorityRowResult> oldPriorities = oldPriorityTable.getAllRowsUnordered().immutableCopy();
    List<SweepPriorityRowResult> newPriorities = newPriorityTable.getAllRowsUnordered().immutableCopy();
    Map<String, SweepPriorityRowResult> newPrioritiesByTableName = Maps.uniqueIndex(newPriorities,
            Functions.compose(SweepPriorityRow.getFullTableNameFun(), SweepPriorityRowResult.getRowNameFun()));
    String tableName = getTableToSweep(t, allTables, oldPriorities, newPrioritiesByTableName);
    if (tableName == null) {
        return null;
    }
    RowResult<byte[]> rawResult = RowResult.<byte[]>create(SweepProgressRow.of(0).persistToBytes(),
            ImmutableSortedMap.<byte[], byte[]>orderedBy(UnsignedBytes.lexicographicalComparator())
                    .put(SweepProgressTable.SweepProgressNamedColumn.FULL_TABLE_NAME.getShortName(),
                            SweepProgressTable.FullTableName.of(tableName).persistValue())
                    .build());

    log.debug("Now starting to sweep {}.", tableName);
    return SweepProgressRowResult.of(rawResult);
}

From source file:org.jclouds.joyent.cloudapi.v6_5.domain.Machine.java

/**
 * Any "extra" metadata this machine has
 *
 * <h4>note</h4>
 * 
 * metadata can contain arbitrarily complex values. If the value has structure, you should use
 * {@link #getMetadataAsJsonLiterals}
 * 
 */
public Map<String, String> getMetadata() {
    return Maps.transformValues(metadata, Functions.compose(Functions.toStringFunction(), unquoteString));
}
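
The same pattern, shown standalone (a sketch with made-up names, not part of the jclouds source): composing Functions.toStringFunction() with another function and applying the result over map values via Maps.transformValues.

import com.google.common.base.Function;
import com.google.common.base.Functions;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;

import java.util.Map;

public class TransformValuesExample {
    public static void main(String[] args) {
        Map<String, Integer> raw = ImmutableMap.of("a", 1, "b", 2);

        // Double each value before rendering it as a String: toString(timesTwo(v)).
        Function<Integer, Integer> timesTwo = new Function<Integer, Integer>() {
            @Override
            public Integer apply(Integer input) {
                return input * 2;
            }
        };

        Map<String, String> view = Maps.transformValues(raw,
                Functions.compose(Functions.toStringFunction(), timesTwo));
        System.out.println(view); // {a=2, b=4}
    }
}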

From source file:org.apache.aurora.scheduler.updater.JobUpdateControllerImpl.java

@Override
public void pause(final IJobUpdateKey key, AuditData auditData) throws UpdateStateException {
    requireNonNull(key);
    LOG.info("Attempting to pause update " + key);
    unscopedChangeUpdateStatus(key, Functions.compose(createAuditedEvent(auditData), GET_PAUSE_STATE));
}

From source file:org.sosy_lab.cpachecker.util.predicates.AbstractionManager.java

public Region buildRegionFromFormula(BooleanFormula pF) {
    return rmgr.fromFormula(pF, fmgr, Functions.compose(new Function<AbstractionPredicate, Region>() {
        @Override
        public Region apply(AbstractionPredicate pInput) {
            return pInput.getAbstractVariable();
        }
    }, Functions.forMap(atomToPredicate)));
}

From source file:net.shibboleth.idp.profile.impl.FilterAttributes.java

/**
 * Set the strategy used to locate the {@link SAMLMetadataContext} associated with a given
 * {@link ProfileRequestContext}.  Also sets the strategy to find the {@link SAMLMetadataContext}
 * from the {@link AttributeFilterContext}.
 * 
 * @param strategy strategy used to locate the {@link SAMLMetadataContext} associated with a given
 *            {@link ProfileRequestContext}
 */
public void setMetadataContextLookupStrategy(
        @Nonnull final Function<ProfileRequestContext, SAMLMetadataContext> strategy) {
    ComponentSupport.ifInitializedThrowUnmodifiabledComponentException(this);

    metadataContextLookupStrategy = Constraint.isNotNull(strategy,
            "MetadataContext lookup strategy cannot be null");
    metadataFromFilterLookupStrategy = Functions.compose(metadataContextLookupStrategy,
            new RootContextLookup<AttributeFilterContext, ProfileRequestContext>());
}

From source file:org.apache.aurora.scheduler.updater.JobUpdateControllerImpl.java

@Override
public void abort(IJobUpdateKey key, AuditData auditData) throws UpdateStateException {
    unscopedChangeUpdateStatus(key,
            Functions.compose(createAuditedEvent(auditData), Functions.constant(ABORTED)));
}

From source file:org.apache.aurora.scheduler.updater.JobUpdateControllerImpl.java

@Override
public void rollback(IJobUpdateKey key, AuditData auditData) throws UpdateStateException {
    unscopedChangeUpdateStatus(key,
            Functions.compose(createAuditedEvent(auditData), Functions.constant(ROLLING_BACK)));
}