List of usage examples for com.google.common.collect.Lists.transform
@CheckReturnValue public static <F, T> List<T> transform(List<F> fromList, Function<? super F, ? extends T> function)
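Lists.transform returns a lazily evaluated view of fromList: the function is applied each time an element is read rather than up front, so the view reflects later changes to the backing list and repeated reads re-invoke the function. A minimal, self-contained sketch of the basic call (the class and variable names here are illustrative only, not taken from any of the source files below):

import com.google.common.base.Function;
import com.google.common.collect.Lists;

import java.util.Arrays;
import java.util.List;

public class ListsTransformExample {
    public static void main(String[] args) {
        List<String> names = Arrays.asList("alpha", "beta", "gamma");

        // The returned list is a view: apply() runs on each read, not here.
        List<Integer> lengths = Lists.transform(names, new Function<String, Integer>() {
            @Override
            public Integer apply(String input) {
                return input.length();
            }
        });

        System.out.println(lengths); // prints [5, 4, 5]
    }
}

Copy the view into a new list (for example with Lists.newArrayList(lengths) or ImmutableList.copyOf(lengths)) if the transformation should only run once.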
From source file:com.proofpoint.galaxy.coordinator.CoordinatorAssignmentResource.java
@POST
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response upgrade(UpgradeVersions upgradeVersions, @Context UriInfo uriInfo,
        @HeaderParam(GALAXY_SLOTS_VERSION_HEADER) String expectedSlotsVersion) {
    Preconditions.checkNotNull(upgradeVersions, "upgradeRepresentation must not be null");

    // build filter
    List<UUID> uuids = Lists.transform(coordinator.getAllSlotStatus(), SlotStatus.uuidGetter());
    Predicate<SlotStatus> slotFilter = SlotFilterBuilder.build(uriInfo, true, uuids);

    // upgrade slots
    List<SlotStatus> results = coordinator.upgrade(slotFilter, upgradeVersions, expectedSlotsVersion);

    // build response
    return Response.ok(transform(results, fromSlotStatus(coordinator.getAllSlotStatus(), repository)))
            .header(GALAXY_SLOTS_VERSION_HEADER, createSlotsVersion(results))
            .build();
}
From source file:org.jasig.portal.io.xml.pags.PersonAttributesGroupStoreDataFunction.java
@Override
public Iterable<? extends IPortalData> apply(IPortalDataType input) {
    final Set<IPersonAttributesGroupDefinition> personAttributesGroupDefinitions = this.personAttributesGroupDefinitionDao
            .getPersonAttributesGroupDefinitions();
    List<IPersonAttributesGroupDefinition> pagsDefs = new ArrayList<IPersonAttributesGroupDefinition>();
    for (IPersonAttributesGroupDefinition pagsDef : personAttributesGroupDefinitions) {
        pagsDefs.add(pagsDef);
    }
    final List<IPortalData> portalData = Lists.transform(pagsDefs,
            new Function<IPersonAttributesGroupDefinition, IPortalData>() {
                @Override
                public IPortalData apply(IPersonAttributesGroupDefinition personAttributesGroup) {
                    return new SimpleStringPortalData(personAttributesGroup.getName(), null,
                            personAttributesGroup.getDescription());
                }
            });
    return portalData;
}
From source file:org.batoo.jpa.core.impl.criteria.expression.FunctionExpression.java
/**
 * {@inheritDoc}
 */
@Override
public String generateJpqlRestriction(final BaseQueryImpl<?> query) {
    return this.function + "("
            + Joiner.on(", ").join(Lists.transform(this.arguments, new Function<Expression<?>, String>() {

                @Override
                public String apply(Expression<?> input) {
                    return ((AbstractExpression<?>) input).generateJpqlRestriction(query);
                }
            }))
            + ")";
}
From source file:org.elasticsearch.search.aggregations.reducers.SiblingReducer.java
@SuppressWarnings("unchecked")
@Override
public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
    @SuppressWarnings("rawtypes")
    InternalMultiBucketAggregation multiBucketsAgg = (InternalMultiBucketAggregation) aggregation;
    List<? extends Bucket> buckets = multiBucketsAgg.getBuckets();
    List<Bucket> newBuckets = new ArrayList<>();
    for (int i = 0; i < buckets.size(); i++) {
        InternalMultiBucketAggregation.InternalBucket bucket = (InternalMultiBucketAggregation.InternalBucket) buckets
                .get(i);
        InternalAggregation aggToAdd = doReduce(bucket.getAggregations(), reduceContext);
        List<InternalAggregation> aggs = new ArrayList<>(
                Lists.transform(bucket.getAggregations().asList(), AGGREGATION_TRANFORM_FUNCTION));
        aggs.add(aggToAdd);
        InternalMultiBucketAggregation.InternalBucket newBucket = multiBucketsAgg
                .createBucket(new InternalAggregations(aggs), bucket);
        newBuckets.add(newBucket);
    }
    return multiBucketsAgg.create(newBuckets);
}
From source file:co.freeside.betamax.ConfigurationBuilder.java
public T withProperties(Properties properties) {
    if (properties.containsKey("betamax.tapeRoot")) {
        tapeRoot(new File(properties.getProperty("betamax.tapeRoot")));
    }
    if (properties.containsKey("betamax.defaultMode")) {
        defaultMode(TapeMode.valueOf(properties.getProperty("betamax.defaultMode")));
    }
    if (properties.containsKey("betamax.defaultMatchRules")) {
        List<MatchRule> rules = Lists.transform(
                Splitter.on(",").splitToList(properties.getProperty("betamax.defaultMatchRules")),
                new Function<String, MatchRule>() {
                    @Override
                    public MatchRule apply(String input) {
                        return MatchRules.valueOf(input);
                    }
                });
        defaultMatchRule(ComposedMatchRule.of(rules));
    }
    if (properties.containsKey("betamax.ignoreHosts")) {
        ignoreHosts(Splitter.on(",").splitToList(properties.getProperty("betamax.ignoreHosts")));
    }
    if (properties.containsKey("betamax.ignoreLocalhost")) {
        ignoreLocalhost(Boolean.valueOf(properties.getProperty("betamax.ignoreLocalhost")));
    }
    return self();
}
From source file:de.iteratec.iteraplan.elasticeam.derived.AddReplaceList.java
final List<N> get() {
    return Lists.transform(ListUtils.sum(rawGet(), getAdditionalElements()), this.replacer);
}
From source file:models.Message.java
public static List<Message> getLastMessages(ObjectId threadId, boolean doUpdate, ObjectId forUser,
        Integer start, Integer count) {
    BasicDBObject query = new BasicDBObject().append("thread", threadId);
    BasicDBObject sort = new BasicDBObject().append("sent", -1);
    if (start == null)
        start = 0;
    if (count == null)
        count = 30;
    DBCursor iobj = MongoDB.getDB().getCollection(MongoDB.CMessage).find(query).sort(sort).skip(start)
            .limit(count);
    if (doUpdate)
        MessageThread.setAsRead(threadId, forUser);
    // Hilarity ensues :)
    return Lists.transform(iobj.toArray(), MongoDB.getSelf().toMessage());
}
From source file:org.apache.druid.query.groupby.GroupByQueryHelper.java
public static <T> Pair<IncrementalIndex, Accumulator<IncrementalIndex, T>> createIndexAccumulatorPair(
        final GroupByQuery query, final GroupByQueryConfig config, NonBlockingPool<ByteBuffer> bufferPool,
        final boolean combine) {
    final GroupByQueryConfig querySpecificConfig = config.withOverrides(query);
    final Granularity gran = query.getGranularity();
    final DateTime timeStart = query.getIntervals().get(0).getStart();
    DateTime granTimeStart = timeStart;
    if (!(Granularities.ALL.equals(gran))) {
        granTimeStart = gran.bucketStart(timeStart);
    }

    final List<AggregatorFactory> aggs;

    if (combine) {
        aggs = Lists.transform(query.getAggregatorSpecs(),
                new Function<AggregatorFactory, AggregatorFactory>() {
                    @Override
                    public AggregatorFactory apply(AggregatorFactory input) {
                        return input.getCombiningFactory();
                    }
                });
    } else {
        aggs = query.getAggregatorSpecs();
    }

    final List<String> dimensions = Lists.transform(query.getDimensions(),
            new Function<DimensionSpec, String>() {
                @Override
                public String apply(DimensionSpec input) {
                    return input.getOutputName();
                }
            });

    final IncrementalIndex index;
    final boolean sortResults = query.getContextValue(CTX_KEY_SORT_RESULTS, true);

    // All groupBy dimensions are strings, for now.
    final List<DimensionSchema> dimensionSchemas = Lists.newArrayList();
    for (DimensionSpec dimension : query.getDimensions()) {
        dimensionSchemas.add(new StringDimensionSchema(dimension.getOutputName()));
    }

    final IncrementalIndexSchema indexSchema = new IncrementalIndexSchema.Builder()
            .withDimensionsSpec(new DimensionsSpec(dimensionSchemas, null, null))
            .withMetrics(aggs.toArray(new AggregatorFactory[0]))
            .withQueryGranularity(gran)
            .withMinTimestamp(granTimeStart.getMillis())
            .build();

    if (query.getContextValue("useOffheap", false)) {
        index = new IncrementalIndex.Builder()
                .setIndexSchema(indexSchema)
                .setDeserializeComplexMetrics(false)
                .setConcurrentEventAdd(true)
                .setSortFacts(sortResults)
                .setMaxRowCount(querySpecificConfig.getMaxResults())
                .buildOffheap(bufferPool);
    } else {
        index = new IncrementalIndex.Builder()
                .setIndexSchema(indexSchema)
                .setDeserializeComplexMetrics(false)
                .setConcurrentEventAdd(true)
                .setSortFacts(sortResults)
                .setMaxRowCount(querySpecificConfig.getMaxResults())
                .buildOnheap();
    }

    Accumulator<IncrementalIndex, T> accumulator = new Accumulator<IncrementalIndex, T>() {
        @Override
        public IncrementalIndex accumulate(IncrementalIndex accumulated, T in) {
            if (in instanceof MapBasedRow) {
                try {
                    MapBasedRow row = (MapBasedRow) in;
                    accumulated.add(new MapBasedInputRow(row.getTimestamp(), dimensions, row.getEvent()));
                } catch (IndexSizeExceededException e) {
                    throw new ResourceLimitExceededException(e.getMessage());
                }
            } else {
                throw new ISE("Unable to accumulate something of type [%s]", in.getClass());
            }
            return accumulated;
        }
    };

    return new Pair<>(index, accumulator);
}
From source file:com.google.cloud.bigquery.FieldList.java
static FieldList fromPb(List<TableFieldSchema> fields) {
    return FieldList.of(Lists.transform(fields, Field.FROM_PB_FUNCTION));
}
From source file:de.kuschku.util.backports.Stream.java
@NonNull
public <S> Stream<S> map(@NonNull Function<T, S> function) {
    return new Stream<>(Lists.transform(list, function));
}