List of usage examples for com.google.common.base.Functions.constant
public static <E> Function<Object, E> constant(@Nullable E value)
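For reference, a minimal sketch of what the returned function does (class and value names here are made up for illustration): it ignores its input and always returns the value it was created with, including for a null argument.

import com.google.common.base.Function;
import com.google.common.base.Functions;

public class ConstantFunctionExample {
    public static void main(String[] args) {
        // the returned function ignores its input and always yields the same value
        Function<Object, String> always = Functions.constant("fixed");

        System.out.println(always.apply("anything")); // fixed
        System.out.println(always.apply(42));         // fixed
        System.out.println(always.apply(null));       // fixed
    }
}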
From source file:org.apache.brooklyn.cloudfoundry.entity.CloudFoundryEntityImpl.java
protected void connectServiceIsRunning() {
    serviceProcessIsRunning = FunctionFeed.builder()
            .entity(this)
            .period(Duration.FIVE_SECONDS)
            .poll(new FunctionPollConfig<Boolean, Boolean>(SERVICE_PROCESS_IS_RUNNING)
                    .onException(Functions.constant(Boolean.FALSE))
                    .callable(new Callable<Boolean>() {
                        public Boolean call() {
                            return driver.isRunning();
                        }
                    }))
            .build();
}
From source file:co.cask.cdap.common.zookeeper.coordination.ResourceCoordinatorClient.java
/**
 * Deletes the {@link ResourceRequirement} for the given resource.
 *
 * @param resourceName Name of the resource.
 * @return A {@link ListenableFuture} that will be completed when the requirement is successfully removed.
 *         If the requirement doesn't exist, the deletion is still treated as successful.
 */
public ListenableFuture<String> deleteRequirement(String resourceName) {
    String zkPath = CoordinationConstants.REQUIREMENTS_PATH + "/" + resourceName;
    return Futures.transform(
            ZKOperations.ignoreError(zkClient.delete(zkPath),
                    KeeperException.NoNodeException.class, resourceName),
            Functions.constant(resourceName));
}
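A minimal, self-contained sketch of the same idea, with a SettableFuture and resource name standing in for the ZooKeeper delete above: whatever the inner future yields, the transformed future resolves to a fixed value. Note that the snippet above uses the older two-argument Futures.transform overload; recent Guava releases require an explicit executor, which the sketch assumes.

import com.google.common.base.Functions;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;

public class ConstantTransformExample {
    public static void main(String[] args) throws Exception {
        // placeholder for an asynchronous operation such as the ZooKeeper delete above
        SettableFuture<Void> deleteDone = SettableFuture.create();

        // once the delete completes, resolve the outer future to the resource name
        ListenableFuture<String> result = Futures.transform(deleteDone,
                Functions.constant("my-resource"), MoreExecutors.directExecutor());

        deleteDone.set(null);
        System.out.println(result.get()); // my-resource
    }
}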
From source file:org.apache.brooklyn.core.feed.FeedConfig.java
/** @see #onFailureOrException(Function) */
public F setOnFailureOrException(T val) {
    return onFailureOrException(Functions.constant(val));
}
From source file:org.apache.drill.exec.store.parquet.AbstractParquetScanBatchCreator.java
protected ScanBatch getBatch(ExecutorFragmentContext context, AbstractParquetRowGroupScan rowGroupScan,
        OperatorContext oContext) throws ExecutionSetupException {
    final ColumnExplorer columnExplorer = new ColumnExplorer(context.getOptions(), rowGroupScan.getColumns());

    if (!columnExplorer.isStarQuery()) {
        rowGroupScan = rowGroupScan.copy(columnExplorer.getTableColumns());
        rowGroupScan.setOperatorId(rowGroupScan.getOperatorId());
    }

    AbstractDrillFileSystemManager fsManager = getDrillFileSystemCreator(oContext, context.getOptions());

    // keep footers in a map to avoid re-reading them
    Map<String, ParquetMetadata> footers = new HashMap<>();
    List<RecordReader> readers = new LinkedList<>();
    List<Map<String, String>> implicitColumns = new ArrayList<>();
    Map<String, String> mapWithMaxColumns = new LinkedHashMap<>();

    for (RowGroupReadEntry rowGroup : rowGroupScan.getRowGroupReadEntries()) {
        /*
          Here we could store a map from file names to footers, to prevent re-reading the footer
          for each row group in a file.
          TODO - to prevent reading the footer again in the parquet record reader (it is read earlier
          in the ParquetStorageEngine) we should add more information to the RowGroupInfo that will be
          populated upon the first read to provide the reader with all of the file metadata it needs.
          These fields will be added to the constructor below.
        */
        try {
            Stopwatch timer = logger.isTraceEnabled() ? Stopwatch.createUnstarted() : null;
            DrillFileSystem fs = fsManager.get(rowGroupScan.getFsConf(rowGroup), rowGroup.getPath());

            if (!footers.containsKey(rowGroup.getPath())) {
                if (timer != null) {
                    timer.start();
                }
                ParquetMetadata footer = readFooter(fs.getConf(), rowGroup.getPath());
                if (timer != null) {
                    long timeToRead = timer.elapsed(TimeUnit.MICROSECONDS);
                    logger.trace("ParquetTrace,Read Footer,{},{},{},{},{},{},{}",
                            "", rowGroup.getPath(), "", 0, 0, 0, timeToRead);
                }
                footers.put(rowGroup.getPath(), footer);
            }
            ParquetMetadata footer = footers.get(rowGroup.getPath());

            boolean autoCorrectCorruptDates = rowGroupScan.areCorruptDatesAutoCorrected();
            ParquetReaderUtility.DateCorruptionStatus containsCorruptDates = ParquetReaderUtility
                    .detectCorruptDates(footer, rowGroupScan.getColumns(), autoCorrectCorruptDates);
            logger.debug("Contains corrupt dates: {}", containsCorruptDates);

            if (!context.getOptions().getBoolean(ExecConstants.PARQUET_NEW_RECORD_READER) && !isComplex(footer)) {
                readers.add(new ParquetRecordReader(context,
                        rowGroup.getPath(),
                        rowGroup.getRowGroupIndex(),
                        rowGroup.getNumRecordsToRead(),
                        fs,
                        CodecFactory.createDirectCodecFactory(fs.getConf(),
                                new ParquetDirectByteBufferAllocator(oContext.getAllocator()), 0),
                        footer,
                        rowGroupScan.getColumns(),
                        containsCorruptDates));
            } else {
                readers.add(new DrillParquetReader(context,
                        footer,
                        rowGroup,
                        columnExplorer.getTableColumns(),
                        fs,
                        containsCorruptDates));
            }

            List<String> partitionValues = rowGroupScan.getPartitionValues(rowGroup);
            Map<String, String> implicitValues = columnExplorer.populateImplicitColumns(rowGroup.getPath(),
                    partitionValues, rowGroupScan.supportsFileImplicitColumns());
            implicitColumns.add(implicitValues);
            if (implicitValues.size() > mapWithMaxColumns.size()) {
                mapWithMaxColumns = implicitValues;
            }
        } catch (IOException e) {
            throw new ExecutionSetupException(e);
        }
    }

    // all readers should have the same number of implicit columns, add missing ones with value null
    Map<String, String> diff = Maps.transformValues(mapWithMaxColumns, Functions.constant((String) null));
    for (Map<String, String> map : implicitColumns) {
        map.putAll(Maps.difference(map, diff).entriesOnlyOnRight());
    }

    return new ScanBatch(context, oContext, readers, implicitColumns);
}
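The last few lines above use a common padding idiom: Maps.transformValues with Functions.constant((String) null) turns the widest map into an all-null template, and Maps.difference supplies the keys each narrower map is missing. A standalone sketch of that idiom, with made-up column names, might look like this:

import com.google.common.base.Functions;
import com.google.common.collect.Maps;
import java.util.LinkedHashMap;
import java.util.Map;

public class PadWithNullsExample {
    public static void main(String[] args) {
        // the map with the most implicit columns (values are irrelevant for the template)
        Map<String, String> widest = new LinkedHashMap<>();
        widest.put("dir0", "2020");
        widest.put("dir1", "01");

        // a reader's map that is missing "dir1"
        Map<String, String> narrow = new LinkedHashMap<>();
        narrow.put("dir0", "2020");

        // view of the widest map with every value replaced by null
        Map<String, String> template = Maps.transformValues(widest, Functions.constant((String) null));

        // copy over only the keys that appear in the template but are absent from the narrow map
        narrow.putAll(Maps.difference(narrow, template).entriesOnlyOnRight());

        System.out.println(narrow); // {dir0=2020, dir1=null}
    }
}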
From source file:org.apache.aurora.scheduler.updater.JobDiff.java
/**
 * Creates a map of {@code instanceCount} copies of {@code config}.
 *
 * @param config Configuration to generate an instance mapping for.
 * @param instanceCount Number of instances to represent.
 * @return A map of instance IDs (from 0 to {@code instanceCount - 1}) to {@code config}.
 */
public static Map<Integer, ITaskConfig> asMap(ITaskConfig config, int instanceCount) {
    requireNonNull(config);
    Set<Integer> desiredInstances = ContiguousSet.create(Range.closedOpen(0, instanceCount),
            DiscreteDomain.integers());
    return ImmutableMap.copyOf(Maps.asMap(desiredInstances, Functions.constant(config)));
}
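A minimal sketch of the same Maps.asMap plus Functions.constant pattern, with a plain String standing in for ITaskConfig: every key in the set is mapped to the one shared value.

import com.google.common.base.Functions;
import com.google.common.collect.ContiguousSet;
import com.google.common.collect.DiscreteDomain;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.collect.Range;
import java.util.Map;

public class ConstantValueMapExample {
    public static void main(String[] args) {
        String config = "task-config";   // stand-in for the shared configuration object
        int instanceCount = 3;

        // instance IDs 0..2, each mapped to the same value
        Map<Integer, String> byInstance = ImmutableMap.copyOf(Maps.asMap(
                ContiguousSet.create(Range.closedOpen(0, instanceCount), DiscreteDomain.integers()),
                Functions.constant(config)));

        System.out.println(byInstance); // {0=task-config, 1=task-config, 2=task-config}
    }
}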
From source file:com.googlecode.blaisemath.style.ObjectStyler.java
/**
 * Sets a single label style for all objects.
 * @param style style to use for all objects
 */
public void setLabelStyleConstant(AttributeSet style) {
    setLabelStyleDelegate(Functions.constant(checkNotNull(style)));
}
From source file:org.terasology.cities.WorldFacade.java
/**
 * @param seed the seed value
 * @param heightMap the height map to use
 */
public WorldFacade(final String seed, final HeightMap heightMap) {
    final CityTerrainComponent terrainConfig = WorldFacade.getWorldEntity()
            .getComponent(CityTerrainComponent.class);
    final CitySpawnComponent spawnConfig = WorldFacade.getWorldEntity().getComponent(CitySpawnComponent.class);

    junctions = new Function<Vector2i, Junction>() {
        @Override
        public Junction apply(Vector2i input) {
            return new Junction(input);
        }
    };
    junctions = CachingFunction.wrap(junctions);

    lakeMap = CachingFunction.wrap(new Function<Sector, Set<Lake>>() {
        @Override
        public Set<Lake> apply(Sector sector) {
            Integer salt = 2354234;
            int ngseed = Objects.hashCode(salt, seed, sector);
            WaterNameProvider ng = new WaterNameProvider(ngseed, new DebugWaterTheme());

            int minSize = 16;
            int scale = 8;
            int size = Sector.SIZE / scale;
            HeightMap orgHm = HeightMaps.scalingArea(heightMap, scale);
            Vector2i coords = sector.getCoords();
            Rectangle sectorRect = new Rectangle(coords.x * size, coords.y * size, size, size);
            ContourTracer ct = new ContourTracer(orgHm, sectorRect, terrainConfig.getSeaLevel());

            Set<Lake> lakes = Sets.newHashSet();

            for (Contour c : ct.getOuterContours()) {
                Contour scaledContour = c.scale(scale);
                Polygon polyLake = scaledContour.getPolygon();
                if (polyLake.getBounds().width > minSize && polyLake.getBounds().height > minSize) {
                    Lake lake = new Lake(scaledContour, ng.generateName());

                    for (Contour isl : ct.getInnerContours()) {
                        Rectangle bboxIsland = isl.getPolygon().getBounds();
                        if (polyLake.getBounds().contains(bboxIsland)) {
                            if (allInside(polyLake, isl.getPoints())) {
                                lake.addIsland(isl);
                            }
                        }
                    }

                    lakes.add(lake);
                }
            }

            return lakes;
        }

        private boolean allInside(Polygon polygon, Collection<Point> points) {
            for (Point pt : points) {
                if (!polygon.contains(pt)) {
                    return false;
                }
            }
            return true;
        }
    });

    int minCitiesPerSector = spawnConfig.getMinCitiesPerSector();
    int maxCitiesPerSector = spawnConfig.getMaxCitiesPerSector();
    int minSize = spawnConfig.getMinCityRadius();
    int maxSize = spawnConfig.getMaxCityRadius();

    AreaInfo globalAreaInfo = new AreaInfo(terrainConfig, heightMap);

    Function<? super Sector, AreaInfo> sectorInfos = Functions.constant(globalAreaInfo);
    SiteFinderRandom cpr = new SiteFinderRandom(seed, sectorInfos, minCitiesPerSector, maxCitiesPerSector,
            minSize, maxSize);
    final Function<Sector, Set<Site>> siteMap = CachingFunction.wrap(cpr);

    double maxDist = spawnConfig.getMaxConnectedCitiesDistance();
    connectedCities = new SiteConnector(siteMap, maxDist);
    connectedCities = CachingFunction.wrap(connectedCities);

    sectorConnections = new SectorConnector(siteMap, connectedCities);
    sectorConnections = CachingFunction.wrap(sectorConnections);

    Function<UnorderedPair<Site>, Road> rg = new Function<UnorderedPair<Site>, Road>() {
        private RoadGeneratorSimple rgs = new RoadGeneratorSimple(junctions);
        private RoadModifierRandom rmr = new RoadModifierRandom(0.5);

        @Override
        public Road apply(UnorderedPair<Site> input) {
            Road road = rgs.apply(input);
            rmr.apply(road);
            return road;
        }
    };

    final Function<UnorderedPair<Site>, Road> cachedRoadgen = CachingFunction.wrap(rg);

    roadMap = new Function<Sector, Set<Road>>() {
        @Override
        public Set<Road> apply(Sector sector) {
            Set<Road> allRoads = Sets.newHashSet();

            Set<UnorderedPair<Site>> localConns = sectorConnections.apply(sector);
            Set<UnorderedPair<Site>> allConns = Sets.newHashSet(localConns);
            Set<Lake> allBlockedAreas = Sets.newHashSet(lakeMap.apply(sector));

            // add all neighbors, because their roads might be passing through
            for (Orientation dir : Orientation.values()) {
                Sector neighbor = sector.getNeighbor(dir);
                allConns.addAll(sectorConnections.apply(neighbor));
                allBlockedAreas.addAll(lakeMap.apply(sector));
            }

            for (UnorderedPair<Site> conn : allConns) {
                Road road = cachedRoadgen.apply(conn);
                if (!isBlocked(road, lakeMap.apply(sector))) {
                    allRoads.add(road);
                }
            }

            return allRoads;
        }

        public boolean isBlocked(Road road, Set<? extends NamedArea> blockedAreas) {
            for (Vector2i pt : road.getPoints()) {
                Vector2d v = new Vector2d(pt.x, pt.y);
                for (NamedArea area : blockedAreas) {
                    if (area.contains(v)) {
                        return true;
                    }
                }
            }
            return false;
        }
    };

    roadMap = CachingFunction.wrap(roadMap);

    roadShapeFunc = new RoadShapeGenerator(roadMap);
    roadShapeFunc = CachingFunction.wrap(roadShapeFunc);

    final DefaultTownWallGenerator twg = new DefaultTownWallGenerator(seed, heightMap);
    final LotGeneratorRandom housingLotGenerator = new LotGeneratorRandom(seed);
    final LotGeneratorRandom churchLotGenerator = new LotGeneratorRandom(seed, 25d, 40d, 1, 100);
    final SimpleHousingGenerator blgGenerator = new SimpleHousingGenerator(seed, heightMap);
    final SimpleFenceGenerator sfg = new SimpleFenceGenerator(seed);
    final SimpleChurchGenerator sacg = new SimpleChurchGenerator(seed, heightMap);

    decoratedCities = CachingFunction.wrap(new Function<Sector, Set<City>>() {
        @Override
        public Set<City> apply(Sector input) {
            int sectorSeed = Objects.hashCode(seed, input);
            TownNameProvider nameGen = new TownNameProvider(sectorSeed, new DebugTownTheme());

            Stopwatch pAll = null;
            Stopwatch pSites = null;
            Stopwatch pRoads = null;

            if (logger.isInfoEnabled()) {
                pAll = Stopwatch.createStarted();
            }

            if (logger.isInfoEnabled()) {
                pSites = Stopwatch.createStarted();
            }

            Set<Site> sites = siteMap.apply(input);

            if (logger.isInfoEnabled()) {
                logger.info("Generated settlement sites for {} in {}ms.", input,
                        pSites.elapsed(TimeUnit.MILLISECONDS));
            }

            if (logger.isInfoEnabled()) {
                pRoads = Stopwatch.createStarted();
            }

            Shape roadShape = roadShapeFunc.apply(input);

            if (logger.isInfoEnabled()) {
                logger.info("Generated roads for {} in {}ms.", input, pRoads.elapsed(TimeUnit.MILLISECONDS));
            }

            Set<City> cities = Sets.newHashSet();

            for (Site site : sites) {
                Stopwatch pSite = null;
                if (logger.isInfoEnabled()) {
                    pSite = Stopwatch.createStarted();
                }

                int minX = site.getPos().x - site.getRadius();
                int minZ = site.getPos().y - site.getRadius();
                Rectangle cityArea = new Rectangle(minX, minZ, site.getRadius() * 2, site.getRadius() * 2);
                HeightMap cityAreaHeightMap = HeightMaps.caching(heightMap, cityArea, 4);

                AreaInfo si = new AreaInfo(terrainConfig, cityAreaHeightMap);
                si.addBlockedArea(roadShape);

                String name = nameGen.generateName(TownAffinityVector.create().prefix(0.2).postfix(0.2));
                MedievalTown town = new MedievalTown(name, site.getPos(), site.getRadius());

                // add a town wall if radius is larger than 1/4
                int minRadForTownWall = (spawnConfig.getMinCityRadius() * 3 + spawnConfig.getMaxCityRadius()) / 4;
                if (town.getRadius() > minRadForTownWall) {
                    TownWall tw = twg.generate(town, si);
                    town.setTownWall(tw);

                    TownWallShapeGenerator twsg = new TownWallShapeGenerator();
                    Shape townWallShape = twsg.computeShape(tw);
                    si.addBlockedArea(townWallShape);
                }

                Set<SimpleLot> churchLots = churchLotGenerator.generate(town, si);
                if (!churchLots.isEmpty()) {
                    SimpleLot lot = churchLots.iterator().next();
                    SimpleChurch church = sacg.generate(lot);
                    lot.addBuilding(church);
                    town.add(lot);
                }

                Set<SimpleLot> housingLots = housingLotGenerator.generate(town, si);
                for (SimpleLot lot : housingLots) {
                    town.add(lot);

                    for (SimpleBuilding bldg : blgGenerator.apply(lot)) {
                        lot.addBuilding(bldg);
                        SimpleFence fence = sfg.createFence(town, lot.getShape());
                        lot.setFence(fence);
                    }
                }

                if (logger.isInfoEnabled()) {
                    logger.info("Generated city '{}' in {} in {}ms.", town, input,
                            pSite.elapsed(TimeUnit.MILLISECONDS));
                }

                cities.add(town);
            }

            if (logger.isInfoEnabled()) {
                logger.info("Generated {} .. in {}ms.", input, pAll.elapsed(TimeUnit.MILLISECONDS));
            }

            return cities;
        }
    });

    // this is required by WorldEventReceiver
    CoreRegistry.put(WorldFacade.class, this);
}
From source file:org.apache.drill.exec.store.hive.HiveDrillNativeScanBatchCreator.java
@Override
public ScanBatch getBatch(FragmentContext context, HiveDrillNativeParquetSubScan config,
        List<RecordBatch> children) throws ExecutionSetupException {
    final Table table = config.getTable();
    final List<InputSplit> splits = config.getInputSplits();
    final List<Partition> partitions = config.getPartitions();
    final List<SchemaPath> columns = config.getColumns();
    final String partitionDesignator = context.getOptions()
            .getOption(ExecConstants.FILESYSTEM_PARTITION_COLUMN_LABEL).string_val;
    List<Map<String, String>> implicitColumns = Lists.newLinkedList();
    boolean selectAllQuery = AbstractRecordReader.isStarQuery(columns);

    final boolean hasPartitions = (partitions != null && partitions.size() > 0);

    final List<String[]> partitionColumns = Lists.newArrayList();
    final List<Integer> selectedPartitionColumns = Lists.newArrayList();
    List<SchemaPath> newColumns = columns;
    if (!selectAllQuery) {
        // Separate out the partition and non-partition columns. Non-partition columns are passed directly
        // to the ParquetRecordReader. Partition columns are passed to ScanBatch.
        newColumns = Lists.newArrayList();
        Pattern pattern = Pattern.compile(String.format("%s[0-9]+", partitionDesignator));
        for (SchemaPath column : columns) {
            Matcher m = pattern.matcher(column.getAsUnescapedPath());
            if (m.matches()) {
                selectedPartitionColumns.add(
                        Integer.parseInt(column.getAsUnescapedPath().substring(partitionDesignator.length())));
            } else {
                newColumns.add(column);
            }
        }
    }

    final OperatorContext oContext = context.newOperatorContext(config);

    int currentPartitionIndex = 0;
    final List<RecordReader> readers = Lists.newArrayList();

    final HiveConf conf = config.getHiveConf();

    // TODO: In future we can get this cache from Metadata cached on filesystem.
    final Map<String, ParquetMetadata> footerCache = Maps.newHashMap();

    Map<String, String> mapWithMaxColumns = Maps.newLinkedHashMap();
    try {
        for (InputSplit split : splits) {
            final FileSplit fileSplit = (FileSplit) split;
            final Path finalPath = fileSplit.getPath();
            final JobConf cloneJob =
                    new ProjectionPusher().pushProjectionsAndFilters(new JobConf(conf), finalPath.getParent());
            final FileSystem fs = finalPath.getFileSystem(cloneJob);

            ParquetMetadata parquetMetadata = footerCache.get(finalPath.toString());
            if (parquetMetadata == null) {
                parquetMetadata = ParquetFileReader.readFooter(cloneJob, finalPath);
                footerCache.put(finalPath.toString(), parquetMetadata);
            }
            final List<Integer> rowGroupNums = getRowGroupNumbersFromFileSplit(fileSplit, parquetMetadata);

            for (int rowGroupNum : rowGroupNums) {
                readers.add(new ParquetRecordReader(context,
                        Path.getPathWithoutSchemeAndAuthority(finalPath).toString(),
                        rowGroupNum, fs,
                        CodecFactory.createDirectCodecFactory(fs.getConf(),
                                new ParquetDirectByteBufferAllocator(oContext.getAllocator()), 0),
                        parquetMetadata,
                        newColumns));

                Map<String, String> implicitValues = Maps.newLinkedHashMap();

                if (hasPartitions) {
                    List<String> values = partitions.get(currentPartitionIndex).getValues();
                    for (int i = 0; i < values.size(); i++) {
                        if (selectAllQuery || selectedPartitionColumns.contains(i)) {
                            implicitValues.put(partitionDesignator + i, values.get(i));
                        }
                    }
                }
                implicitColumns.add(implicitValues);
                if (implicitValues.size() > mapWithMaxColumns.size()) {
                    mapWithMaxColumns = implicitValues;
                }
            }
            currentPartitionIndex++;
        }
    } catch (final IOException | RuntimeException e) {
        AutoCloseables.close(e, readers);
        throw new ExecutionSetupException("Failed to create RecordReaders. " + e.getMessage(), e);
    }

    // all readers should have the same number of implicit columns, add missing ones with value null
    mapWithMaxColumns = Maps.transformValues(mapWithMaxColumns, Functions.constant((String) null));
    for (Map<String, String> map : implicitColumns) {
        map.putAll(Maps.difference(map, mapWithMaxColumns).entriesOnlyOnRight());
    }

    // If there are no readers created (which is possible when the table is empty or no row groups are matched),
    // create an empty RecordReader to output the schema
    if (readers.size() == 0) {
        readers.add(new HiveRecordReader(table, null, null, columns, context, conf,
                ImpersonationUtil.createProxyUgi(config.getUserName(), context.getQueryUserName())));
    }

    return new ScanBatch(config, context, oContext, readers.iterator(), implicitColumns);
}
From source file:org.apache.brooklyn.cloudfoundry.entity.CloudFoundryEntityImpl.java
protected void connectServiceUp() {
    serviceProcessUp = FunctionFeed.builder()
            .entity(this)
            .period(Duration.FIVE_SECONDS)
            .poll(new FunctionPollConfig<Boolean, Boolean>(SERVICE_UP)
                    .onException(Functions.constant(Boolean.FALSE))
                    .callable(new Callable<Boolean>() {
                        public Boolean call() {
                            return driver.isRunning();
                        }
                    }))
            .build();
}
From source file:io.crate.executor.transport.SnapshotRestoreDDLDispatcher.java
public ListenableFuture<Long> dispatch(final RestoreSnapshotAnalyzedStatement analysis) {
    boolean waitForCompletion = analysis.settings().getAsBoolean(WAIT_FOR_COMPLETION.settingName(),
            WAIT_FOR_COMPLETION.defaultValue());
    boolean ignoreUnavailable = analysis.settings().getAsBoolean(IGNORE_UNAVAILABLE.settingName(),
            IGNORE_UNAVAILABLE.defaultValue());

    // ignore_unavailable as set by statement
    IndicesOptions indicesOptions = IndicesOptions.fromOptions(ignoreUnavailable, true, true, false,
            IndicesOptions.lenientExpandOpen());

    RestoreSnapshotRequest request = new RestoreSnapshotRequest(analysis.repositoryName(),
            analysis.snapshotName())
            .indices(analysis.indices())
            .indicesOptions(indicesOptions)
            .settings(analysis.settings())
            .waitForCompletion(waitForCompletion)
            .includeGlobalState(false)
            .includeAliases(true);

    FutureActionListener<RestoreSnapshotResponse, Long> listener =
            new FutureActionListener<>(Functions.constant(1L));
    transportActionProvider.transportRestoreSnapshotAction().execute(request, listener);
    return listener;
}