List of usage examples for the com.fasterxml.jackson.module.afterburner.AfterburnerModule constructor, AfterburnerModule().
public AfterburnerModule()
From source file:com.neoteric.starter.metrics.report.elastic.ElasticsearchReporter.java
/**
 * Creates a reporter that periodically ships metrics from {@code registry} to Elasticsearch.
 *
 * @param registry            the metric registry to report from
 * @param hosts               Elasticsearch hosts to try, in order
 * @param timeout             connection timeout in milliseconds
 * @param index               base index name to write metrics into
 * @param indexDateFormat     optional date pattern appended to the index name; ignored if null/empty
 * @param bulkSize            number of documents per bulk request
 * @param clock               clock used to timestamp reports
 * @param prefix              prefix prepended to metric names
 * @param rateUnit            unit for rate values
 * @param durationUnit        unit for duration values
 * @param filter              selects which metrics are reported
 * @param percolationFilter   selects which metrics are percolated; only used together with a notifier
 * @param percolationNotifier notified on percolation matches; only used together with a filter
 * @param timestampFieldname  document field holding the timestamp; falls back to "@timestamp" if blank
 * @param additionalFields    extra static fields added to every document
 */
public ElasticsearchReporter(MetricRegistry registry, String[] hosts, int timeout, String index,
        String indexDateFormat, int bulkSize, Clock clock, String prefix, TimeUnit rateUnit,
        TimeUnit durationUnit, MetricFilter filter, MetricFilter percolationFilter,
        Notifier percolationNotifier, String timestampFieldname, Map<String, Object> additionalFields) {
    super(registry, "elasticsearch-reporter", filter, rateUnit, durationUnit);
    this.hosts = hosts;
    this.index = index;
    this.bulkSize = bulkSize;
    this.clock = clock;
    this.prefix = prefix;
    this.timeout = timeout;
    if (indexDateFormat != null && indexDateFormat.length() > 0) {
        // NOTE(review): SimpleDateFormat is not thread-safe — safe only if the reporter
        // formats dates from a single thread; confirm before sharing this instance.
        this.indexDateFormat = new SimpleDateFormat(indexDateFormat);
    }
    // Percolation is all-or-nothing: both the filter and the notifier must be supplied.
    if (percolationNotifier != null && percolationFilter != null) {
        this.percolationFilter = percolationFilter;
        this.notifier = percolationNotifier;
    }
    if (timestampFieldname == null || timestampFieldname.trim().length() == 0) {
        // Fixed message: was "Timestampfieldname {}is not valid" (missing spaces around the placeholder).
        LOGGER.error("Timestamp fieldname {} is not valid, using default @timestamp", timestampFieldname);
        timestampFieldname = "@timestamp";
    }
    objectMapper.configure(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS, false);
    objectMapper.configure(SerializationFeature.CLOSE_CLOSEABLE, false);
    // Auto-closing would close the generator after the first write call,
    // which does not work for bulk requests spanning several writes.
    objectMapper.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false);
    objectMapper.configure(JsonGenerator.Feature.AUTO_CLOSE_TARGET, false);
    objectMapper.registerModule(new AfterburnerModule());
    objectMapper.registerModule(
            new MetricsElasticsearchModule(rateUnit, durationUnit, timestampFieldname, additionalFields));
    writer = objectMapper.writer();
    checkForIndexTemplate();
}
From source file:com.arpnetworking.logback.StenoEncoder.java
StenoEncoder(final JsonFactory jsonFactory, final ObjectMapper objectMapper) { // Initialize object mapper; _objectMapper = objectMapper;//from w ww .j av a 2 s . c om _objectMapper.setAnnotationIntrospector(new StenoAnnotationIntrospector()); final SimpleFilterProvider simpleFilterProvider = new SimpleFilterProvider(); simpleFilterProvider.addFilter(RedactionFilter.REDACTION_FILTER_ID, new RedactionFilter(!DEFAULT_REDACT_NULL)); // Initialize this here based on the above code, if it was initialized at the declaration site then things // could get out of sync _redactEnabled = true; _objectMapper.setFilters(simpleFilterProvider); // Setup writing of Date/DateTime values _objectMapper.registerModule(new JodaModule()); _objectMapper.disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS); _objectMapper.disable(SerializationFeature.FAIL_ON_EMPTY_BEANS); _objectMapper.setDateFormat(new ISO8601DateFormat()); // Setup other common modules _objectMapper.registerModule(new AfterburnerModule()); _objectMapper.registerModule(new Jdk7Module()); _objectMapper.registerModule(new Jdk8Module()); _objectMapper.registerModule(new GuavaModule()); // Serialization strategies _listsSerialziationStrategy = new ListsSerialziationStrategy(this, jsonFactory, _objectMapper); _objectAsJsonSerialziationStrategy = new ObjectAsJsonSerialziationStrategy(this, jsonFactory, _objectMapper); _objectSerialziationStrategy = new ObjectSerialziationStrategy(this, jsonFactory, _objectMapper); _mapOfJsonSerialziationStrategy = new MapOfJsonSerialziationStrategy(this, jsonFactory, _objectMapper); _mapSerialziationStrategy = new MapSerialziationStrategy(this, jsonFactory, _objectMapper); _arrayOfJsonSerialziationStrategy = new ArrayOfJsonSerialziationStrategy(this, jsonFactory, _objectMapper); _arraySerialziationStrategy = new ArraySerialziationStrategy(this, jsonFactory, _objectMapper); _standardSerializationStrategy = new StandardSerializationStrategy(this, jsonFactory, _objectMapper); }
From source file:org.mayocat.application.AbstractService.java
/**
 * Builds and configures this service's own YAML-backed {@link ObjectMapper}.
 *
 * We deliberately do not use Dropwizard's mapper (environment.getObjectMapper):
 * we don't have full control over its initialization, and we don't necessarily
 * want Mayocat's mapper to be configured identically to the one used by DW.
 *
 * Note: module registration order is kept as-is; later modules may refine
 * handlers contributed by earlier ones.
 */
protected void configureObjectMapper() {
    objectMapper = new ObjectMapper(new YAMLFactory());
    // Standard modules
    objectMapper.registerModule(new GuavaModule());
    objectMapper.registerModule(new JodaModule());
    objectMapper.registerModule(new AfterburnerModule());
    // Dropwizard modules
    objectMapper.registerModule(new GuavaExtrasModule());
    objectMapper.registerModule(new LogbackModule());
    // Mayocat modules
    objectMapper.registerModule(new TimeZoneModule());
    objectMapper.registerModule(new NIOModule());
    objectMapper.registerModule(new MayocatJodaModule());
    objectMapper.registerModule(new MayocatLocaleBCP47LanguageTagModule());
    objectMapper.registerModule(new MayocatGroovyModule());
    // Be lenient towards configuration files with keys this version does not know about.
    objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
}
From source file:org.elasticsearch.metrics.ElasticsearchReporter.java
/**
 * Creates a reporter that periodically ships metrics from {@code registry} to Elasticsearch.
 *
 * @param registry            the metric registry to report from
 * @param hosts               Elasticsearch hosts to try, in order
 * @param timeout             connection timeout in milliseconds
 * @param index               base index name to write metrics into
 * @param indexDateFormat     optional date pattern appended to the index name; ignored if null/empty
 * @param bulkSize            number of documents per bulk request
 * @param clock               clock used to timestamp reports
 * @param prefix              prefix prepended to metric names
 * @param rateUnit            unit for rate values
 * @param durationUnit        unit for duration values
 * @param filter              selects which metrics are reported
 * @param percolationFilter   selects which metrics are percolated; only used together with a notifier
 * @param percolationNotifier notified on percolation matches; only used together with a filter
 * @param timestampFieldname  document field holding the timestamp; falls back to "@timestamp" if blank
 * @param additionalFields    extra static fields added to every document
 * @throws MalformedURLException if a host cannot be parsed into a URL
 */
public ElasticsearchReporter(MetricRegistry registry, String[] hosts, int timeout, String index,
        String indexDateFormat, int bulkSize, Clock clock, String prefix, TimeUnit rateUnit,
        TimeUnit durationUnit, MetricFilter filter, MetricFilter percolationFilter,
        Notifier percolationNotifier, String timestampFieldname, Map<String, Object> additionalFields)
        throws MalformedURLException {
    super(registry, "elasticsearch-reporter", filter, rateUnit, durationUnit);
    this.hosts = hosts;
    this.index = index;
    this.bulkSize = bulkSize;
    this.clock = clock;
    this.prefix = prefix;
    this.timeout = timeout;
    if (indexDateFormat != null && indexDateFormat.length() > 0) {
        // NOTE(review): SimpleDateFormat is not thread-safe — safe only if the reporter
        // formats dates from a single thread; confirm before sharing this instance.
        this.indexDateFormat = new SimpleDateFormat(indexDateFormat);
    }
    // Percolation is all-or-nothing: both the filter and the notifier must be supplied.
    if (percolationNotifier != null && percolationFilter != null) {
        this.percolationFilter = percolationFilter;
        this.notifier = percolationNotifier;
    }
    if (timestampFieldname == null || timestampFieldname.trim().length() == 0) {
        // Fixed message: was "Timestampfieldname {}is not valid" (missing spaces around the placeholder).
        LOGGER.error("Timestamp fieldname {} is not valid, using default @timestamp", timestampFieldname);
        timestampFieldname = "@timestamp";
    }
    objectMapper.configure(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS, false);
    objectMapper.configure(SerializationFeature.CLOSE_CLOSEABLE, false);
    // Auto-closing would close the generator after the first write call,
    // which does not work for bulk requests spanning several writes.
    objectMapper.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false);
    objectMapper.configure(JsonGenerator.Feature.AUTO_CLOSE_TARGET, false);
    objectMapper.registerModule(new AfterburnerModule());
    objectMapper.registerModule(
            new MetricsElasticsearchModule(rateUnit, durationUnit, timestampFieldname, additionalFields));
    writer = objectMapper.writer();
    checkForIndexTemplate();
}
From source file:org.apache.drill.exec.store.parquet.metadata.Metadata.java
/**
 * Read the parquet metadata from a cache file and populate
 * {@code parquetTableMetadataDirs} or {@code parquetTableMetadata}.
 *
 * If the cache file is stale (the table was modified since it was written),
 * the metadata files are regenerated and the metadata context invalidated.
 * An {@link IOException} while reading marks the metadata cache as corrupted
 * instead of propagating.
 *
 * @param path        path to the metadata cache file
 * @param dirsOnly    true for {@link Metadata#METADATA_DIRECTORIES_FILENAME}
 *                    or false for {@link Metadata#METADATA_FILENAME} files reading
 * @param metaContext current metadata context
 * @param fs          file system used to open {@code path}
 */
private void readBlockMeta(Path path, boolean dirsOnly, MetadataContext metaContext, FileSystem fs) {
    // Timer only exists when debug logging is on; every use below is null-guarded.
    Stopwatch timer = logger.isDebugEnabled() ? Stopwatch.createStarted() : null;
    Path metadataParentDir = Path.getPathWithoutSchemeAndAuthority(path.getParent());
    String metadataParentDirPath = metadataParentDir.toUri().getPath();
    ObjectMapper mapper = new ObjectMapper();

    // Custom (de)serializers for Drill schema paths and versioned column-metadata keys.
    final SimpleModule serialModule = new SimpleModule();
    serialModule.addDeserializer(SchemaPath.class, new SchemaPath.De());
    serialModule.addKeyDeserializer(Metadata_V2.ColumnTypeMetadata_v2.Key.class,
            new Metadata_V2.ColumnTypeMetadata_v2.Key.DeSerializer());
    serialModule.addKeyDeserializer(ColumnTypeMetadata_v3.Key.class,
            new ColumnTypeMetadata_v3.Key.DeSerializer());

    AfterburnerModule module = new AfterburnerModule();
    module.setUseOptimizedBeanDeserializer(true);

    mapper.registerModule(serialModule);
    mapper.registerModule(module);
    // Tolerate cache files written by newer versions with extra fields.
    mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
    try (FSDataInputStream is = fs.open(path)) {
        boolean alreadyCheckedModification;
        boolean newMetadata = false;
        alreadyCheckedModification = metaContext.getStatus(metadataParentDirPath);

        if (dirsOnly) {
            // Directories-only cache file.
            parquetTableMetadataDirs = mapper.readValue(is, ParquetTableMetadataDirs.class);
            if (timer != null) {
                logger.debug("Took {} ms to read directories from directory cache file",
                        timer.elapsed(TimeUnit.MILLISECONDS));
                timer.stop();
            }
            parquetTableMetadataDirs.updateRelativePaths(metadataParentDirPath);
            // Regenerate if the table changed since the cache file was written.
            if (!alreadyCheckedModification && tableModified(parquetTableMetadataDirs.getDirectories(), path,
                    metadataParentDir, metaContext, fs)) {
                parquetTableMetadataDirs = (createMetaFilesRecursively(
                        Path.getPathWithoutSchemeAndAuthority(path.getParent()).toString(), fs)).getRight();
                newMetadata = true;
            }
        } else {
            // Full table-metadata cache file.
            parquetTableMetadata = mapper.readValue(is, ParquetTableMetadataBase.class);
            if (timer != null) {
                logger.debug("Took {} ms to read metadata from cache file",
                        timer.elapsed(TimeUnit.MILLISECONDS));
                timer.stop();
            }
            // Relative paths were introduced with metadata version 3.0.
            if (new MetadataVersion(parquetTableMetadata.getMetadataVersion())
                    .compareTo(new MetadataVersion(3, 0)) >= 0) {
                ((ParquetTableMetadata_v3) parquetTableMetadata).updateRelativePaths(metadataParentDirPath);
            }
            // Regenerate if the table changed since the cache file was written.
            if (!alreadyCheckedModification && tableModified(parquetTableMetadata.getDirectories(), path,
                    metadataParentDir, metaContext, fs)) {
                parquetTableMetadata = (createMetaFilesRecursively(
                        Path.getPathWithoutSchemeAndAuthority(path.getParent()).toString(), fs)).getLeft();
                newMetadata = true;
            }

            // DRILL-5009: Remove the RowGroup if it is empty
            List<? extends ParquetFileMetadata> files = parquetTableMetadata.getFiles();
            for (ParquetFileMetadata file : files) {
                List<? extends RowGroupMetadata> rowGroups = file.getRowGroups();
                for (Iterator<? extends RowGroupMetadata> iter = rowGroups.iterator(); iter.hasNext();) {
                    RowGroupMetadata r = iter.next();
                    if (r.getRowCount() == 0) {
                        iter.remove();
                    }
                }
            }
        }
        if (newMetadata) {
            // if new metadata files were created, invalidate the existing metadata context
            metaContext.clear();
        }
    } catch (IOException e) {
        // A corrupt/unreadable cache file is recorded rather than propagated.
        logger.error("Failed to read '{}' metadata file", path, e);
        metaContext.setMetadataCacheCorrupted(true);
    }
}
From source file:org.apache.drill.exec.store.parquet.Metadata.java
/** * Read the parquet metadata from a file * * @param path/*from ww w.ja v a2 s .c o m*/ * @return * @throws IOException */ private void readBlockMeta(String path, boolean dirsOnly, MetadataContext metaContext) throws IOException { Stopwatch timer = Stopwatch.createStarted(); Path p = new Path(path); Path parentDir = p.getParent(); // parent directory of the metadata file ObjectMapper mapper = new ObjectMapper(); final SimpleModule serialModule = new SimpleModule(); serialModule.addDeserializer(SchemaPath.class, new SchemaPath.De()); serialModule.addKeyDeserializer(ColumnTypeMetadata_v2.Key.class, new ColumnTypeMetadata_v2.Key.DeSerializer()); serialModule.addKeyDeserializer(ColumnTypeMetadata_v3.Key.class, new ColumnTypeMetadata_v3.Key.DeSerializer()); AfterburnerModule module = new AfterburnerModule(); module.setUseOptimizedBeanDeserializer(true); mapper.registerModule(serialModule); mapper.registerModule(module); mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); FSDataInputStream is = fs.open(p); boolean alreadyCheckedModification = false; boolean newMetadata = false; if (metaContext != null) { alreadyCheckedModification = metaContext.getStatus(parentDir.toString()); } if (dirsOnly) { parquetTableMetadataDirs = mapper.readValue(is, ParquetTableMetadataDirs.class); logger.info("Took {} ms to read directories from directory cache file", timer.elapsed(TimeUnit.MILLISECONDS)); timer.stop(); if (!alreadyCheckedModification && tableModified(parquetTableMetadataDirs.getDirectories(), p, parentDir, metaContext)) { parquetTableMetadataDirs = (createMetaFilesRecursively( Path.getPathWithoutSchemeAndAuthority(p.getParent()).toString())).getRight(); newMetadata = true; } } else { parquetTableMetadata = mapper.readValue(is, ParquetTableMetadataBase.class); logger.info("Took {} ms to read metadata from cache file", timer.elapsed(TimeUnit.MILLISECONDS)); timer.stop(); if (!alreadyCheckedModification && 
tableModified(parquetTableMetadata.getDirectories(), p, parentDir, metaContext)) { parquetTableMetadata = (createMetaFilesRecursively( Path.getPathWithoutSchemeAndAuthority(p.getParent()).toString())).getLeft(); newMetadata = true; } // DRILL-5009: Remove the RowGroup if it is empty List<? extends ParquetFileMetadata> files = parquetTableMetadata.getFiles(); for (ParquetFileMetadata file : files) { List<? extends RowGroupMetadata> rowGroups = file.getRowGroups(); for (Iterator<? extends RowGroupMetadata> iter = rowGroups.iterator(); iter.hasNext();) { RowGroupMetadata r = iter.next(); if (r.getRowCount() == 0) { iter.remove(); } } } } if (newMetadata && metaContext != null) { // if new metadata files were created, invalidate the existing metadata context metaContext.clear(); } }
From source file:ubicrypt.core.Utils.java
/**
 * Applies UbiCrypt's standard Jackson configuration to the given mapper:
 * lenient deserialization, JDK8/java.time support, custom PGP-key and
 * {@link Path} (de)serializers, Afterburner acceleration, and ISO-style dates.
 *
 * @param mapper the mapper to configure (mutated in place)
 */
public static void configureMapper(final ObjectMapper mapper) {
    // Omit null fields and tolerate unknown ones.
    mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL);
    mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);

    mapper.registerModule(new Jdk8Module());
    mapper.registerModule(new JavaTimeModule());

    // Project-specific handlers for PGP key values and filesystem paths.
    final SimpleModule ubicryptModule = new SimpleModule("ubicrypt module");
    ubicryptModule.addSerializer(new PGPKValueSerializer(PGPKValue.class));
    ubicryptModule.addDeserializer(PGPKValue.class, new PGPKValueDeserializer(PGPKValue.class));
    ubicryptModule.addSerializer(new PathSerializer(Path.class));
    ubicryptModule.addDeserializer(Path.class, new PathDeserializer(Path.class));
    mapper.registerModule(ubicryptModule);

    mapper.registerModule(new AfterburnerModule());
    // Dates as ISO strings, not numeric timestamps.
    mapper.disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS);
}