List of usage examples for org.apache.commons.lang3.tuple Pair getKey
@Override public final L getKey()
Gets the key from this pair.
This method implements the Map.Entry interface, returning the left element as the key.
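Since Pair implements Map.Entry, getKey() simply returns the left element. A minimal standalone sketch (class name and values are illustrative, not taken from the projects below):

import java.util.Map;
import org.apache.commons.lang3.tuple.Pair;

public class PairGetKeyDemo {
    public static void main(String[] args) {
        Pair<String, Integer> pair = Pair.of("retries", 3);

        // getKey() returns the left element, same as getLeft().
        System.out.println(pair.getKey());  // retries
        System.out.println(pair.getLeft()); // retries

        // A Pair can be used wherever a Map.Entry is expected.
        Map.Entry<String, Integer> entry = pair;
        System.out.println(entry.getKey() + " = " + entry.getValue()); // retries = 3
    }
}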
From source file:org.apache.bigtop.bigpetstore.datagenerator.generators.transaction.ProductCategoryUsageTrajectory.java
public double amountAtTime(double time) {
    // Walk the time-sorted trajectory, remembering the last entry
    // whose time (the pair's key) is at or before the requested time.
    Pair<Double, Double> previous = null;
    for (Pair<Double, Double> entry : trajectory) {
        if (entry.getKey() > time)
            break;
        previous = entry;
    }
    if (previous == null)
        return 0.0;
    return previous.getValue();
}
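The method above performs a step-function lookup over a time-sorted list of (time, amount) pairs. The same idea in a self-contained sketch (the trajectory contents are illustrative):

import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang3.tuple.Pair;

List<Pair<Double, Double>> trajectory = new ArrayList<>();
trajectory.add(Pair.of(0.0, 1.0)); // at t=0.0 the amount is 1.0
trajectory.add(Pair.of(5.0, 2.5)); // at t=5.0 the amount steps to 2.5

double time = 3.0;
double amount = 0.0;
for (Pair<Double, Double> entry : trajectory) {
    if (entry.getKey() > time) break; // past the requested time
    amount = entry.getValue();
}
// amount == 1.0, the value in effect at t=3.0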
From source file:org.apache.drill.exec.store.ischema.InfoSchemaRecordGenerator.java
/**
 * Visit the tables in the given schema.
 * @param schemaPath the path to the given schema
 * @param schema the given schema
 */
public void visitTables(String schemaPath, SchemaPlus schema) {
    final AbstractSchema drillSchema = schema.unwrap(AbstractSchema.class);
    final List<String> tableNames = Lists.newArrayList(schema.getTableNames());
    for (Pair<String, ? extends Table> tableNameToTable : drillSchema.getTablesByNames(tableNames)) {
        final String tableName = tableNameToTable.getKey();
        final Table table = tableNameToTable.getValue();
        // Visit the table, and if requested ...
        if (shouldVisitTable(schemaPath, tableName) && visitTable(schemaPath, tableName, table)) {
            // ... do for each of the table's fields.
            final RelDataType tableRow = table.getRowType(new JavaTypeFactoryImpl());
            for (RelDataTypeField field : tableRow.getFieldList()) {
                if (shouldVisitColumn(schemaPath, tableName, field.getName())) {
                    visitField(schemaPath, tableName, field);
                }
            }
        }
    }
}
From source file:org.apache.drill.exec.store.ischema.RecordGenerator.java
/**
 * Visit the tables in the given schema.
 * @param schemaPath the path to the given schema
 * @param schema the given schema
 */
public void visitTables(String schemaPath, SchemaPlus schema) {
    final AbstractSchema drillSchema = schema.unwrap(AbstractSchema.class);
    final List<String> tableNames = Lists.newArrayList(schema.getTableNames());
    for (Pair<String, ? extends Table> tableNameToTable : drillSchema.getTablesByNames(tableNames)) {
        final String tableName = tableNameToTable.getKey();
        final Table table = tableNameToTable.getValue();
        // Visit the table, and if requested ...
        if (shouldVisitTable(schemaPath, tableName) && visitTable(schemaPath, tableName, table)) {
            // ... do for each of the table's fields.
            final RelDataType tableRow = table.getRowType(new JavaTypeFactoryImpl());
            for (RelDataTypeField field : tableRow.getFieldList()) {
                visitField(schemaPath, tableName, field);
            }
        }
    }
}
From source file:org.apache.eagle.alert.engine.e2e.SampleClient1.java
private static long send_metric(long base, KafkaProducer<String, String> proceduer, String stream,
        int hostIndex) {
    Pair<Long, String> pair = createEntity(base, stream, hostIndex);
    base = pair.getKey();
    ProducerRecord<String, String> record = new ProducerRecord<String, String>("perfmon_metrics",
            pair.getRight());
    proceduer.send(record);
    return base;
}
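Note that this snippet mixes the two accessor families: getKey() for the left element and getRight() for the right one. On a Pair they are interchangeable, as this illustrative check shows:

import org.apache.commons.lang3.tuple.Pair;

Pair<Long, String> pair = Pair.of(42L, "payload");
// Map.Entry-style and tuple-style accessors return the same elements:
System.out.println(pair.getKey().equals(pair.getLeft()));    // true
System.out.println(pair.getValue().equals(pair.getRight())); // true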
From source file:org.apache.gobblin.cluster.ScheduledJobConfigurationManager.java
/***
 * TODO: Change cluster code to handle Spec. Right now all job properties are needed to be in config and template is not honored
 * TODO: Materialized JobSpec and make use of ResolvedJobSpec
 * @throws ExecutionException
 * @throws InterruptedException
 */
private void fetchJobSpecs() throws ExecutionException, InterruptedException {
    List<Pair<SpecExecutor.Verb, Spec>> changesSpecs = (List<Pair<SpecExecutor.Verb, Spec>>) this._specConsumer
            .changedSpecs().get();

    for (Pair<SpecExecutor.Verb, Spec> entry : changesSpecs) {
        SpecExecutor.Verb verb = entry.getKey();
        if (verb.equals(SpecExecutor.Verb.ADD)) {
            // Handle addition
            JobSpec jobSpec = (JobSpec) entry.getValue();
            postNewJobConfigArrival(jobSpec.getUri().toString(), jobSpec.getConfigAsProperties());
            jobSpecs.put(entry.getValue().getUri(), (JobSpec) entry.getValue());
        } else if (verb.equals(SpecExecutor.Verb.UPDATE)) {
            // Handle update
            JobSpec jobSpec = (JobSpec) entry.getValue();
            postUpdateJobConfigArrival(jobSpec.getUri().toString(), jobSpec.getConfigAsProperties());
            jobSpecs.put(entry.getValue().getUri(), (JobSpec) entry.getValue());
        } else if (verb.equals(SpecExecutor.Verb.DELETE)) {
            // Handle delete
            Spec anonymousSpec = (Spec) entry.getValue();
            postDeleteJobConfigArrival(anonymousSpec.getUri().toString(), new Properties());
            jobSpecs.remove(entry.getValue().getUri());
        }
    }
}
From source file:org.apache.gobblin.cluster.StreamingJobConfigurationManager.java
private void fetchJobSpecs() throws ExecutionException, InterruptedException {
    List<Pair<SpecExecutor.Verb, Spec>> changesSpecs = (List<Pair<SpecExecutor.Verb, Spec>>) this.specConsumer
            .changedSpecs().get();

    // propagate thread interruption so that caller will exit from loop
    if (Thread.interrupted()) {
        throw new InterruptedException();
    }

    for (Pair<SpecExecutor.Verb, Spec> entry : changesSpecs) {
        SpecExecutor.Verb verb = entry.getKey();
        if (verb.equals(SpecExecutor.Verb.ADD)) {
            // Handle addition
            JobSpec jobSpec = (JobSpec) entry.getValue();
            postNewJobConfigArrival(jobSpec.getUri().toString(), jobSpec.getConfigAsProperties());
        } else if (verb.equals(SpecExecutor.Verb.UPDATE)) {
            // Handle update
            JobSpec jobSpec = (JobSpec) entry.getValue();
            postUpdateJobConfigArrival(jobSpec.getUri().toString(), jobSpec.getConfigAsProperties());
        } else if (verb.equals(SpecExecutor.Verb.DELETE)) {
            // Handle delete
            Spec anonymousSpec = (Spec) entry.getValue();
            postDeleteJobConfigArrival(anonymousSpec.getUri().toString(), new Properties());
        }
    }
}
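Both Gobblin snippets use the pair's key as a dispatch tag and its value as the payload. Reduced to its essentials (the enum and payload type here are illustrative, not Gobblin's):

import java.util.List;
import org.apache.commons.lang3.tuple.Pair;

enum Verb { ADD, UPDATE, DELETE }

static void dispatch(List<Pair<Verb, String>> changes) {
    for (Pair<Verb, String> entry : changes) {
        switch (entry.getKey()) { // the verb selects the handler
            case ADD:    System.out.println("add "    + entry.getValue()); break;
            case UPDATE: System.out.println("update " + entry.getValue()); break;
            case DELETE: System.out.println("delete " + entry.getValue()); break;
        }
    }
}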
From source file:org.apache.gobblin.data.management.conversion.hive.validation.ValidationJob.java
/***
 * Validate a {@link Table} if it was updated recently by checking if its update time
 * lies in the window between maxLookBackTime and skipRecentThanTime.
 * @param hiveDataset {@link ConvertibleHiveDataset} containing {@link Table} info.
 * @throws IOException Issue in validating {@link HiveDataset}
 */
private void processNonPartitionedTable(final ConvertibleHiveDataset hiveDataset) throws IOException {
    try {
        // Validate table
        final long updateTime = this.updateProvider.getUpdateTime(hiveDataset.getTable());
        log.info(String.format("Validating table: %s", hiveDataset.getTable()));

        for (final String format : hiveDataset.getDestFormats()) {
            Optional<ConvertibleHiveDataset.ConversionConfig> conversionConfigOptional = hiveDataset
                    .getConversionConfigForFormat(format);
            if (conversionConfigOptional.isPresent()) {
                ConvertibleHiveDataset.ConversionConfig conversionConfig = conversionConfigOptional.get();
                String orcTableName = conversionConfig.getDestinationTableName();
                String orcTableDatabase = conversionConfig.getDestinationDbName();
                Pair<Optional<org.apache.hadoop.hive.metastore.api.Table>, Optional<List<Partition>>> destinationMeta = HiveConverterUtils
                        .getDestinationTableMeta(orcTableDatabase, orcTableName, this.props);

                // Generate validation queries
                final List<String> validationQueries = HiveValidationQueryGenerator
                        .generateCountValidationQueries(hiveDataset, Optional.<Partition>absent(), conversionConfig);
                final List<String> dataValidationQueries = Lists.newArrayList(HiveValidationQueryGenerator
                        .generateDataValidationQuery(hiveDataset.getTable().getTableName(),
                                hiveDataset.getTable().getDbName(), destinationMeta.getKey().get(),
                                Optional.<Partition>absent(), this.isNestedORC));

                this.futures.add(this.exec.submit(new Callable<Void>() {
                    @Override
                    public Void call() throws Exception {
                        // Execute validation queries
                        log.debug(String.format("Going to execute queries: %s for format: %s",
                                validationQueries, format));
                        List<Long> rowCounts = ValidationJob.this.getValidationOutputFromHive(validationQueries);
                        log.debug(String.format("Going to execute queries: %s for format: %s",
                                dataValidationQueries, format));
                        List<Long> rowDataValidatedCount = ValidationJob.this
                                .getValidationOutputFromHive(dataValidationQueries);

                        // Validate and populate report
                        validateAndPopulateReport(hiveDataset.getTable().getCompleteName(), updateTime,
                                rowCounts, rowDataValidatedCount);
                        return null;
                    }
                }));
            } else {
                log.warn(String.format("No config found for format: %s So skipping table: %s for this format",
                        format, hiveDataset.getTable().getCompleteName()));
            }
        }
    } catch (UncheckedExecutionException e) {
        log.warn(String.format("Not validating table: %s %s", hiveDataset.getTable().getCompleteName(),
                e.getMessage()));
    } catch (UpdateNotFoundException e) {
        log.warn(String.format("Not validating table: %s as update time was not found. %s",
                hiveDataset.getTable().getCompleteName(), e.getMessage()));
    }
}
From source file:org.apache.gobblin.data.management.conversion.hive.validation.ValidationJob.java
/***
 * Validate all {@link Partition}s for a {@link Table} if it was updated recently by checking if its update time
 * lies in the window between maxLookBackTime and skipRecentThanTime.
 * @param hiveDataset {@link HiveDataset} containing {@link Table} and {@link Partition} info.
 * @param client {@link IMetaStoreClient} to query Hive.
 * @throws IOException Issue in validating {@link HiveDataset}
 */
private void processPartitionedTable(ConvertibleHiveDataset hiveDataset,
        AutoReturnableObject<IMetaStoreClient> client) throws IOException {
    // Get partitions for the table
    List<Partition> sourcePartitions = HiveUtils.getPartitions(client.get(), hiveDataset.getTable(),
            Optional.<String>absent());

    for (final String format : hiveDataset.getDestFormats()) {
        Optional<ConvertibleHiveDataset.ConversionConfig> conversionConfigOptional = hiveDataset
                .getConversionConfigForFormat(format);
        if (conversionConfigOptional.isPresent()) {
            // Get conversion config
            ConvertibleHiveDataset.ConversionConfig conversionConfig = conversionConfigOptional.get();
            String orcTableName = conversionConfig.getDestinationTableName();
            String orcTableDatabase = conversionConfig.getDestinationDbName();
            Pair<Optional<org.apache.hadoop.hive.metastore.api.Table>, Optional<List<Partition>>> destinationMeta = HiveConverterUtils
                    .getDestinationTableMeta(orcTableDatabase, orcTableName, this.props);

            // Validate each partition
            for (final Partition sourcePartition : sourcePartitions) {
                try {
                    final long updateTime = this.updateProvider.getUpdateTime(sourcePartition);
                    if (shouldValidate(sourcePartition)) {
                        log.info(String.format("Validating partition: %s", sourcePartition.getCompleteName()));

                        // Generate validation queries
                        final List<String> countValidationQueries = HiveValidationQueryGenerator
                                .generateCountValidationQueries(hiveDataset, Optional.of(sourcePartition),
                                        conversionConfig);
                        final List<String> dataValidationQueries = Lists
                                .newArrayList(HiveValidationQueryGenerator.generateDataValidationQuery(
                                        hiveDataset.getTable().getTableName(), hiveDataset.getTable().getDbName(),
                                        destinationMeta.getKey().get(), Optional.of(sourcePartition),
                                        this.isNestedORC));

                        this.futures.add(this.exec.submit(new Callable<Void>() {
                            @Override
                            public Void call() throws Exception {
                                // Execute validation queries
                                log.debug(String.format(
                                        "Going to execute count validation queries: %s for format: %s "
                                                + "and partition %s",
                                        countValidationQueries, format, sourcePartition.getCompleteName()));
                                List<Long> rowCounts = ValidationJob.this
                                        .getValidationOutputFromHive(countValidationQueries);
                                log.debug(String.format(
                                        "Going to execute data validation queries: %s for format: %s and partition %s",
                                        dataValidationQueries, format, sourcePartition.getCompleteName()));
                                List<Long> rowDataValidatedCount = ValidationJob.this
                                        .getValidationOutputFromHive(dataValidationQueries);

                                // Validate and populate report
                                validateAndPopulateReport(sourcePartition.getCompleteName(), updateTime,
                                        rowCounts, rowDataValidatedCount);
                                return null;
                            }
                        }));
                    } else {
                        log.debug(String.format(
                                "Not validating partition: %s as updateTime: %s is not in range of max look back: %s "
                                        + "and skip recent than: %s",
                                sourcePartition.getCompleteName(), updateTime, this.maxLookBackTime,
                                this.skipRecentThanTime));
                    }
                } catch (UncheckedExecutionException e) {
                    log.warn(String.format("Not validating partition: %s %s", sourcePartition.getCompleteName(),
                            e.getMessage()));
                } catch (UpdateNotFoundException e) {
                    log.warn(String.format("Not validating partition: %s as update time was not found. %s",
                            sourcePartition.getCompleteName(), e.getMessage()));
                }
            }
        } else {
            log.info(String.format("No conversion config found for format %s. Ignoring data validation", format));
        }
    }
}
From source file:org.apache.hadoop.hive.ql.metadata.TestMetadataColumnRestrictionPreEventListener.java
@SafeVarargs
private static Configuration conf(Pair<String, String>... properties) {
    Configuration conf = new Configuration(false); // Don't load defaults.
    // Settings for testing.
    conf.set(HiveConf.ConfVars.METADATA_RESTRICTIONS_BLOCK_DROP_TABLE_COLUMNS.varname, "true");
    conf.set(HiveConf.ConfVars.METADATA_RESTRICTIONS_BLOCK_ADD_TABLE_COLUMNS_IN_MIDDLE.varname, "true");
    conf.set(HiveConf.ConfVars.METADATA_RESTRICTIONS_BLOCK_DROP_STRUCT_COLUMNS.varname, "true");
    conf.set(HiveConf.ConfVars.METADATA_RESTRICTIONS_BLOCK_ADD_STRUCT_COLUMNS_IN_MIDDLE.varname, "true");
    // Overrides.
    for (Pair<String, String> setting : properties) {
        conf.set(setting.getKey(), setting.getValue());
    }
    return conf;
}
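A test might invoke this helper as follows (the override values are hypothetical, not taken from the test class):

Configuration testConf = conf(
        Pair.of(HiveConf.ConfVars.METADATA_RESTRICTIONS_BLOCK_DROP_TABLE_COLUMNS.varname, "false"),
        Pair.of(HiveConf.ConfVars.METADATA_RESTRICTIONS_BLOCK_DROP_STRUCT_COLUMNS.varname, "false"));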
From source file:org.apache.hyracks.storage.am.lsm.common.impls.MemoryComponentMetadata.java
@Override
public ArrayBackedValueStorage get(IValueReference key) {
    for (Pair<IValueReference, ArrayBackedValueStorage> pair : store) {
        if (pair.getKey().equals(key)) {
            return pair.getValue();
        }
    }
    return null;
}
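The store here is a small list, so a linear scan comparing each pair's key is adequate. The same lookup pattern with plain Java types, for illustration:

import java.util.List;
import org.apache.commons.lang3.tuple.Pair;

static String lookup(List<Pair<String, String>> store, String key) {
    // Return the value of the first pair whose key matches, or null.
    for (Pair<String, String> pair : store) {
        if (pair.getKey().equals(key)) {
            return pair.getValue();
        }
    }
    return null;
}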