List of usage examples for org.apache.commons.collections4 CollectionUtils isEmpty
public static boolean isEmpty(final Collection<?> coll)
From source file:org.finra.herd.service.helper.BusinessObjectDefinitionDaoHelper.java
/** * Create Business Object Definition Entity * @param request business object definition create request * @return Business Object Definition Entity *//*from ww w .j av a 2s. co m*/ public BusinessObjectDefinitionEntity createBusinessObjectDefinitionEntity( BusinessObjectDefinitionCreateRequest request) { // Perform the validation. validateBusinessObjectDefinitionCreateRequest(request); // Get the namespace and ensure it exists. NamespaceEntity namespaceEntity = namespaceDaoHelper.getNamespaceEntity(request.getNamespace()); // Get the data provider and ensure it exists. DataProviderEntity dataProviderEntity = dataProviderDaoHelper .getDataProviderEntity(request.getDataProviderName()); // Get business object definition key. BusinessObjectDefinitionKey businessObjectDefinitionKey = new BusinessObjectDefinitionKey( request.getNamespace(), request.getBusinessObjectDefinitionName()); // Ensure a business object definition with the specified key doesn't already exist. BusinessObjectDefinitionEntity businessObjectDefinitionEntity = businessObjectDefinitionDao .getBusinessObjectDefinitionByKey(businessObjectDefinitionKey); if (businessObjectDefinitionEntity != null) { throw new AlreadyExistsException(String.format( "Unable to create business object definition with name \"%s\" because it already exists for namespace \"%s\".", businessObjectDefinitionKey.getBusinessObjectDefinitionName(), businessObjectDefinitionKey.getNamespace())); } // Create a new entity. businessObjectDefinitionEntity = new BusinessObjectDefinitionEntity(); businessObjectDefinitionEntity.setNamespace(namespaceEntity); businessObjectDefinitionEntity.setName(request.getBusinessObjectDefinitionName()); businessObjectDefinitionEntity.setDescription(request.getDescription()); businessObjectDefinitionEntity.setDataProvider(dataProviderEntity); businessObjectDefinitionEntity.setDisplayName(request.getDisplayName()); // Create the attributes if they are specified. 
if (!CollectionUtils.isEmpty(request.getAttributes())) { List<BusinessObjectDefinitionAttributeEntity> attributeEntities = new ArrayList<>(); businessObjectDefinitionEntity.setAttributes(attributeEntities); for (Attribute attribute : request.getAttributes()) { BusinessObjectDefinitionAttributeEntity attributeEntity = new BusinessObjectDefinitionAttributeEntity(); attributeEntities.add(attributeEntity); attributeEntity.setBusinessObjectDefinition(businessObjectDefinitionEntity); attributeEntity.setName(attribute.getName()); attributeEntity.setValue(attribute.getValue()); } } // Persist the change event entity saveBusinessObjectDefinitionChangeEvents(businessObjectDefinitionEntity); // Persist and return the new entity. return businessObjectDefinitionDao.saveAndRefresh(businessObjectDefinitionEntity); }
From source file:org.finra.herd.service.helper.Hive13DdlGenerator.java
/**
 * Generates and appends to the string builder the create table Hive 13 DDL as per the specified parameters.
 *
 * @param generateDdlRequest the generate DDL request carrying the format entity, partition filters, custom DDL entity, and generation flags
 *
 * @return the generated DDL, with custom DDL tokens substituted when a custom DDL entity is present
 */
private String generateCreateTableDdlHelper(GenerateDdlRequest generateDdlRequest) {
    // TODO: We might want to consider using a template engine such as Velocity to generate this DDL so we don't wind up just doing string manipulation.

    StringBuilder sb = new StringBuilder();

    // For custom DDL, we would need to substitute the custom DDL tokens with their relative values.
    HashMap<String, String> replacements = new HashMap<>();

    // Validate that partition values passed in the list of partition filters do not contain '/' character.
    if (generateDdlRequest.isPartitioned && !CollectionUtils.isEmpty(generateDdlRequest.partitionFilters)) {
        // Validate that partition values do not contain '/' characters.
        for (List<String> partitionFilter : generateDdlRequest.partitionFilters) {
            for (String partitionValue : partitionFilter) {
                Assert.doesNotContain(partitionValue, "/",
                    String.format("Partition value \"%s\" can not contain a '/' character.", partitionValue));
            }
        }
    }

    // Get business object format model object to directly access schema columns and partitions.
    BusinessObjectFormat businessObjectFormat =
        businessObjectFormatHelper.createBusinessObjectFormatFromEntity(generateDdlRequest.businessObjectFormatEntity);

    // Validate that we have at least one column specified in the business object format schema.
    assertSchemaColumnsNotEmpty(businessObjectFormat, generateDdlRequest.businessObjectFormatEntity);

    if (generateDdlRequest.isPartitioned) {
        // Validate that we have at least one partition column specified in the business object format schema.
        Assert.notEmpty(businessObjectFormat.getSchema().getPartitions(),
            String.format("No schema partitions specified for business object format {%s}.",
                businessObjectFormatHelper.businessObjectFormatEntityAltKeyToString(
                    generateDdlRequest.businessObjectFormatEntity)));

        // Validate that partition column names do not contain '/' characters.
        for (SchemaColumn partitionColumn : businessObjectFormat.getSchema().getPartitions()) {
            Assert.doesNotContain(partitionColumn.getName(), "/", String.format(
                "Partition column name \"%s\" can not contain a '/' character. Business object format: {%s}",
                partitionColumn.getName(), businessObjectFormatHelper.businessObjectFormatEntityAltKeyToString(
                    generateDdlRequest.businessObjectFormatEntity)));
        }
    }

    // Add drop table if requested.
    if (BooleanUtils.isTrue(generateDdlRequest.includeDropTableStatement)) {
        sb.append(String.format("DROP TABLE IF EXISTS `%s`;\n\n", generateDdlRequest.tableName));
    }

    // Depending on the flag, prepare "if not exists" option text or leave it an empty string.
    String ifNotExistsOption =
        BooleanUtils.isTrue(generateDdlRequest.includeIfNotExistsOption) ? "IF NOT EXISTS " : "";

    // Only generate the create table DDL statement, if custom DDL was not specified.
    if (generateDdlRequest.customDdlEntity == null) {
        generateStandardBaseDdl(generateDdlRequest, sb, businessObjectFormat, ifNotExistsOption);
    } else {
        // Use the custom DDL in place of the create table statement.
        sb.append(String.format("%s\n\n", generateDdlRequest.customDdlEntity.getDdl()));

        // We need to substitute the relative custom DDL token with an actual table name.
        replacements.put(TABLE_NAME_CUSTOM_DDL_TOKEN, generateDdlRequest.tableName);
    }

    // Add alter table statements only if the list of partition filters is not empty - this is applicable to generating DDL for business object data only.
    if (!CollectionUtils.isEmpty(generateDdlRequest.partitionFilters)) {
        processPartitionFiltersForGenerateDdl(generateDdlRequest, sb, replacements, businessObjectFormat,
            ifNotExistsOption);
    }
    // Add a location statement with a token if this is format DDL that does not use custom DDL.
    else if (!generateDdlRequest.isPartitioned && generateDdlRequest.customDdlEntity == null) {
        // Since custom DDL is not specified, there are no partition values, and this table is not partitioned, add a LOCATION clause with a token.
        sb.append(String.format("LOCATION '%s';", NON_PARTITIONED_TABLE_LOCATION_CUSTOM_DDL_TOKEN));
    }

    // Trim to remove unnecessary end-of-line characters, if any, from the end of the generated DDL.
    String resultDdl = sb.toString().trim();

    // For custom DDL, substitute the relative custom DDL tokens with their values.
    if (generateDdlRequest.customDdlEntity != null) {
        for (Map.Entry<String, String> entry : replacements.entrySet()) {
            String token = entry.getKey();
            String value = entry.getValue();
            // Pattern.quote so tokens containing regex metacharacters are replaced literally.
            resultDdl = resultDdl.replaceAll(Pattern.quote(token), value);
        }
    }

    return resultDdl;
}
From source file:org.finra.herd.service.helper.Hive13DdlGenerator.java
/** * Generates the DDL column definitions based on the given business object format. The generated column definitions look like: * <p/>/* w w w . j a va 2s.c o m*/ * <pre> * `COL_NAME1` VARCHAR(2) COMMENT 'some comment', * `COL_NAME2` VARCHAR(2), * `ORIG_COL_NAME3` DATE * ) * </pre> * <p/> * Each column definition is indented using 4 spaces. If a column is also a partition, the text 'ORIG_' will be prefixed in the column name. Note the * closing parenthesis at the end of the statement. * * @param businessObjectFormatEntity The persistent entity of business object format * @param businessObjectFormat The {@link BusinessObjectFormat} * * @return String containing the generated column definitions. */ private String generateDdlColumns(BusinessObjectFormatEntity businessObjectFormatEntity, BusinessObjectFormat businessObjectFormat) { StringBuilder sb = new StringBuilder(); // Add schema columns. Boolean firstRow = true; for (SchemaColumn schemaColumn : businessObjectFormat.getSchema().getColumns()) { if (!firstRow) { sb.append(",\n"); } else { firstRow = false; } // Add a schema column declaration. Check if a schema column is also a partition column and prepend "ORGNL_" prefix if this is the case. sb.append(String.format(" `%s%s` %s%s", (!CollectionUtils.isEmpty(businessObjectFormat.getSchema().getPartitions()) && businessObjectFormat.getSchema().getPartitions().contains(schemaColumn) ? "ORGNL_" : ""), schemaColumn.getName(), getHiveDataType(schemaColumn, businessObjectFormatEntity), StringUtils.isNotBlank(schemaColumn.getDescription()) ? String.format(" COMMENT '%s'", escapeSingleQuotes(schemaColumn.getDescription())) : "")); } sb.append(")\n"); return sb.toString(); }
From source file:org.finra.herd.service.helper.Hive13DdlGenerator.java
/**
 * Processes partition filters for DDL generation as per generate DDL request.
 *
 * @param generateDdlRequest the generate DDL request
 * @param sb the string builder to be updated with the "alter table add partition" statements
 * @param replacements the hash map of string values to be used to substitute the custom DDL tokens with their actual values
 * @param businessObjectFormat the business object format
 * @param ifNotExistsOption specifies if generated DDL contains "if not exists" option
 */
private void processPartitionFiltersForGenerateDdl(GenerateDdlRequest generateDdlRequest, StringBuilder sb,
    HashMap<String, String> replacements, BusinessObjectFormat businessObjectFormat, String ifNotExistsOption) {
    // Get the business object format key from the entity.
    BusinessObjectFormatKey businessObjectFormatKey =
        businessObjectFormatHelper.getBusinessObjectFormatKey(generateDdlRequest.businessObjectFormatEntity);

    // Override the business object format version with the original (optional) value from the request.
    businessObjectFormatKey.setBusinessObjectFormatVersion(generateDdlRequest.businessObjectFormatVersion);

    // Retrieve a list of storage unit availability DTOs for the specified list of partition filters. The list will be sorted by partition values and
    // storage names. For a non-partitioned table, there should only exist a single business object data entity (with partitionValue equals to "none").
    // We do validate that all specified storage entities are of "S3" storage platform type, so we specify S3 storage platform type in the herdDao call
    // below, so we select storage units only from all S3 storage entities, when the specified list of storage names is empty. We also specify to select
    // only "available" storage units.
    List<StorageUnitAvailabilityDto> storageUnitAvailabilityDtos = storageUnitDao
        .getStorageUnitsByPartitionFilters(businessObjectFormatKey, generateDdlRequest.partitionFilters,
            generateDdlRequest.businessObjectDataVersion, BusinessObjectDataStatusEntity.VALID,
            generateDdlRequest.storageNames, StoragePlatformEntity.S3, null, true);

    // Exclude duplicate business object data per specified list of storage names.
    // If storage names are not specified, the method fails on business object data instances registered with multiple storage.
    storageUnitAvailabilityDtos =
        excludeDuplicateBusinessObjectData(storageUnitAvailabilityDtos, generateDdlRequest.storageNames);

    // Build a list of matched partition filters. Please note that each request partition
    // filter might result in multiple available business object data entities.
    List<List<String>> matchedAvailablePartitionFilters = new ArrayList<>();
    List<List<String>> availablePartitions = new ArrayList<>();
    for (StorageUnitAvailabilityDto storageUnitAvailabilityDto : storageUnitAvailabilityDtos) {
        BusinessObjectDataKey businessObjectDataKey = storageUnitAvailabilityDto.getBusinessObjectDataKey();
        matchedAvailablePartitionFilters.add(businessObjectDataHelper.getPartitionFilter(businessObjectDataKey,
            generateDdlRequest.partitionFilters.get(0)));
        availablePartitions.add(businessObjectDataHelper.getPrimaryAndSubPartitionValues(businessObjectDataKey));
    }

    // If request specifies to include all registered sub-partitions, fail if any of "non-available" registered sub-partitions are found.
    if (generateDdlRequest.businessObjectDataVersion == null
        && BooleanUtils.isTrue(generateDdlRequest.includeAllRegisteredSubPartitions)
        && !CollectionUtils.isEmpty(matchedAvailablePartitionFilters)) {
        notAllowNonAvailableRegisteredSubPartitions(businessObjectFormatKey, matchedAvailablePartitionFilters,
            availablePartitions, generateDdlRequest.storageNames);
    }

    // Fail on any missing business object data unless the flag is set to allow missing business object data.
    if (!BooleanUtils.isTrue(generateDdlRequest.allowMissingData)) {
        // Get a list of unmatched partition filters.
        List<List<String>> unmatchedPartitionFilters = new ArrayList<>(generateDdlRequest.partitionFilters);
        unmatchedPartitionFilters.removeAll(matchedAvailablePartitionFilters);

        // Throw an exception if we have any unmatched partition filters.
        if (!unmatchedPartitionFilters.isEmpty()) {
            // Get the first unmatched partition filter and throw exception.
            List<String> unmatchedPartitionFilter = getFirstUnmatchedPartitionFilter(unmatchedPartitionFilters);
            throw new ObjectNotFoundException(String.format(
                "Business object data {namespace: \"%s\", businessObjectDefinitionName: \"%s\", businessObjectFormatUsage: \"%s\", "
                    + "businessObjectFormatFileType: \"%s\", businessObjectFormatVersion: %d, partitionValue: \"%s\", "
                    + "subpartitionValues: \"%s\", businessObjectDataVersion: %d} is not available in \"%s\" storage(s).",
                businessObjectFormatKey.getNamespace(), businessObjectFormatKey.getBusinessObjectDefinitionName(),
                businessObjectFormatKey.getBusinessObjectFormatUsage(),
                businessObjectFormatKey.getBusinessObjectFormatFileType(),
                businessObjectFormatKey.getBusinessObjectFormatVersion(), unmatchedPartitionFilter.get(0),
                StringUtils.join(unmatchedPartitionFilter.subList(1, unmatchedPartitionFilter.size()), ","),
                generateDdlRequest.businessObjectDataVersion,
                StringUtils.join(generateDdlRequest.storageNames, ",")));
        }
    }

    // We still need to close/complete the create table statement when there is no custom DDL,
    // the table is non-partitioned, and there is no business object data found.
    if (generateDdlRequest.customDdlEntity == null && !generateDdlRequest.isPartitioned
        && CollectionUtils.isEmpty(storageUnitAvailabilityDtos)) {
        // Add a LOCATION clause with a token.
        sb.append(String.format("LOCATION '%s';", NON_PARTITIONED_TABLE_LOCATION_CUSTOM_DDL_TOKEN));
    }
    // The table is partitioned, custom DDL is specified, or there is at least one business object data instance found.
    else {
        // If drop partitions flag is set and the table is partitioned, drop partitions specified by the partition filters.
        if (generateDdlRequest.isPartitioned && BooleanUtils.isTrue(generateDdlRequest.includeDropPartitions)) {
            // Generate the beginning of the alter table statement.
            String alterTableFirstToken =
                String.format("ALTER TABLE `%s` DROP IF EXISTS", generateDdlRequest.tableName);

            // Create a drop partition statement for each partition filter entry.
            List<String> dropPartitionStatements = new ArrayList<>();
            for (List<String> partitionFilter : generateDdlRequest.partitionFilters) {
                // Start building a drop partition statement for this partition filter.
                StringBuilder dropPartitionStatement = new StringBuilder();
                dropPartitionStatement.append(String.format("%s PARTITION (",
                    BooleanUtils.isTrue(generateDdlRequest.combineMultiplePartitionsInSingleAlterTable) ? " "
                        : alterTableFirstToken));

                // Specify all partition column values as per this partition filter.
                List<String> partitionKeyValuePairs = new ArrayList<>();
                for (int i = 0; i < partitionFilter.size(); i++) {
                    if (StringUtils.isNotBlank(partitionFilter.get(i))) {
                        // We cannot hit ArrayIndexOutOfBoundsException on getPartitions() since partitionFilter would
                        // not have a value set at an index that is greater or equal than the number of partitions in the schema.
                        String partitionColumnName =
                            businessObjectFormat.getSchema().getPartitions().get(i).getName();
                        partitionKeyValuePairs
                            .add(String.format("`%s`='%s'", partitionColumnName, partitionFilter.get(i)));
                    }
                }

                // Complete the drop partition statement.
                dropPartitionStatement.append(StringUtils.join(partitionKeyValuePairs, ", ")).append(')');

                // Add this drop partition statement to the list.
                dropPartitionStatements.add(dropPartitionStatement.toString());
            }

            // Add all drop partition statements to the main string builder.
            if (CollectionUtils.isNotEmpty(dropPartitionStatements)) {
                // If specified, combine dropping multiple partitions in a single ALTER TABLE statement.
                if (BooleanUtils.isTrue(generateDdlRequest.combineMultiplePartitionsInSingleAlterTable)) {
                    sb.append(alterTableFirstToken).append('\n');
                }
                sb.append(StringUtils.join(dropPartitionStatements,
                    BooleanUtils.isTrue(generateDdlRequest.combineMultiplePartitionsInSingleAlterTable) ? ",\n"
                        : ";\n")).append(";\n\n");
            }
        }

        // Process storage unit entities.
        if (!CollectionUtils.isEmpty(storageUnitAvailabilityDtos)) {
            processStorageUnitsForGenerateDdl(generateDdlRequest, sb, replacements, businessObjectFormat,
                ifNotExistsOption, storageUnitAvailabilityDtos);
        }
    }
}
From source file:org.finra.herd.service.helper.Hive13DdlGenerator.java
/** * Eliminate storage units that belong to the same business object data by picking storage unit registered in a storage listed earlier in the list of * storage names specified in the request. If storage names are not specified, simply fail on business object data instances registered with multiple * storage./*from ww w .j a v a2 s . c o m*/ * * @param storageUnitAvailabilityDtos the list of storage unit availability DTOs * @param storageNames the list of storage names * * @return the updated list of storage unit availability DTOs * @throws IllegalArgumentException on business object data being registered in multiple storage and storage names are not specified to resolve this */ protected List<StorageUnitAvailabilityDto> excludeDuplicateBusinessObjectData( List<StorageUnitAvailabilityDto> storageUnitAvailabilityDtos, List<String> storageNames) throws IllegalArgumentException { // Convert the list of storage names to upper case. List<String> upperCaseStorageNames = new ArrayList<>(); if (CollectionUtils.isNotEmpty(storageNames)) { for (String storageName : storageNames) { upperCaseStorageNames.add(storageName.toUpperCase()); } } // If storage names are not specified, fail on business object data instance registered with multiple storage. // Otherwise, in a case when the same business object data is registered with multiple storage, // pick storage unit registered in a storage listed earlier in the list of storage names specified in the request. Map<BusinessObjectDataKey, StorageUnitAvailabilityDto> businessObjectDataToStorageUnitMap = new LinkedHashMap<>(); for (StorageUnitAvailabilityDto storageUnitAvailabilityDto : storageUnitAvailabilityDtos) { BusinessObjectDataKey businessObjectDataKey = storageUnitAvailabilityDto.getBusinessObjectDataKey(); if (businessObjectDataToStorageUnitMap.containsKey(businessObjectDataKey)) { // Duplicate business object data is found, so check if storage names are specified. 
if (CollectionUtils.isEmpty(upperCaseStorageNames)) { // Fail on business object data registered in multiple storage. throw new IllegalArgumentException(String.format( "Found business object data registered in more than one storage. " + "Please specify storage(s) in the request to resolve this. Business object data {%s}", businessObjectDataHelper.businessObjectDataKeyToString(businessObjectDataKey))); } else { // Replace the storage unit entity if it belongs to a "higher priority" storage. String currentUpperCaseStorageName = businessObjectDataToStorageUnitMap .get(businessObjectDataKey).getStorageName().toUpperCase(); int currentStorageIndex = upperCaseStorageNames.indexOf(currentUpperCaseStorageName); int newStorageIndex = upperCaseStorageNames .indexOf(storageUnitAvailabilityDto.getStorageName().toUpperCase()); if (newStorageIndex < currentStorageIndex) { businessObjectDataToStorageUnitMap.put(businessObjectDataKey, storageUnitAvailabilityDto); } } } else { businessObjectDataToStorageUnitMap.put(businessObjectDataKey, storageUnitAvailabilityDto); } } return new ArrayList<>(businessObjectDataToStorageUnitMap.values()); }
From source file:org.finra.herd.service.helper.Hive13DdlGenerator.java
/**
 * Searches for and fails on any of "non-available" registered sub-partitions as per list of "matched" partition filters.
 *
 * @param businessObjectFormatKey the business object format key
 * @param matchedAvailablePartitionFilters the list of "matched" partition filters
 * @param availablePartitions the list of already discovered "available" partitions, where each partition consists of primary and optional sub-partition
 *     values
 * @param storageNames the list of storage names
 */
protected void notAllowNonAvailableRegisteredSubPartitions(BusinessObjectFormatKey businessObjectFormatKey,
    List<List<String>> matchedAvailablePartitionFilters, List<List<String>> availablePartitions,
    List<String> storageNames) {
    // Query all matched partition filters to discover any non-available registered sub-partitions. Retrieve latest business object data per list of
    // matched filters regardless of business object data and/or storage unit statuses. This is done to discover all registered sub-partitions regardless
    // of business object data or storage unit statuses. We do validate that all specified storages are of "S3" storage platform type, so we specify S3
    // storage platform type in the herdDao call below, so we select storage units only from all S3 storages, when the specified list of storages is empty.
    // We want to select any existing storage units regardless of their status, so we pass "false" for selectOnlyAvailableStorageUnits parameter.
    List<StorageUnitAvailabilityDto> matchedNotAvailableStorageUnitAvailabilityDtos = storageUnitDao
        .getStorageUnitsByPartitionFilters(businessObjectFormatKey, matchedAvailablePartitionFilters, null, null,
            storageNames, StoragePlatformEntity.S3, null, false);

    // Exclude all storage units with business object data having "DELETED" status.
    matchedNotAvailableStorageUnitAvailabilityDtos = storageUnitHelper.excludeBusinessObjectDataStatus(
        matchedNotAvailableStorageUnitAvailabilityDtos, BusinessObjectDataStatusEntity.DELETED);

    // Exclude all already discovered "available" partitions. Please note that, since we got here, the list of matched partitions can not be empty.
    matchedNotAvailableStorageUnitAvailabilityDtos = storageUnitHelper
        .excludePartitions(matchedNotAvailableStorageUnitAvailabilityDtos, availablePartitions);

    // Fail on any "non-available" registered sub-partitions.
    if (!CollectionUtils.isEmpty(matchedNotAvailableStorageUnitAvailabilityDtos)) {
        // Get the business object data key for the first "non-available" registered sub-partition.
        BusinessObjectDataKey businessObjectDataKey = matchedNotAvailableStorageUnitAvailabilityDtos.get(0)
            .getBusinessObjectDataKey();

        // Throw an exception.
        throw new ObjectNotFoundException(String.format(
            "Business object data {namespace: \"%s\", businessObjectDefinitionName: \"%s\", businessObjectFormatUsage: \"%s\", "
                + "businessObjectFormatFileType: \"%s\", businessObjectFormatVersion: %d, partitionValue: \"%s\", "
                + "subpartitionValues: \"%s\", businessObjectDataVersion: %d} is not available in \"%s\" storage(s).",
            businessObjectFormatKey.getNamespace(), businessObjectFormatKey.getBusinessObjectDefinitionName(),
            businessObjectFormatKey.getBusinessObjectFormatUsage(),
            businessObjectFormatKey.getBusinessObjectFormatFileType(),
            businessObjectFormatKey.getBusinessObjectFormatVersion(), businessObjectDataKey.getPartitionValue(),
            StringUtils.join(businessObjectDataKey.getSubPartitionValues(), ","),
            businessObjectDataKey.getBusinessObjectDataVersion(), StringUtils.join(storageNames, ",")));
    }
}
From source file:org.finra.herd.service.helper.ParameterHelper.java
/** * Validates that parameter names are there and that there are no duplicate parameter names in case insensitive manner. This method also trims parameter * names.// w ww . j a v a 2s. com * * @param parameters the list of parameters to be validated */ public void validateParameters(List<Parameter> parameters) { if (!CollectionUtils.isEmpty(parameters)) { Set<String> parameterNameValidationSet = new HashSet<>(); for (Parameter parameter : parameters) { // Validate and trim the parameter name. Assert.hasText(parameter.getName(), "A parameter name must be specified."); parameter.setName(parameter.getName().trim()); // Ensure the parameter name isn't a duplicate by using a set with a "lowercase" name as the key for case insensitivity. String lowercaseParameterName = parameter.getName().toLowerCase(); Assert.isTrue(!parameterNameValidationSet.contains(lowercaseParameterName), "Duplicate parameter name found: " + parameter.getName()); parameterNameValidationSet.add(lowercaseParameterName); } } }
From source file:org.finra.herd.service.helper.S3KeyPrefixHelper.java
/**
 * Returns S3 key prefix constructed per specified velocity template.
 *
 * @param s3KeyPrefixVelocityTemplate the S3 key prefix velocity template
 * @param dataProviderName the data provider name
 * @param businessObjectFormat the business object format
 * @param businessObjectDataKey the business object data key
 * @param storageName the storage name
 *
 * @return the S3 key prefix
 */
public String buildS3KeyPrefix(String s3KeyPrefixVelocityTemplate, String dataProviderName,
    BusinessObjectFormat businessObjectFormat, BusinessObjectDataKey businessObjectDataKey, String storageName) {
    // Create and populate the velocity context with variable values.
    Map<String, Object> context = new HashMap<>();
    context.put("environment",
        s3KeyPrefixFormat(configurationHelper.getProperty(ConfigurationValue.HERD_ENVIRONMENT)));
    context.put("namespace", s3KeyPrefixFormat(businessObjectFormat.getNamespace()));
    context.put("dataProviderName", s3KeyPrefixFormat(dataProviderName));
    context.put("businessObjectDefinitionName",
        s3KeyPrefixFormat(businessObjectFormat.getBusinessObjectDefinitionName()));
    context.put("businessObjectFormatUsage",
        s3KeyPrefixFormat(businessObjectFormat.getBusinessObjectFormatUsage()));
    context.put("businessObjectFormatFileType",
        s3KeyPrefixFormat(businessObjectFormat.getBusinessObjectFormatFileType()));
    context.put("businessObjectFormatVersion",
        s3KeyPrefixFormat(String.valueOf(businessObjectFormat.getBusinessObjectFormatVersion())));
    context.put("businessObjectDataVersion",
        s3KeyPrefixFormat(String.valueOf(businessObjectDataKey.getBusinessObjectDataVersion())));
    // NOTE(review): s3KeyPrefixFormat is applied twice here, unlike every other entry - looks redundant; confirm whether this is intentional.
    context.put("businessObjectFormatPartitionKey",
        s3KeyPrefixFormat(s3KeyPrefixFormat(businessObjectFormat.getPartitionKey())));
    context.put("businessObjectDataPartitionValue", businessObjectDataKey.getPartitionValue());

    // Build an ordered map of sub-partition column names to sub-partition values.
    Map<String, String> subPartitions = new LinkedHashMap<>();
    if (!CollectionUtils.isEmpty(businessObjectDataKey.getSubPartitionValues())) {
        // Validate that business object format has a schema.
        Assert.notNull(businessObjectFormat.getSchema(), String.format(
            "Schema must be defined when using subpartition values for business object format {%s}.",
            businessObjectFormatHelper.businessObjectFormatKeyToString(
                businessObjectFormatHelper.getBusinessObjectFormatKey(businessObjectFormat))));

        // Validate that business object format has a schema with partitions.
        Assert.notNull(businessObjectFormat.getSchema().getPartitions(), String.format(
            "Schema partition(s) must be defined when using subpartition values for business object "
                + "format {%s}.",
            businessObjectFormatHelper.businessObjectFormatKeyToString(
                businessObjectFormatHelper.getBusinessObjectFormatKey(businessObjectFormat))));

        // Validate that we have sub-partition columns specified in the business object format schema.
        Assert.isTrue(
            businessObjectFormat.getSchema().getPartitions().size() > businessObjectDataKey
                .getSubPartitionValues().size(),
            String.format(
                "Number of subpartition values specified for the business object data is greater than or equal to "
                    + "the number of partition columns defined in the schema for the associated business object format."
                    + "Business object data: {%s}",
                businessObjectDataHelper.businessObjectDataKeyToString(businessObjectDataKey)));

        // Sub-partition value i maps to schema partition column i + 1 (column 0 is the primary partition).
        for (int i = 0; i < businessObjectDataKey.getSubPartitionValues().size(); i++) {
            subPartitions.put(
                s3KeyPrefixFormat(businessObjectFormat.getSchema().getPartitions().get(i + 1).getName()),
                businessObjectDataKey.getSubPartitionValues().get(i));
        }
    }

    // Add the map of sub-partitions to the context.
    context.put("businessObjectDataSubPartitions", subPartitions);
    context.put("CollectionUtils", CollectionUtils.class);

    // Process the velocity template.
    String s3KeyPrefix = velocityHelper.evaluate(s3KeyPrefixVelocityTemplate, context,
        configurationHelper.getProperty(ConfigurationValue.S3_ATTRIBUTE_NAME_KEY_PREFIX_VELOCITY_TEMPLATE));

    // Validate that S3 key prefix is not blank.
    Assert.isTrue(StringUtils.isNotBlank(s3KeyPrefix), String.format(
        "S3 key prefix velocity template \"%s\" configured for \"%s\" storage results in an empty S3 key prefix.",
        s3KeyPrefixVelocityTemplate, storageName));

    // Return the S3 key prefix.
    return s3KeyPrefix;
}
From source file:org.finra.herd.service.helper.StorageUnitDaoHelper.java
/**
 * Retrieves a storage unit entity for the specified business object data storage unit key and makes sure it exists.
 *
 * @param businessObjectDataStorageUnitKey the business object data storage unit key
 *
 * @return the storage unit entity
 */
public StorageUnitEntity getStorageUnitEntityByKey(
    BusinessObjectDataStorageUnitKey businessObjectDataStorageUnitKey) {
    StorageUnitEntity storageUnitEntity = storageUnitDao.getStorageUnitByKey(businessObjectDataStorageUnitKey);

    // Return immediately when the storage unit exists.
    if (storageUnitEntity != null) {
        return storageUnitEntity;
    }

    // Render the sub-partition values for the error message (empty string when there are none).
    String subPartitionValues =
        CollectionUtils.isEmpty(businessObjectDataStorageUnitKey.getSubPartitionValues()) ? ""
            : StringUtils.join(businessObjectDataStorageUnitKey.getSubPartitionValues(), ",");

    throw new ObjectNotFoundException(String.format(
        "Business object data storage unit {namespace: \"%s\", businessObjectDefinitionName: \"%s\", businessObjectFormatUsage: \"%s\", "
            + "businessObjectFormatFileType: \"%s\", businessObjectFormatVersion: %d, businessObjectDataPartitionValue: \"%s\", "
            + "businessObjectDataSubPartitionValues: \"%s\", businessObjectDataVersion: %d, storageName: \"%s\"} doesn't exist.",
        businessObjectDataStorageUnitKey.getNamespace(),
        businessObjectDataStorageUnitKey.getBusinessObjectDefinitionName(),
        businessObjectDataStorageUnitKey.getBusinessObjectFormatUsage(),
        businessObjectDataStorageUnitKey.getBusinessObjectFormatFileType(),
        businessObjectDataStorageUnitKey.getBusinessObjectFormatVersion(),
        businessObjectDataStorageUnitKey.getPartitionValue(), subPartitionValues,
        businessObjectDataStorageUnitKey.getBusinessObjectDataVersion(),
        businessObjectDataStorageUnitKey.getStorageName()));
}
From source file:org.finra.herd.service.helper.StorageUnitHelper.java
/** * Creates a list of storage units from the list of storage unit entities. * * @param storageUnitEntities the storage unit entities. * @param includeStorageUnitStatusHistory specifies to include storage unit status history for each storage unit in the response * * @return the list of storage units./*from w w w . j a v a 2 s .com*/ */ public List<StorageUnit> createStorageUnitsFromEntities(Collection<StorageUnitEntity> storageUnitEntities, Boolean includeStorageUnitStatusHistory) { List<StorageUnit> storageUnits = new ArrayList<>(); for (StorageUnitEntity storageUnitEntity : storageUnitEntities) { StorageUnit storageUnit = new StorageUnit(); storageUnits.add(storageUnit); Storage storage = new Storage(); storageUnit.setStorage(storage); StorageEntity storageEntity = storageUnitEntity.getStorage(); storage.setName(storageEntity.getName()); storage.setStoragePlatformName(storageEntity.getStoragePlatform().getName()); // Add the storage attributes. if (!CollectionUtils.isEmpty(storageEntity.getAttributes())) { List<Attribute> storageAttributes = new ArrayList<>(); storage.setAttributes(storageAttributes); for (StorageAttributeEntity storageAttributeEntity : storageEntity.getAttributes()) { Attribute attribute = new Attribute(); storageAttributes.add(attribute); attribute.setName(storageAttributeEntity.getName()); attribute.setValue(storageAttributeEntity.getValue()); } } // Add the storage directory. if (storageUnitEntity.getDirectoryPath() != null) { StorageDirectory storageDirectory = new StorageDirectory(); storageUnit.setStorageDirectory(storageDirectory); storageDirectory.setDirectoryPath(storageUnitEntity.getDirectoryPath()); } // Add the storage files. 
if (!storageUnitEntity.getStorageFiles().isEmpty()) { List<StorageFile> storageFiles = new ArrayList<>(); storageUnit.setStorageFiles(storageFiles); for (StorageFileEntity storageFileEntity : storageUnitEntity.getStorageFiles()) { storageFiles.add(storageFileHelper.createStorageFileFromEntity(storageFileEntity)); } } // Set the storage unit status. storageUnit.setStorageUnitStatus(storageUnitEntity.getStatus().getCode()); // If specified, add storage unit status history. if (BooleanUtils.isTrue(includeStorageUnitStatusHistory)) { List<StorageUnitStatusChangeEvent> storageUnitStatusChangeEvents = new ArrayList<>(); storageUnit.setStorageUnitStatusHistory(storageUnitStatusChangeEvents); for (StorageUnitStatusHistoryEntity storageUnitStatusHistoryEntity : storageUnitEntity .getHistoricalStatuses()) { storageUnitStatusChangeEvents.add( new StorageUnitStatusChangeEvent(storageUnitStatusHistoryEntity.getStatus().getCode(), HerdDateUtils.getXMLGregorianCalendarValue( storageUnitStatusHistoryEntity.getCreatedOn()), storageUnitStatusHistoryEntity.getCreatedBy())); } } // Set the number of failed attempts to execute a storage policy transition. storageUnit.setStoragePolicyTransitionFailedAttempts( storageUnitEntity.getStoragePolicyTransitionFailedAttempts()); if (storageUnitEntity.getRestoreExpirationOn() != null) { storageUnit.setRestoreExpirationOn( HerdDateUtils.getXMLGregorianCalendarValue(storageUnitEntity.getRestoreExpirationOn())); } } return storageUnits; }