List of usage examples for org.springframework.util Assert notEmpty
public static void notEmpty(@Nullable Map<?, ?> map, Supplier<String> messageSupplier)
From source file:org.finra.dm.service.impl.ExpectedPartitionValueServiceImpl.java
/** * Validate a list of expected partition values. This method also trims the expected partition values. * * @param expectedPartitionValues the list of expected partition values * * @return the validated and sorted list of expected partition values * @throws IllegalArgumentException if any validation errors were found *///from w w w . j ava 2s.c o m private List<String> validateExpectedPartitionValues(List<String> expectedPartitionValues) { Assert.notEmpty(expectedPartitionValues, "At least one expected partition value must be specified."); // Ensure the expected partition value isn't a duplicate by using a hash set. Set<String> validatedExpectedPartitionValuesSet = new LinkedHashSet<>(); for (String expectedPartitionValue : expectedPartitionValues) { Assert.hasText(expectedPartitionValue, "An expected partition value must be specified."); String trimmedExpectedPartitionValue = expectedPartitionValue.trim(); if (validatedExpectedPartitionValuesSet.contains(trimmedExpectedPartitionValue)) { throw new IllegalArgumentException(String.format("Duplicate expected partition value \"%s\" found.", trimmedExpectedPartitionValue)); } validatedExpectedPartitionValuesSet.add(trimmedExpectedPartitionValue); } List<String> validatedExpectedPartitionValues = new ArrayList<>(validatedExpectedPartitionValuesSet); // Sort the expected partition values list. Collections.sort(validatedExpectedPartitionValues); // Return the updated expected partition value list. return validatedExpectedPartitionValues; }
From source file:org.finra.herd.service.helper.BusinessObjectDataDaoHelper.java
/** * Builds a list of partition values from a "partition value range" partition value filter option. The list of partition values will come from the expected * partition values table for values within the specified range. The list will be ordered ascending. * * @param partitionValueRange the partition value range * @param businessObjectFormatEntity the business object format entity * * @return the unique and sorted partition value list */// w w w . j a va2 s .com private List<String> processPartitionValueRangeFilterOption(PartitionValueRange partitionValueRange, BusinessObjectFormatEntity businessObjectFormatEntity) { List<String> resultPartitionValues = new ArrayList<>(); Assert.notNull(businessObjectFormatEntity.getPartitionKeyGroup(), String.format( "A partition key group, which is required to use partition value ranges, is not specified for the business object format {%s}.", businessObjectFormatHelper.businessObjectFormatEntityAltKeyToString(businessObjectFormatEntity))); List<ExpectedPartitionValueEntity> expectedPartitionValueEntities = expectedPartitionValueDao .getExpectedPartitionValuesByGroupAndRange( businessObjectFormatEntity.getPartitionKeyGroup().getPartitionKeyGroupName(), partitionValueRange); // Populate the partition values returned from the range query. for (ExpectedPartitionValueEntity expectedPartitionValueEntity : expectedPartitionValueEntities) { String partitionValue = expectedPartitionValueEntity.getPartitionValue(); // Validate that expected partition value does not match to one of the partition value tokens. Assert.isTrue( !partitionValue.equals(BusinessObjectDataService.MAX_PARTITION_VALUE_TOKEN) && !partitionValue.equals(BusinessObjectDataService.MIN_PARTITION_VALUE_TOKEN), "A partition value token cannot be specified as one of the expected partition values."); resultPartitionValues.add(partitionValue); } // Validate that our partition value range results in a non-empty partition value list. 
Assert.notEmpty(resultPartitionValues, String.format( "Partition value range [\"%s\", \"%s\"] contains no valid partition values in partition key group \"%s\". Business object format:{%s}", partitionValueRange.getStartPartitionValue(), partitionValueRange.getEndPartitionValue(), businessObjectFormatEntity.getPartitionKeyGroup().getPartitionKeyGroupName(), businessObjectFormatHelper.businessObjectFormatEntityAltKeyToString(businessObjectFormatEntity))); return resultPartitionValues; }
From source file:org.finra.herd.service.helper.BusinessObjectDataHelper.java
/**
 * Validates a list of partition value filters or a standalone partition filter. This method makes sure that a
 * partition value filter contains exactly one partition value range or a non-empty partition value list. This
 * method also makes sure that there is no more than one partition value range specified across all partition value
 * filters. Filters are trimmed in place, so the caller's objects are mutated.
 *
 * @param partitionValueFilters the list of partition value filters to validate
 * @param standalonePartitionValueFilter the standalone partition value filter to validate
 * @param allowPartitionValueTokens specifies whether the partition value filter is allowed to contain partition value tokens
 */
public void validatePartitionValueFilters(List<PartitionValueFilter> partitionValueFilters,
    PartitionValueFilter standalonePartitionValueFilter, boolean allowPartitionValueTokens) {
    // Make sure that request does not contain both a list of partition value filters and a standalone partition value filter.
    Assert.isTrue(partitionValueFilters == null || standalonePartitionValueFilter == null,
        "A list of partition value filters and a standalone partition value filter cannot be both specified.");

    // Merge both input forms into a single list so the validation loop below handles them uniformly.
    List<PartitionValueFilter> partitionValueFiltersToValidate = new ArrayList<>();
    if (partitionValueFilters != null) {
        partitionValueFiltersToValidate.addAll(partitionValueFilters);
    }
    if (standalonePartitionValueFilter != null) {
        partitionValueFiltersToValidate.add(standalonePartitionValueFilter);
    }

    // Make sure that at least one partition value filter is specified.
    Assert.notEmpty(partitionValueFiltersToValidate, "At least one partition value filter must be specified.");

    // Validate and trim partition value filters.
    int partitionValueRangesCount = 0;
    for (PartitionValueFilter partitionValueFilter : partitionValueFiltersToValidate) {
        // Partition key is required when request contains a partition value filter list.
        // For a standalone filter the partition key is optional — it is only trimmed when present.
        if (partitionValueFilters != null) {
            Assert.hasText(partitionValueFilter.getPartitionKey(), "A partition key must be specified.");
        }

        // Trim partition key value.
        if (StringUtils.isNotBlank(partitionValueFilter.getPartitionKey())) {
            partitionValueFilter.setPartitionKey(partitionValueFilter.getPartitionKey().trim());
        }

        PartitionValueRange partitionValueRange = partitionValueFilter.getPartitionValueRange();
        List<String> partitionValues = partitionValueFilter.getPartitionValues();
        LatestBeforePartitionValue latestBeforePartitionValue = partitionValueFilter.getLatestBeforePartitionValue();
        LatestAfterPartitionValue latestAfterPartitionValue = partitionValueFilter.getLatestAfterPartitionValue();

        // Validate that we have exactly one partition filter option specified.
        List<Boolean> partitionFilterOptions = Arrays.asList(partitionValueRange != null, partitionValues != null,
            latestBeforePartitionValue != null, latestAfterPartitionValue != null);
        Assert.isTrue(Collections.frequency(partitionFilterOptions, Boolean.TRUE) == 1,
            "Exactly one partition value filter option must be specified.");

        if (partitionValueRange != null) {
            // A "partition value range" filter option is specified.

            // Only one partition value range is allowed across all partition value filters.
            partitionValueRangesCount++;
            Assert.isTrue(partitionValueRangesCount < 2, "Cannot specify more than one partition value range.");

            // Validate start partition value for the partition value range.
            Assert.hasText(partitionValueRange.getStartPartitionValue(),
                "A start partition value for the partition value range must be specified.");
            partitionValueRange.setStartPartitionValue(partitionValueRange.getStartPartitionValue().trim());

            // Validate end partition value for the partition value range.
            Assert.hasText(partitionValueRange.getEndPartitionValue(),
                "An end partition value for the partition value range must be specified.");
            partitionValueRange.setEndPartitionValue(partitionValueRange.getEndPartitionValue().trim());

            // Validate that partition value tokens are not specified as start and end partition values.
            // This check is required, regardless if partition value tokens are allowed or not.
            Assert.isTrue(
                !partitionValueRange.getStartPartitionValue()
                    .equals(BusinessObjectDataService.MAX_PARTITION_VALUE_TOKEN)
                    && !partitionValueRange.getStartPartitionValue()
                        .equals(BusinessObjectDataService.MIN_PARTITION_VALUE_TOKEN)
                    && !partitionValueRange.getEndPartitionValue()
                        .equals(BusinessObjectDataService.MAX_PARTITION_VALUE_TOKEN)
                    && !partitionValueRange.getEndPartitionValue()
                        .equals(BusinessObjectDataService.MIN_PARTITION_VALUE_TOKEN),
                "A partition value token cannot be specified with a partition value range.");

            // Using string compare, validate that start partition value is less than or equal to end partition value.
            Assert.isTrue(
                partitionValueRange.getStartPartitionValue()
                    .compareTo(partitionValueRange.getEndPartitionValue()) <= 0,
                String.format("The start partition value \"%s\" cannot be greater than the end partition value \"%s\".",
                    partitionValueRange.getStartPartitionValue(), partitionValueRange.getEndPartitionValue()));
        } else if (partitionValues != null) {
            // A "partition value list" filter option is specified.

            // Validate that the list contains at least one partition value.
            Assert.isTrue(!partitionValues.isEmpty(), "At least one partition value must be specified.");

            // Index-based loop because each value is trimmed and written back into the same list slot.
            for (int i = 0; i < partitionValues.size(); i++) {
                String partitionValue = partitionValues.get(i);
                Assert.hasText(partitionValue, "A partition value must be specified.");
                partitionValue = partitionValue.trim();

                // When partition value tokens are not allowed, validate that they are not specified as one of partition values.
                if (!allowPartitionValueTokens) {
                    Assert.isTrue(!partitionValue.equals(BusinessObjectDataService.MAX_PARTITION_VALUE_TOKEN)
                        && !partitionValue.equals(BusinessObjectDataService.MIN_PARTITION_VALUE_TOKEN),
                        "A partition value token cannot be specified as one of partition values.");
                }

                partitionValues.set(i, partitionValue);
            }
        } else if (latestBeforePartitionValue != null) {
            // A "latest before partition value" filter option is specified.
            Assert.hasText(latestBeforePartitionValue.getPartitionValue(), "A partition value must be specified.");
            latestBeforePartitionValue.setPartitionValue(latestBeforePartitionValue.getPartitionValue().trim());
        } else {
            // A "latest after partition value" filter option is specified.
            // The exactly-one-option check above guarantees latestAfterPartitionValue is non-null here.
            Assert.hasText(latestAfterPartitionValue.getPartitionValue(), "A partition value must be specified.");
            latestAfterPartitionValue.setPartitionValue(latestAfterPartitionValue.getPartitionValue().trim());
        }
    }
}
From source file:org.finra.herd.service.helper.EmrClusterDefinitionHelper.java
/**
 * Validates an EMR cluster definition configuration. Trims and normalizes several fields in place, so the caller's
 * definition object is mutated (e.g. a redundant core instance definition with a non-positive instance count is
 * removed).
 *
 * @param emrClusterDefinition the EMR cluster definition configuration
 *
 * @throws IllegalArgumentException if any validation errors were found
 */
public void validateEmrClusterDefinitionConfiguration(EmrClusterDefinition emrClusterDefinition)
    throws IllegalArgumentException {
    Assert.notNull(emrClusterDefinition, "An EMR cluster definition configuration must be specified.");

    // The subnet ID field may hold a comma-separated list; every element must be non-blank.
    Assert.isTrue(StringUtils.isNotBlank(emrClusterDefinition.getSubnetId()), "Subnet ID must be specified");
    for (String token : emrClusterDefinition.getSubnetId().split(",")) {
        Assert.isTrue(StringUtils.isNotBlank(token), "No blank is allowed in the list of subnet IDs");
    }

    // Either instance group definitions or instance fleets must be present (the two provisioning models).
    Assert.isTrue(
        !emrHelper.isInstanceDefinitionsEmpty(emrClusterDefinition.getInstanceDefinitions())
            || CollectionUtils.isNotEmpty(emrClusterDefinition.getInstanceFleets()),
        "Instance group definitions or instance fleets must be specified.");

    if (!emrHelper.isInstanceDefinitionsEmpty(emrClusterDefinition.getInstanceDefinitions())) {
        // Check master instances.
        Assert.notNull(emrClusterDefinition.getInstanceDefinitions().getMasterInstances(),
            "Master instances must be specified.");
        validateMasterInstanceDefinition(emrClusterDefinition.getInstanceDefinitions().getMasterInstances());

        // Check core instances.
        if (emrClusterDefinition.getInstanceDefinitions().getCoreInstances() != null) {
            validateInstanceDefinition("core", emrClusterDefinition.getInstanceDefinitions().getCoreInstances(), 0);
            // If instance count is <= 0, remove the entire core instance definition since it is redundant.
            if (emrClusterDefinition.getInstanceDefinitions().getCoreInstances().getInstanceCount() <= 0) {
                emrClusterDefinition.getInstanceDefinitions().setCoreInstances(null);
            }
        }

        // Check task instances
        if (emrClusterDefinition.getInstanceDefinitions().getTaskInstances() != null) {
            validateInstanceDefinition("task", emrClusterDefinition.getInstanceDefinitions().getTaskInstances(), 1);
        }

        // Check that total number of instances does not exceed the max allowed.
        // A configured limit of 0 (or less) disables this check.
        int maxEmrInstanceCount = configurationHelper.getProperty(ConfigurationValue.MAX_EMR_INSTANCES_COUNT,
            Integer.class);
        if (maxEmrInstanceCount > 0) {
            int instancesRequested = emrClusterDefinition.getInstanceDefinitions().getMasterInstances()
                .getInstanceCount();
            if (emrClusterDefinition.getInstanceDefinitions().getCoreInstances() != null) {
                instancesRequested += emrClusterDefinition.getInstanceDefinitions().getCoreInstances()
                    .getInstanceCount();
            }
            if (emrClusterDefinition.getInstanceDefinitions().getTaskInstances() != null) {
                instancesRequested += emrClusterDefinition.getInstanceDefinitions().getTaskInstances()
                    .getInstanceCount();
            }
            Assert.isTrue((maxEmrInstanceCount >= instancesRequested),
                "Total number of instances requested can not exceed : " + maxEmrInstanceCount);
        }
    }

    // Validate node tags including checking for required tags and detecting any duplicate node tag names in case sensitive manner.
    Assert.notEmpty(emrClusterDefinition.getNodeTags(), "Node tags must be specified.");
    HashSet<String> nodeTagNameValidationSet = new HashSet<>();
    for (NodeTag nodeTag : emrClusterDefinition.getNodeTags()) {
        Assert.hasText(nodeTag.getTagName(), "A node tag name must be specified.");
        Assert.hasText(nodeTag.getTagValue(), "A node tag value must be specified.");
        Assert.isTrue(!nodeTagNameValidationSet.contains(nodeTag.getTagName()),
            String.format("Duplicate node tag \"%s\" is found.", nodeTag.getTagName()));
        nodeTagNameValidationSet.add(nodeTag.getTagName());
    }

    // Validate the mandatory AWS tags are there
    for (String mandatoryTag : herdStringHelper.splitStringWithDefaultDelimiter(
        configurationHelper.getProperty(ConfigurationValue.MANDATORY_AWS_TAGS))) {
        Assert.isTrue(nodeTagNameValidationSet.contains(mandatoryTag),
            String.format("Mandatory AWS tag not specified: \"%s\"", mandatoryTag));
    }

    // Normalize (assert non-blank when present, and trim) the optional security group fields in place.
    emrClusterDefinition.setAdditionalMasterSecurityGroups(assertNotBlankAndTrim(
        emrClusterDefinition.getAdditionalMasterSecurityGroups(), "additionalMasterSecurityGroup"));
    emrClusterDefinition.setAdditionalSlaveSecurityGroups(assertNotBlankAndTrim(
        emrClusterDefinition.getAdditionalSlaveSecurityGroups(), "additionalSlaveSecurityGroup"));

    // Fail if security configuration is specified for EMR version less than 4.8.0.
    // The "emr-" prefix is stripped from the release label before the version comparison.
    if (StringUtils.isNotBlank(emrClusterDefinition.getSecurityConfiguration())) {
        final DefaultArtifactVersion securityConfigurationMinEmrVersion = new DefaultArtifactVersion("4.8.0");
        Assert.isTrue(
            StringUtils.isNotBlank(emrClusterDefinition.getReleaseLabel())
                && securityConfigurationMinEmrVersion.compareTo(new DefaultArtifactVersion(
                    emrClusterDefinition.getReleaseLabel().replaceFirst("^(emr-)", ""))) <= 0,
            "EMR security configuration is not supported prior to EMR release 4.8.0.");
    }
}
From source file:org.finra.herd.service.helper.HerdHelper.java
/**
 * Validates an EMR cluster definition configuration. Unlike the EmrClusterDefinitionHelper variant elsewhere in
 * this listing, this version requires instance group definitions (including core instances) and does not support
 * instance fleets or security configuration checks.
 *
 * @param emrClusterDefinition the EMR cluster definition configuration
 *
 * @throws IllegalArgumentException if any validation errors were found
 */
public void validateEmrClusterDefinitionConfiguration(EmrClusterDefinition emrClusterDefinition)
    throws IllegalArgumentException {
    Assert.notNull(emrClusterDefinition, "An EMR cluster definition configuration must be specified.");

    // The subnet ID field may hold a comma-separated list; every element must be non-blank.
    Assert.isTrue(StringUtils.isNotBlank(emrClusterDefinition.getSubnetId()), "Subnet ID must be specified");
    for (String token : emrClusterDefinition.getSubnetId().split(",")) {
        Assert.isTrue(StringUtils.isNotBlank(token), "No blank is allowed in the list of subnet IDs");
    }

    Assert.notNull(emrClusterDefinition.getInstanceDefinitions(), "Instance definitions must be specified.");

    // Check master instances.
    Assert.notNull(emrClusterDefinition.getInstanceDefinitions().getMasterInstances(),
        "Master instances must be specified.");
    validateMasterInstanceDefinition(emrClusterDefinition.getInstanceDefinitions().getMasterInstances());

    // Check core instances.
    Assert.notNull(emrClusterDefinition.getInstanceDefinitions().getCoreInstances(),
        "Core instances must be specified.");
    validateInstanceDefinition("core", emrClusterDefinition.getInstanceDefinitions().getCoreInstances());

    // Check task instances
    if (emrClusterDefinition.getInstanceDefinitions().getTaskInstances() != null) {
        validateInstanceDefinition("task", emrClusterDefinition.getInstanceDefinitions().getTaskInstances());
    }

    // Check that total number of instances does not exceed the max allowed.
    // A configured limit of 0 (or less) disables this check.
    int maxEmrInstanceCount = configurationHelper.getProperty(ConfigurationValue.MAX_EMR_INSTANCES_COUNT,
        Integer.class);
    if (maxEmrInstanceCount > 0) {
        int instancesRequested = emrClusterDefinition.getInstanceDefinitions().getMasterInstances()
            .getInstanceCount()
            + emrClusterDefinition.getInstanceDefinitions().getCoreInstances().getInstanceCount();
        if (emrClusterDefinition.getInstanceDefinitions().getTaskInstances() != null) {
            instancesRequested += emrClusterDefinition.getInstanceDefinitions().getTaskInstances()
                .getInstanceCount();
        }
        Assert.isTrue((maxEmrInstanceCount >= instancesRequested),
            "Total number of instances requested can not exceed : " + maxEmrInstanceCount);
    }

    // Validate node tags including checking for required tags and detecting any duplicate node tag names in case sensitive manner.
    Assert.notEmpty(emrClusterDefinition.getNodeTags(), "Node tags must be specified.");
    HashSet<String> nodeTagNameValidationSet = new HashSet<>();
    for (NodeTag nodeTag : emrClusterDefinition.getNodeTags()) {
        Assert.hasText(nodeTag.getTagName(), "A node tag name must be specified.");
        Assert.hasText(nodeTag.getTagValue(), "A node tag value must be specified.");
        Assert.isTrue(!nodeTagNameValidationSet.contains(nodeTag.getTagName()),
            String.format("Duplicate node tag \"%s\" is found.", nodeTag.getTagName()));
        nodeTagNameValidationSet.add(nodeTag.getTagName());
    }

    // Validate the mandatory AWS tags are there
    for (String mandatoryTag : herdStringHelper.splitStringWithDefaultDelimiter(
        configurationHelper.getProperty(ConfigurationValue.MANDATORY_AWS_TAGS))) {
        Assert.isTrue(nodeTagNameValidationSet.contains(mandatoryTag),
            String.format("Mandatory AWS tag not specified: \"%s\"", mandatoryTag));
    }
}
From source file:org.finra.herd.service.helper.Hive13DdlGenerator.java
/** * Asserts that there exists at least one column specified in the business object format schema. * * @param businessObjectFormat The {@link BusinessObjectFormat} containing schema columns. * @param businessObjectFormatEntity The entity used to generate the error message. *///from w ww .ja va 2 s . c o m private void assertSchemaColumnsNotEmpty(BusinessObjectFormat businessObjectFormat, BusinessObjectFormatEntity businessObjectFormatEntity) { Assert.notEmpty(businessObjectFormat.getSchema().getColumns(), String.format( "No schema columns specified for business object format {%s}.", businessObjectFormatHelper.businessObjectFormatEntityAltKeyToString(businessObjectFormatEntity))); }
From source file:org.finra.herd.service.helper.Hive13DdlGenerator.java
/**
 * Generates and appends to the string builder the create table Hive 13 DDL as per specified parameters.
 *
 * @param generateDdlRequest the DDL generation parameters (format entity, partition filters, custom DDL, flags)
 *
 * @return the generated DDL, trimmed of trailing end-of-line characters
 */
// TODO: We might want to consider using a template engine such as Velocity to generate this DDL so we don't wind up just doing string manipulation.
private String generateCreateTableDdlHelper(GenerateDdlRequest generateDdlRequest) {
    StringBuilder sb = new StringBuilder();

    // For custom DDL, we would need to substitute the custom DDL tokens with their relative values.
    HashMap<String, String> replacements = new HashMap<>();

    // Validate that partition values passed in the list of partition filters do not contain '/' character.
    if (generateDdlRequest.isPartitioned && !CollectionUtils.isEmpty(generateDdlRequest.partitionFilters)) {
        // Validate that partition values do not contain '/' characters.
        for (List<String> partitionFilter : generateDdlRequest.partitionFilters) {
            for (String partitionValue : partitionFilter) {
                Assert.doesNotContain(partitionValue, "/",
                    String.format("Partition value \"%s\" can not contain a '/' character.", partitionValue));
            }
        }
    }

    // Get business object format model object to directly access schema columns and partitions.
    BusinessObjectFormat businessObjectFormat = businessObjectFormatHelper
        .createBusinessObjectFormatFromEntity(generateDdlRequest.businessObjectFormatEntity);

    // Validate that we have at least one column specified in the business object format schema.
    assertSchemaColumnsNotEmpty(businessObjectFormat, generateDdlRequest.businessObjectFormatEntity);

    if (generateDdlRequest.isPartitioned) {
        // Validate that we have at least one partition column specified in the business object format schema.
        Assert.notEmpty(businessObjectFormat.getSchema().getPartitions(),
            String.format("No schema partitions specified for business object format {%s}.",
                businessObjectFormatHelper.businessObjectFormatEntityAltKeyToString(
                    generateDdlRequest.businessObjectFormatEntity)));

        // Validate that partition column names do not contain '/' characters.
        for (SchemaColumn partitionColumn : businessObjectFormat.getSchema().getPartitions()) {
            Assert.doesNotContain(partitionColumn.getName(), "/", String.format(
                "Partition column name \"%s\" can not contain a '/' character. Business object format: {%s}",
                partitionColumn.getName(), businessObjectFormatHelper.businessObjectFormatEntityAltKeyToString(
                    generateDdlRequest.businessObjectFormatEntity)));
        }
    }

    // Add drop table if requested.
    if (BooleanUtils.isTrue(generateDdlRequest.includeDropTableStatement)) {
        sb.append(String.format("DROP TABLE IF EXISTS `%s`;\n\n", generateDdlRequest.tableName));
    }

    // Depending on the flag, prepare "if not exists" option text or leave it an empty string.
    String ifNotExistsOption = BooleanUtils.isTrue(generateDdlRequest.includeIfNotExistsOption) ? "IF NOT EXISTS "
        : "";

    // Only generate the create table DDL statement, if custom DDL was not specified.
    if (generateDdlRequest.customDdlEntity == null) {
        generateStandardBaseDdl(generateDdlRequest, sb, businessObjectFormat, ifNotExistsOption);
    } else {
        // Use the custom DDL in place of the create table statement.
        sb.append(String.format("%s\n\n", generateDdlRequest.customDdlEntity.getDdl()));

        // We need to substitute the relative custom DDL token with an actual table name.
        replacements.put(TABLE_NAME_CUSTOM_DDL_TOKEN, generateDdlRequest.tableName);
    }

    // Add alter table statements only if the list of partition filters is not empty - this is applicable to generating DDL for business object data only.
    if (!CollectionUtils.isEmpty(generateDdlRequest.partitionFilters)) {
        processPartitionFiltersForGenerateDdl(generateDdlRequest, sb, replacements, businessObjectFormat,
            ifNotExistsOption);
    }
    // Add a location statement with a token if this is format DDL that does not use custom DDL.
    else if (!generateDdlRequest.isPartitioned && generateDdlRequest.customDdlEntity == null) {
        // Since custom DDL is not specified, there are no partition values, and this table is not partitioned, add a LOCATION clause with a token.
        sb.append(String.format("LOCATION '%s';", NON_PARTITIONED_TABLE_LOCATION_CUSTOM_DDL_TOKEN));
    }

    // Trim to remove unnecessary end-of-line characters, if any, from the end of the generated DDL.
    String resultDdl = sb.toString().trim();

    // For custom DDL, substitute the relative custom DDL tokens with their values.
    // NOTE(review): replaceAll treats 'value' as a regex replacement string — a '$' or '\' in a replacement
    // value would be interpreted as a group reference; Matcher.quoteReplacement(value) would be safer. Confirm
    // whether replacement values can ever contain those characters.
    if (generateDdlRequest.customDdlEntity != null) {
        for (Map.Entry<String, String> entry : replacements.entrySet()) {
            String token = entry.getKey();
            String value = entry.getValue();
            resultDdl = resultDdl.replaceAll(Pattern.quote(token), value);
        }
    }

    return resultDdl;
}
From source file:org.finra.herd.service.impl.BusinessObjectDataServiceImpl.java
/**
 * Retrieves the DDL to initialize the specified type of the database system to perform queries for a range of
 * requested business object data in the specified storage.
 *
 * @param request the business object data DDL request
 * @param skipRequestValidation specifies whether to skip the request validation and trimming
 *
 * @return the business object data DDL information
 */
BusinessObjectDataDdl generateBusinessObjectDataDdlImpl(BusinessObjectDataDdlRequest request,
    boolean skipRequestValidation) {
    // Perform the validation.
    if (!skipRequestValidation) {
        validateBusinessObjectDataDdlRequest(request);
    }

    // Get the business object format entity for the specified parameters and make sure it exists.
    // Please note that when format version is not specified, we should get back the latest format version.
    BusinessObjectFormatEntity businessObjectFormatEntity = businessObjectFormatDaoHelper
        .getBusinessObjectFormatEntity(new BusinessObjectFormatKey(request.getNamespace(),
            request.getBusinessObjectDefinitionName(), request.getBusinessObjectFormatUsage(),
            request.getBusinessObjectFormatFileType(), request.getBusinessObjectFormatVersion()));

    // Validate that format has schema information.
    Assert.notEmpty(businessObjectFormatEntity.getSchemaColumns(), String.format(
        "Business object format with namespace \"%s\", business object definition name \"%s\", format usage \"%s\", format file type \"%s\","
            + " and format version \"%s\" doesn't have schema information.",
        businessObjectFormatEntity.getBusinessObjectDefinition().getNamespace().getCode(),
        businessObjectFormatEntity.getBusinessObjectDefinition().getName(),
        businessObjectFormatEntity.getUsage(), businessObjectFormatEntity.getFileType().getCode(),
        businessObjectFormatEntity.getBusinessObjectFormatVersion()));

    // If it was specified, retrieve the custom DDL and ensure it exists.
    CustomDdlEntity customDdlEntity = null;
    if (StringUtils.isNotBlank(request.getCustomDdlName())) {
        CustomDdlKey customDdlKey = new CustomDdlKey(
            businessObjectFormatEntity.getBusinessObjectDefinition().getNamespace().getCode(),
            businessObjectFormatEntity.getBusinessObjectDefinition().getName(),
            businessObjectFormatEntity.getUsage(), businessObjectFormatEntity.getFileType().getCode(),
            businessObjectFormatEntity.getBusinessObjectFormatVersion(), request.getCustomDdlName());
        customDdlEntity = customDdlDaoHelper.getCustomDdlEntity(customDdlKey);
    }

    // Build a list of storage names specified in the request.
    // Both the single storage name field and the storage name list may be populated.
    List<String> storageNames = new ArrayList<>();
    if (StringUtils.isNotBlank(request.getStorageName())) {
        storageNames.add(request.getStorageName());
    }
    if (!CollectionUtils.isEmpty(request.getStorageNames())) {
        storageNames.addAll(request.getStorageNames());
    }

    // Validate that storage entities, specified in the request, exist, of a proper storage platform type, and have S3 bucket name configured.
    Map<String, StorageEntity> storageEntities = new HashMap<>();
    Map<String, String> s3BucketNames = new HashMap<>();
    for (String storageName : storageNames) {
        StorageEntity storageEntity = storageDaoHelper.getStorageEntity(storageName);

        // Only S3 storage platform is currently supported.
        Assert.isTrue(storageEntity.getStoragePlatform().getName().equals(StoragePlatformEntity.S3),
            String.format("Cannot generate DDL for \"%s\" storage platform.",
                storageEntity.getStoragePlatform().getName()));

        // Validate that storage have S3 bucket name configured. Please note that since S3 bucket name attribute value is required we pass a "true" flag.
        String s3BucketName = storageHelper.getStorageAttributeValueByName(
            configurationHelper.getProperty(ConfigurationValue.S3_ATTRIBUTE_NAME_BUCKET_NAME), storageEntity,
            true);

        // Memorize retrieved values for faster processing.
        // Maps are keyed by upper-cased storage name, so later lookups are case-insensitive.
        String upperCaseStorageName = storageName.toUpperCase();
        storageEntities.put(upperCaseStorageName, storageEntity);
        s3BucketNames.put(upperCaseStorageName, s3BucketName);
    }

    // Create and initialize a business object data DDL object instance.
    BusinessObjectDataDdl businessObjectDataDdl = createBusinessObjectDataDdl(request);
    businessObjectDataDdl.setDdl(
        ddlGeneratorFactory.getDdlGenerator(request.getOutputFormat()).generateCreateTableDdl(request,
            businessObjectFormatEntity, customDdlEntity, storageNames, storageEntities, s3BucketNames));

    return businessObjectDataDdl;
}
From source file:org.finra.herd.service.impl.BusinessObjectDataStorageFileServiceImpl.java
/**
 * Discovers new storage files in S3 for the specified storage unit.
 *
 * @param storageUnitEntity the storage unit entity
 *
 * @return the list of discovered storage files
 */
private List<StorageFile> discoverStorageFiles(StorageUnitEntity storageUnitEntity) {
    // Retrieve all storage files already registered for this storage unit loaded in a map for easy access.
    Map<String, StorageFileEntity> storageFileEntities = storageFileHelper
        .getStorageFileEntitiesMap(storageUnitEntity.getStorageFiles());

    // Validate and get storage directory path from the storage unit.
    Assert.hasText(storageUnitEntity.getDirectoryPath(),
        "Business object data has no storage directory path which is required for auto-discovery of storage files.");
    String directoryPath = storageUnitEntity.getDirectoryPath();

    // Add a trailing slash to the storage directory path if it doesn't already have it.
    String directoryPathWithTrailingSlash = StringUtils.appendIfMissing(directoryPath, "/");

    // Retrieve all already registered storage files from the storage that start with the directory path.
    List<String> registeredStorageFilePaths = storageFileDao.getStorageFilesByStorageAndFilePathPrefix(
        storageUnitEntity.getStorage().getName(), directoryPathWithTrailingSlash);

    // Sanity check already registered storage files.
    // A mismatch means files are registered for this unit outside the directory prefix (or vice versa).
    if (storageFileEntities.size() != registeredStorageFilePaths.size()) {
        throw new IllegalArgumentException(String.format(
            "Number of storage files (%d) already registered for the business object data in \"%s\" storage is not equal to "
                + "the number of registered storage files (%d) matching \"%s\" S3 key prefix in the same storage.",
            storageFileEntities.size(), storageUnitEntity.getStorage().getName(),
            registeredStorageFilePaths.size(), directoryPathWithTrailingSlash));
    }

    // Get S3 bucket access parameters and set the key prefix to the directory path with a trailing slash.
    // Please note that since we got here, the directory path can not be empty.
    S3FileTransferRequestParamsDto params = storageHelper.getS3BucketAccessParams(storageUnitEntity.getStorage());
    params.setS3KeyPrefix(directoryPathWithTrailingSlash);

    // List S3 files ignoring 0 byte objects that represent S3 directories.
    // Please note that the map implementation returned by the helper method below
    // preserves the original order of files as returned by the S3 list command.
    Map<String, StorageFile> actualS3Keys = storageFileHelper
        .getStorageFilesMapFromS3ObjectSummaries(s3Service.listDirectory(params, true));

    // For the already registered storage files, validate file existence and file size against S3 keys and metadata reported by S3.
    for (Map.Entry<String, StorageFileEntity> entry : storageFileEntities.entrySet()) {
        storageFileHelper.validateStorageFileEntity(entry.getValue(), params.getS3BucketName(), actualS3Keys, true);
    }

    // Remove all already registered storage files from the map of actual S3 keys,
    // leaving only the unregistered (newly discovered) files.
    actualS3Keys.keySet().removeAll(storageFileEntities.keySet());

    // Validate that we have at least one unregistered storage file discovered in S3.
    Assert.notEmpty(actualS3Keys.keySet(),
        String.format("No unregistered storage files were discovered at s3://%s/%s location.",
            params.getS3BucketName(), directoryPathWithTrailingSlash));

    // Build and return a list of storage files.
    return new ArrayList<>(actualS3Keys.values());
}
From source file:org.finra.herd.service.impl.BusinessObjectDataStorageFileServiceImpl.java
/** * Validates the given request without using any external dependencies (ex. DB). Throws appropriate exceptions when a validation error exists. * * @param businessObjectDataStorageFilesCreateRequest - request to validate *//*from ww w . j a v a 2s . co m*/ private void validateBusinessObjectDataStorageFilesCreateRequest( BusinessObjectDataStorageFilesCreateRequest businessObjectDataStorageFilesCreateRequest) { Assert.hasText(businessObjectDataStorageFilesCreateRequest.getNamespace(), "A namespace must be specified."); businessObjectDataStorageFilesCreateRequest .setNamespace(businessObjectDataStorageFilesCreateRequest.getNamespace().trim()); Assert.hasText(businessObjectDataStorageFilesCreateRequest.getBusinessObjectDefinitionName(), "A business object definition name must be specified."); businessObjectDataStorageFilesCreateRequest.setBusinessObjectDefinitionName( businessObjectDataStorageFilesCreateRequest.getBusinessObjectDefinitionName().trim()); Assert.hasText(businessObjectDataStorageFilesCreateRequest.getBusinessObjectFormatUsage(), "A business object format usage must be specified."); businessObjectDataStorageFilesCreateRequest.setBusinessObjectFormatUsage( businessObjectDataStorageFilesCreateRequest.getBusinessObjectFormatUsage().trim()); Assert.hasText(businessObjectDataStorageFilesCreateRequest.getBusinessObjectFormatFileType(), "A business object format file type must be specified."); businessObjectDataStorageFilesCreateRequest.setBusinessObjectFormatFileType( businessObjectDataStorageFilesCreateRequest.getBusinessObjectFormatFileType().trim()); Assert.notNull(businessObjectDataStorageFilesCreateRequest.getBusinessObjectFormatVersion(), "A business object format version must be specified."); Assert.hasText(businessObjectDataStorageFilesCreateRequest.getPartitionValue(), "A partition value must be specified."); businessObjectDataStorageFilesCreateRequest .setPartitionValue(businessObjectDataStorageFilesCreateRequest.getPartitionValue().trim()); int 
subPartitionValuesCount = CollectionUtils .size(businessObjectDataStorageFilesCreateRequest.getSubPartitionValues()); Assert.isTrue(subPartitionValuesCount <= BusinessObjectDataEntity.MAX_SUBPARTITIONS, String.format("Exceeded maximum number of allowed subpartitions: %d.", BusinessObjectDataEntity.MAX_SUBPARTITIONS)); for (int i = 0; i < subPartitionValuesCount; i++) { Assert.hasText(businessObjectDataStorageFilesCreateRequest.getSubPartitionValues().get(i), "A subpartition value must be specified."); businessObjectDataStorageFilesCreateRequest.getSubPartitionValues().set(i, businessObjectDataStorageFilesCreateRequest.getSubPartitionValues().get(i).trim()); } Assert.notNull(businessObjectDataStorageFilesCreateRequest.getBusinessObjectDataVersion(), "A business object data version must be specified."); Assert.hasText(businessObjectDataStorageFilesCreateRequest.getStorageName(), "A storage name must be specified."); businessObjectDataStorageFilesCreateRequest .setStorageName(businessObjectDataStorageFilesCreateRequest.getStorageName().trim()); if (BooleanUtils.isTrue(businessObjectDataStorageFilesCreateRequest.isDiscoverStorageFiles())) { // The auto-discovery of storage files is enabled, thus storage files can not be specified. Assert.isTrue(CollectionUtils.isEmpty(businessObjectDataStorageFilesCreateRequest.getStorageFiles()), "Storage files cannot be specified when discovery of storage files is enabled."); } else { // Since auto-discovery is disabled, at least one storage file must be specified. Assert.notEmpty(businessObjectDataStorageFilesCreateRequest.getStorageFiles(), "At least one storage file must be specified when discovery of storage files is not enabled."); // Validate a list of storage files. storageFileHelper.validateCreateRequestStorageFiles( businessObjectDataStorageFilesCreateRequest.getStorageFiles()); } }