Example usage for org.apache.commons.collections4 MultiValuedMap get

Introduction

On this page you can find example usages of org.apache.commons.collections4 MultiValuedMap.get.

Prototype

Collection<V> get(K key);

Document

Returns a view collection of the values associated with the specified key.
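
A minimal sketch of these view semantics, using the ArrayListValuedHashMap implementation that appears in the examples below (class and variable names here are illustrative, not taken from any of the quoted sources):

import org.apache.commons.collections4.MultiValuedMap;
import org.apache.commons.collections4.multimap.ArrayListValuedHashMap;

import java.util.Collection;

public class MultiValuedMapGetDemo {
    public static void main(String[] args) {
        MultiValuedMap<String, String> map = new ArrayListValuedHashMap<>();
        map.put("fruits", "apple");
        map.put("fruits", "banana");

        // get() never returns null; an absent key yields an empty view.
        Collection<String> missing = map.get("vegetables");
        System.out.println(missing.isEmpty()); // true

        // The view is live: adding through it writes into the backing map.
        missing.add("carrot");
        System.out.println(map.containsKey("vegetables")); // true

        System.out.println(map.get("fruits")); // [apple, banana]
    }
}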

Usage

From source file:com.evolveum.midpoint.task.quartzimpl.work.workers.WorkersManager.java

/**
 * Goes through the groups and renames wrongly-named tasks to the correct names.
 */
private int renameWorkers(List<Task> currentWorkers, MultiValuedMap<String, WorkerKey> shouldBeWorkers,
        OperationResult result) throws SchemaException, ObjectNotFoundException, ObjectAlreadyExistsException {
    int count = 0;
    for (String shouldBeGroup : shouldBeWorkers.keySet()) {
        Collection<WorkerKey> shouldBeWorkersInGroup = shouldBeWorkers.get(shouldBeGroup);
        for (Task currentWorker : new ArrayList<>(currentWorkers)) {
            if (Objects.equals(shouldBeGroup, currentWorker.getGroup())) {
                if (!shouldBeWorkersInGroup.isEmpty()) {
                    WorkerKey nextWorker = shouldBeWorkersInGroup.iterator().next();
                    renameWorker(currentWorker, nextWorker.name, result);
                    currentWorkers.remove(currentWorker);
                    shouldBeWorkersInGroup.remove(nextWorker);
                    count++;
                } else {
                    break; // no more workers for this group
                }
            }
        }
    }
    LOGGER.trace("After renameWorkers (result: {}):\nCurrent workers: {}\nShould be workers: {}", count,
            currentWorkers, shouldBeWorkers);
    return count;
}
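
Note that this example depends on get() returning a live view: removing nextWorker from shouldBeWorkersInGroup also removes it from the backing shouldBeWorkers map, so each worker key is consumed at most once across iterations. A minimal sketch of that write-through removal (same imports as the first sketch above; names are illustrative):

MultiValuedMap<String, String> groups = new ArrayListValuedHashMap<>();
groups.put("g1", "worker-1");
groups.put("g1", "worker-2");

Collection<String> inGroup = groups.get("g1");
inGroup.remove("worker-1");           // removes through the view...
System.out.println(groups.get("g1")); // ...so the map now holds only [worker-2]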

From source file:com.evolveum.midpoint.prism.schema.SchemaRegistryImpl.java

private void parsePrismSchemas(List<SchemaDescription> schemaDescriptions, boolean allowDelayedItemDefinitions)
        throws SchemaException {
    List<SchemaDescription> prismSchemaDescriptions = schemaDescriptions.stream()
            .filter(sd -> sd.isPrismSchema()).collect(Collectors.toList());
    Element schemaElement = DOMUtil.createElement(DOMUtil.XSD_SCHEMA_ELEMENT);
    schemaElement.setAttribute("targetNamespace", "http://dummy/");
    schemaElement.setAttribute("elementFormDefault", "qualified");

    // These fragmented namespaces should not be included in wrapper XSD because they are defined in multiple XSD files.
    // We have to process them one by one.
    MultiValuedMap<String, SchemaDescription> schemasByNamespace = new ArrayListValuedHashMap<>();
    prismSchemaDescriptions.forEach(sd -> schemasByNamespace.put(sd.getNamespace(), sd));
    List<String> fragmentedNamespaces = schemasByNamespace.keySet().stream()
            .filter(ns -> schemasByNamespace.get(ns).size() > 1).collect(Collectors.toList());
    LOGGER.trace("Fragmented namespaces: {}", fragmentedNamespaces);

    List<SchemaDescription> wrappedDescriptions = new ArrayList<>();
    for (SchemaDescription description : prismSchemaDescriptions) {
        String namespace = description.getNamespace();
        if (!fragmentedNamespaces.contains(namespace)) {
            Element importElement = DOMUtil.createSubElement(schemaElement, DOMUtil.XSD_IMPORT_ELEMENT);
            importElement.setAttribute(DOMUtil.XSD_ATTR_NAMESPACE.getLocalPart(), namespace);
            description.setSchema(new PrismSchemaImpl(prismContext));
            wrappedDescriptions.add(description);
        }
    }
    if (LOGGER.isTraceEnabled()) {
        String xml = DOMUtil.serializeDOMToString(schemaElement);
        LOGGER.trace("Wrapper XSD:\n{}", xml);
    }

    long started = System.currentTimeMillis();
    LOGGER.trace("Parsing {} schemas wrapped in single XSD", wrappedDescriptions.size());
    PrismSchemaImpl.parseSchemas(schemaElement, entityResolver, wrappedDescriptions,
            allowDelayedItemDefinitions, getPrismContext());
    LOGGER.trace("Parsed {} schemas in {} ms", wrappedDescriptions.size(),
            System.currentTimeMillis() - started);

    for (SchemaDescription description : wrappedDescriptions) {
        detectExtensionSchema(description.getSchema());
    }

    for (String namespace : fragmentedNamespaces) {
        Collection<SchemaDescription> fragments = schemasByNamespace.get(namespace);
        LOGGER.trace("Parsing {} schemas for fragmented namespace {}", fragments.size(), namespace);
        for (SchemaDescription schemaDescription : fragments) {
            parsePrismSchema(schemaDescription, allowDelayedItemDefinitions);
        }
    }
}
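
The idiom above, grouping values into a MultiValuedMap and then keeping only the keys whose value collection holds more than one element, is a compact way to detect duplicates. A standalone sketch of the same pattern (same imports as the first sketch, plus java.util.List and java.util.stream.Collectors; names are illustrative):

MultiValuedMap<String, String> byNamespace = new ArrayListValuedHashMap<>();
for (String uri : List.of("ns1#a", "ns1#b", "ns2#c")) {
    byNamespace.put(uri.substring(0, uri.indexOf('#')), uri);
}
List<String> fragmented = byNamespace.keySet().stream()
        .filter(ns -> byNamespace.get(ns).size() > 1)
        .collect(Collectors.toList());
System.out.println(fragmented); // [ns1]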

From source file:org.finra.herd.dao.HerdDaoTest.java

@Test
public void testGetStoragePathsByStorageUnits() throws Exception {
    // Override configuration.
    Map<String, Object> overrideMap = new HashMap<>();
    overrideMap.put(ConfigurationValue.STORAGE_FILE_PATHS_QUERY_PAGINATION_SIZE.getKey(),
            LOCAL_FILES.size() / 2);
    modifyPropertySourceInEnvironment(overrideMap);

    try {
        // Create database entities required for testing.
        StorageUnitEntity storageUnitEntity = createStorageUnitEntity(STORAGE_NAME, NAMESPACE_CD, BOD_NAME,
                FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, INITIAL_FORMAT_VERSION, PARTITION_VALUE,
                SUBPARTITION_VALUES, INITIAL_DATA_VERSION, true, BDATA_STATUS, STORAGE_UNIT_STATUS,
                NO_STORAGE_DIRECTORY_PATH);
        for (String file : LOCAL_FILES) {
            createStorageFileEntity(storageUnitEntity, file, FILE_SIZE_1_KB, ROW_COUNT_1000);
        }

        // Retrieve storage file paths by storage units.
        MultiValuedMap<Integer, String> result = herdDao
                .getStorageFilePathsByStorageUnits(Arrays.asList(storageUnitEntity));

        // Validate the results.
        assertEquals(LOCAL_FILES.size(), result.get(storageUnitEntity.getId()).size());
    } finally {
        // Restore the property sources so we don't affect other tests.
        restorePropertySourceInEnvironment();
    }
}

From source file:org.finra.herd.dao.StorageFileDaoTest.java

@Test
public void testGetStoragePathsByStorageUnitIds() throws Exception {
    // Override configuration.
    Map<String, Object> overrideMap = new HashMap<>();
    overrideMap.put(ConfigurationValue.STORAGE_FILE_PATHS_QUERY_PAGINATION_SIZE.getKey(),
            LOCAL_FILES.size() / 2);
    modifyPropertySourceInEnvironment(overrideMap);

    try {
        // Create database entities required for testing.
        StorageUnitEntity storageUnitEntity = storageUnitDaoTestHelper.createStorageUnitEntity(STORAGE_NAME,
                NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, INITIAL_FORMAT_VERSION,
                PARTITION_VALUE, SUBPARTITION_VALUES, INITIAL_DATA_VERSION, true, BDATA_STATUS,
                STORAGE_UNIT_STATUS, NO_STORAGE_DIRECTORY_PATH);
        for (String file : LOCAL_FILES) {
            storageFileDaoTestHelper.createStorageFileEntity(storageUnitEntity, file, FILE_SIZE_1_KB,
                    ROW_COUNT_1000);
        }

        // Retrieve storage file paths by storage unit ids.
        MultiValuedMap<Integer, String> result = storageFileDao
                .getStorageFilePathsByStorageUnitIds(Lists.newArrayList(storageUnitEntity.getId()));

        // Validate the results.
        assertEquals(LOCAL_FILES.size(), result.get(storageUnitEntity.getId()).size());
    } finally {
        // Restore the property sources so we don't affect other tests.
        restorePropertySourceInEnvironment();
    }
}

From source file:org.finra.herd.service.helper.Hive13DdlGenerator.java

/**
 * Adds the relative "alter table add partition" statements for each storage unit entity. Please note that each request partition value might result in
 * multiple available storage unit entities (subpartitions).
 *
 * @param generateDdlRequest the generate DDL request
 * @param sb the string builder to be updated with the "alter table add partition" statements
 * @param replacements the hash map of string values to be used to substitute the custom DDL tokens with their actual values
 * @param businessObjectFormatForSchema the business object format to be used for schema
 * @param ifNotExistsOption specifies if generated DDL contains "if not exists" option
 * @param storageUnitAvailabilityDtos the list of storage unit availability DTOs
 */
private void processStorageUnitsForGenerateDdl(GenerateDdlRequest generateDdlRequest, StringBuilder sb,
        HashMap<String, String> replacements, BusinessObjectFormat businessObjectFormatForSchema,
        String ifNotExistsOption, List<StorageUnitAvailabilityDto> storageUnitAvailabilityDtos) {
    // If flag is not set to suppress scan for unregistered sub-partitions, retrieve all storage
    // file paths for the relative storage units loaded in a multi-valued map for easy access.
    MultiValuedMap<Integer, String> storageUnitIdToStorageFilePathsMap = BooleanUtils.isTrue(
            generateDdlRequest.suppressScanForUnregisteredSubPartitions) ? new ArrayListValuedHashMap<>()
                    : storageFileDao.getStorageFilePathsByStorageUnitIds(
                            storageUnitHelper.getStorageUnitIds(storageUnitAvailabilityDtos));

    // Create a map of storage names in upper case to their relative S3 key prefix velocity templates.
    Map<String, String> s3KeyPrefixVelocityTemplates = new HashMap<>();

    // Create a map of business object format keys to their relative business object format instances.
    Map<BusinessObjectFormatKey, BusinessObjectFormat> businessObjectFormats = new HashMap<>();

    // Get data provider for the business object definition.
    BusinessObjectDefinitionEntity businessObjectDefinitionEntity = businessObjectDefinitionDaoHelper
            .getBusinessObjectDefinitionEntity(
                    new BusinessObjectDefinitionKey(businessObjectFormatForSchema.getNamespace(),
                            businessObjectFormatForSchema.getBusinessObjectDefinitionName()));
    String dataProviderName = businessObjectDefinitionEntity.getDataProvider().getName();

    // Generate the beginning of the alter table statement.
    String alterTableFirstToken = String
            .format("ALTER TABLE `%s` ADD %s", generateDdlRequest.tableName, ifNotExistsOption).trim();

    // Process all available business object data instances.
    List<String> addPartitionStatements = new ArrayList<>();
    for (StorageUnitAvailabilityDto storageUnitAvailabilityDto : storageUnitAvailabilityDtos) {
        // Get storage name in upper case for this storage unit.
        String upperCaseStorageName = storageUnitAvailabilityDto.getStorageName().toUpperCase();

        // Get storage entity for this storage unit.
        StorageEntity storageEntity = getStorageEntity(upperCaseStorageName,
                generateDdlRequest.storageEntities);

        // Get business object data key for this business object data.
        BusinessObjectDataKey businessObjectDataKey = storageUnitAvailabilityDto.getBusinessObjectDataKey();

        // Get business object format key for this business object data.
        BusinessObjectFormatKey businessObjectFormatKey = businessObjectFormatHelper
                .getBusinessObjectFormatKey(businessObjectDataKey);

        // Retrieve s3 key prefix velocity template for this storage.
        String s3KeyPrefixVelocityTemplate = getS3KeyPrefixVelocityTemplate(upperCaseStorageName, storageEntity,
                s3KeyPrefixVelocityTemplates);

        // Retrieve business object format for this business object data.
        BusinessObjectFormat businessObjectFormat = getBusinessObjectFormat(businessObjectFormatKey,
                businessObjectFormats);

        // Build the expected S3 key prefix for this storage unit.
        String s3KeyPrefix = s3KeyPrefixHelper.buildS3KeyPrefix(s3KeyPrefixVelocityTemplate, dataProviderName,
                businessObjectFormat, businessObjectDataKey, storageUnitAvailabilityDto.getStorageName());

        // If flag is set to suppress scan for unregistered sub-partitions, use the directory path or the S3 key prefix
        // as the partition's location, otherwise, use storage files to discover all unregistered sub-partitions.
        Collection<String> storageFilePaths = new ArrayList<>();
        if (BooleanUtils.isTrue(generateDdlRequest.suppressScanForUnregisteredSubPartitions)) {
            // Validate the directory path value if it is present.
            if (storageUnitAvailabilityDto.getStorageUnitDirectoryPath() != null) {
                Assert.isTrue(storageUnitAvailabilityDto.getStorageUnitDirectoryPath().equals(s3KeyPrefix),
                        String.format(
                                "Storage directory path \"%s\" registered with business object data {%s} "
                                        + "in \"%s\" storage does not match the expected S3 key prefix \"%s\".",
                                storageUnitAvailabilityDto.getStorageUnitDirectoryPath(),
                                businessObjectDataHelper.businessObjectDataKeyToString(businessObjectDataKey),
                                storageUnitAvailabilityDto.getStorageName(), s3KeyPrefix));
            }

            // Add the S3 key prefix to the list of storage files.
            // We add a trailing '/' character to the prefix, since it represents a directory.
            storageFilePaths.add(StringUtils.appendIfMissing(s3KeyPrefix, "/"));
        } else {
            // Retrieve storage file paths registered with this business object data in the specified storage.
            storageFilePaths = storageUnitIdToStorageFilePathsMap
                    .containsKey(storageUnitAvailabilityDto.getStorageUnitId())
                            ? storageUnitIdToStorageFilePathsMap
                                    .get(storageUnitAvailabilityDto.getStorageUnitId())
                            : new ArrayList<>();

            // Validate storage file paths registered with this business object data in the specified storage.
            // The validation check below is required even if we have no storage files registered.
            storageFileHelper.validateStorageFilePaths(storageFilePaths, s3KeyPrefix, businessObjectDataKey,
                    storageUnitAvailabilityDto.getStorageName());

            // If there are no storage files registered for this storage unit, we should use the storage directory path value.
            if (storageFilePaths.isEmpty()) {
                // Validate that directory path value is present and it matches the S3 key prefix.
                Assert.isTrue(storageUnitAvailabilityDto.getStorageUnitDirectoryPath() != null
                        && storageUnitAvailabilityDto.getStorageUnitDirectoryPath().startsWith(s3KeyPrefix),
                        String.format(
                                "Storage directory path \"%s\" registered with business object data {%s} "
                                        + "in \"%s\" storage does not match the expected S3 key prefix \"%s\".",
                                storageUnitAvailabilityDto.getStorageUnitDirectoryPath(),
                                businessObjectDataHelper.businessObjectDataKeyToString(businessObjectDataKey),
                                storageUnitAvailabilityDto.getStorageName(), s3KeyPrefix));
                // Add the storage directory path to the empty storage files list.
                // We add a trailing '/' character to the path, since it represents a directory.
                storageFilePaths.add(storageUnitAvailabilityDto.getStorageUnitDirectoryPath() + "/");
            }
        }

        // Retrieve the s3 bucket name.
        String s3BucketName = getS3BucketName(upperCaseStorageName, storageEntity,
                generateDdlRequest.s3BucketNames);

        // For partitioned table, add the relative partitions to the generated DDL.
        if (generateDdlRequest.isPartitioned) {
            // If flag is set to suppress scan for unregistered sub-partitions, validate that the number of primary and sub-partition values specified for
            // the business object data equals the number of partition columns defined in schema for the format selected for DDL generation.
            if (BooleanUtils.isTrue(generateDdlRequest.suppressScanForUnregisteredSubPartitions)) {
                int businessObjectDataRegisteredPartitions = 1
                        + CollectionUtils.size(businessObjectDataKey.getSubPartitionValues());
                Assert.isTrue(
                        businessObjectFormatForSchema.getSchema().getPartitions()
                                .size() == businessObjectDataRegisteredPartitions,
                        String.format(
                                "Number of primary and sub-partition values (%d) specified for the business object data is not equal to "
                                        + "the number of partition columns (%d) defined in the schema of the business object format selected for DDL generation. "
                                        + "Business object data: {%s},  business object format: {%s}",
                                businessObjectDataRegisteredPartitions,
                                businessObjectFormatForSchema.getSchema().getPartitions().size(),
                                businessObjectDataHelper.businessObjectDataKeyToString(businessObjectDataKey),
                                businessObjectFormatHelper
                                        .businessObjectFormatKeyToString(businessObjectFormatHelper
                                                .getBusinessObjectFormatKey(businessObjectFormatForSchema))));
            }
            // Otherwise, since the format version selected for DDL generation might not match the relative business object format version that business
            // object data is registered against, validate that the number of sub-partition values specified for the business object data is less than
            // the number of partition columns defined in schema for the format selected for DDL generation.
            else {
                Assert.isTrue(
                        businessObjectFormatForSchema.getSchema().getPartitions().size() > CollectionUtils
                                .size(businessObjectDataKey.getSubPartitionValues()),
                        String.format(
                                "Number of subpartition values specified for the business object data is greater than or equal to "
                                        + "the number of partition columns defined in the schema of the business object format selected for DDL generation. "
                                        + "Business object data: {%s},  business object format: {%s}",
                                businessObjectDataHelper.businessObjectDataKeyToString(businessObjectDataKey),
                                businessObjectFormatHelper
                                        .businessObjectFormatKeyToString(businessObjectFormatHelper
                                                .getBusinessObjectFormatKey(businessObjectFormatForSchema))));
            }

            // Get partition information. For multiple level partitioning, auto-discover subpartitions (subdirectories) not already included in the S3 key
            // prefix. Each discovered partition requires a standalone "add partition" clause. Please note that due to the above validation check, there
            // should be no auto-discoverable sub-partition columns when the flag is set to suppress scan for unregistered sub-partitions.
            List<SchemaColumn> autoDiscoverableSubPartitionColumns = businessObjectFormatForSchema.getSchema()
                    .getPartitions()
                    .subList(1 + CollectionUtils.size(businessObjectDataKey.getSubPartitionValues()),
                            businessObjectFormatForSchema.getSchema().getPartitions().size());

            // Get and process Hive partitions.
            for (HivePartitionDto hivePartition : getHivePartitions(businessObjectDataKey,
                    autoDiscoverableSubPartitionColumns, s3KeyPrefix, storageFilePaths,
                    storageUnitAvailabilityDto.getStorageName())) {
                // Build an add partition statement for this hive partition.
                StringBuilder addPartitionStatement = new StringBuilder();
                addPartitionStatement.append(String.format("%s PARTITION (",
                        BooleanUtils.isTrue(generateDdlRequest.combineMultiplePartitionsInSingleAlterTable)
                                ? "   "
                                : alterTableFirstToken));
                // Specify all partition column values.
                List<String> partitionKeyValuePairs = new ArrayList<>();
                for (int i = 0; i < businessObjectFormatForSchema.getSchema().getPartitions().size(); i++) {
                    String partitionColumnName = businessObjectFormatForSchema.getSchema().getPartitions()
                            .get(i).getName();
                    String partitionValue = hivePartition.getPartitionValues().get(i);
                    partitionKeyValuePairs.add(String.format("`%s`='%s'", partitionColumnName, partitionValue));
                }
                addPartitionStatement.append(StringUtils.join(partitionKeyValuePairs, ", "));
                addPartitionStatement.append(String.format(") LOCATION 's3n://%s/%s%s'", s3BucketName,
                        s3KeyPrefix,
                        StringUtils.isNotBlank(hivePartition.getPath()) ? hivePartition.getPath() : ""));

                // Add this add partition statement to the list.
                addPartitionStatements.add(addPartitionStatement.toString());
            }
        } else {
            // This is a non-partitioned table.
            // Get location for this non-partitioned table.
            String tableLocation = String.format("s3n://%s/%s", s3BucketName, s3KeyPrefix);

            if (generateDdlRequest.customDdlEntity == null) {
                // Since custom DDL was not specified and this table is not partitioned, add a LOCATION clause.
                // This is the last line in the non-partitioned table DDL.
                sb.append(String.format("LOCATION '%s';", tableLocation));
            } else {
                // Since custom DDL was used for a non-partitioned table, substitute the relative custom DDL token with the actual table location.
                replacements.put(NON_PARTITIONED_TABLE_LOCATION_CUSTOM_DDL_TOKEN, tableLocation);
            }
        }
    }

    // Add all add partition statements to the main string builder.
    if (CollectionUtils.isNotEmpty(addPartitionStatements)) {
        // If specified, combine adding multiple partitions in a single ALTER TABLE statement.
        if (BooleanUtils.isTrue(generateDdlRequest.combineMultiplePartitionsInSingleAlterTable)) {
            sb.append(alterTableFirstToken).append('\n');
        }

        sb.append(StringUtils.join(addPartitionStatements,
                BooleanUtils.isTrue(generateDdlRequest.combineMultiplePartitionsInSingleAlterTable) ? ",\n"
                        : ";\n"))
                .append(";\n");
    }
}
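
A detail worth noting in this example: because get() returns an empty view rather than null for an absent key, the containsKey(...) check is not needed for null-safety. What it buys is a fresh ArrayList, so that the later storageFilePaths.add(...) calls do not write through the live view and register the storage unit id in the shared map. A minimal sketch of the pitfall being avoided (assuming an ArrayListValuedHashMap as created above, with java.util.ArrayList and java.util.Collection imported; names are illustrative):

MultiValuedMap<Integer, String> pathsByUnitId = new ArrayListValuedHashMap<>();
Collection<String> view = pathsByUnitId.get(42);   // empty live view, key not yet present
view.add("some/prefix/");                          // side effect: key 42 is now in the map
System.out.println(pathsByUnitId.containsKey(42)); // true

// Copying first avoids the side effect:
Collection<String> safe = new ArrayList<>(pathsByUnitId.get(43));
safe.add("another/prefix/");
System.out.println(pathsByUnitId.containsKey(43)); // false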

From source file:org.openecomp.sdc.vendorlicense.licenseartifacts.impl.VendorLicenseArtifactsServiceImpl.java

private static List<VersionableEntity> filterChangedEntities(
        Collection<? extends VersionableEntity> versionableEntities) {
    MultiValuedMap<String, VersionableEntity> entitiesById = mapById(versionableEntities);
    Map<String, VersionableEntity> entitiesByVersionUuId = new HashMap<>();
    List<VersionableEntity> changedOnly = new ArrayList<>();

    for (String epId : entitiesById.keySet()) {
        Collection<VersionableEntity> versionableEntitiesForId = entitiesById.get(epId);
        for (VersionableEntity ep : versionableEntitiesForId) {
            entitiesByVersionUuId.put(ep.getVersionUuId(), ep);
        }
    }

    changedOnly.addAll(entitiesByVersionUuId.values());

    return changedOnly;
}
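
Since the nested keySet()/get() loop simply visits every value once, the same de-duplication could also be written against the flat values() view, which iterates all values across all keys (a sketch under that assumption; names mirror the method above):

Map<String, VersionableEntity> entitiesByVersionUuId = new HashMap<>();
for (VersionableEntity ep : entitiesById.values()) {
    entitiesByVersionUuId.put(ep.getVersionUuId(), ep);
}
List<VersionableEntity> changedOnly = new ArrayList<>(entitiesByVersionUuId.values());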