diff --git a/src/coldfront_plugin_cloud/base.py b/src/coldfront_plugin_cloud/base.py index 2a8806b0..b9c7c5ca 100644 --- a/src/coldfront_plugin_cloud/base.py +++ b/src/coldfront_plugin_cloud/base.py @@ -1,15 +1,19 @@ import abc import functools import json +import logging from typing import NamedTuple from coldfront.core.allocation import models as allocation_models from coldfront.core.resource import models as resource_models -from coldfront_plugin_cloud import attributes +from coldfront_plugin_cloud import attributes, tasks, utils from coldfront_plugin_cloud.models.quota_models import QuotaSpecs +logger = logging.getLogger(__name__) + + class ResourceAllocator(abc.ABC): resource_type = "" @@ -45,6 +49,102 @@ def get_or_create_federated_user(self, username): user = self.create_federated_user(username) return user + def set_default_quota_on_allocation(self, coldfront_attr): + resource_quotaspecs = self.resource_quotaspecs + value = resource_quotaspecs.root[coldfront_attr].quota_by_su_quantity( + self.allocation.quantity + ) + utils.set_attribute_on_allocation(self.allocation, coldfront_attr, value) + return value + + def set_users(self, project_id, apply): + coldfront_users = allocation_models.AllocationUser.objects.filter( + allocation=self.allocation, status__name="Active" + ) + cluster_users = self.get_users(project_id) + failed_validation = False + + # Create users that exist in coldfront but not in the resource + for coldfront_user in coldfront_users: + if coldfront_user.user.username not in cluster_users: + failed_validation = True + logger.info( + f"{coldfront_user.user.username} is not part of {project_id}" + ) + if apply: + tasks.add_user_to_allocation(coldfront_user.pk) + + # remove users that are in the resource but not in coldfront + users = set( + [coldfront_user.user.username for coldfront_user in coldfront_users] + ) + for allocation_user in cluster_users: + if allocation_user not in users: + failed_validation = True + logger.info( + 
f"{allocation_user} exists in the resource {project_id} but not in coldfront" + ) + if apply: + self.remove_role_from_user(allocation_user, project_id) + + return failed_validation + + def check_and_apply_quota_attr( + self, + attr: str, + expected_quota: int, + current_quota: int, + apply: bool, + ) -> bool: + failed_validation = False + if current_quota is None and expected_quota is None: + msg = ( + f"Value for quota for {attr} is not set anywhere" + f" on {self.allocation_str}" + ) + failed_validation = True + + if apply: + expected_quota = self.set_default_quota_on_allocation(attr) + msg = f"Added default quota for {attr} to {self.allocation_str} to {expected_quota}" + logger.info(msg) + elif current_quota is not None and expected_quota is None: + msg = ( + f'Attribute "{attr}" expected on {self.allocation_str} but not set.' + f" Current quota is {current_quota}." + ) + failed_validation = True + + if apply: + utils.set_attribute_on_allocation(self.allocation, attr, current_quota) + expected_quota = ( + current_quota # To pass `current_quota != expected_quota` check + ) + msg = f"{msg} Attribute set to match current quota." + logger.info(msg) + + if current_quota != expected_quota: + msg = ( + f"Value for quota for {attr} = {current_quota} does not match expected" + f" value of {expected_quota} on {self.allocation_str}" + ) + logger.info(msg) + failed_validation = True + + return failed_validation + + # if apply: + # try: + # self.set_quota(project_id) + # logger.info(f"Quota for {project_id} was out of date. 
Reapplied!") + # except Exception as e: + # logger.info(f"setting cluster-side quota failed: {e}") + # return + + @functools.cached_property + def allocation_str(self): + return f'allocation {self.allocation.pk} of project "{self.allocation.project.title}"' + @functools.cached_property def auth_url(self): return self.resource.get_attribute(attributes.RESOURCE_AUTH_URL).rstrip("/") @@ -54,7 +154,11 @@ def member_role_name(self): return self.resource.get_attribute(attributes.RESOURCE_ROLE) or "member" @abc.abstractmethod - def set_project_configuration(self, project_id, dry_run=False): + def set_project_configuration(self, project_id, apply=True): + pass + + @abc.abstractmethod + def get_project(self, project_id): pass @abc.abstractmethod @@ -85,6 +189,10 @@ def get_quota(self, project_id): def create_federated_user(self, unique_id): pass + @abc.abstractmethod + def get_users(self, unique_id): + pass + @abc.abstractmethod def get_federated_user(self, unique_id): pass diff --git a/src/coldfront_plugin_cloud/management/commands/validate_allocations.py b/src/coldfront_plugin_cloud/management/commands/validate_allocations.py index 7515e279..bf9f979e 100644 --- a/src/coldfront_plugin_cloud/management/commands/validate_allocations.py +++ b/src/coldfront_plugin_cloud/management/commands/validate_allocations.py @@ -1,7 +1,6 @@ import logging from coldfront_plugin_cloud import attributes -from coldfront_plugin_cloud import openshift from coldfront_plugin_cloud import utils from coldfront_plugin_cloud import tasks @@ -9,8 +8,6 @@ from coldfront.core.resource.models import Resource from coldfront.core.allocation.models import ( Allocation, - AllocationStatusChoice, - AllocationUser, ) from keystoneauth1.exceptions import http @@ -18,12 +15,18 @@ logger = logging.getLogger(__name__) STATES_TO_VALIDATE = ["Active", "Active (Needs Renewal)"] -OPENSTACK_OBJ_KEY = "x-account-meta-quota-bytes" class Command(BaseCommand): help = "Validates quotas and users in resource allocations." 
+ PLUGIN_RESOURCE_NAMES = [ + "OpenStack", + "ESI", + "OpenShift", + "Openshift Virtualization", + ] + def add_arguments(self, parser): parser.add_argument( "--apply", @@ -31,67 +34,6 @@ def add_arguments(self, parser): help="Apply expected state if validation fails.", ) - @staticmethod - def sync_users(project_id, allocation, allocator, apply): - coldfront_users = AllocationUser.objects.filter( - allocation=allocation, status__name="Active" - ) - allocation_users = allocator.get_users(project_id) - failed_validation = False - - # Create users that exist in coldfront but not in the resource - for coldfront_user in coldfront_users: - if coldfront_user.user.username not in allocation_users: - failed_validation = True - logger.warning( - f"{coldfront_user.user.username} is not part of {project_id}" - ) - if apply: - tasks.add_user_to_allocation(coldfront_user.pk) - - # remove users that are in the resource but not in coldfront - users = set( - [coldfront_user.user.username for coldfront_user in coldfront_users] - ) - for allocation_user in allocation_users: - if allocation_user not in users: - failed_validation = True - logger.warning( - f"{allocation_user} exists in the resource {project_id} but not in coldfront" - ) - if apply: - allocator.remove_role_from_user(allocation_user, project_id) - - return failed_validation - - @staticmethod - def sync_openshift_project_labels(project_id, allocator, apply): - cloud_namespace_obj = allocator._openshift_get_namespace(project_id) - cloud_namespace_obj_labels = cloud_namespace_obj["metadata"]["labels"] - if missing_or_incorrect_labels := [ - label_items[0] - for label_items in openshift.PROJECT_DEFAULT_LABELS.items() - if label_items not in cloud_namespace_obj_labels.items() - ]: - logger.warning( - f"Openshift project {project_id} is missing default labels: {', '.join(missing_or_incorrect_labels)}" - ) - cloud_namespace_obj_labels.update(openshift.PROJECT_DEFAULT_LABELS) - if apply: - allocator.patch_project(project_id, 
cloud_namespace_obj) - logger.warning( - f"Labels updated for Openshift project {project_id}: {', '.join(missing_or_incorrect_labels)}" - ) - - @staticmethod - def set_default_quota_on_allocation(allocation, allocator, coldfront_attr): - resource_quotaspecs = allocator.resource_quotaspecs - value = resource_quotaspecs.root[coldfront_attr].quota_by_su_quantity( - allocation.quantity - ) - utils.set_attribute_on_allocation(allocation, coldfront_attr, value) - return value - def check_institution_specific_code(self, allocation, apply): attr = attributes.ALLOCATION_INSTITUTION_SPECIFIC_CODE isc = allocation.get_attribute(attr) @@ -104,206 +46,37 @@ def check_institution_specific_code(self, allocation, apply): logger.warning(f'Attribute "{attr}" added to allocation {alloc_str}') def handle(self, *args, **options): - # Deal with Openstack and ESI resources - openstack_resources = Resource.objects.filter( - resource_type__name__in=["OpenStack", "ESI"] - ) - openstack_allocations = Allocation.objects.filter( - resources__in=openstack_resources, - status__in=AllocationStatusChoice.objects.filter( - name__in=STATES_TO_VALIDATE - ), - ) - for allocation in openstack_allocations: - self.check_institution_specific_code(allocation, options["apply"]) - allocation_str = f'{allocation.pk} of project "{allocation.project.title}"' - msg = f"Starting resource validation for allocation {allocation_str}." 
- logger.debug(msg) - - failed_validation = False - - allocator = tasks.find_allocator(allocation) - - project_id = allocation.get_attribute(attributes.ALLOCATION_PROJECT_ID) - if not project_id: - logger.error(f"{allocation_str} is active but has no Project ID set.") - continue + for resource_name in self.PLUGIN_RESOURCE_NAMES: + resource = Resource.objects.filter(resource_type__name=resource_name) + allocations = Allocation.objects.filter( + resources__in=resource, + status__name__in=STATES_TO_VALIDATE, + ) - try: - allocator.identity.projects.get(project_id) - except http.NotFound: - logger.error( - f"{allocation_str} has Project ID {project_id}. But" - f" no project found in OpenStack." + for allocation in allocations: + allocator = tasks.find_allocator(allocation) + logger.debug( + f"Starting resource validation for {allocator.allocation_str}." ) - continue + self.check_institution_specific_code(allocation, options["apply"]) - quota = allocator.get_quota(project_id) + project_id = allocation.get_attribute(attributes.ALLOCATION_PROJECT_ID) - failed_validation = Command.sync_users( - project_id, allocation, allocator, options["apply"] - ) - - for attr, quotaspec in allocator.resource_quotaspecs.root.items(): - _, key = allocator._extract_quota_label(quotaspec) - if not key: - # Note(knikolla): Some attributes are only maintained - # for bookkeeping purposes and do not have a - # corresponding quota set on the service. 
- continue - - expected_value = allocation.get_attribute(attr) - current_value = quota.get(key, None) - if key == OPENSTACK_OBJ_KEY and expected_value <= 0: - expected_obj_value = 1 - current_value = int( - allocator.object(project_id) - .head_account() - .get(OPENSTACK_OBJ_KEY) - ) - if current_value != expected_obj_value: - failed_validation = True - msg = ( - f"Value for quota for {attr} = {current_value} does not match expected" - f" value of {expected_obj_value} on allocation {allocation_str}" - ) - logger.warning(msg) - elif expected_value is None and current_value: - msg = ( - f'Attribute "{attr}" expected on allocation {allocation_str} but not set.' - f" Current quota is {current_value}." - ) - if options["apply"]: - utils.set_attribute_on_allocation( - allocation, attr, current_value - ) - msg = f"{msg} Attribute set to match current quota." - logger.warning(msg) - elif not current_value == expected_value: - failed_validation = True - msg = ( - f"Value for quota for {attr} = {current_value} does not match expected" - f" value of {expected_value} on allocation {allocation_str}" + # Check project ID is set + if not project_id: + logger.error( + f"{allocator.allocation_str} is active but has no Project ID set." ) - logger.warning(msg) + continue - if failed_validation and options["apply"]: + # Check project exists in remote cluster try: - allocator.set_quota( - allocation.get_attribute(attributes.ALLOCATION_PROJECT_ID) - ) - except Exception as e: + allocator.get_project(project_id) + except http.NotFound: logger.error( - f"setting {allocation.resources.first()} quota failed: {e}" + f"{allocator.allocation_str} has Project ID {project_id}. But" + f" no project found in {resource_name}." ) continue - logger.warning( - f"Quota for allocation {allocation_str} was out of date. Reapplied!" 
- ) - - # Deal with OpenShift and Openshift VM - - openshift_resources = Resource.objects.filter( - resource_type__name__in=["OpenShift", "Openshift Virtualization"] - ) - openshift_allocations = Allocation.objects.filter( - resources__in=openshift_resources, - status__in=AllocationStatusChoice.objects.filter( - name__in=STATES_TO_VALIDATE - ), - ) - - for allocation in openshift_allocations: - self.check_institution_specific_code(allocation, options["apply"]) - allocation_str = f'{allocation.pk} of project "{allocation.project.title}"' - logger.debug( - f"Starting resource validation for allocation {allocation_str}." - ) - - allocator = tasks.find_allocator(allocation) - - project_id = allocation.get_attribute(attributes.ALLOCATION_PROJECT_ID) - - if not project_id: - logger.error(f"{allocation_str} is active but has no Project ID set.") - continue - - try: - allocator._get_project(project_id) - except http.NotFound: - logger.error( - f"{allocation_str} has Project ID {project_id}. But" - f" no project found in OpenShift." - ) - continue - - allocator.set_project_configuration( - project_id, dry_run=not options["apply"] - ) - - quota = allocator.get_quota(project_id) - - failed_validation = Command.sync_users( - project_id, allocation, allocator, options["apply"] - ) - Command.sync_openshift_project_labels( - project_id, allocator, options["apply"] - ) - - for attr, quotaspec in allocator.resource_quotaspecs.root.items(): - # This gives me just the plain key - key = quotaspec.quota_label - - expected_value = allocation.get_attribute(attr) - current_value = quota.get(key, None) - current_value = openshift.parse_quota_value(current_value, attr) - - if expected_value is None and current_value is not None: - msg = ( - f'Attribute "{attr}" expected on allocation {allocation_str} but not set.' - f" Current quota is {current_value}." 
- ) - if options["apply"]: - utils.set_attribute_on_allocation( - allocation, attr, current_value - ) - msg = f"{msg} Attribute set to match current quota." - logger.warning(msg) - else: - # We just checked the case where the quota value is set in the cluster - # but not in coldfront. This is the only case the cluster value is the - # "source of truth" for the quota value - # If the coldfront value is set, it is always the source of truth. - # But first, we need to check if the quota value is set anywhere at all. - # TODO (Quan): Refactor these if statements so that we can remove this comment block - if current_value is None and expected_value is None: - msg = ( - f"Value for quota for {attr} is not set anywhere" - f" on allocation {allocation_str}" - ) - logger.warning(msg) - - if options["apply"]: - expected_value = self.set_default_quota_on_allocation( - allocation, allocator, attr - ) - logger.warning( - f"Added default quota for {attr} to allocation {allocation_str} to {expected_value}" - ) - - if not (current_value == expected_value): - msg = ( - f"Value for quota for {attr} = {current_value} does not match expected" - f" value of {expected_value} on allocation {allocation_str}" - ) - logger.warning(msg) - if options["apply"]: - try: - allocator.set_quota(project_id) - logger.warning( - f"Quota for allocation {project_id} was out of date. Reapplied!" 
- ) - except Exception as e: - logger.error(f"setting openshift quota failed: {e}") - continue + allocator.set_project_configuration(project_id, apply=options["apply"]) diff --git a/src/coldfront_plugin_cloud/openshift.py b/src/coldfront_plugin_cloud/openshift.py index d6e1f605..0045d749 100644 --- a/src/coldfront_plugin_cloud/openshift.py +++ b/src/coldfront_plugin_cloud/openshift.py @@ -206,9 +206,15 @@ def get_resource_api(self, api_version: str, kind: str): ) return api - def set_project_configuration(self, project_id, dry_run=False): + def set_project_configuration(self, project_id, apply=True): + self.set_users(project_id, apply) + self.set_limitranges(project_id, apply) + self.set_project_labels(project_id, apply) + self.set_quota_config(project_id, apply) + + def set_limitranges(self, project_id, apply=True): def _recreate_limitrange(): - if not dry_run: + if apply: self._openshift_delete_limits(project_id) self._openshift_create_limits(project_id) logger.info(f"Recreated LimitRanges for namespace {project_id}.") @@ -216,7 +222,7 @@ def _recreate_limitrange(): limits = self._openshift_get_limits(project_id).get("items", []) if not limits: - if not dry_run: + if apply: self._openshift_create_limits(project_id) logger.info(f"Created default LimitRange for namespace {project_id}.") @@ -240,6 +246,46 @@ def _recreate_limitrange(): ) _recreate_limitrange() + def set_project_labels(self, project_id, apply=True): + cloud_namespace_obj = self._openshift_get_namespace(project_id) + cloud_namespace_obj_labels = cloud_namespace_obj["metadata"]["labels"] + if missing_or_incorrect_labels := [ + label_items[0] + for label_items in PROJECT_DEFAULT_LABELS.items() + if label_items not in cloud_namespace_obj_labels.items() + ]: + logger.warning( + f"Openshift project {project_id} is missing default labels: {', '.join(missing_or_incorrect_labels)}" + ) + if apply: + cloud_namespace_obj_labels.update(PROJECT_DEFAULT_LABELS) + self.patch_project(project_id, cloud_namespace_obj) 
+ logger.warning( + f"Labels updated for Openshift project {project_id}: {', '.join(missing_or_incorrect_labels)}" + ) + + def set_quota_config(self, project_id, apply=True): + failed_validation = False + quota = self.get_quota(project_id) + + for attr, quotaspec in self.resource_quotaspecs.root.items(): + quota_key = quotaspec.quota_label + expected_value = self.allocation.get_attribute(attr) + current_value = quota.get(quota_key, None) + current_value = parse_quota_value(current_value, attr) + + failed_validation = failed_validation | self.check_and_apply_quota_attr( + attr, expected_value, current_value, apply + ) + + if failed_validation and apply: + try: + self.set_quota(project_id) + logger.info(f"Quota for {project_id} was out of date. Reapplied!") + except Exception as e: + logger.info(f"setting cluster-side quota failed: {e}") + return + def create_project(self, suggested_project_name): sanitized_project_name = utils.get_sanitized_project_name( suggested_project_name @@ -418,7 +464,7 @@ def _get_role(self, username, project_id): f"User {username} has no rolebindings in project {project_id}" ) - def _get_project(self, project_id): + def get_project(self, project_id): return self._openshift_get_project(project_id) def _delete_user(self, username): diff --git a/src/coldfront_plugin_cloud/openstack.py b/src/coldfront_plugin_cloud/openstack.py index 5b3bcfa2..abfb717d 100644 --- a/src/coldfront_plugin_cloud/openstack.py +++ b/src/coldfront_plugin_cloud/openstack.py @@ -22,6 +22,8 @@ COLDFRONT_RGW_SWIFT_INIT_USER = "coldfront-swift-init" +OPENSTACK_OBJ_KEY = "x-account-meta-quota-bytes" + def get_session_for_resource_via_password(resource, username, password, project_id): auth_url = resource.get_attribute(attributes.RESOURCE_AUTH_URL) @@ -123,8 +125,43 @@ def _get_resource_quota_labels_by_service( quota_labels.append(quota_label) return quota_labels - def set_project_configuration(self, project_id, dry_run=False): - pass + def set_project_configuration(self, 
project_id, apply=True): + self.set_users(project_id, apply) + self.set_quota_config(project_id, apply) + + def set_quota_config(self, project_id, apply=True): + failed_validation = False + quota = self.get_quota(project_id) + for attr, quotaspec in self.resource_quotaspecs.root.items(): + _, key = self._extract_quota_label(quotaspec) + if not key: + # Note(knikolla): Some attributes are only maintained + # for bookkeeping purposes and do not have a + # corresponding quota set on the service. + continue + + expected_value = self.allocation.get_attribute(attr) + current_value = quota.get(key, None) + if key == OPENSTACK_OBJ_KEY and expected_value <= 0: + expected_value = 1 + current_value = int( + self.object(project_id).head_account().get(OPENSTACK_OBJ_KEY) + ) + + failed_validation = failed_validation | self.check_and_apply_quota_attr( + attr, expected_value, current_value, apply + ) + + if failed_validation and apply: + try: + self.set_quota(project_id) + logger.info(f"Quota for {project_id} was out of date. 
Reapplied!") + except Exception as e: + logger.info(f"setting cluster-side quota failed: {e}") + return + + def get_project(self, project_id): + return self.identity.projects.get(project_id) def create_project(self, suggested_project_name) -> base.ResourceAllocator.Project: project_name = utils.get_unique_project_name( @@ -247,23 +284,25 @@ def get_quota(self, project_id): quotas = self._get_network_quota(quotas, project_id) - _, key = self._extract_quota_label( - self.resource_quotaspecs.root[attributes.QUOTA_OBJECT_GB] - ) - try: - swift = self.object(project_id).head_account() - quotas[key] = int(int(swift.get(key)) / GB_IN_BYTES) - except ksa_exceptions.catalog.EndpointNotFound: - logger.debug("No swift available, skipping its quota.") - except swiftclient.exceptions.ClientException as e: - if e.http_status == 403: - self._init_rgw_for_project(project_id) + if object_quotaspec := self.resource_quotaspecs.root.get( + attributes.QUOTA_OBJECT_GB + ): + _, key = self._extract_quota_label(object_quotaspec) + try: + swift = self.object(project_id).head_account() + except ksa_exceptions.catalog.EndpointNotFound: + logger.debug("No swift available, skipping its quota.") + except swiftclient.exceptions.ClientException as e: + if e.http_status == 403: + self._init_rgw_for_project(project_id) + else: + raise + + try: swift = self.object(project_id).head_account() quotas[key] = int(int(swift.get(key)) / GB_IN_BYTES) - else: - raise - except (ValueError, TypeError): - logger.info("No swift quota set.") + except (ValueError, TypeError): + logger.info("No swift quota set.") return quotas diff --git a/src/coldfront_plugin_cloud/tests/functional/openshift/test_allocation.py b/src/coldfront_plugin_cloud/tests/functional/openshift/test_allocation.py index cc0be792..4efb7cfd 100644 --- a/src/coldfront_plugin_cloud/tests/functional/openshift/test_allocation.py +++ b/src/coldfront_plugin_cloud/tests/functional/openshift/test_allocation.py @@ -37,7 +37,7 @@ def 
test_new_allocation(self): allocation.get_attribute(attributes.ALLOCATION_PROJECT_NAME) ) - allocator._get_project(project_id) + allocator.get_project(project_id) # Check default limit ranges limit_ranges = allocator._openshift_get_limits(project_id) @@ -62,7 +62,7 @@ def test_new_allocation(self): # Deleting a project is not instantaneous on OpenShift time.sleep(10) with self.assertRaises(kexc.NotFoundError): - allocator._get_project(project_id) + allocator.get_project(project_id) def test_add_remove_user(self): user = self.new_user() diff --git a/src/coldfront_plugin_cloud/tests/functional/openstack/test_allocation.py b/src/coldfront_plugin_cloud/tests/functional/openstack/test_allocation.py index 9b6614e0..024b32a9 100644 --- a/src/coldfront_plugin_cloud/tests/functional/openstack/test_allocation.py +++ b/src/coldfront_plugin_cloud/tests/functional/openstack/test_allocation.py @@ -202,9 +202,7 @@ def test_new_allocation_with_quantity(self): self.assertEqual(allocation.get_attribute(attributes.QUOTA_FLOATING_IPS), 6) self.assertEqual(allocation.get_attribute(attributes.QUOTA_VCPU), 200) - with self.assertLogs( - "coldfront_plugin_cloud.management.commands.validate_allocations" - ) as cm: + with self.assertLogs("coldfront_plugin_cloud.base") as cm: call_command("validate_allocations", apply=True) self.assertTrue( @@ -410,3 +408,69 @@ def test_existing_user(self): self.assertEqual(len(roles), 1) self.assertEqual(roles[0].role["id"], self.role_member.id) + + +class TestAllocationNewAttribute(base.TestBase): + def setUp(self): + super().setUp() + self.resource = self.new_openstack_resource( + name="Devstack", auth_url=os.getenv("OS_AUTH_URL") + ) + call_command( + "add_quota_to_resource", + display_name=attributes.QUOTA_VCPU, + resource_name=self.resource.name, + quota_label="compute.cores", + multiplier=1, + ) + + self.session = openstack.get_session_for_resource(self.resource) + self.compute = novaclient.Client(session=self.session, version=2) + + def 
test_allocation_new_attribute(self): +        """When a new attribute is introduced, but pre-existing allocations don't have it. +        Since OpenStack provides defaults for some quotas, +        Coldfront may use those defaults, even after the quota is added to Coldfront""" +        user = self.new_user() +        project = self.new_project(pi=user) +        allocation = self.new_allocation(project, self.resource, 2) + +        tasks.activate_allocation(allocation.pk) +        allocation.refresh_from_db() + +        project_id = allocation.get_attribute(attributes.ALLOCATION_PROJECT_ID) + +        self.assertEqual(allocation.get_attribute(attributes.QUOTA_VCPU), 2 * 1) +        self.assertEqual(allocation.get_attribute(attributes.QUOTA_RAM), None) + +        # Check Openstack does have a non-zero default ram quota +        actual_nova_quota = self.compute.quotas.get(project_id) +        default_ram_quota = actual_nova_quota.ram +        self.assertEqual(actual_nova_quota.cores, 2) +        self.assertTrue(default_ram_quota > 0) + +        # Add a new attribute for OpenStack +        # Since Openstack already provided defaults, Coldfront should use those +        call_command( +            "add_quota_to_resource", +            display_name=attributes.QUOTA_RAM, +            resource_name=self.resource.name, +            quota_label="compute.ram", +            multiplier=4096, +        ) + +        call_command("validate_allocations", apply=True) +        allocation.refresh_from_db() + +        self.assertEqual(allocation.get_attribute(attributes.QUOTA_VCPU), 2 * 1) +        self.assertEqual( +            allocation.get_attribute(attributes.QUOTA_RAM), default_ram_quota +        ) + +        expected_nova_quota = { +            "cores": 2, +            "ram": default_ram_quota, +        } +        actual_nova_quota = self.compute.quotas.get(project_id) +        for k, v in expected_nova_quota.items(): +            self.assertEqual(actual_nova_quota.__getattr__(k), v) diff --git a/src/coldfront_plugin_cloud/tests/unit/openshift/test_project.py b/src/coldfront_plugin_cloud/tests/unit/openshift/test_project.py index 0e5a0e4c..129db973 100644 --- a/src/coldfront_plugin_cloud/tests/unit/openshift/test_project.py +++ 
b/src/coldfront_plugin_cloud/tests/unit/openshift/test_project.py @@ -11,7 +11,7 @@ def test_get_project(self): self.allocator.k8_client.resources.get.return_value.get.return_value = ( fake_project ) - res = self.allocator._get_project("fake-project") + res = self.allocator.get_project("fake-project") assert res == {"project": "fake-project"} @mock.patch(