diff --git a/.evergreen-tasks.yml b/.evergreen-tasks.yml
index 38353607d..c5aa0c1de 100644
--- a/.evergreen-tasks.yml
+++ b/.evergreen-tasks.yml
@@ -189,7 +189,7 @@ tasks:
     commands:
       - func: "e2e_test"

-  - name: e2e_meko_mck_upgrade
+  - name: e2e_mongodbmulticluster_meko_mck_upgrade
    tags: [ "patch-run" ]
    commands:
      - func: "e2e_test"
@@ -250,7 +250,7 @@ tasks:
     commands:
       - func: "e2e_test"

-  - name: e2e_mongodb_custom_roles
+  - name: e2e_mongodbmulticluster_custom_roles
    tags: [ "patch-run" ]
    commands:
      - func: "e2e_test"
@@ -922,77 +922,77 @@ tasks:
     commands:
       - func: e2e_test

-  - name: e2e_multi_cluster_replica_set
+  - name: e2e_mongodbmulticluster_multi_cluster_replica_set
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_replica_set_migration
+  - name: e2e_mongodbmulticluster_multi_cluster_replica_set_migration
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_replica_set_member_options
+  - name: e2e_mongodbmulticluster_multi_cluster_replica_set_member_options
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_replica_set_scale_up
+  - name: e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_scale_up_cluster
+  - name: e2e_mongodbmulticluster_multi_cluster_scale_up_cluster
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_scale_up_cluster_new_cluster
+  - name: e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_scale_down_cluster
+  - name: e2e_mongodbmulticluster_multi_cluster_scale_down_cluster
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_replica_set_scale_down
+  - name: e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_replica_set_deletion
+  - name: e2e_mongodbmulticluster_multi_cluster_replica_set_deletion
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_mtls_test
+  - name: e2e_mongodbmulticluster_multi_cluster_mtls_test
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_scram
+  - name: e2e_mongodbmulticluster_multi_cluster_scram
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_sts_override
+  - name: e2e_mongodbmulticluster_multi_sts_override
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_tls_with_scram
+  - name: e2e_mongodbmulticluster_multi_cluster_tls_with_scram
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_enable_tls
+  - name: e2e_mongodbmulticluster_multi_cluster_enable_tls
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_upgrade_downgrade
+  - name: e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test
@@ -1003,22 +1003,22 @@ tasks:
     commands:
       - func: e2e_test

-  - name: e2e_multi_cluster_tls_no_mesh
+  - name: e2e_mongodbmulticluster_multi_cluster_tls_no_mesh
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_backup_restore
+  - name: e2e_mongodbmulticluster_multi_cluster_backup_restore
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_appdb_s3_based_backup_restore
+  - name: e2e_mongodbmulticluster_multi_cluster_appdb_s3_based_backup_restore
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_backup_restore_no_mesh
+  - name: e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test
@@ -1043,78 +1043,78 @@ tasks:
     commands:
       - func: e2e_test

-  - name: e2e_multi_cluster_tls_with_x509
+  - name: e2e_mongodbmulticluster_multi_cluster_tls_with_x509
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_with_ldap
+  - name: e2e_mongodbmulticluster_multi_cluster_with_ldap
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_with_ldap_custom_roles
+  - name: e2e_mongodbmulticluster_multi_cluster_with_ldap_custom_roles
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_specific_namespaces
+  - name: e2e_mongodbmulticluster_multi_cluster_specific_namespaces
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

   # TODO: not used in any variant
-  - name: e2e_multi_cluster_clusterwide
+  - name: e2e_mongodbmulticluster_multi_cluster_clusterwide
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_disaster_recovery
+  - name: e2e_mongodbmulticluster_multi_cluster_disaster_recovery
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_multi_disaster_recovery
+  - name: e2e_mongodbmulticluster_multi_cluster_multi_disaster_recovery
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_2_clusters_replica_set
+  - name: e2e_mongodbmulticluster_multi_cluster_2_clusters_replica_set
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_2_clusters_clusterwide
+  - name: e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_recover
+  - name: e2e_mongodbmulticluster_multi_cluster_recover
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_recover_network_partition
+  - name: e2e_mongodbmulticluster_multi_cluster_recover_network_partition
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_recover_clusterwide
+  - name: e2e_mongodbmulticluster_multi_cluster_recover_clusterwide
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_agent_flags
+  - name: e2e_mongodbmulticluster_multi_cluster_agent_flags
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_replica_set_ignore_unknown_users
+  - name: e2e_mongodbmulticluster_multi_cluster_replica_set_ignore_unknown_users
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_validation
+  - name: e2e_mongodbmulticluster_multi_cluster_validation
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test
@@ -1174,12 +1174,17 @@ tasks:
     commands:
       - func: e2e_test

-  - name: e2e_multi_cluster_om_appdb_no_mesh
+  - name: e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_pvc_resize
+  - name: e2e_mongodb_multi_cluster_om_appdb_no_mesh
+    tags: [ "patch-run" ]
+    commands:
+      - func: e2e_test
+
+  - name: e2e_mongodbmulticluster_multi_cluster_pvc_resize
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test
@@ -1268,12 +1273,12 @@ tasks:
     commands:
       - func: e2e_test

-  - name: e2e_multi_cluster_oidc_m2m_group
+  - name: e2e_mongodbmulticluster_multi_cluster_oidc_m2m_group
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test

-  - name: e2e_multi_cluster_oidc_m2m_user
+  - name: e2e_mongodbmulticluster_multi_cluster_oidc_m2m_user
    tags: [ "patch-run" ]
    commands:
      - func: e2e_test
diff --git a/.evergreen.yml b/.evergreen.yml
index 79b181e3e..5203c4386 100644
--- a/.evergreen.yml
+++ b/.evergreen.yml
@@ -820,7 +820,7 @@ task_groups:
     - e2e_operator_clusterwide
     - e2e_operator_multi_namespaces
     - e2e_appdb_tls_operator_upgrade_v1_32_to_mck
-    - e2e_meko_mck_upgrade
+    - e2e_mongodbmulticluster_meko_mck_upgrade
   <<: *teardown_group

 # e2e_operator_race_with_telemetry_task_group includes the tests for testing the operator with race detector enabled
@@ -848,7 +848,7 @@ task_groups:
     - e2e_operator_clusterwide
     - e2e_operator_multi_namespaces
     - e2e_appdb_tls_operator_upgrade_v1_32_to_mck
-    - e2e_meko_mck_upgrade
+    - e2e_mongodbmulticluster_meko_mck_upgrade
   <<: *teardown_group

 - name: e2e_multi_cluster_kind_task_group
@@ -856,37 +856,37 @@ task_groups:
   <<: *setup_group
   <<: *setup_and_teardown_task_cloudqa
   tasks:
-    - e2e_multi_cluster_replica_set
-    - e2e_multi_cluster_replica_set_migration
-    - e2e_multi_cluster_replica_set_member_options
-    - e2e_multi_cluster_recover
-    - e2e_multi_cluster_recover_clusterwide
-    - e2e_multi_cluster_specific_namespaces
-    - e2e_multi_cluster_scram
-    - e2e_multi_cluster_tls_with_x509
-    - e2e_multi_cluster_tls_no_mesh
-    - e2e_multi_cluster_enable_tls
-    # e2e_multi_cluster_with_ldap
-    # e2e_multi_cluster_with_ldap_custom_roles
-    - e2e_multi_cluster_mtls_test
-    - e2e_multi_cluster_replica_set_deletion
-    - e2e_multi_cluster_replica_set_scale_up
-    - e2e_multi_cluster_scale_up_cluster
-    - e2e_multi_cluster_scale_up_cluster_new_cluster
-    - e2e_multi_cluster_replica_set_scale_down
-    - e2e_multi_cluster_scale_down_cluster
-    - e2e_multi_sts_override
-    - e2e_multi_cluster_tls_with_scram
-    - e2e_multi_cluster_upgrade_downgrade
-    - e2e_multi_cluster_backup_restore
-    - e2e_multi_cluster_backup_restore_no_mesh
-    - e2e_multi_cluster_disaster_recovery
-    - e2e_multi_cluster_multi_disaster_recovery
-    - e2e_multi_cluster_recover_network_partition
-    - e2e_multi_cluster_validation
-    - e2e_multi_cluster_agent_flags
-    - e2e_multi_cluster_replica_set_ignore_unknown_users
-    - e2e_multi_cluster_pvc_resize
+    - e2e_mongodbmulticluster_multi_cluster_replica_set
+    - e2e_mongodbmulticluster_multi_cluster_replica_set_migration
+    - e2e_mongodbmulticluster_multi_cluster_replica_set_member_options
+    - e2e_mongodbmulticluster_multi_cluster_recover
+    - e2e_mongodbmulticluster_multi_cluster_recover_clusterwide
+    - e2e_mongodbmulticluster_multi_cluster_specific_namespaces
+    - e2e_mongodbmulticluster_multi_cluster_scram
+    - e2e_mongodbmulticluster_multi_cluster_tls_with_x509
+    - e2e_mongodbmulticluster_multi_cluster_tls_no_mesh
+    - e2e_mongodbmulticluster_multi_cluster_enable_tls
+    # e2e_mongodbmulticluster_multi_cluster_with_ldap
+    # e2e_mongodbmulticluster_multi_cluster_with_ldap_custom_roles
+    - e2e_mongodbmulticluster_multi_cluster_mtls_test
+    - e2e_mongodbmulticluster_multi_cluster_replica_set_deletion
+    - e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up
+    - e2e_mongodbmulticluster_multi_cluster_scale_up_cluster
+    - e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster
+    - e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down
+    - e2e_mongodbmulticluster_multi_cluster_scale_down_cluster
+    - e2e_mongodbmulticluster_multi_sts_override
+    - e2e_mongodbmulticluster_multi_cluster_tls_with_scram
+    - e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade
+    - e2e_mongodbmulticluster_multi_cluster_backup_restore
+    - e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh
+    - e2e_mongodbmulticluster_multi_cluster_disaster_recovery
+    - e2e_mongodbmulticluster_multi_cluster_multi_disaster_recovery
+    - e2e_mongodbmulticluster_multi_cluster_recover_network_partition
+    - e2e_mongodbmulticluster_multi_cluster_validation
+    - e2e_mongodbmulticluster_multi_cluster_agent_flags
+    - e2e_mongodbmulticluster_multi_cluster_replica_set_ignore_unknown_users
+    - e2e_mongodbmulticluster_multi_cluster_pvc_resize
     - e2e_multi_cluster_sharded_geo_sharding
     - e2e_multi_cluster_sharded_scaling
     - e2e_multi_cluster_sharded_scaling_all_shard_overrides
@@ -916,12 +916,12 @@ task_groups:
     - e2e_tls_sc_additional_certs
     - e2e_tls_x509_configure_all_options_sc
     - e2e_tls_x509_sc
-    - e2e_meko_mck_upgrade
-    - e2e_mongodb_custom_roles
+    - e2e_mongodbmulticluster_meko_mck_upgrade
+    - e2e_mongodbmulticluster_custom_roles
     - e2e_sharded_cluster_oidc_m2m_group
     - e2e_sharded_cluster_oidc_m2m_user
-    - e2e_multi_cluster_oidc_m2m_group
-    - e2e_multi_cluster_oidc_m2m_user
+    - e2e_mongodbmulticluster_multi_cluster_oidc_m2m_group
+    - e2e_mongodbmulticluster_multi_cluster_oidc_m2m_user
   <<: *teardown_group

@@ -930,8 +930,8 @@ task_groups:
   <<: *setup_group
   <<: *setup_and_teardown_task_cloudqa
   tasks:
-    - e2e_multi_cluster_2_clusters_replica_set
-    - e2e_multi_cluster_2_clusters_clusterwide
+    - e2e_mongodbmulticluster_multi_cluster_2_clusters_replica_set
+    - e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide
   <<: *teardown_group

 - name: e2e_multi_cluster_om_appdb_task_group
@@ -944,11 +944,11 @@ task_groups:
     - e2e_multi_cluster_om_validation
     - e2e_multi_cluster_appdb
     - e2e_multi_cluster_appdb_cleanup
-    - e2e_multi_cluster_appdb_s3_based_backup_restore
+    - e2e_mongodbmulticluster_multi_cluster_appdb_s3_based_backup_restore
     - e2e_multi_cluster_appdb_disaster_recovery
     - e2e_multi_cluster_appdb_disaster_recovery_force_reconfigure
     - e2e_multi_cluster_om_networking_clusterwide
-    - e2e_multi_cluster_om_appdb_no_mesh
+    - e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh
     # Reused OM tests with AppDB Multi-Cluster topology
     - e2e_appdb_tls_operator_upgrade_v1_32_to_mck
     - e2e_om_appdb_flags_and_config
@@ -999,11 +999,11 @@ task_groups:
     - e2e_multi_cluster_om_validation
     - e2e_multi_cluster_appdb
     - e2e_multi_cluster_appdb_cleanup
-    - e2e_multi_cluster_appdb_s3_based_backup_restore
+    - e2e_mongodbmulticluster_multi_cluster_appdb_s3_based_backup_restore
     - e2e_multi_cluster_appdb_disaster_recovery
     - e2e_multi_cluster_appdb_disaster_recovery_force_reconfigure
     - e2e_multi_cluster_om_networking_clusterwide
-    - e2e_multi_cluster_om_appdb_no_mesh
+    - e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh
     # Reused OM tests with AppDB Multi-Cluster topology
     - e2e_om_appdb_flags_and_config
     - e2e_om_appdb_upgrade
diff --git a/docker/mongodb-kubernetes-tests/tests/authentication/mongodbmulticluster_custom_roles.py b/docker/mongodb-kubernetes-tests/tests/authentication/mongodbmulticluster_custom_roles.py
new file mode 100644
index 000000000..371015395
--- /dev/null
+++ b/docker/mongodb-kubernetes-tests/tests/authentication/mongodbmulticluster_custom_roles.py
@@ -0,0 +1,271 @@
+from kubetester import (
+    create_or_update_configmap,
+    find_fixture,
+    read_configmap,
+    try_load,
+    wait_until,
+)
+from kubetester.automation_config_tester import AutomationConfigTester
+from kubetester.mongodb import MongoDB, Phase
+from kubetester.mongodb_multi import MongoDBMulti
+from kubetester.mongodb_role import ClusterMongoDBRole, ClusterMongoDBRoleKind
+from pytest import fixture, mark
+from tests.authentication.shared import custom_roles as testhelper
+from tests.multicluster.conftest import cluster_spec_list
+
+
+@fixture(scope="function")
+def first_project(namespace: str) -> str:
+    cm = read_configmap(namespace=namespace, name="my-project")
+    project_name = f"{namespace}-first"
+    return create_or_update_configmap(
+        namespace=namespace,
+        name=project_name,
+        data={
+            "baseUrl": cm["baseUrl"],
+            "projectName": project_name,
+            "orgId": cm["orgId"],
+        },
+    )
+
+
+@fixture(scope="function")
+def second_project(namespace: str) -> str:
+    cm = read_configmap(namespace=namespace, name="my-project")
+    project_name = f"{namespace}-second"
+    return create_or_update_configmap(
+        namespace=namespace,
+        name=project_name,
+        data={
+            "baseUrl": cm["baseUrl"],
+            "projectName": project_name,
+            "orgId": cm["orgId"],
+        },
+    )
+
+
+@fixture(scope="function")
+def third_project(namespace: str) -> str:
+    cm = read_configmap(namespace=namespace, name="my-project")
+    project_name = f"{namespace}-third"
+    return create_or_update_configmap(
+        namespace=namespace,
+        name=project_name,
+        data={
+            "baseUrl": cm["baseUrl"],
+            "projectName": project_name,
+            "orgId": cm["orgId"],
+        },
+    )
+
+
+@fixture(scope="function")
+def mongodb_role_with_empty_strings() -> ClusterMongoDBRole:
+    resource = ClusterMongoDBRole.from_yaml(
+        find_fixture("cluster-mongodb-role-with-empty-strings.yaml"), cluster_scoped=True
+    )
+
+    if try_load(resource):
+        return resource
+
+    return resource
+
+
+@fixture(scope="function")
+def mongodb_role_without_empty_strings() -> ClusterMongoDBRole:
+    resource = ClusterMongoDBRole.from_yaml(
+        find_fixture("cluster-mongodb-role-without-empty-strings.yaml"), cluster_scoped=True
+    )
+
+    if try_load(resource):
+        return resource
+
+    return resource
+
+
+@fixture(scope="function")
+def replica_set(
+    namespace: str,
+    mongodb_role_with_empty_strings: ClusterMongoDBRole,
+    mongodb_role_without_empty_strings: ClusterMongoDBRole,
+    first_project: str,
+) -> MongoDB:
+    resource = MongoDB.from_yaml(find_fixture("replica-set-scram.yaml"), namespace=namespace)
+
+    if try_load(resource):
+        return resource
+
+    resource["spec"]["members"] = 1
+    resource["spec"]["security"]["roleRefs"] = [
+        {
+            "name": mongodb_role_with_empty_strings.get_name(),
+            "kind": ClusterMongoDBRoleKind,
+        },
+        {
+            "name": mongodb_role_without_empty_strings.get_name(),
+            "kind": ClusterMongoDBRoleKind,
+        },
+    ]
+    resource["spec"]["opsManager"]["configMapRef"]["name"] = first_project
+
+    return resource
+
+
+@fixture(scope="function")
+def sharded_cluster(
+    namespace: str,
+    mongodb_role_with_empty_strings: ClusterMongoDBRole,
+    mongodb_role_without_empty_strings: ClusterMongoDBRole,
+    second_project: str,
+) -> MongoDB:
+    resource = MongoDB.from_yaml(find_fixture("sharded-cluster-scram-sha-1.yaml"), namespace=namespace)
+
+    if try_load(resource):
+        return resource
+
+    resource["spec"]["mongodsPerShardCount"] = 1
+    resource["spec"]["mongosCount"] = 1
+    resource["spec"]["configServerCount"] = 1
+
+    resource["spec"]["security"]["roleRefs"] = [
+        {
+            "name": mongodb_role_with_empty_strings.get_name(),
+            "kind": ClusterMongoDBRoleKind,
+        },
+        {
+            "name": mongodb_role_without_empty_strings.get_name(),
+            "kind": ClusterMongoDBRoleKind,
+        },
+    ]
+    resource["spec"]["opsManager"]["configMapRef"]["name"] = second_project
+
+    return resource
+
+
+@fixture(scope="function")
+def mc_replica_set(
+    namespace: str,
+    mongodb_role_with_empty_strings: ClusterMongoDBRole,
+    mongodb_role_without_empty_strings: ClusterMongoDBRole,
+    third_project: str,
+) -> MongoDBMulti:
+    resource = MongoDBMulti.from_yaml(find_fixture("mongodbmulticluster-multi.yaml"), namespace=namespace)
+
+    if try_load(resource):
+        return resource
+
+    resource["spec"]["security"] = {
+        "roleRefs": [
+            {
+                "name": mongodb_role_with_empty_strings.get_name(),
+                "kind": ClusterMongoDBRoleKind,
+            },
+            {
+                "name": mongodb_role_without_empty_strings.get_name(),
+                "kind": ClusterMongoDBRoleKind,
+            },
+        ]
+    }
+    resource["spec"]["opsManager"]["configMapRef"]["name"] = third_project
+    resource["spec"]["clusterSpecList"] = cluster_spec_list(["kind-e2e-cluster-1"], [1])
+
+    return resource
+
+
+@mark.e2e_mongodbmulticluster_custom_roles
+def test_create_resources(
+    mongodb_role_with_empty_strings: ClusterMongoDBRole,
+    mongodb_role_without_empty_strings: ClusterMongoDBRole,
+    replica_set: MongoDB,
+    sharded_cluster: MongoDB,
+    mc_replica_set: MongoDBMulti,
+):
+    testhelper.test_create_resources(
+        mongodb_role_with_empty_strings,
+        mongodb_role_without_empty_strings,
+        replica_set,
+        sharded_cluster,
+        mc_replica_set,
+    )
+
+
+@mark.e2e_mongodbmulticluster_custom_roles
+def test_automation_config_has_roles(
+    replica_set: MongoDB,
+    sharded_cluster: MongoDB,
+    mc_replica_set: MongoDBMulti,
+    mongodb_role_with_empty_strings: ClusterMongoDBRole,
+    mongodb_role_without_empty_strings: ClusterMongoDBRole,
+):
+    testhelper.test_automation_config_has_roles(
+        replica_set,
+        sharded_cluster,
+        mc_replica_set,
+        mongodb_role_with_empty_strings,
+        mongodb_role_without_empty_strings,
+    )
+
+
+def assert_expected_roles(
+    mc_replica_set: MongoDBMulti,
+    replica_set: MongoDB,
+    sharded_cluster: MongoDB,
+    mongodb_role_with_empty_strings: ClusterMongoDBRole,
+    mongodb_role_without_empty_strings: ClusterMongoDBRole,
+):
+    testhelper.assert_expected_roles(
+        mc_replica_set,
+        replica_set,
+        sharded_cluster,
+        mongodb_role_with_empty_strings,
+        mongodb_role_without_empty_strings,
+    )
+
+
+@mark.e2e_mongodbmulticluster_custom_roles
+def test_change_inherited_role(
+    replica_set: MongoDB,
+    sharded_cluster: MongoDB,
+    mc_replica_set: MongoDBMulti,
+    mongodb_role_with_empty_strings: ClusterMongoDBRole,
+    mongodb_role_without_empty_strings: ClusterMongoDBRole,
+):
+    testhelper.test_change_inherited_role(
+        replica_set,
+        sharded_cluster,
+        mc_replica_set,
+        mongodb_role_with_empty_strings,
+        mongodb_role_without_empty_strings,
+    )
+
+
+@mark.e2e_mongodbmulticluster_custom_roles
+def test_deleting_role_does_not_remove_access(
+    replica_set: MongoDB,
+    sharded_cluster: MongoDB,
+    mc_replica_set: MongoDBMulti,
+    mongodb_role_with_empty_strings: ClusterMongoDBRole,
+):
+    testhelper.test_deleting_role_does_not_remove_access(
+        replica_set, sharded_cluster, mc_replica_set, mongodb_role_with_empty_strings
+    )
+
+
+@mark.e2e_mongodbmulticluster_custom_roles
+def test_removing_role_from_resources(replica_set: MongoDB, sharded_cluster: MongoDB, mc_replica_set: MongoDBMulti):
+    testhelper.test_removing_role_from_resources(replica_set, sharded_cluster, mc_replica_set)
+
+
+@mark.e2e_mongodbmulticluster_custom_roles
+def test_install_operator_with_clustermongodbroles_disabled(multi_cluster_operator_no_cluster_mongodb_roles):
+    testhelper.test_install_operator_with_clustermongodbroles_disabled(multi_cluster_operator_no_cluster_mongodb_roles)
+
+
+@mark.e2e_mongodbmulticluster_custom_roles
+def test_replicaset_is_failed(replica_set: MongoDB):
+    testhelper.test_replicaset_is_failed(replica_set)
+
+
+@mark.e2e_mongodbmulticluster_custom_roles
+def test_replicaset_is_reconciled_without_rolerefs(replica_set: MongoDB):
+    testhelper.test_replicaset_is_reconciled_without_rolerefs(replica_set)
diff --git a/docker/mongodb-kubernetes-tests/tests/authentication/mongodb_custom_roles.py b/docker/mongodb-kubernetes-tests/tests/authentication/shared/custom_roles.py
similarity index 60%
rename from docker/mongodb-kubernetes-tests/tests/authentication/mongodb_custom_roles.py
rename to docker/mongodb-kubernetes-tests/tests/authentication/shared/custom_roles.py
index 9665e6169..52b3980ac 100644
--- a/docker/mongodb-kubernetes-tests/tests/authentication/mongodb_custom_roles.py
+++ b/docker/mongodb-kubernetes-tests/tests/authentication/shared/custom_roles.py
@@ -76,171 +76,12 @@ def get_expected_role(role_name: str) -> dict:
 # fmt: on


-@fixture(scope="function")
-def first_project(namespace: str) -> str:
-    cm = read_configmap(namespace=namespace, name="my-project")
-    project_name = f"{namespace}-first"
-    return create_or_update_configmap(
-        namespace=namespace,
-        name=project_name,
-        data={
-            "baseUrl": cm["baseUrl"],
-            "projectName": project_name,
-            "orgId": cm["orgId"],
-        },
-    )
-
-
-@fixture(scope="function")
-def second_project(namespace: str) -> str:
-    cm = read_configmap(namespace=namespace, name="my-project")
-    project_name = f"{namespace}-second"
-    return create_or_update_configmap(
-        namespace=namespace,
-        name=project_name,
-        data={
-            "baseUrl": cm["baseUrl"],
-            "projectName": project_name,
-            "orgId": cm["orgId"],
-        },
-    )
-
-
-@fixture(scope="function")
-def third_project(namespace: str) -> str:
-    cm = read_configmap(namespace=namespace, name="my-project")
-    project_name = f"{namespace}-third"
-    return create_or_update_configmap(
-        namespace=namespace,
-        name=project_name,
-        data={
-            "baseUrl": cm["baseUrl"],
-            "projectName": project_name,
-            "orgId": cm["orgId"],
-        },
-    )
-
-
-@fixture(scope="function")
-def mongodb_role_with_empty_strings() -> ClusterMongoDBRole:
-    resource = ClusterMongoDBRole.from_yaml(
-        find_fixture("cluster-mongodb-role-with-empty-strings.yaml"), cluster_scoped=True
-    )
-
-    if try_load(resource):
-        return resource
-
-    return resource
-
-
-@fixture(scope="function")
-def mongodb_role_without_empty_strings() -> ClusterMongoDBRole:
-    resource = ClusterMongoDBRole.from_yaml(
-        find_fixture("cluster-mongodb-role-without-empty-strings.yaml"), cluster_scoped=True
-    )
-
-    if try_load(resource):
-        return resource
-
-    return resource
-
-
-@fixture(scope="function")
-def replica_set(
-    namespace: str,
-    mongodb_role_with_empty_strings: ClusterMongoDBRole,
-    mongodb_role_without_empty_strings: ClusterMongoDBRole,
-    first_project: str,
-) -> MongoDB:
-    resource = MongoDB.from_yaml(find_fixture("replica-set-scram.yaml"), namespace=namespace)
-
-    if try_load(resource):
-        return resource
-
-    resource["spec"]["members"] = 1
-    resource["spec"]["security"]["roleRefs"] = [
-        {
-            "name": mongodb_role_with_empty_strings.get_name(),
-            "kind": ClusterMongoDBRoleKind,
-        },
-        {
-            "name": mongodb_role_without_empty_strings.get_name(),
-            "kind": ClusterMongoDBRoleKind,
-        },
-    ]
-    resource["spec"]["opsManager"]["configMapRef"]["name"] = first_project
-
-    return resource
-
-
-@fixture(scope="function")
-def sharded_cluster(
-    namespace: str,
-    mongodb_role_with_empty_strings: ClusterMongoDBRole,
-    mongodb_role_without_empty_strings: ClusterMongoDBRole,
-    second_project: str,
-) -> MongoDB:
-    resource = MongoDB.from_yaml(find_fixture("sharded-cluster-scram-sha-1.yaml"), namespace=namespace)
-
-    if try_load(resource):
-        return resource
-
-    resource["spec"]["mongodsPerShardCount"] = 1
-    resource["spec"]["mongosCount"] = 1
1 - resource["spec"]["configServerCount"] = 1 - - resource["spec"]["security"]["roleRefs"] = [ - { - "name": mongodb_role_with_empty_strings.get_name(), - "kind": ClusterMongoDBRoleKind, - }, - { - "name": mongodb_role_without_empty_strings.get_name(), - "kind": ClusterMongoDBRoleKind, - }, - ] - resource["spec"]["opsManager"]["configMapRef"]["name"] = second_project - - return resource - - -@fixture(scope="function") -def mc_replica_set( - namespace: str, - mongodb_role_with_empty_strings: ClusterMongoDBRole, - mongodb_role_without_empty_strings: ClusterMongoDBRole, - third_project: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(find_fixture("mongodb-multi.yaml"), namespace=namespace) - - if try_load(resource): - return resource - - resource["spec"]["security"] = { - "roleRefs": [ - { - "name": mongodb_role_with_empty_strings.get_name(), - "kind": ClusterMongoDBRoleKind, - }, - { - "name": mongodb_role_without_empty_strings.get_name(), - "kind": ClusterMongoDBRoleKind, - }, - ] - } - resource["spec"]["opsManager"]["configMapRef"]["name"] = third_project - resource["spec"]["clusterSpecList"] = cluster_spec_list(["kind-e2e-cluster-1"], [1]) - - return resource - - -@mark.e2e_mongodb_custom_roles def test_create_resources( mongodb_role_with_empty_strings: ClusterMongoDBRole, mongodb_role_without_empty_strings: ClusterMongoDBRole, replica_set: MongoDB, sharded_cluster: MongoDB, - mc_replica_set: MongoDBMulti, + mc_replica_set: MongoDBMulti | MongoDB, ): mongodb_role_with_empty_strings.update() mongodb_role_without_empty_strings.update() @@ -254,11 +95,10 @@ def test_create_resources( mc_replica_set.assert_reaches_phase(Phase.Running, timeout=400) -@mark.e2e_mongodb_custom_roles def test_automation_config_has_roles( replica_set: MongoDB, sharded_cluster: MongoDB, - mc_replica_set: MongoDBMulti, + mc_replica_set: MongoDBMulti | MongoDB, mongodb_role_with_empty_strings: ClusterMongoDBRole, mongodb_role_without_empty_strings: ClusterMongoDBRole, ): @@ -272,7 +112,7 @@ def test_automation_config_has_roles( def assert_expected_roles( - mc_replica_set: MongoDBMulti, + mc_replica_set: MongoDBMulti | MongoDB, replica_set: MongoDB, sharded_cluster: MongoDB, mongodb_role_with_empty_strings: ClusterMongoDBRole, @@ -306,11 +146,10 @@ def assert_expected_roles( ) -@mark.e2e_mongodb_custom_roles def test_change_inherited_role( replica_set: MongoDB, sharded_cluster: MongoDB, - mc_replica_set: MongoDBMulti, + mc_replica_set: MongoDBMulti | MongoDB, mongodb_role_with_empty_strings: ClusterMongoDBRole, mongodb_role_without_empty_strings: ClusterMongoDBRole, ): @@ -328,11 +167,10 @@ def is_role_changed(ac_tester: AutomationConfigTester): wait_until(lambda: is_role_changed(mc_replica_set.get_automation_config_tester())) -@mark.e2e_mongodb_custom_roles def test_deleting_role_does_not_remove_access( replica_set: MongoDB, sharded_cluster: MongoDB, - mc_replica_set: MongoDBMulti, + mc_replica_set: MongoDBMulti | MongoDB, mongodb_role_with_empty_strings: ClusterMongoDBRole, ): mongodb_role_with_empty_strings.delete() @@ -355,8 +193,9 @@ def test_deleting_role_does_not_remove_access( mc_replica_set.get_automation_config_tester().assert_has_expected_number_of_roles(expected_roles=2) -@mark.e2e_mongodb_custom_roles -def test_removing_role_from_resources(replica_set: MongoDB, sharded_cluster: MongoDB, mc_replica_set: MongoDBMulti): +def test_removing_role_from_resources( + replica_set: MongoDB, sharded_cluster: MongoDB, mc_replica_set: MongoDBMulti | MongoDB +): 
sharded_cluster["spec"]["security"]["roleRefs"] = None sharded_cluster.update() @@ -367,12 +206,10 @@ def test_removing_role_from_resources(replica_set: MongoDB, sharded_cluster: Mon wait_until(lambda: len(mc_replica_set.get_automation_config_tester().automation_config["roles"]) == 0, timeout=120) -@mark.e2e_mongodb_custom_roles def test_install_operator_with_clustermongodbroles_disabled(multi_cluster_operator_no_cluster_mongodb_roles): multi_cluster_operator_no_cluster_mongodb_roles.assert_is_running() -@mark.e2e_mongodb_custom_roles def test_replicaset_is_failed(replica_set: MongoDB): replica_set.assert_reaches_phase( Phase.Failed, @@ -380,7 +217,6 @@ def test_replicaset_is_failed(replica_set: MongoDB): ) -@mark.e2e_mongodb_custom_roles def test_replicaset_is_reconciled_without_rolerefs(replica_set: MongoDB): replica_set["spec"]["security"]["roleRefs"] = None replica_set.update() diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/__init__.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-user.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodb-user.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-user.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodb-user.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-x509-user.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodb-x509-user.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-x509-user.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodb-x509-user.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-central-sts-override.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-central-sts-override.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-central-sts-override.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-central-sts-override.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-cluster.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-cluster.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-cluster.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-cluster.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-dr.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-dr.yaml similarity index 100% rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-dr.yaml rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-dr.yaml diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-pvc-resize.yaml 
similarity index 100%
rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-pvc-resize.yaml
rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-pvc-resize.yaml
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-split-horizon.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-split-horizon.yaml
similarity index 100%
rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-split-horizon.yaml
rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-split-horizon.yaml
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-sts-override.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-sts-override.yaml
similarity index 100%
rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi-sts-override.yaml
rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi-sts-override.yaml
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi.yaml
similarity index 100%
rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/mongodb-multi.yaml
rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-multi.yaml
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/split-horizon-node-port.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-split-horizon-node-port.yaml
similarity index 100%
rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/split-horizon-node-port.yaml
rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/mongodbmulticluster-split-horizon-node-port.yaml
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/oidc/mongodb-multi-m2m-group.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/oidc/mongodbmulticluster-multi-m2m-group.yaml
similarity index 100%
rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/oidc/mongodb-multi-m2m-group.yaml
rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/oidc/mongodbmulticluster-multi-m2m-group.yaml
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/oidc/mongodb-multi-m2m-user.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/oidc/mongodbmulticluster-multi-m2m-user.yaml
similarity index 100%
rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/oidc/mongodb-multi-m2m-user.yaml
rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/oidc/mongodbmulticluster-multi-m2m-user.yaml
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/oidc/oidc-user-multi.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/oidc/oidc-user-multi.yaml
similarity index 100%
rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/oidc/oidc-user-multi.yaml
rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/oidc/oidc-user-multi.yaml
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/split-horizon-node-ports/split-horizon-node-port.yaml b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/split-horizon-node-ports/mongodbmulticluster-split-horizon-node-port.yaml
similarity index 100%
rename from docker/mongodb-kubernetes-tests/tests/multicluster/fixtures/split-horizon-node-ports/split-horizon-node-port.yaml
rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/fixtures/split-horizon-node-ports/mongodbmulticluster-split-horizon-node-port.yaml
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py
similarity index 91%
rename from docker/mongodb-kubernetes-tests/tests/multicluster/manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py
rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py
index 0cb42037d..111d90c1e 100644
--- a/docker/mongodb-kubernetes-tests/tests/multicluster/manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py
+++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py
@@ -19,14 +19,14 @@
 from kubetester.mongodb_multi import MongoDBMulti
 from kubetester.multicluster_client import MultiClusterClient
 from kubetester.operator import Operator
-from kubetester.phase import Phase
 from pytest import fixture
 from tests.multicluster.conftest import cluster_spec_list

+from ..shared import manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke as testhelper
+
 CERT_SECRET_PREFIX = "clustercert"
 MDB_RESOURCE = "multi-cluster-rs"
 BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert"
-BUNDLE_PEM_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert-pem"


 @fixture(scope="module")
@@ -39,7 +39,7 @@ def cert_additional_domains() -> list[str]:

 @fixture(scope="module")
 def mongodb_multi_unmarshalled(namespace: str, member_cluster_names: List[str]) -> MongoDBMulti:
-    resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace)
+    resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace)
     resource["spec"]["persistent"] = False

     # These domains map 1:1 to the CoreDNS file. Please be mindful when updating them.
resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1]) @@ -130,7 +130,7 @@ def server_certs( def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) def test_create_mongodb_multi( @@ -141,4 +141,11 @@ def test_create_mongodb_multi( member_cluster_clients: List[MultiClusterClient], member_cluster_names: List[str], ): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=2400) + testhelper.test_create_mongodb_multi( + mongodb_multi, + namespace, + server_certs, + multi_cluster_issuer_ca_configmap, + member_cluster_clients, + member_cluster_names, + ) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_2_cluster_clusterwide_replicaset.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_2_cluster_clusterwide_replicaset.py similarity index 66% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_2_cluster_clusterwide_replicaset.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_2_cluster_clusterwide_replicaset.py index 277f3fc9f..0a4131db8 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_2_cluster_clusterwide_replicaset.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_2_cluster_clusterwide_replicaset.py @@ -4,9 +4,6 @@ import pytest from kubetester import ( create_or_update_configmap, - create_or_update_secret, - read_configmap, - read_secret, ) from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs from kubetester.kubetester import ensure_ent_version @@ -14,14 +11,12 @@ from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase +from tests.multicluster.conftest import cluster_spec_list -from . 
-from .conftest import cluster_spec_list, create_namespace
+from ..shared import multi_2_cluster_clusterwide_replicaset as testhelper

 CERT_SECRET_PREFIX = "clustercert"
-MDB_RESOURCE = "multi-cluster-replica-set"
-BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert"
+MDB_RESOURCE = "multi-replica-set"


 @pytest.fixture(scope="module")
@@ -41,7 +36,7 @@ def mongodb_multi_a_unmarshalled(
     member_cluster_names: List[str],
     custom_mdb_version: str,
 ) -> MongoDBMulti:
-    resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", mdba_ns)
+    resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, mdba_ns)

     resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1])
     resource.set_version(ensure_ent_version(custom_mdb_version))
@@ -58,7 +53,7 @@ def mongodb_multi_b_unmarshalled(
     member_cluster_names: List[str],
     custom_mdb_version: str,
 ) -> MongoDBMulti:
-    resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", mdbb_ns)
+    resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), "multi-replica-set", mdbb_ns)

     resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1])
     resource.set_version(ensure_ent_version(custom_mdb_version))
@@ -161,16 +156,12 @@ def mongodb_multi_b(
     return resource


-@pytest.mark.e2e_multi_cluster_2_clusters_clusterwide
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide
 def test_create_kube_config_file(cluster_clients: Dict, member_cluster_names: List[str]):
-    clients = cluster_clients
+    testhelper.test_create_kube_config_file(cluster_clients, member_cluster_names)

-    assert len(clients) == 2
-    assert member_cluster_names[0] in clients
-    assert member_cluster_names[1] in clients

-@pytest.mark.e2e_multi_cluster_2_clusters_clusterwide
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide
 def test_create_namespaces(
     namespace: str,
     mdba_ns: str,
@@ -180,34 +171,23 @@ def test_create_namespaces(
     mdbb_ns: str,
     central_cluster_client: kubernetes.client.ApiClient,
     member_cluster_clients: List[MultiClusterClient],
     evergreen_task_id: str,
     multi_cluster_operator_installation_config: Dict[str, str],
 ):
-    image_pull_secret_name = multi_cluster_operator_installation_config["registry.imagePullSecrets"]
-    image_pull_secret_data = read_secret(namespace, image_pull_secret_name, api_client=central_cluster_client)
-
-    create_namespace(
-        central_cluster_client,
-        member_cluster_clients,
-        evergreen_task_id,
+    testhelper.test_create_namespaces(
+        namespace,
         mdba_ns,
-        image_pull_secret_name,
-        image_pull_secret_data,
-    )
-
-    create_namespace(
+        mdbb_ns,
         central_cluster_client,
         member_cluster_clients,
         evergreen_task_id,
-        mdbb_ns,
-        image_pull_secret_name,
-        image_pull_secret_data,
+        multi_cluster_operator_installation_config,
     )


-@pytest.mark.e2e_multi_cluster_2_clusters_clusterwide
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide
 def test_deploy_operator(multi_cluster_operator_clustermode: Operator):
-    multi_cluster_operator_clustermode.assert_is_running()
+    testhelper.test_deploy_operator(multi_cluster_operator_clustermode)


-@pytest.mark.e2e_multi_cluster_2_clusters_clusterwide
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide
 def test_prepare_namespace(
@@ -215,24 +195,12 @@ def test_prepare_namespace(
     multi_cluster_operator_installation_config: Dict[str, str],
     member_cluster_clients: List[MultiClusterClient],
     central_cluster_name: str,
     mdba_ns: str,
     mdbb_ns: str,
 ):
-    prepare_multi_cluster_namespaces(
-        mdba_ns,
-        multi_cluster_operator_installation_config,
-        member_cluster_clients,
-        central_cluster_name,
-        skip_central_cluster=False,
-    )
-
-    prepare_multi_cluster_namespaces(
-        mdbb_ns,
-        multi_cluster_operator_installation_config,
-        member_cluster_clients,
-        central_cluster_name,
-        skip_central_cluster=False,
+    testhelper.test_prepare_namespace(
+        multi_cluster_operator_installation_config, member_cluster_clients, central_cluster_name, mdba_ns, mdbb_ns
     )


-@pytest.mark.e2e_multi_cluster_2_clusters_clusterwide
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide
 def test_copy_configmap_and_secret_across_ns(
     namespace: str,
     central_cluster_client: kubernetes.client.ApiClient,
@@ -240,35 +208,21 @@ def test_copy_configmap_and_secret_across_ns(
     multi_cluster_operator_installation_config: Dict[str, str],
     mdba_ns: str,
     mdbb_ns: str,
 ):
-    data = read_configmap(namespace, "my-project", api_client=central_cluster_client)
-    data["projectName"] = mdba_ns
-    create_or_update_configmap(mdba_ns, "my-project", data, api_client=central_cluster_client)
-
-    data["projectName"] = mdbb_ns
-    create_or_update_configmap(mdbb_ns, "my-project", data, api_client=central_cluster_client)
-
-    data = read_secret(namespace, "my-credentials", api_client=central_cluster_client)
-    create_or_update_secret(mdba_ns, "my-credentials", data, api_client=central_cluster_client)
-    create_or_update_secret(mdbb_ns, "my-credentials", data, api_client=central_cluster_client)
+    testhelper.test_copy_configmap_and_secret_across_ns(
+        namespace, central_cluster_client, multi_cluster_operator_installation_config, mdba_ns, mdbb_ns
+    )


-@pytest.mark.e2e_multi_cluster_2_clusters_clusterwide
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide
 def test_create_mongodb_multi_nsa(mongodb_multi_a: MongoDBMulti):
-    mongodb_multi_a.assert_reaches_phase(Phase.Running, timeout=800)
+    testhelper.test_create_mongodb_multi_nsa(mongodb_multi_a)


-@pytest.mark.e2e_multi_cluster_2_clusters_clusterwide
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide
 def test_enable_mongodb_multi_nsa_auth(mongodb_multi_a: MongoDBMulti):
-    mongodb_multi_a.reload()
-    mongodb_multi_a["spec"]["authentication"] = (
-        {
-            "agents": {"mode": "SCRAM"},
-            "enabled": True,
-            "modes": ["SCRAM"],
-        },
-    )
+    testhelper.test_enable_mongodb_multi_nsa_auth(mongodb_multi_a)


-@pytest.mark.e2e_multi_cluster_2_clusters_clusterwide
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_clusterwide
 def test_create_mongodb_multi_nsb(mongodb_multi_b: MongoDBMulti):
-    mongodb_multi_b.assert_reaches_phase(Phase.Running, timeout=800)
+    testhelper.test_create_mongodb_multi_nsb(mongodb_multi_b)
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_2_cluster_replicaset.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_2_cluster_replicaset.py
similarity index 65%
rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_2_cluster_replicaset.py
rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_2_cluster_replicaset.py
index fd3d273aa..3198d8cce 100644
--- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_2_cluster_replicaset.py
+++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_2_cluster_replicaset.py
@@ -7,12 +7,11 @@
 from kubetester.kubetester import fixture as yaml_fixture
 from kubetester.kubetester import skip_if_local
 from kubetester.mongodb_multi import MongoDBMulti
-from kubetester.mongotester import with_tls
 from kubetester.multicluster_client import MultiClusterClient
 from kubetester.operator import Operator
-from kubetester.phase import Phase
+from tests.multicluster.conftest import cluster_spec_list

-from .conftest import cluster_spec_list
+from ..shared import multi_2_cluster_replicaset as testhelper

 CERT_SECRET_PREFIX = "clustercert"
 MDB_RESOURCE = "multi-cluster-replica-set"
@@ -23,7 +22,7 @@
 def mongodb_multi_unmarshalled(
     namespace: str, member_cluster_names: List[str], custom_mdb_version: str
 ) -> MongoDBMulti:
-    resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace)
+    resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace)
     resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1])
     resource.set_version(ensure_ent_version(custom_mdb_version))
     return resource
@@ -66,42 +65,30 @@ def mongodb_multi(
     return resource.create()


-@pytest.mark.e2e_multi_cluster_2_clusters_replica_set
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_replica_set
 def test_create_kube_config_file(cluster_clients: Dict, member_cluster_names: List[str]):
-    clients = cluster_clients
+    testhelper.test_create_kube_config_file(cluster_clients, member_cluster_names)

-    assert len(clients) == 2
-    assert member_cluster_names[0] in clients
-    assert member_cluster_names[1] in clients

-@pytest.mark.e2e_multi_cluster_2_clusters_replica_set
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_replica_set
 def test_deploy_operator(multi_cluster_operator: Operator):
-    multi_cluster_operator.assert_is_running()
+    testhelper.test_deploy_operator(multi_cluster_operator)


-@pytest.mark.e2e_multi_cluster_2_clusters_replica_set
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_replica_set
 def test_create_mongodb_multi(mongodb_multi: MongoDBMulti):
-    mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200)
+    testhelper.test_create_mongodb_multi(mongodb_multi)


-@pytest.mark.e2e_multi_cluster_2_clusters_replica_set
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_replica_set
 def test_statefulset_is_created_across_multiple_clusters(
     mongodb_multi: MongoDBMulti,
     member_cluster_clients: List[MultiClusterClient],
 ):
-    statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients)
-    cluster_one_client = member_cluster_clients[0]
-    cluster_one_sts = statefulsets[cluster_one_client.cluster_name]
-    assert cluster_one_sts.status.ready_replicas == 2
-
-    cluster_two_client = member_cluster_clients[1]
-    cluster_two_sts = statefulsets[cluster_two_client.cluster_name]
-    assert cluster_two_sts.status.ready_replicas == 1
+    testhelper.test_statefulset_is_created_across_multiple_clusters(mongodb_multi, member_cluster_clients)


 @skip_if_local
-@pytest.mark.e2e_multi_cluster_2_clusters_replica_set
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_2_clusters_replica_set
 def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str):
-    tester = mongodb_multi.tester()
-    tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)])
+    testhelper.test_replica_set_is_reachable(mongodb_multi, ca_path)
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_agent_flags.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_agent_flags.py
new file mode 100644
index 000000000..b267cc19f
--- /dev/null
+++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_agent_flags.py
@@ -0,0 +1,54 @@
+from typing import List
+
+import kubernetes
+from kubetester.kubetester import fixture as yaml_fixture
+from kubetester.mongodb_multi import MongoDBMulti
+from kubetester.multicluster_client import MultiClusterClient
+from kubetester.operator import Operator
+from pytest import fixture, mark
+from tests.multicluster.conftest import cluster_spec_list
+
+from ..shared import multi_cluster_agent_flags as testhelper
+
+MDB_RESOURCE = "multi-replica-set"
+
+
+@fixture(scope="module")
+def mongodb_multi(
+    central_cluster_client: kubernetes.client.ApiClient,
+    namespace: str,
+    member_cluster_names: list[str],
+    custom_mdb_version: str,
+) -> MongoDBMulti:
+    resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi-cluster.yaml"), MDB_RESOURCE, namespace)
+    resource.set_version(custom_mdb_version)
+    resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2])
+
+    # override agent startup flags
+    resource["spec"]["agent"] = {"startupOptions": {"logFile": "/var/log/mongodb-mms-automation/customLogFile"}}
+    resource["spec"]["agent"]["logLevel"] = "DEBUG"
+
+    resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client)
+    return resource.update()
+
+
+@mark.e2e_mongodbmulticluster_multi_cluster_agent_flags
+def test_create_mongodb_multi(multi_cluster_operator: Operator, mongodb_multi: MongoDBMulti):
+    testhelper.test_create_mongodb_multi(multi_cluster_operator, mongodb_multi)
+
+
+@mark.e2e_mongodbmulticluster_multi_cluster_agent_flags
+def test_multi_replicaset_has_agent_flags(
+    namespace: str,
+    member_cluster_clients: List[MultiClusterClient],
+):
+    testhelper.test_multi_replicaset_has_agent_flags(namespace, member_cluster_clients)
+
+
+@mark.e2e_mongodbmulticluster_multi_cluster_agent_flags
+def test_placeholders_in_external_services(
+    namespace: str,
+    mongodb_multi: MongoDBMulti,
+    member_cluster_clients: List[MultiClusterClient],
+):
+    testhelper.test_placeholders_in_external_services(namespace, mongodb_multi, member_cluster_clients)
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_automated_disaster_recovery.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_automated_disaster_recovery.py
new file mode 100644
index 000000000..207495ce9
--- /dev/null
+++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_automated_disaster_recovery.py
@@ -0,0 +1,104 @@
+from typing import List
+
+import kubernetes
+from kubeobject import CustomObject
+from kubetester.kubetester import fixture as yaml_fixture
+from kubetester.mongodb_multi import MongoDBMulti
+from kubetester.multicluster_client import MultiClusterClient
+from kubetester.operator import Operator
+from pytest import fixture, mark
+from tests.multicluster.conftest import (
+    cluster_spec_list,
+)
+
+from ..shared import multi_cluster_automated_disaster_recovery as testhelper
+
+MDB_RESOURCE = "multi-replica-set"
+
+
+@fixture(scope="module")
+def mongodb_multi(
+    central_cluster_client: kubernetes.client.ApiClient,
+    namespace: str,
+    member_cluster_names: list[str],
+    custom_mdb_version: str,
+) -> MongoDBMulti:
+    resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace)
+    resource.set_version(custom_mdb_version)
resource["spec"]["persistent"] = False + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + return resource + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +def test_label_namespace(namespace: str, central_cluster_client: kubernetes.client.ApiClient): + testhelper.test_label_namespace(namespace, central_cluster_client) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +def test_create_service_entry(service_entries: List[CustomObject]): + testhelper.test_create_service_entry(service_entries) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +@mark.e2e_mongodbmulticluster_multi_cluster_multi_disaster_recovery +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +@mark.e2e_mongodbmulticluster_multi_cluster_multi_disaster_recovery +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +def test_update_service_entry_block_failed_cluster_traffic( + namespace: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: List[str], +): + testhelper.test_update_service_entry_block_failed_cluster_traffic( + namespace, central_cluster_client, member_cluster_names + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +def test_mongodb_multi_leaves_running_state( + mongodb_multi: MongoDBMulti, +): + testhelper.test_mongodb_multi_leaves_running_state(mongodb_multi) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +def test_delete_database_statefulset_in_failed_cluster(mongodb_multi: MongoDBMulti, member_cluster_names: list[str]): + testhelper.test_delete_database_statefulset_in_failed_cluster(mongodb_multi, member_cluster_names) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +@mark.e2e_mongodbmulticluster_multi_cluster_multi_disaster_recovery +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti): + testhelper.test_replica_set_is_reachable(mongodb_multi) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +def test_replica_reaches_running(mongodb_multi: MongoDBMulti): + testhelper.test_replica_reaches_running(mongodb_multi) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +@mark.e2e_mongodbmulticluster_multi_cluster_multi_disaster_recovery +def test_number_numbers_in_ac(mongodb_multi: MongoDBMulti): + testhelper.test_number_numbers_in_ac(mongodb_multi) + + +@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery +def test_sts_count_in_member_cluster( + mongodb_multi: MongoDBMulti, + member_cluster_names: list[str], + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_sts_count_in_member_cluster(mongodb_multi, member_cluster_names, member_cluster_clients) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_backup_restore.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore.py similarity index 66% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_backup_restore.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore.py index 
981df49d4..3d7ce4bf6 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_backup_restore.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore.py @@ -1,17 +1,12 @@ -import datetime -import time from typing import Dict, List, Optional import kubernetes import kubernetes.client import pymongo import pytest -from kubernetes import client from kubetester import ( create_or_update_configmap, create_or_update_secret, - get_default_storage_class, - read_service, try_load, ) from kubetester.certs import create_ops_manager_tls_certs @@ -25,20 +20,14 @@ from kubetester.omtester import OMTester from kubetester.operator import Operator from kubetester.opsmanager import MongoDBOpsManager -from kubetester.phase import Phase from pytest import fixture, mark from tests.conftest import ( - assert_data_got_restored, - update_coredns_hosts, wait_for_primary, ) -TEST_DATA = {"_id": "unique_id", "name": "John", "address": "Highway 37", "age": 30} +from ..shared import multi_cluster_backup_restore as testhelper MONGODB_PORT = 30000 - - -HEAD_PATH = "/head/" OPLOG_RS_NAME = "my-mongodb-oplog" BLOCKSTORE_RS_NAME = "my-mongodb-blockstore" USER_PASSWORD = "/qwerty@!#:" @@ -67,18 +56,6 @@ def ops_manager_certs( ) -def create_project_config_map(om: MongoDBOpsManager, mdb_name, project_name, client, custom_ca): - name = f"{mdb_name}-config" - data = { - "baseUrl": om.om_status().get_url(), - "projectName": project_name, - "sslMMSCAConfigMap": custom_ca, - "orgId": "", - } - - create_or_update_configmap(om.namespace, name, data, client) - - def new_om_data_store( mdb: MongoDB, id: str, @@ -139,7 +116,7 @@ def oplog_replica_set( name=OPLOG_RS_NAME, ) - create_project_config_map( + testhelper.create_project_config_map( om=ops_manager, project_name="development", mdb_name=OPLOG_RS_NAME, @@ -172,7 +149,7 @@ def blockstore_replica_set( name=BLOCKSTORE_RS_NAME, ) - create_project_config_map( + testhelper.create_project_config_map( om=ops_manager, project_name="blockstore", mdb_name=BLOCKSTORE_RS_NAME, @@ -241,12 +218,12 @@ def oplog_user( yield resource.update() -@mark.e2e_multi_cluster_backup_restore +@mark.e2e_mongodbmulticluster_multi_cluster_backup_restore def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) -@mark.e2e_multi_cluster_backup_restore +@mark.e2e_mongodbmulticluster_multi_cluster_backup_restore class TestOpsManagerCreation: """ name: Ops Manager successful creation with backup and oplog stores enabled @@ -259,47 +236,23 @@ def test_create_om( self, ops_manager: MongoDBOpsManager, ): - ops_manager["spec"]["backup"]["headDB"]["storageClass"] = get_default_storage_class() - ops_manager["spec"]["backup"]["members"] = 1 - - ops_manager.update() - - ops_manager.backup_status().assert_reaches_phase( - Phase.Pending, - msg_regexp="The MongoDB object .+ doesn't exist", - timeout=1800, - ) + testhelper.TestOpsManagerCreation.test_create_om(self, ops_manager) def test_daemon_statefulset( self, ops_manager: MongoDBOpsManager, ): - def stateful_set_becomes_ready(): - stateful_set = ops_manager.read_backup_statefulset() - return stateful_set.status.ready_replicas == 1 and stateful_set.status.current_replicas == 1 - - KubernetesTester.wait_until(stateful_set_becomes_ready, timeout=300) - - stateful_set = ops_manager.read_backup_statefulset() - # pod template has volume mount request - assert (HEAD_PATH, "head") in ( - 
(mount.mount_path, mount.name) for mount in stateful_set.spec.template.spec.containers[0].volume_mounts - ) + testhelper.TestOpsManagerCreation.test_daemon_statefulset(self, ops_manager) def test_backup_daemon_services_created( self, namespace, central_cluster_client: kubernetes.client.ApiClient, ): - """Backup creates two additional services for queryable backup""" - services = client.CoreV1Api(api_client=central_cluster_client).list_namespaced_service(namespace).items - - backup_services = [s for s in services if s.metadata.name.startswith("om-backup")] - - assert len(backup_services) >= 3 + testhelper.TestOpsManagerCreation.test_backup_daemon_services_created(self, namespace, central_cluster_client) -@mark.e2e_multi_cluster_backup_restore +@mark.e2e_mongodbmulticluster_multi_cluster_backup_restore class TestBackupDatabasesAdded: """name: Creates mongodb resources for oplog and blockstore and waits until OM resource gets to running state""" @@ -309,33 +262,16 @@ def test_backup_mdbs_created( oplog_replica_set: MongoDB, blockstore_replica_set: MongoDB, ): - """Creates mongodb databases all at once""" - oplog_replica_set.assert_reaches_phase(Phase.Running) - blockstore_replica_set.assert_reaches_phase(Phase.Running) + testhelper.TestBackupDatabasesAdded.test_backup_mdbs_created(self, oplog_replica_set, blockstore_replica_set) def test_oplog_user_created(self, oplog_user: MongoDBUser): - oplog_user.assert_reaches_phase(Phase.Updated) + testhelper.TestBackupDatabasesAdded.test_oplog_user_created(self, oplog_user) def test_om_failed_oplog_no_user_ref(self, ops_manager: MongoDBOpsManager): - """Waits until Backup is in failed state as blockstore doesn't have reference to the user""" - ops_manager.backup_status().assert_reaches_phase( - Phase.Failed, - msg_regexp=".*is configured to use SCRAM-SHA authentication mode, the user " - "must be specified using 'mongodbUserRef'", - ) + testhelper.TestBackupDatabasesAdded.test_om_failed_oplog_no_user_ref(self, ops_manager) def test_fix_om(self, ops_manager: MongoDBOpsManager, oplog_user: MongoDBUser): - ops_manager.load() - ops_manager["spec"]["backup"]["opLogStores"][0]["mongodbUserRef"] = {"name": oplog_user.name} - ops_manager.update() - - ops_manager.backup_status().assert_reaches_phase( - Phase.Running, - timeout=200, - ignore_errors=True, - ) - - assert ops_manager.backup_status().get_message() is None + testhelper.TestBackupDatabasesAdded.test_fix_om(self, ops_manager, oplog_user) class TestBackupForMongodb: @@ -395,7 +331,7 @@ def mongodb_multi_one( custom_mdb_version: str, ) -> MongoDBMulti: resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi.yaml"), + yaml_fixture("mongodbmulticluster-multi.yaml"), "multi-replica-set-one", namespace, # the project configmap should be created in the central cluster. @@ -429,88 +365,39 @@ def mongodb_multi_one( return resource.update() - @mark.e2e_multi_cluster_backup_restore + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore def test_setup_om_connection( self, ops_manager: MongoDBOpsManager, central_cluster_client: kubernetes.client.ApiClient, member_cluster_clients: List[MultiClusterClient], ): - """ - The base_url makes OM accessible from member clusters via a special interconnected dns address. - """ - ops_manager.load() - external_svc_name = ops_manager.external_svc_name() - svc = read_service(ops_manager.namespace, external_svc_name, api_client=central_cluster_client) - # we have no hostName, but the ip is resolvable. 
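[editor's note] From here on the pattern repeats: each test file keeps only its fixtures and pytest marks and forwards every test body to a module under tests/multicluster/shared. That module is not shown in this diff, so the following is only a plausible sketch of its shape; the two bodies are copied from code the hunks above delete, while the module path and the idea that the bodies move over unchanged are assumptions.

    # Hypothetical: docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore.py
    # The diff only shows callers importing this module; its contents are assumed
    # to be the bodies deleted above, moved over as-is.
    from kubetester import create_or_update_configmap
    from kubetester.operator import Operator
    from kubetester.opsmanager import MongoDBOpsManager


    def create_project_config_map(om: MongoDBOpsManager, mdb_name, project_name, client, custom_ca):
        # one project config map per MongoDB resource, pointing back at this OM
        data = {
            "baseUrl": om.om_status().get_url(),
            "projectName": project_name,
            "sslMMSCAConfigMap": custom_ca,
            "orgId": "",
        }
        create_or_update_configmap(om.namespace, f"{mdb_name}-config", data, client)


    def test_deploy_operator(multi_cluster_operator: Operator):
        multi_cluster_operator.assert_is_running()

This keeps the per-file wrappers to one line each, so a rename of the e2e marker (the point of this PR) no longer touches test logic.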
- ip = svc.status.load_balancer.ingress[0].ip - - interconnected_field = f"om-backup.{ops_manager.namespace}.interconnected" - - # let's make sure that every client can connect to OM. - for c in member_cluster_clients: - update_coredns_hosts( - host_mappings=[(ip, interconnected_field)], - api_client=c.api_client, - cluster_name=c.cluster_name, - ) - - # let's make sure that the operator can connect to OM via that given address. - update_coredns_hosts( - host_mappings=[(ip, interconnected_field)], - api_client=central_cluster_client, - cluster_name="central-cluster", + testhelper.TestBackupForMongodb.test_setup_om_connection( + self, ops_manager, central_cluster_client, member_cluster_clients ) - new_address = f"https://{interconnected_field}:8443" - # updating the central url app setting to point at the external address, - # this allows agents in other clusters to communicate correctly with this OM instance. - ops_manager["spec"]["configuration"]["mms.centralUrl"] = new_address - ops_manager.update() - - @mark.e2e_multi_cluster_backup_restore + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore def test_mongodb_multi_one_running_state(self, mongodb_multi_one: MongoDBMulti): - # we might fail connection in the beginning since we set a custom dns in coredns - mongodb_multi_one.assert_reaches_phase(Phase.Running, ignore_errors=True, timeout=1200) + testhelper.TestBackupForMongodb.test_mongodb_multi_one_running_state(self, mongodb_multi_one) @skip_if_local - @mark.e2e_multi_cluster_backup_restore + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore @pytest.mark.flaky(reruns=100, reruns_delay=6) def test_add_test_data(self, mongodb_multi_one_collection): - mongodb_multi_one_collection.insert_one(TEST_DATA) + testhelper.TestBackupForMongodb.test_add_test_data(self, mongodb_multi_one_collection) - @mark.e2e_multi_cluster_backup_restore + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore def test_mdb_backed_up(self, project_one: OMTester): - project_one.wait_until_backup_snapshots_are_ready(expected_count=1) + testhelper.TestBackupForMongodb.test_mdb_backed_up(self, project_one) - @mark.e2e_multi_cluster_backup_restore + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore def test_change_mdb_data(self, mongodb_multi_one_collection): - now_millis = time_to_millis(datetime.datetime.now()) - print("\nCurrent time (millis): {}".format(now_millis)) - time.sleep(30) - mongodb_multi_one_collection.insert_one({"foo": "bar"}) + testhelper.TestBackupForMongodb.test_change_mdb_data(self, mongodb_multi_one_collection) - @mark.e2e_multi_cluster_backup_restore + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore def test_pit_restore(self, project_one: OMTester): - now_millis = time_to_millis(datetime.datetime.now()) - print("\nCurrent time (millis): {}".format(now_millis)) - - backup_completion_time = project_one.get_latest_backup_completion_time() - print("\nbackup_completion_time: {}".format(backup_completion_time)) - - pit_millis = backup_completion_time + 1500 - - print(f"Restoring back to: {pit_millis}") + testhelper.TestBackupForMongodb.test_pit_restore(self, project_one) - project_one.create_restore_job_pit(pit_millis) - - @mark.e2e_multi_cluster_backup_restore + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore def test_data_got_restored(self, mongodb_multi_one_collection, mdb_client): - assert_data_got_restored(TEST_DATA, mongodb_multi_one_collection, timeout=1200) - - -def time_to_millis(date_time) -> int: - 
"""https://stackoverflow.com/a/11111177/614239""" - epoch = datetime.datetime.utcfromtimestamp(0) - pit_millis = (date_time - epoch).total_seconds() * 1000 - return pit_millis + testhelper.TestBackupForMongodb.test_data_got_restored(self, mongodb_multi_one_collection, mdb_client) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_backup_restore_no_mesh.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore_no_mesh.py similarity index 67% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_backup_restore_no_mesh.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore_no_mesh.py index 41707aa04..2f34a04e5 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_backup_restore_no_mesh.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_backup_restore_no_mesh.py @@ -1,9 +1,7 @@ # This test sets up ops manager in a multicluster "no-mesh" environment. # It tests the back-up functionality with a multi-cluster replica-set when the replica-set is deployed outside of a service-mesh context. -import datetime -import time -from typing import List, Optional, Tuple +from typing import List, Tuple import kubernetes import kubernetes.client @@ -12,8 +10,6 @@ from kubetester import ( create_or_update_configmap, create_or_update_secret, - get_default_storage_class, - read_service, try_load, ) from kubetester.certs import create_ops_manager_tls_certs @@ -27,14 +23,11 @@ from kubetester.omtester import OMTester from kubetester.operator import Operator from kubetester.opsmanager import MongoDBOpsManager -from kubetester.phase import Phase from pytest import fixture, mark -from tests.conftest import assert_data_got_restored, update_coredns_hosts -TEST_DATA = {"_id": "unique_id", "name": "John", "address": "Highway 37", "age": 30} +from ..shared import multi_cluster_backup_restore_no_mesh as testhelper MONGODB_PORT = 30000 -HEAD_PATH = "/head/" OPLOG_RS_NAME = "my-mongodb-oplog" BLOCKSTORE_RS_NAME = "my-mongodb-blockstore" USER_PASSWORD = "/qwerty@!#:" @@ -63,33 +56,6 @@ def ops_manager_certs( ) -def create_project_config_map(om: MongoDBOpsManager, mdb_name, project_name, client, custom_ca): - name = f"{mdb_name}-config" - data = { - "baseUrl": om.om_status().get_url(), - "projectName": project_name, - "sslMMSCAConfigMap": custom_ca, - "orgId": "", - } - - create_or_update_configmap(om.namespace, name, data, client) - - -def new_om_data_store( - mdb: MongoDB, - id: str, - assignment_enabled: bool = True, - user_name: Optional[str] = None, - password: Optional[str] = None, -) -> dict: - return { - "id": id, - "uri": mdb.mongo_uri(user_name=user_name, password=password), - "ssl": mdb.is_tls_enabled(), - "assignmentEnabled": assignment_enabled, - } - - @fixture(scope="module") def ops_manager( namespace: str, @@ -135,7 +101,7 @@ def oplog_replica_set( name=OPLOG_RS_NAME, ) - create_project_config_map( + testhelper.create_project_config_map( om=ops_manager, project_name="development", mdb_name=OPLOG_RS_NAME, @@ -168,7 +134,7 @@ def blockstore_replica_set( name=BLOCKSTORE_RS_NAME, ) - create_project_config_map( + testhelper.create_project_config_map( om=ops_manager, project_name="blockstore", mdb_name=BLOCKSTORE_RS_NAME, @@ -297,114 +263,58 @@ def disable_istio( return None -@mark.e2e_multi_cluster_backup_restore_no_mesh 
+@mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_update_coredns( replica_set_external_hosts: List[Tuple[str, str]], cluster_clients: dict[str, kubernetes.client.ApiClient], ): - """ - This test updates the coredns config in the member clusters to allow connecting to the other replica set members - through an external address. - """ - for cluster_name, cluster_api in cluster_clients.items(): - update_coredns_hosts(replica_set_external_hosts, cluster_name, api_client=cluster_api) + testhelper.test_update_coredns(replica_set_external_hosts, cluster_clients) -@mark.e2e_multi_cluster_backup_restore_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) -@mark.e2e_multi_cluster_backup_restore_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh class TestOpsManagerCreation: - """ - name: Ops Manager successful creation with backup and oplog stores enabled - description: | - Creates an Ops Manager instance with backup enabled. The OM is expected to get to 'Pending' state - eventually as it will wait for oplog db to be created - """ - def test_create_om( self, ops_manager: MongoDBOpsManager, ): - ops_manager["spec"]["backup"]["headDB"]["storageClass"] = get_default_storage_class() - ops_manager["spec"]["backup"]["members"] = 1 - - ops_manager.update() - - ops_manager.backup_status().assert_reaches_phase( - Phase.Pending, - msg_regexp="The MongoDB object .+ doesn't exist", - timeout=1800, - ) + testhelper.TestOpsManagerCreation.test_create_om(self, ops_manager) def test_daemon_statefulset( self, ops_manager: MongoDBOpsManager, ): - def stateful_set_becomes_ready(): - stateful_set = ops_manager.read_backup_statefulset() - return stateful_set.status.ready_replicas == 1 and stateful_set.status.current_replicas == 1 - - KubernetesTester.wait_until(stateful_set_becomes_ready, timeout=300) - - stateful_set = ops_manager.read_backup_statefulset() - # pod template has volume mount request - assert (HEAD_PATH, "head") in ( - (mount.mount_path, mount.name) for mount in stateful_set.spec.template.spec.containers[0].volume_mounts - ) + testhelper.TestOpsManagerCreation.test_daemon_statefulset(self, ops_manager) def test_backup_daemon_services_created( self, namespace, central_cluster_client: kubernetes.client.ApiClient, ): - """Backup creates two additional services for queryable backup""" - services = client.CoreV1Api(api_client=central_cluster_client).list_namespaced_service(namespace).items - - backup_services = [s for s in services if s.metadata.name.startswith("om-backup")] - - assert len(backup_services) >= 3 + testhelper.TestOpsManagerCreation.test_backup_daemon_services_created(self, namespace, central_cluster_client) -@mark.e2e_multi_cluster_backup_restore_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh class TestBackupDatabasesAdded: - """name: Creates mongodb resources for oplog and blockstore and waits until OM resource gets to - running state""" - def test_backup_mdbs_created( self, oplog_replica_set: MongoDB, blockstore_replica_set: MongoDB, ): - """Creates mongodb databases all at once""" - oplog_replica_set.assert_reaches_phase(Phase.Running) - blockstore_replica_set.assert_reaches_phase(Phase.Running) + testhelper.TestBackupDatabasesAdded.test_backup_mdbs_created(self, oplog_replica_set, blockstore_replica_set) def 
test_oplog_user_created(self, oplog_user: MongoDBUser): - oplog_user.assert_reaches_phase(Phase.Updated) + testhelper.TestBackupDatabasesAdded.test_oplog_user_created(self, oplog_user) def test_om_failed_oplog_no_user_ref(self, ops_manager: MongoDBOpsManager): - """Waits until Backup is in failed state as blockstore doesn't have reference to the user""" - ops_manager.backup_status().assert_reaches_phase( - Phase.Failed, - msg_regexp=".*is configured to use SCRAM-SHA authentication mode, the user " - "must be specified using 'mongodbUserRef'", - ) + testhelper.TestBackupDatabasesAdded.test_om_failed_oplog_no_user_ref(self, ops_manager) def test_fix_om(self, ops_manager: MongoDBOpsManager, oplog_user: MongoDBUser): - ops_manager.load() - ops_manager["spec"]["backup"]["opLogStores"][0]["mongodbUserRef"] = {"name": oplog_user.name} - ops_manager.update() - - ops_manager.backup_status().assert_reaches_phase( - Phase.Running, - timeout=200, - ignore_errors=True, - ) - - assert ops_manager.backup_status().get_message() is None + testhelper.TestBackupDatabasesAdded.test_fix_om(self, ops_manager, oplog_user) class TestBackupForMongodb: @@ -469,7 +379,7 @@ def mongodb_multi_one( custom_mdb_version: str, ) -> MongoDBMulti: resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi.yaml"), + yaml_fixture("mongodbmulticluster-multi.yaml"), "multi-replica-set-one", namespace, # the project configmap should be created in the central cluster. @@ -574,7 +484,7 @@ def mongodb_multi_one( return resource.update() - @mark.e2e_multi_cluster_backup_restore_no_mesh + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_setup_om_connection( self, replica_set_external_hosts: List[Tuple[str, str]], @@ -582,96 +492,35 @@ def test_setup_om_connection( central_cluster_client: kubernetes.client.ApiClient, member_cluster_clients: List[MultiClusterClient], ): - """ - test_setup_om_connection makes OM accessible from member clusters via a special interconnected dns address. - """ - ops_manager.load() - external_svc_name = ops_manager.external_svc_name() - svc = read_service(ops_manager.namespace, external_svc_name, api_client=central_cluster_client) - # we have no hostName, but the ip is resolvable. - ip = svc.status.load_balancer.ingress[0].ip - - interconnected_field = f"om-backup.{ops_manager.namespace}.interconnected" - - # let's make sure that every client can connect to OM. - hosts = replica_set_external_hosts[:] - hosts.append((ip, interconnected_field)) - - for c in member_cluster_clients: - update_coredns_hosts( - host_mappings=hosts, - api_client=c.api_client, - cluster_name=c.cluster_name, - ) - - # let's make sure that the operator can connect to OM via that given address. - update_coredns_hosts( - host_mappings=[(ip, interconnected_field)], - api_client=central_cluster_client, - cluster_name="central-cluster", + testhelper.TestBackupForMongodb.test_setup_om_connection( + self, replica_set_external_hosts, ops_manager, central_cluster_client, member_cluster_clients ) - new_address = f"https://{interconnected_field}:8443" - # updating the central url app setting to point at the external address, - # this allows agents in other clusters to communicate correctly with this OM instance. 
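[editor's note] Condensing the OM-connection logic that moves out of this file: resolve the load-balancer IP of Ops Manager's external service, teach CoreDNS in every cluster that om-backup.<namespace>.interconnected maps to that IP (the no-mesh variant also adds the replica set's external hostnames), then point mms.centralUrl at that name so agents in member clusters can reach OM. A sketch assembled from the removed lines; the helper names and signatures are exactly as used there, the function wrapper is ours.

    from kubetester import read_service
    from tests.conftest import update_coredns_hosts

    def point_clusters_at_om(ops_manager, central_cluster_client, member_cluster_clients, extra_hosts=()):
        svc = read_service(ops_manager.namespace, ops_manager.external_svc_name(), api_client=central_cluster_client)
        ip = svc.status.load_balancer.ingress[0].ip  # no hostname, but the IP is resolvable
        interconnected = f"om-backup.{ops_manager.namespace}.interconnected"
        hosts = list(extra_hosts) + [(ip, interconnected)]  # no-mesh passes replica_set_external_hosts here
        for c in member_cluster_clients:
            update_coredns_hosts(host_mappings=hosts, api_client=c.api_client, cluster_name=c.cluster_name)
        # the operator in the central cluster must resolve the same address
        update_coredns_hosts(host_mappings=[(ip, interconnected)], api_client=central_cluster_client, cluster_name="central-cluster")
        # agents in other clusters communicate with OM via the interconnected address
        ops_manager["spec"]["configuration"]["mms.centralUrl"] = f"https://{interconnected}:8443"
        ops_manager.update()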
- ops_manager["spec"]["configuration"]["mms.centralUrl"] = new_address - ops_manager.update() - - @mark.e2e_multi_cluster_backup_restore_no_mesh + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_mongodb_multi_one_running_state(self, mongodb_multi_one: MongoDBMulti): - # we might fail connection in the beginning since we set a custom dns in coredns - mongodb_multi_one.assert_reaches_phase(Phase.Running, ignore_errors=True, timeout=1500) + testhelper.TestBackupForMongodb.test_mongodb_multi_one_running_state(self, mongodb_multi_one) @skip_if_local - @mark.e2e_multi_cluster_backup_restore_no_mesh + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_add_test_data(self, mongodb_multi_one_collection): - max_attempts = 100 - while max_attempts > 0: - try: - mongodb_multi_one_collection.insert_one(TEST_DATA) - return - except Exception as e: - print(e) - max_attempts -= 1 - time.sleep(6) - - @mark.e2e_multi_cluster_backup_restore_no_mesh + testhelper.TestBackupForMongodb.test_add_test_data(self, mongodb_multi_one_collection) + + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_mdb_backed_up(self, project_one: OMTester): - project_one.wait_until_backup_snapshots_are_ready(expected_count=1) + testhelper.TestBackupForMongodb.test_mdb_backed_up(self, project_one) - @mark.e2e_multi_cluster_backup_restore_no_mesh + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_change_mdb_data(self, mongodb_multi_one_collection): - now_millis = time_to_millis(datetime.datetime.now()) - print("\nCurrent time (millis): {}".format(now_millis)) - time.sleep(30) - mongodb_multi_one_collection.insert_one({"foo": "bar"}) + testhelper.TestBackupForMongodb.test_change_mdb_data(self, mongodb_multi_one_collection) - @mark.e2e_multi_cluster_backup_restore_no_mesh + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_pit_restore(self, project_one: OMTester): - now_millis = time_to_millis(datetime.datetime.now()) - print("\nCurrent time (millis): {}".format(now_millis)) - - pit_datetme = datetime.datetime.now() - datetime.timedelta(seconds=15) - pit_millis = time_to_millis(pit_datetme) - print("Restoring back to the moment 15 seconds ago (millis): {}".format(pit_millis)) + testhelper.TestBackupForMongodb.test_pit_restore(self, project_one) - project_one.create_restore_job_pit(pit_millis) - - @mark.e2e_multi_cluster_backup_restore_no_mesh + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_mdb_ready(self, mongodb_multi_one: MongoDBMulti): - # Note: that we are not waiting for the restore jobs to get finished as PIT restore jobs get FINISHED status - # right away. - # But the agent might still do work on the cluster, so we need to wait for that to happen. 
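[editor's note] The point-in-time restore arithmetic deleted above is worth keeping in view, since it now lives out of sight in the shared module: create_restore_job_pit takes epoch milliseconds, and the two variants pick the restore moment differently. Recapping the removed helpers:

    import datetime

    def time_to_millis(date_time) -> int:
        """https://stackoverflow.com/a/11111177/614239"""
        epoch = datetime.datetime.utcfromtimestamp(0)
        return int((date_time - epoch).total_seconds() * 1000)

    # no-mesh variant: restore to the moment 15 seconds ago
    pit_millis = time_to_millis(datetime.datetime.now() - datetime.timedelta(seconds=15))
    # mesh variant instead restores to just after the latest snapshot completed:
    #   pit_millis = project_one.get_latest_backup_completion_time() + 1500
    # project_one.create_restore_job_pit(pit_millis)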
- mongodb_multi_one.assert_reaches_phase(Phase.Pending) - mongodb_multi_one.assert_reaches_phase(Phase.Running) + testhelper.TestBackupForMongodb.test_mdb_ready(self, mongodb_multi_one) - @mark.e2e_multi_cluster_backup_restore_no_mesh + @mark.e2e_mongodbmulticluster_multi_cluster_backup_restore_no_mesh def test_data_got_restored(self, mongodb_multi_one_collection): - assert_data_got_restored(TEST_DATA, mongodb_multi_one_collection, timeout=900) - - -def time_to_millis(date_time) -> int: - """https://stackoverflow.com/a/11111177/614239""" - epoch = datetime.datetime.utcfromtimestamp(0) - pit_millis = (date_time - epoch).total_seconds() * 1000 - return pit_millis + testhelper.TestBackupForMongodb.test_data_got_restored(self, mongodb_multi_one_collection) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_cli_recover.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_cli_recover.py similarity index 56% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_cli_recover.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_cli_recover.py index 16067e7ca..b4dbc3e97 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_cli_recover.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_cli_recover.py @@ -7,14 +7,10 @@ from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase -from tests.conftest import ( - run_kube_config_creation_tool, - run_multi_cluster_recovery_tool, -) -from tests.constants import MULTI_CLUSTER_OPERATOR_NAME from tests.multicluster.conftest import cluster_spec_list +from ..shared import multi_cluster_cli_recover as testhelper + RESOURCE_NAME = "multi-replica-set" BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" @@ -27,7 +23,7 @@ def mongodb_multi_unmarshalled( member_cluster_names: List[str], custom_mdb_version: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), RESOURCE_NAME, namespace) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), RESOURCE_NAME, namespace) resource.set_version(custom_mdb_version) # ensure certs are created for the members during scale up resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) @@ -64,74 +60,43 @@ def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, server_certs: str) - return mongodb_multi_unmarshalled -@pytest.mark.e2e_multi_cluster_recover +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_recover def test_deploy_operator( install_multi_cluster_operator_set_members_fn: Callable[[List[str]], Operator], member_cluster_names: List[str], namespace: str, ): - run_kube_config_creation_tool(member_cluster_names[:-1], namespace, namespace, member_cluster_names) - # deploy the operator without the final cluster - operator = install_multi_cluster_operator_set_members_fn(member_cluster_names[:-1]) - operator.assert_is_running() + testhelper.test_deploy_operator(install_multi_cluster_operator_set_members_fn, member_cluster_names, namespace) -@pytest.mark.e2e_multi_cluster_recover +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_recover def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): - mongodb_multi.assert_reaches_phase(Phase.Running, 
timeout=600) + testhelper.test_create_mongodb_multi(mongodb_multi) -@pytest.mark.e2e_multi_cluster_recover +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_recover def test_recover_operator_add_cluster( member_cluster_names: List[str], namespace: str, central_cluster_client: kubernetes.client.ApiClient, ): - return_code = run_multi_cluster_recovery_tool(member_cluster_names, namespace, namespace) - assert return_code == 0 - operator = Operator( - name=MULTI_CLUSTER_OPERATOR_NAME, - namespace=namespace, - api_client=central_cluster_client, - ) - operator._wait_for_operator_ready() - operator.assert_is_running() + testhelper.test_recover_operator_add_cluster(member_cluster_names, namespace, central_cluster_client) -@pytest.mark.e2e_multi_cluster_recover +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_recover def test_mongodb_multi_recovers_adding_cluster(mongodb_multi: MongoDBMulti, member_cluster_names: List[str]): - mongodb_multi.load() + testhelper.test_mongodb_multi_recovers_adding_cluster(mongodb_multi, member_cluster_names) - mongodb_multi["spec"]["clusterSpecList"].append({"clusterName": member_cluster_names[-1], "members": 2}) - mongodb_multi.update() - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) - -@pytest.mark.e2e_multi_cluster_recover +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_recover def test_recover_operator_remove_cluster( member_cluster_names: List[str], namespace: str, central_cluster_client: kubernetes.client.ApiClient, ): - return_code = run_multi_cluster_recovery_tool(member_cluster_names[1:], namespace, namespace) - assert return_code == 0 - operator = Operator( - name=MULTI_CLUSTER_OPERATOR_NAME, - namespace=namespace, - api_client=central_cluster_client, - ) - operator._wait_for_operator_ready() - operator.assert_is_running() + testhelper.test_recover_operator_remove_cluster(member_cluster_names, namespace, central_cluster_client) -@pytest.mark.e2e_multi_cluster_recover +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_recover def test_mongodb_multi_recovers_removing_cluster(mongodb_multi: MongoDBMulti, member_cluster_names: List[str]): - mongodb_multi.load() - - last_transition_time = mongodb_multi.get_status_last_transition_time() - - mongodb_multi["spec"]["clusterSpecList"].pop(0) - mongodb_multi.update() - mongodb_multi.assert_state_transition_happens(last_transition_time) - - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1500) + testhelper.test_mongodb_multi_recovers_removing_cluster(mongodb_multi, member_cluster_names) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_clusterwide.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_clusterwide.py similarity index 60% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_clusterwide.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_clusterwide.py index bda9be3c2..d173b70d4 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_clusterwide.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_clusterwide.py @@ -1,25 +1,23 @@ import os -import time from typing import Dict, List import kubernetes from kubernetes import client -from kubetester import create_or_update_configmap, create_or_update_secret, read_secret -from kubetester.kubetester import KubernetesTester from kubetester.kubetester import fixture as 
yaml_fixture from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase from pytest import fixture, mark from tests.conftest import ( _install_multi_cluster_operator, run_kube_config_creation_tool, ) +from tests.constants import MULTI_CLUSTER_OPERATOR_NAME +from tests.multicluster.conftest import cluster_spec_list -from ..constants import MULTI_CLUSTER_OPERATOR_NAME -from . import prepare_multi_cluster_namespaces -from .conftest import cluster_spec_list, create_namespace +from ..shared import multi_cluster_clusterwide as testhelper + +MDB_RESOURCE = "multi-replica-set" @fixture(scope="module") @@ -44,7 +42,7 @@ def mongodb_multi_a( member_cluster_names: List[str], custom_mdb_version: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", mdba_ns) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, mdba_ns) resource.set_version(custom_mdb_version) resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) @@ -61,7 +59,7 @@ def mongodb_multi_b( member_cluster_names: List[str], custom_mdb_version: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", mdbb_ns) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, mdbb_ns) resource.set_version(custom_mdb_version) resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) @@ -75,7 +73,7 @@ def unmanaged_mongodb_multi( unmanaged_mdb_ns: str, member_cluster_names: List[str], ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", unmanaged_mdb_ns) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, unmanaged_mdb_ns) resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) @@ -114,7 +112,7 @@ def install_operator( ) -@mark.e2e_multi_cluster_specific_namespaces +@mark.e2e_mongodbmulticluster_multi_cluster_specific_namespaces def test_create_namespaces( namespace: str, mdba_ns: str, @@ -125,38 +123,19 @@ def test_create_namespaces( evergreen_task_id: str, multi_cluster_operator_installation_config: Dict[str, str], ): - image_pull_secret_name = multi_cluster_operator_installation_config["registry.imagePullSecrets"] - image_pull_secret_data = read_secret(namespace, image_pull_secret_name) - - create_namespace( - central_cluster_client, - member_cluster_clients, - evergreen_task_id, + testhelper.test_create_namespaces( + namespace, mdba_ns, - image_pull_secret_name, - image_pull_secret_data, - ) - - create_namespace( - central_cluster_client, - member_cluster_clients, - evergreen_task_id, mdbb_ns, - image_pull_secret_name, - image_pull_secret_data, - ) - - create_namespace( + unmanaged_mdb_ns, central_cluster_client, member_cluster_clients, evergreen_task_id, - unmanaged_mdb_ns, - image_pull_secret_name, - image_pull_secret_data, + multi_cluster_operator_installation_config, ) -@mark.e2e_multi_cluster_specific_namespaces +@mark.e2e_mongodbmulticluster_multi_cluster_specific_namespaces def test_prepare_namespace( multi_cluster_operator_installation_config: Dict[str, str], 
member_cluster_clients: List[MultiClusterClient], @@ -164,32 +143,22 @@ def test_prepare_namespace( mdba_ns: str, mdbb_ns: str, ): - prepare_multi_cluster_namespaces( - mdba_ns, - multi_cluster_operator_installation_config, - member_cluster_clients, - central_cluster_name, - ) - - prepare_multi_cluster_namespaces( - mdbb_ns, - multi_cluster_operator_installation_config, - member_cluster_clients, - central_cluster_name, + testhelper.test_prepare_namespace( + multi_cluster_operator_installation_config, member_cluster_clients, central_cluster_name, mdba_ns, mdbb_ns ) -@mark.e2e_multi_cluster_clusterwide +@mark.e2e_mongodbmulticluster_multi_cluster_clusterwide def test_deploy_operator(multi_cluster_operator_clustermode: Operator): - multi_cluster_operator_clustermode.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator_clustermode) -@mark.e2e_multi_cluster_specific_namespaces +@mark.e2e_mongodbmulticluster_multi_cluster_specific_namespaces def test_deploy_operator(install_operator: Operator): - install_operator.assert_is_running() + testhelper.test_deploy_operator(install_operator) -@mark.e2e_multi_cluster_specific_namespaces +@mark.e2e_mongodbmulticluster_multi_cluster_specific_namespaces def test_copy_configmap_and_secret_across_ns( namespace: str, central_cluster_client: client.ApiClient, @@ -197,35 +166,21 @@ def test_copy_configmap_and_secret_across_ns( mdba_ns: str, mdbb_ns: str, ): - data = KubernetesTester.read_configmap(namespace, "my-project", api_client=central_cluster_client) - data["projectName"] = mdba_ns - create_or_update_configmap(mdba_ns, "my-project", data, api_client=central_cluster_client) - - data["projectName"] = mdbb_ns - create_or_update_configmap(mdbb_ns, "my-project", data, api_client=central_cluster_client) - - data = read_secret(namespace, "my-credentials", api_client=central_cluster_client) - create_or_update_secret(mdba_ns, "my-credentials", data, api_client=central_cluster_client) - create_or_update_secret(mdbb_ns, "my-credentials", data, api_client=central_cluster_client) + testhelper.test_copy_configmap_and_secret_across_ns( + namespace, central_cluster_client, multi_cluster_operator_installation_config, mdba_ns, mdbb_ns + ) -@mark.e2e_multi_cluster_specific_namespaces +@mark.e2e_mongodbmulticluster_multi_cluster_specific_namespaces def test_create_mongodb_multi_nsa(mongodb_multi_a: MongoDBMulti): - mongodb_multi_a.assert_reaches_phase(Phase.Running, timeout=800) + testhelper.test_create_mongodb_multi_nsa(mongodb_multi_a) -@mark.e2e_multi_cluster_specific_namespaces +@mark.e2e_mongodbmulticluster_multi_cluster_specific_namespaces def test_create_mongodb_multi_nsb(mongodb_multi_b: MongoDBMulti): - mongodb_multi_b.assert_reaches_phase(Phase.Running, timeout=800) + testhelper.test_create_mongodb_multi_nsb(mongodb_multi_b) -@mark.e2e_multi_cluster_specific_namespaces +@mark.e2e_mongodbmulticluster_multi_cluster_specific_namespaces def test_create_mongodb_multi_unmanaged(unmanaged_mongodb_multi: MongoDBMulti): - """ - For an unmanaged resource, the status should not be updated! 
- """ - for i in range(10): - time.sleep(5) - - unmanaged_mongodb_multi.reload() - assert "status" not in unmanaged_mongodb_multi + testhelper.test_create_mongodb_multi_unmanaged(unmanaged_mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_dr_connect.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_dr_connect.py new file mode 100644 index 000000000..863499e1e --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_dr_connect.py @@ -0,0 +1,69 @@ +from typing import Dict + +import kubernetes +import pytest +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.operator import Operator + +from ..shared import multi_cluster_dr_connect as testhelper + +MDB_RESOURCE = "multi-replica-set" + + +# this test is intended to run locally, using telepresence. Make sure to configure the cluster_context to api-server mapping +# in the "cluster_host_mapping" fixture before running it. It is intented to be run locally with the command: make e2e-telepresence test=e2e_mongodbmulticluster_multi_cluster_dr local=true +@pytest.fixture(scope="module") +def mongodb_multi(central_cluster_client: kubernetes.client.ApiClient, namespace: str) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi-dr.yaml"), MDB_RESOURCE, namespace) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + # return resource.load() + return resource.create() + + +@pytest.fixture(scope="module") +def mongodb_multi_collection(mongodb_multi: MongoDBMulti): + collection = mongodb_multi.tester().client["testdb"] + return collection["testcollection"] + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_dr +def test_create_kube_config_file(cluster_clients: Dict): + testhelper.test_create_kube_config_file(cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_dr +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_dr +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_dr +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti): + testhelper.test_replica_set_is_reachable(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_dr +@pytest.mark.flaky(reruns=100, reruns_delay=6) +def test_add_test_data(mongodb_multi_collection): + testhelper.test_add_test_data(mongodb_multi_collection) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_dr +def test_delete_member_3_cluster(): + testhelper.test_delete_member_3_cluster() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_dr +def test_replica_set_is_reachable_after_deletetion(mongodb_multi: MongoDBMulti): + testhelper.test_replica_set_is_reachable_after_deletetion(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_dr +def test_add_test_data_after_deletion(mongodb_multi_collection, capsys): + testhelper.test_add_test_data_after_deletion(mongodb_multi_collection, capsys) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_enable_tls.py 
b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_enable_tls.py similarity index 64% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_enable_tls.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_enable_tls.py index 86ae862d7..ef31eaff6 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_enable_tls.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_enable_tls.py @@ -1,29 +1,25 @@ from typing import List import kubernetes -from kubetester import read_secret from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs from kubetester.kubetester import ensure_ent_version from kubetester.kubetester import fixture as yaml_fixture from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase from pytest import fixture, mark from tests.multicluster.conftest import cluster_spec_list +from ..shared import multi_cluster_enable_tls as testhelper + CERT_SECRET_PREFIX = "clustercert" MDB_RESOURCE = "multi-cluster-replica-set" BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" -BUNDLE_PEM_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert-pem" -USER_NAME = "my-user-1" -PASSWORD_SECRET_NAME = "mms-user-1-password" -USER_PASSWORD = "my-password" @fixture(scope="module") def mongodb_multi_unmarshalled(namespace: str, member_cluster_names, custom_mdb_version: str) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) resource.set_version(ensure_ent_version(custom_mdb_version)) resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) return resource @@ -57,17 +53,17 @@ def mongodb_multi( return resource.create() -@mark.e2e_multi_cluster_enable_tls +@mark.e2e_mongodbmulticluster_multi_cluster_enable_tls def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) -@mark.e2e_multi_cluster_enable_tls +@mark.e2e_mongodbmulticluster_multi_cluster_enable_tls def test_create_mongodb_multi(mongodb_multi: MongoDBMulti, namespace: str): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + testhelper.test_create_mongodb_multi(mongodb_multi, namespace) -@mark.e2e_multi_cluster_enable_tls +@mark.e2e_mongodbmulticluster_multi_cluster_enable_tls def test_enabled_tls_mongodb_multi( mongodb_multi: MongoDBMulti, namespace: str, @@ -75,20 +71,6 @@ def test_enabled_tls_mongodb_multi( multi_cluster_issuer_ca_configmap: str, member_cluster_clients: List[MultiClusterClient], ): - mongodb_multi.load() - mongodb_multi["spec"]["security"] = { - "certsSecretPrefix": CERT_SECRET_PREFIX, - "tls": { - "ca": multi_cluster_issuer_ca_configmap, - }, - } - mongodb_multi.update() - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1300) - - # assert the presence of the generated pem certificates in each member cluster - for client in member_cluster_clients: - read_secret( - namespace=namespace, - name=BUNDLE_PEM_SECRET_NAME, - api_client=client.api_client, - ) + testhelper.test_enabled_tls_mongodb_multi( + mongodb_multi, 
namespace, server_certs, multi_cluster_issuer_ca_configmap, member_cluster_clients + ) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_ldap.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_ldap.py similarity index 57% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_ldap.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_ldap.py index af460bbbe..bdd3f69fa 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_ldap.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_ldap.py @@ -1,29 +1,25 @@ -import time from typing import Dict, List import kubernetes -from kubetester import create_secret, wait_until -from kubetester.automation_config_tester import AutomationConfigTester +from kubetester import create_secret from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs -from kubetester.kubetester import KubernetesTester, ensure_ent_version +from kubetester.kubetester import ensure_ent_version from kubetester.kubetester import fixture as yaml_fixture from kubetester.kubetester import skip_if_static_containers -from kubetester.ldap import LDAP_AUTHENTICATION_MECHANISM, LDAPUser, OpenLDAP +from kubetester.ldap import LDAPUser, OpenLDAP from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongodb_user import MongoDBUser, Role, generic_user from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase from pytest import fixture, mark from tests.conftest import get_multi_cluster_operator_installation_config from tests.multicluster.conftest import cluster_spec_list +from ..shared import multi_cluster_ldap as testhelper + CERT_SECRET_PREFIX = "clustercert" MDB_RESOURCE = "multi-replica-set-ldap" BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" -USER_NAME = "mms-user-1" -PASSWORD = "my-password" -LDAP_NAME = "openldap" @fixture(scope="module") @@ -39,7 +35,7 @@ def mongodb_multi_unmarshalled( member_cluster_names, custom_mdb_version: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) # This test has always been tested with 5.0.5-ent. After trying to unify its variant and upgrading it # to MDB 6 we realized that our EVG hosts contain outdated docker and seccomp libraries in the host which # cause MDB process to exit. It might be a good idea to try uncommenting it after migrating to newer EVG hosts. @@ -168,179 +164,86 @@ def user_ldap( @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_mongodb_multi_pending(mongodb_multi: MongoDBMulti): - """ - This function tests CLOUDP-229222. The resource needs to enter the "Pending" state and without the automatic - recovery, it would stay like this forever (since we wouldn't push the new AC with a fix). 
- """ - mongodb_multi.assert_reaches_phase(Phase.Pending, timeout=100) + testhelper.test_mongodb_multi_pending(mongodb_multi) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_turn_tls_on_CLOUDP_229222(mongodb_multi: MongoDBMulti): - """ - This function tests CLOUDP-229222. The user attempts to fix the AutomationConfig. - Before updating the AutomationConfig, we need to ensure the operator pushed the wrong one to Ops Manager. - """ - - def wait_for_ac_exists() -> bool: - ac = mongodb_multi.get_automation_config_tester().automation_config - try: - _ = ac["ldap"]["transportSecurity"] - _ = ac["version"] - return True - except KeyError: - return False - - wait_until(wait_for_ac_exists, timeout=200) - current_version = mongodb_multi.get_automation_config_tester().automation_config["version"] - - def wait_for_ac_pushed() -> bool: - ac = mongodb_multi.get_automation_config_tester().automation_config - try: - transport_security = ac["ldap"]["transportSecurity"] - new_version = ac["version"] - if transport_security != "none": - return False - if new_version <= current_version: - return False - return True - except KeyError: - return False - - wait_until(wait_for_ac_pushed, timeout=500) - - resource = mongodb_multi.load() - - resource["spec"]["security"]["authentication"]["ldap"]["transportSecurity"] = "tls" - resource.update() + testhelper.test_turn_tls_on_CLOUDP_229222(mongodb_multi) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_multi_replicaset_CLOUDP_229222(mongodb_multi: MongoDBMulti): - """ - This function tests CLOUDP-229222. The recovery mechanism kicks in and pushes Automation Config. The ReplicaSet - goes into running state. - """ - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1900) + testhelper.test_multi_replicaset_CLOUDP_229222(mongodb_multi) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_restore_mongodb_multi_ldap_configuration(mongodb_multi: MongoDBMulti): - """ - This function restores the initial desired security configuration to carry on with the next tests normally. 
- """ - resource = mongodb_multi.load() - - resource["spec"]["security"]["authentication"]["modes"] = ["LDAP"] - resource["spec"]["security"]["authentication"]["ldap"]["transportSecurity"] = "tls" - resource["spec"]["security"]["authentication"]["agents"]["mode"] = "LDAP" - - resource.update() - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + testhelper.test_restore_mongodb_multi_ldap_configuration(mongodb_multi) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_create_ldap_user(mongodb_multi: MongoDBMulti, user_ldap: MongoDBUser): - user_ldap.assert_reaches_phase(Phase.Updated) - ac = AutomationConfigTester(KubernetesTester.get_automation_config()) - ac.assert_authentication_mechanism_enabled(LDAP_AUTHENTICATION_MECHANISM, active_auth_mechanism=True) - ac.assert_expected_users(1) + testhelper.test_create_ldap_user(mongodb_multi, user_ldap) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_ldap_user_created_and_can_authenticate(mongodb_multi: MongoDBMulti, user_ldap: MongoDBUser, ca_path: str): - tester = mongodb_multi.tester() - tester.assert_ldap_authentication( - username=user_ldap["spec"]["username"], - password=user_ldap.password, - tls_ca_file=ca_path, - attempts=10, - ) + testhelper.test_ldap_user_created_and_can_authenticate(mongodb_multi, user_ldap, ca_path) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_ops_manager_state_correctly_updated(mongodb_multi: MongoDBMulti, user_ldap: MongoDBUser): - expected_roles = { - ("admin", "clusterAdmin"), - ("admin", "readWriteAnyDatabase"), - ("admin", "dbAdminAnyDatabase"), - } - ac = AutomationConfigTester(KubernetesTester.get_automation_config()) - ac.assert_expected_users(1) - ac.assert_has_user(user_ldap["spec"]["username"]) - ac.assert_user_has_roles(user_ldap["spec"]["username"], expected_roles) - ac.assert_authentication_mechanism_enabled("PLAIN", active_auth_mechanism=True) - ac.assert_authentication_enabled(expected_num_deployment_auth_mechanisms=1) - - assert "userCacheInvalidationInterval" in ac.automation_config["ldap"] - assert "timeoutMS" in ac.automation_config["ldap"] - assert ac.automation_config["ldap"]["userCacheInvalidationInterval"] == 60 - assert ac.automation_config["ldap"]["timeoutMS"] == 12345 + testhelper.test_ops_manager_state_correctly_updated(mongodb_multi, user_ldap) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_deployment_is_reachable_with_ldap_agent(mongodb_multi: MongoDBMulti): - tester = mongodb_multi.tester() - tester.assert_deployment_reachable() + testhelper.test_deployment_is_reachable_with_ldap_agent(mongodb_multi) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti, member_cluster_names): - mongodb_multi.reload() - mongodb_multi["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - mongodb_multi.update() - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + testhelper.test_scale_mongodb_multi(mongodb_multi, member_cluster_names) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_new_ldap_user_can_authenticate_after_scaling( mongodb_multi: 
MongoDBMulti, user_ldap: MongoDBUser, ca_path: str ): - tester = mongodb_multi.tester() - tester.assert_ldap_authentication( - username=user_ldap["spec"]["username"], - password=user_ldap.password, - tls_ca_file=ca_path, - attempts=10, - ) + testhelper.test_new_ldap_user_can_authenticate_after_scaling(mongodb_multi, user_ldap, ca_path) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_disable_agent_auth(mongodb_multi: MongoDBMulti): - mongodb_multi.reload() - mongodb_multi["spec"]["security"]["authentication"]["enabled"] = False - mongodb_multi["spec"]["security"]["authentication"]["agents"]["enabled"] = False - mongodb_multi.update() - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + testhelper.test_disable_agent_auth(mongodb_multi) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_mongodb_multi_connectivity_with_no_auth(mongodb_multi: MongoDBMulti): - tester = mongodb_multi.tester() - tester.assert_connectivity() + testhelper.test_mongodb_multi_connectivity_with_no_auth(mongodb_multi) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap def test_deployment_is_reachable_with_no_auth(mongodb_multi: MongoDBMulti): - tester = mongodb_multi.tester() - tester.assert_deployment_reachable() + testhelper.test_deployment_is_reachable_with_no_auth(mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_ldap_custom_roles.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_ldap_custom_roles.py similarity index 71% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_ldap_custom_roles.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_ldap_custom_roles.py index 6f472e6f2..89f1937f8 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_ldap_custom_roles.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_ldap_custom_roles.py @@ -2,31 +2,27 @@ import kubernetes from kubetester import create_secret -from kubetester.automation_config_tester import AutomationConfigTester from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs -from kubetester.kubetester import KubernetesTester, ensure_ent_version from kubetester.kubetester import fixture as yaml_fixture from kubetester.kubetester import skip_if_static_containers -from kubetester.ldap import LDAP_AUTHENTICATION_MECHANISM, LDAPUser, OpenLDAP +from kubetester.ldap import LDAPUser, OpenLDAP from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongodb_user import MongoDBUser, generic_user from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase from pytest import fixture, mark from tests.multicluster.conftest import cluster_spec_list +from ..shared import multi_cluster_ldap_custom_roles as testhelper + CERT_SECRET_PREFIX = "clustercert" MDB_RESOURCE = "multi-replica-set-ldap" BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" -USER_NAME = "mms-user-1" -PASSWORD = "my-password" -LDAP_NAME = "openldap" @fixture(scope="module") def mongodb_multi_unmarshalled(namespace: str, member_cluster_names, custom_mdb_version: str) -> MongoDBMulti: 
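[editor's note] Nearly every fixture in these files shapes spec.clusterSpecList with cluster_spec_list from tests/multicluster/conftest.py. The helper itself sits outside this diff; given that the recovery test above appends raw entries of the form {"clusterName": ..., "members": ...} to the same list, a plausible, entirely hypothetical reconstruction is:

    from typing import List

    # Hypothetical reconstruction; the real helper lives in tests/multicluster/conftest.py
    def cluster_spec_list(member_cluster_names: List[str], members: List[int]) -> List[dict]:
        # pairs each member cluster with its replica count, e.g.
        # cluster_spec_list(["c1", "c2", "c3"], [2, 1, 2])
        return [{"clusterName": name, "members": count} for name, count in zip(member_cluster_names, members)]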
- resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) # This test has always been tested with 5.0.5-ent. After trying to unify its variant and upgrading it # to MDB 6 we realized that our EVG hosts contain outdated docker and seccomp libraries in the host which @@ -151,84 +147,44 @@ def user_ldap( @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap_custom_roles +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap_custom_roles def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap_custom_roles +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap_custom_roles def test_create_mongodb_multi_with_ldap(mongodb_multi: MongoDBMulti): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + testhelper.test_create_mongodb_multi_with_ldap(mongodb_multi) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap_custom_roles +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap_custom_roles def test_create_ldap_user(mongodb_multi: MongoDBMulti, user_ldap: MongoDBUser): - user_ldap.assert_reaches_phase(Phase.Updated) - ac = AutomationConfigTester(KubernetesTester.get_automation_config()) - ac.assert_authentication_mechanism_enabled(LDAP_AUTHENTICATION_MECHANISM, active_auth_mechanism=False) - ac.assert_expected_users(1) + testhelper.test_create_ldap_user(mongodb_multi, user_ldap) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap_custom_roles +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap_custom_roles def test_ldap_user_can_write_to_database(mongodb_multi: MongoDBMulti, user_ldap: MongoDBUser, ca_path: str): - tester = mongodb_multi.tester() - tester.assert_ldap_authentication( - username=user_ldap["spec"]["username"], - password=user_ldap.password, - tls_ca_file=ca_path, - db="foo", - collection="foo", - attempts=10, - ) + testhelper.test_ldap_user_can_write_to_database(mongodb_multi, user_ldap, ca_path) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap_custom_roles +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap_custom_roles @mark.xfail(reason="The user should not be able to write to a database/collection it is not authorized to write on") def test_ldap_user_can_write_to_other_collection(mongodb_multi: MongoDBMulti, user_ldap: MongoDBUser, ca_path: str): - tester = mongodb_multi.tester() - tester.assert_ldap_authentication( - username=user_ldap["spec"]["username"], - password=user_ldap.password, - tls_ca_file=ca_path, - db="foo", - collection="foo2", - attempts=10, - ) + testhelper.test_ldap_user_can_write_to_other_collection(mongodb_multi, user_ldap, ca_path) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap_custom_roles +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap_custom_roles @mark.xfail(reason="The user should not be able to write to a database/collection it is not authorized to write on") def test_ldap_user_can_write_to_other_database(mongodb_multi: MongoDBMulti, user_ldap: MongoDBUser, ca_path: str): - tester = mongodb_multi.tester() - tester.assert_ldap_authentication( - username=user_ldap["spec"]["username"], - password=user_ldap.password, - tls_ca_file=ca_path, - db="foo2", - collection="foo", - attempts=10, - ) + testhelper.test_ldap_user_can_write_to_other_database(mongodb_multi, 
user_ldap, ca_path) @skip_if_static_containers -@mark.e2e_multi_cluster_with_ldap_custom_roles +@mark.e2e_mongodbmulticluster_multi_cluster_with_ldap_custom_roles def test_automation_config_has_roles(mongodb_multi: MongoDBMulti): - tester = mongodb_multi.get_automation_config_tester() - role = { - "role": "cn=users,ou=groups,dc=example,dc=org", - "db": "admin", - "privileges": [ - {"actions": ["insert"], "resource": {"collection": "foo", "db": "foo"}}, - { - "actions": ["insert", "find"], - "resource": {"collection": "", "db": "admin"}, - }, - ], - "authenticationRestrictions": [], - } - tester.assert_expected_role(role_index=0, expected_value=role) + testhelper.test_automation_config_has_roles(mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_oidc_m2m_group.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_oidc_m2m_group.py similarity index 57% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_oidc_m2m_group.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_oidc_m2m_group.py index ef8a2c582..ccaae8106 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_oidc_m2m_group.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_oidc_m2m_group.py @@ -2,15 +2,14 @@ import kubetester.oidc as oidc import pytest from kubetester import try_load -from kubetester.automation_config_tester import AutomationConfigTester -from kubetester.kubetester import KubernetesTester, ensure_ent_version +from kubetester.kubetester import KubernetesTester from kubetester.kubetester import fixture as yaml_fixture -from kubetester.mongodb import MongoDB, Phase -from kubetester.mongodb_multi import MongoDBMulti, MultiClusterClient -from kubetester.mongotester import ReplicaSetTester +from kubetester.mongodb_multi import MongoDBMulti from kubetester.operator import Operator from pytest import fixture +from ..shared import multi_cluster_oidc_m2m_group as testhelper + MDB_RESOURCE = "oidc-multi-replica-set" @@ -21,7 +20,9 @@ def mongodb_multi( member_cluster_names, custom_mdb_version: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("oidc/mongodb-multi-m2m-group.yaml"), MDB_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml( + yaml_fixture("oidc/mongodbmulticluster-multi-m2m-group.yaml"), MDB_RESOURCE, namespace + ) if try_load(resource): return resource @@ -38,21 +39,16 @@ def mongodb_multi( return resource.update() -@pytest.mark.e2e_multi_cluster_oidc_m2m_group +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_oidc_m2m_group class TestOIDCMultiCluster(KubernetesTester): def test_deploy_operator(self, multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.TestOIDCMultiCluster.test_deploy_operator(self, multi_cluster_operator) def test_create_oidc_replica_set(self, mongodb_multi: MongoDBMulti): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + testhelper.TestOIDCMultiCluster.test_create_oidc_replica_set(self, mongodb_multi) def test_assert_connectivity(self, mongodb_multi: MongoDBMulti): - tester = mongodb_multi.tester() - tester.assert_oidc_authentication() + testhelper.TestOIDCMultiCluster.test_assert_connectivity(self, mongodb_multi) def test_ops_manager_state_updated_correctly(self, mongodb_multi: MongoDBMulti): - tester = 
mongodb_multi.get_automation_config_tester() - tester.assert_authentication_mechanism_enabled("MONGODB-OIDC", active_auth_mechanism=False) - tester.assert_authentication_enabled(2) - tester.assert_expected_users(0) - tester.assert_authoritative_set(True) + testhelper.TestOIDCMultiCluster.test_ops_manager_state_updated_correctly(self, mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_oidc_m2m_user.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_oidc_m2m_user.py similarity index 65% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_oidc_m2m_user.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_oidc_m2m_user.py index 3faa266f4..c5fc0b9bd 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_oidc_m2m_user.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_oidc_m2m_user.py @@ -2,16 +2,15 @@ import kubetester.oidc as oidc import pytest from kubetester import try_load -from kubetester.automation_config_tester import AutomationConfigTester -from kubetester.kubetester import KubernetesTester, ensure_ent_version +from kubetester.kubetester import KubernetesTester from kubetester.kubetester import fixture as yaml_fixture -from kubetester.mongodb import MongoDB, Phase from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongodb_user import MongoDBUser -from kubetester.mongotester import ReplicaSetTester from kubetester.operator import Operator from pytest import fixture +from ..shared import multi_cluster_oidc_m2m_user as testhelper + MDB_RESOURCE = "oidc-multi-replica-set" @@ -22,7 +21,9 @@ def mongodb_multi( member_cluster_names, custom_mdb_version: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("oidc/mongodb-multi-m2m-user.yaml"), MDB_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml( + yaml_fixture("oidc/mongodbmulticluster-multi-m2m-user.yaml"), MDB_RESOURCE, namespace + ) if try_load(resource): return resource @@ -49,24 +50,19 @@ def oidc_user(namespace) -> MongoDBUser: return resource.update() -@pytest.mark.e2e_multi_cluster_oidc_m2m_user +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_oidc_m2m_user class TestOIDCMultiCluster(KubernetesTester): def test_deploy_operator(self, multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.TestOIDCMultiCluster.test_deploy_operator(self, multi_cluster_operator) def test_create_oidc_replica_set(self, mongodb_multi: MongoDBMulti): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + testhelper.TestOIDCMultiCluster.test_create_oidc_replica_set(self, mongodb_multi) def test_create_user(self, oidc_user: MongoDBUser): - oidc_user.assert_reaches_phase(Phase.Updated, timeout=800) + testhelper.TestOIDCMultiCluster.test_create_user(self, oidc_user) def test_assert_connectivity(self, mongodb_multi: MongoDBMulti): - tester = mongodb_multi.tester() - tester.assert_oidc_authentication() + testhelper.TestOIDCMultiCluster.test_assert_connectivity(self, mongodb_multi) def test_ops_manager_state_updated_correctly(self, mongodb_multi: MongoDBMulti): - tester = mongodb_multi.get_automation_config_tester() - tester.assert_authentication_mechanism_enabled("MONGODB-OIDC", active_auth_mechanism=False) - tester.assert_authentication_enabled(2) - tester.assert_expected_users(1) - 
tester.assert_authoritative_set(True) + testhelper.TestOIDCMultiCluster.test_ops_manager_state_updated_correctly(self, mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_pvc_resize.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_pvc_resize.py new file mode 100644 index 000000000..b88229e62 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_pvc_resize.py @@ -0,0 +1,55 @@ +from typing import List + +import kubernetes +import pytest +from kubetester import try_load +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_pvc_resize as testhelper + +RESOURCE_NAME = "multi-replica-set-pvc-resize" + + +@pytest.fixture(scope="module") +def mongodb_multi( + namespace: str, + multi_cluster_issuer_ca_configmap: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: list[str], +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml( + yaml_fixture("mongodbmulticluster-multi-pvc-resize.yaml"), RESOURCE_NAME, namespace + ) + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + try_load(resource) + + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + return resource + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_pvc_resize +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_pvc_resize +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_pvc_resize +def test_mongodb_multi_resize_pvc_state_changes(mongodb_multi: MongoDBMulti): + testhelper.test_mongodb_multi_resize_pvc_state_changes(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_pvc_resize +def test_mongodb_multi_resize_finished( + mongodb_multi: MongoDBMulti, namespace: str, member_cluster_clients: List[MultiClusterClient] +): + testhelper.test_mongodb_multi_resize_finished(mongodb_multi, namespace, member_cluster_clients) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_recover_clusterwide.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_recover_clusterwide.py new file mode 100644 index 000000000..aec36d2cb --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_recover_clusterwide.py @@ -0,0 +1,236 @@ +import os +from typing import Dict, List + +import kubernetes +from kubeobject import CustomObject +from kubernetes import client +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from pytest import fixture, mark +from tests.conftest import ( + _install_multi_cluster_operator, + run_kube_config_creation_tool, +) +from tests.constants import MULTI_CLUSTER_OPERATOR_NAME, 
OPERATOR_NAME +from tests.multicluster.conftest import ( + cluster_spec_list, +) + +from ..shared import multi_cluster_recover_clusterwide as testhelper + +MDB_RESOURCE = "multi-replica-set" + + +@fixture(scope="module") +def mdba_ns(namespace: str): + return "{}-mdb-ns-a".format(namespace) + + +@fixture(scope="module") +def mdbb_ns(namespace: str): + return "{}-mdb-ns-b".format(namespace) + + +@fixture(scope="module") +def mongodb_multi_a( + central_cluster_client: kubernetes.client.ApiClient, + mdba_ns: str, + member_cluster_names: List[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, mdba_ns) + resource.set_version(custom_mdb_version) + + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + resource.update() + return resource + + +@fixture(scope="module") +def mongodb_multi_b( + central_cluster_client: kubernetes.client.ApiClient, + mdbb_ns: str, + member_cluster_names: List[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, mdbb_ns) + resource.set_version(custom_mdb_version) + + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + resource.update() + return resource + + +@fixture(scope="module") +def install_operator( + namespace: str, + central_cluster_name: str, + multi_cluster_operator_installation_config: Dict[str, str], + central_cluster_client: client.ApiClient, + member_cluster_clients: List[MultiClusterClient], + member_cluster_names: List[str], + mdba_ns: str, + mdbb_ns: str, +) -> Operator: + os.environ["HELM_KUBECONTEXT"] = central_cluster_name + member_cluster_namespaces = mdba_ns + "," + mdbb_ns + run_kube_config_creation_tool( + member_cluster_names, + namespace, + namespace, + member_cluster_names, + True, + service_account_name=MULTI_CLUSTER_OPERATOR_NAME, + operator_name=OPERATOR_NAME, + ) + + return _install_multi_cluster_operator( + namespace, + multi_cluster_operator_installation_config, + central_cluster_client, + member_cluster_clients, + { + "operator.deployment_name": MULTI_CLUSTER_OPERATOR_NAME, + "operator.name": MULTI_CLUSTER_OPERATOR_NAME, + "operator.createOperatorServiceAccount": "false", + "operator.watchNamespace": member_cluster_namespaces, + "multiCluster.performFailOver": "false", + }, + central_cluster_name, + operator_name=MULTI_CLUSTER_OPERATOR_NAME, + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_label_operator_namespace(namespace: str, central_cluster_client: kubernetes.client.ApiClient): + testhelper.test_label_operator_namespace(namespace, central_cluster_client) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_create_namespaces( + namespace: str, + mdba_ns: str, + mdbb_ns: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_clients: List[MultiClusterClient], + evergreen_task_id: str, + multi_cluster_operator_installation_config: Dict[str, str], +): + testhelper.test_create_namespaces( + namespace, + mdba_ns, + mdbb_ns, + central_cluster_client, + member_cluster_clients, + evergreen_task_id, + multi_cluster_operator_installation_config, + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def 
test_create_service_entry(service_entries: List[CustomObject]): + testhelper.test_create_service_entry(service_entries) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_delete_cluster_role_and_binding( + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_delete_cluster_role_and_binding(central_cluster_client, member_cluster_clients) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_deploy_operator(install_operator: Operator): + testhelper.test_deploy_operator(install_operator) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_prepare_namespace( + multi_cluster_operator_installation_config: Dict[str, str], + member_cluster_clients: List[MultiClusterClient], + central_cluster_name: str, + mdba_ns: str, + mdbb_ns: str, +): + testhelper.test_prepare_namespace( + multi_cluster_operator_installation_config, member_cluster_clients, central_cluster_name, mdba_ns, mdbb_ns + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_copy_configmap_and_secret_across_ns( + namespace: str, + central_cluster_client: client.ApiClient, + mdba_ns: str, + mdbb_ns: str, +): + testhelper.test_copy_configmap_and_secret_across_ns(namespace, central_cluster_client, mdba_ns, mdbb_ns) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_create_mongodb_multi_nsa_nsb(mongodb_multi_a: MongoDBMulti, mongodb_multi_b: MongoDBMulti): + testhelper.test_create_mongodb_multi_nsa_nsb(mongodb_multi_a, mongodb_multi_b) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_update_service_entry_block_failed_cluster_traffic( + namespace: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: List[str], +): + testhelper.test_update_service_entry_block_failed_cluster_traffic( + namespace, central_cluster_client, member_cluster_names + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_delete_database_statefulsets_in_failed_cluster( + mongodb_multi_a: MongoDBMulti, + mongodb_multi_b: MongoDBMulti, + mdba_ns: str, + mdbb_ns: str, + member_cluster_names: list[str], + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_delete_database_statefulsets_in_failed_cluster( + mongodb_multi_a, mongodb_multi_b, mdba_ns, mdbb_ns, member_cluster_names, member_cluster_clients + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_mongodb_multi_nsa_enters_failed_stated(mongodb_multi_a: MongoDBMulti): + testhelper.test_mongodb_multi_nsa_enters_failed_stated(mongodb_multi_a) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_mongodb_multi_nsb_enters_failed_stated(mongodb_multi_b: MongoDBMulti): + testhelper.test_mongodb_multi_nsb_enters_failed_stated(mongodb_multi_b) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_recover_operator_remove_cluster( + member_cluster_names: List[str], + namespace: str, + mdba_ns: str, + mdbb_ns: str, + central_cluster_client: kubernetes.client.ApiClient, +): + testhelper.test_recover_operator_remove_cluster( + member_cluster_names, namespace, mdba_ns, mdbb_ns, central_cluster_client + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_mongodb_multi_nsa_recovers_removing_cluster(mongodb_multi_a: MongoDBMulti): + testhelper.test_mongodb_multi_nsa_recovers_removing_cluster(mongodb_multi_a) + + 
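+# NOTE: like the other renamed mongodbmulticluster_* modules in this patch, every
+# test in this file is a thin wrapper: the @mark keeps the task selectable under
+# its new e2e_mongodbmulticluster_* name, while the body delegates to the shared
+# implementation in tests/multicluster/shared.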
+@mark.e2e_mongodbmulticluster_multi_cluster_recover_clusterwide +def test_mongodb_multi_nsb_recovers_removing_cluster(mongodb_multi_b: MongoDBMulti): + testhelper.test_mongodb_multi_nsb_recovers_removing_cluster(mongodb_multi_b) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_recover_network_partition.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_recover_network_partition.py new file mode 100644 index 000000000..e4d2f322a --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_recover_network_partition.py @@ -0,0 +1,92 @@ +from typing import List + +import kubernetes +from kubeobject import CustomObject +from kubernetes import client +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.operator import Operator +from pytest import fixture, mark +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_recover_network_partition as testhelper + +RESOURCE_NAME = "multi-replica-set" + + +@fixture(scope="module") +def mongodb_multi( + central_cluster_client: client.ApiClient, + namespace: str, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), RESOURCE_NAME, namespace) + resource.set_version(custom_mdb_version) + resource["spec"]["persistent"] = False + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + resource.api = client.CustomObjectsApi(central_cluster_client) + + return resource + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_network_partition +def test_label_namespace(namespace: str, central_cluster_client: client.ApiClient): + testhelper.test_label_namespace(namespace, central_cluster_client) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_network_partition +def test_create_service_entry(service_entries: List[CustomObject]): + testhelper.test_create_service_entry(service_entries) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_network_partition +def test_deploy_operator(multi_cluster_operator_manual_remediation: Operator): + testhelper.test_deploy_operator(multi_cluster_operator_manual_remediation) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_network_partition +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_network_partition +def test_update_service_entry_block_failed_cluster_traffic( + namespace: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: List[str], +): + testhelper.test_update_service_entry_block_failed_cluster_traffic( + namespace, central_cluster_client, member_cluster_names + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_network_partition +def test_delete_database_statefulset_in_failed_cluster( + mongodb_multi: MongoDBMulti, + member_cluster_names: list[str], +): + testhelper.test_delete_database_statefulset_in_failed_cluster(mongodb_multi, member_cluster_names) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_network_partition +def test_mongodb_multi_enters_failed_state( + mongodb_multi: MongoDBMulti, + namespace: str, + central_cluster_client: client.ApiClient, +): + 
testhelper.test_mongodb_multi_enters_failed_state(mongodb_multi, namespace, central_cluster_client) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_network_partition +def test_recover_operator_remove_cluster( + member_cluster_names: List[str], + namespace: str, + central_cluster_client: client.ApiClient, +): + testhelper.test_recover_operator_remove_cluster(member_cluster_names, namespace, central_cluster_client) + + +@mark.e2e_mongodbmulticluster_multi_cluster_recover_network_partition +def test_mongodb_multi_recovers_removing_cluster(mongodb_multi: MongoDBMulti, member_cluster_names: List[str]): + testhelper.test_mongodb_multi_recovers_removing_cluster(mongodb_multi, member_cluster_names) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set.py new file mode 100644 index 000000000..607250b58 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set.py @@ -0,0 +1,138 @@ +from typing import Dict, List + +import kubernetes +import pytest +from kubernetes import client +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.conftest import ( + setup_log_rotate_for_agents, +) +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_replica_set as testhelper + +MONGODB_PORT = 30000 +MDB_RESOURCE = "multi-replica-set" + + +@pytest.fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names, + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml( + yaml_fixture("mongodbmulticluster-multi-central-sts-override.yaml"), + MDB_RESOURCE, + namespace, + ) + resource.set_version(custom_mdb_version) + resource["spec"]["persistent"] = False + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + additional_mongod_config = { + "systemLog": {"logAppend": True, "verbosity": 4}, + "operationProfiling": {"mode": "slowOp"}, + "net": {"port": MONGODB_PORT}, + } + + resource["spec"]["additionalMongodConfig"] = additional_mongod_config + setup_log_rotate_for_agents(resource) + + # TODO: incorporate this into the base class. 
+ resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + resource.set_architecture_annotation() + + resource.update() + return resource + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_create_kube_config_file(cluster_clients: Dict, central_cluster_name: str, member_cluster_names: str): + testhelper.test_create_kube_config_file(cluster_clients, central_cluster_name, member_cluster_names) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_statefulset_is_created_across_multiple_clusters( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulset_is_created_across_multiple_clusters(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_pvc_not_created( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + namespace: str, +): + testhelper.test_pvc_not_created(mongodb_multi, member_cluster_clients, namespace) + + +@skip_if_local +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti): + testhelper.test_replica_set_is_reachable(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_statefulset_overrides(mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient]): + testhelper.test_statefulset_overrides(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_headless_service_creation( + mongodb_multi: MongoDBMulti, + namespace: str, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_headless_service_creation(mongodb_multi, namespace, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_mongodb_options(mongodb_multi: MongoDBMulti): + testhelper.test_mongodb_options(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_update_additional_options(mongodb_multi: MongoDBMulti, central_cluster_client: kubernetes.client.ApiClient): + testhelper.test_update_additional_options(mongodb_multi, central_cluster_client) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_mongodb_options_were_updated(mongodb_multi: MongoDBMulti): + testhelper.test_mongodb_options_were_updated(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_delete_member_cluster_sts( + namespace: str, + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_delete_member_cluster_sts(namespace, mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set +def test_cleanup_on_mdbm_delete(mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient]): + testhelper.test_cleanup_on_mdbm_delete(mongodb_multi, member_cluster_clients) + + +def assert_container_in_sts(container_name: str, sts: client.V1StatefulSet): + testhelper.assert_container_in_sts(container_name, sts) diff --git 
a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_deletion.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_deletion.py new file mode 100644 index 000000000..760edc3ad --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_deletion.py @@ -0,0 +1,63 @@ +from typing import List + +import kubernetes +import pytest +from kubetester import try_load +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_replica_set_deletion as testhelper + +MDB_RESOURCE = "multi-replica-set" + + +@pytest.fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) + + if try_load(resource): + return resource + + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + return resource.update() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_deletion +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_deletion +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_deletion +def test_automation_config_has_been_updated(mongodb_multi: MongoDBMulti): + testhelper.test_automation_config_has_been_updated(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_deletion +def test_delete_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_delete_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_deletion +def test_deployment_has_been_removed_from_automation_config(): + testhelper.test_deployment_has_been_removed_from_automation_config() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_deletion +def test_kubernetes_resources_have_been_cleaned_up( + mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient] +): + testhelper.test_kubernetes_resources_have_been_cleaned_up(mongodb_multi, member_cluster_clients) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_ignore_unknown_users.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_ignore_unknown_users.py similarity index 56% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_ignore_unknown_users.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_ignore_unknown_users.py index 6178377ea..363eada59 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_ignore_unknown_users.py +++ 
b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_ignore_unknown_users.py @@ -1,13 +1,14 @@ import kubernetes -from kubetester.automation_config_tester import AutomationConfigTester -from kubetester.kubetester import KubernetesTester from kubetester.kubetester import fixture as yaml_fixture from kubetester.mongodb_multi import MongoDBMulti from kubetester.operator import Operator -from kubetester.phase import Phase from pytest import fixture, mark from tests.multicluster.conftest import cluster_spec_list +from ..shared import multi_cluster_replica_set_ignore_unknown_users as testhelper + +MDB_RESOURCE = "multi-replica-set" + @fixture(scope="module") def mongodb_multi( @@ -18,8 +19,8 @@ def mongodb_multi( ) -> MongoDBMulti: resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi.yaml"), - "multi-replica-set", + yaml_fixture("mongodbmulticluster-multi.yaml"), + MDB_RESOURCE, namespace, ) resource.set_version(custom_mdb_version) @@ -34,26 +35,21 @@ def mongodb_multi( return resource.update() -@mark.e2e_multi_cluster_replica_set_ignore_unknown_users +@mark.e2e_mongodbmulticluster_multi_cluster_replica_set_ignore_unknown_users def test_replica_set(multi_cluster_operator: Operator, mongodb_multi: MongoDBMulti): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + testhelper.test_replica_set(multi_cluster_operator, mongodb_multi) -@mark.e2e_multi_cluster_replica_set_ignore_unknown_users +@mark.e2e_mongodbmulticluster_multi_cluster_replica_set_ignore_unknown_users def test_authoritative_set_false(mongodb_multi: MongoDBMulti): - tester = AutomationConfigTester(KubernetesTester.get_automation_config()) - tester.assert_authoritative_set(False) + testhelper.test_authoritative_set_false(mongodb_multi) -@mark.e2e_multi_cluster_replica_set_ignore_unknown_users +@mark.e2e_mongodbmulticluster_multi_cluster_replica_set_ignore_unknown_users def test_set_ignore_unknown_users_false(mongodb_multi: MongoDBMulti): - mongodb_multi.load() - mongodb_multi["spec"]["security"]["authentication"]["ignoreUnknownUsers"] = False - mongodb_multi.update() - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + testhelper.test_set_ignore_unknown_users_false(mongodb_multi) -@mark.e2e_multi_cluster_replica_set_ignore_unknown_users +@mark.e2e_mongodbmulticluster_multi_cluster_replica_set_ignore_unknown_users def test_authoritative_set_true(mongodb_multi: MongoDBMulti): - tester = AutomationConfigTester(KubernetesTester.get_automation_config()) - tester.assert_authoritative_set(True) + testhelper.test_authoritative_set_true(mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_member_options.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_member_options.py new file mode 100644 index 000000000..c7e29d666 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_member_options.py @@ -0,0 +1,125 @@ +from typing import Dict + +import kubernetes +import pytest +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_replica_set_member_options as testhelper + +MDB_RESOURCE = "multi-replica-set" + + 
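+# The member options below follow the [2, 1, 2] member distribution: one dict
+# per member with votes (an int), priority (a stringified float) and free-form
+# tags; the tests below then verify these settings reach the automation config.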
+@pytest.fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names, + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml( + yaml_fixture("mongodbmulticluster-multi.yaml"), + MDB_RESOURCE, + namespace, + ) + resource.set_version(custom_mdb_version) + member_options = [ + [ + { + "votes": 1, + "priority": "0.3", + "tags": { + "cluster": "cluster-1", + "region": "weur", + }, + }, + { + "votes": 1, + "priority": "0.7", + "tags": { + "cluster": "cluster-1", + "region": "eeur", + }, + }, + ], + [ + { + "votes": 1, + "priority": "0.2", + "tags": { + "cluster": "cluster-2", + "region": "apac", + }, + }, + ], + [ + { + "votes": 1, + "priority": "1.3", + "tags": { + "cluster": "cluster-3", + "region": "nwus", + }, + }, + { + "votes": 1, + "priority": "2.7", + "tags": { + "cluster": "cluster-3", + "region": "seus", + }, + }, + ], + ] + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2], member_options) + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + resource.update() + return resource + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_member_options +def test_create_kube_config_file(cluster_clients: Dict, central_cluster_name: str, member_cluster_names: str): + testhelper.test_create_kube_config_file(cluster_clients, central_cluster_name, member_cluster_names) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_member_options +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_member_options +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_member_options +def test_mongodb_multi_member_options_ac(mongodb_multi: MongoDBMulti): + testhelper.test_mongodb_multi_member_options_ac(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_member_options +def test_mongodb_multi_update_member_options(mongodb_multi: MongoDBMulti): + testhelper.test_mongodb_multi_update_member_options(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_member_options +def test_mongodb_multi_set_member_votes_to_0(mongodb_multi: MongoDBMulti): + testhelper.test_mongodb_multi_set_member_votes_to_0(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_member_options +def test_mongodb_multi_set_invalid_votes_and_priority(mongodb_multi: MongoDBMulti): + testhelper.test_mongodb_multi_set_invalid_votes_and_priority(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_member_options +def test_mongodb_multi_set_recover_valid_member_options(mongodb_multi: MongoDBMulti): + testhelper.test_mongodb_multi_set_recover_valid_member_options(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_member_options +def test_mongodb_multi_set_only_one_vote_per_member(mongodb_multi: MongoDBMulti): + testhelper.test_mongodb_multi_set_only_one_vote_per_member(mongodb_multi) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_migration.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_migration.py similarity index 53% rename from 
docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_migration.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_migration.py index 5f43629d0..fc52981d5 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_migration.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_migration.py @@ -4,9 +4,7 @@ import pymongo import pytest from kubetester import try_load -from kubetester.kubetester import assert_statefulset_architecture from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import get_default_architecture from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongotester import MongoDBBackgroundTester from kubetester.multicluster_client import MultiClusterClient @@ -14,6 +12,8 @@ from kubetester.phase import Phase from tests.multicluster.conftest import cluster_spec_list +from ..shared import multi_cluster_replica_set_migration as testhelper + MDBM_RESOURCE = "multi-replica-set-migration" @@ -25,7 +25,7 @@ def mongodb_multi( custom_mdb_version, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDBM_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDBM_RESOURCE, namespace) resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) resource["spec"]["version"] = custom_mdb_version resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) @@ -46,46 +46,28 @@ def mdb_health_checker(mongodb_multi: MongoDBMulti) -> MongoDBBackgroundTester: ) -@pytest.mark.e2e_multi_cluster_replica_set_migration +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_migration def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) -@pytest.mark.e2e_multi_cluster_replica_set_migration +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_migration def test_create_mongodb_multi_running(mongodb_multi: MongoDBMulti): - mongodb_multi.update() - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) + testhelper.test_create_mongodb_multi_running(mongodb_multi) -@pytest.mark.e2e_multi_cluster_replica_set_migration +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_migration def test_start_background_checker(mdb_health_checker: MongoDBBackgroundTester): - mdb_health_checker.start() + testhelper.test_start_background_checker(mdb_health_checker) -@pytest.mark.e2e_multi_cluster_replica_set_migration +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_migration def test_migrate_architecture(mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient]): - """ - If the E2E is running with default architecture as non-static, - then the test will migrate to static and vice versa. 
- """ - original_default_architecture = get_default_architecture() - target_architecture = "non-static" if original_default_architecture == "static" else "static" - - mongodb_multi.trigger_architecture_migration() - - mongodb_multi.load() - assert mongodb_multi["metadata"]["annotations"]["mongodb.com/v1.architecture"] == target_architecture - - mongodb_multi.assert_abandons_phase(Phase.Running, timeout=1800) - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1800) - - statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients) - for statefulset in statefulsets.values(): - assert_statefulset_architecture(statefulset, target_architecture) + testhelper.test_migrate_architecture(mongodb_multi, member_cluster_clients) -@pytest.mark.e2e_multi_cluster_replica_set_migration +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_migration def test_mdb_healthy_throughout_change_version( mdb_health_checker: MongoDBBackgroundTester, ): - mdb_health_checker.assert_healthiness() + testhelper.test_mdb_healthy_throughout_change_version(mdb_health_checker) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_scale_down.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_scale_down.py new file mode 100644 index 000000000..960ced828 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_scale_down.py @@ -0,0 +1,112 @@ +from typing import List + +import kubernetes +import pytest +from kubetester import try_load +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_replica_set_scale_down as testhelper + +RESOURCE_NAME = "multi-replica-set" +BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" + + +@pytest.fixture(scope="module") +def mongodb_multi_unmarshalled( + namespace: str, + multi_cluster_issuer_ca_configmap: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), RESOURCE_NAME, namespace) + resource.set_version(custom_mdb_version) + # start at one member in each cluster + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + resource["spec"]["security"] = { + "certsSecretPrefix": "prefix", + "tls": { + "ca": multi_cluster_issuer_ca_configmap, + }, + } + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource + + +@pytest.fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + + return create_multi_cluster_mongodb_tls_certs( + multi_cluster_issuer, + BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@pytest.fixture(scope="module") +def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, 
server_certs: str) -> MongoDBMulti: + if try_load(mongodb_multi_unmarshalled): + return mongodb_multi_unmarshalled + + return mongodb_multi_unmarshalled.update() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down +def test_statefulsets_have_been_created_correctly( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulsets_have_been_created_correctly(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down +def test_ops_manager_has_been_updated_correctly_before_scaling(): + testhelper.test_ops_manager_has_been_updated_correctly_before_scaling() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down +def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_scale_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down +def test_statefulsets_have_been_scaled_down_correctly( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulsets_have_been_scaled_down_correctly(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down +def test_ops_manager_has_been_updated_correctly_after_scaling(): + testhelper.test_ops_manager_has_been_updated_correctly_after_scaling() + + +@skip_if_local +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_down +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str): + testhelper.test_replica_set_is_reachable(mongodb_multi, ca_path) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_scale_up.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_scale_up.py new file mode 100644 index 000000000..cebe85d8e --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_scale_up.py @@ -0,0 +1,115 @@ +from typing import List + +import kubernetes +import kubetester +import pytest +from kubetester.automation_config_tester import AutomationConfigTester +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongotester import with_tls +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from kubetester.phase import Phase +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_replica_set_scale_up as testhelper + +RESOURCE_NAME = "multi-replica-set" +BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" + + +@pytest.fixture(scope="module") +def mongodb_multi_unmarshalled( + namespace: str, + multi_cluster_issuer_ca_configmap: str, + central_cluster_client: kubernetes.client.ApiClient, + 
member_cluster_names: List[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), RESOURCE_NAME, namespace) + resource.set_version(custom_mdb_version) + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + resource["spec"]["security"] = { + "certsSecretPrefix": "prefix", + "tls": { + "ca": multi_cluster_issuer_ca_configmap, + }, + } + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource + + +@pytest.fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + + return create_multi_cluster_mongodb_tls_certs( + multi_cluster_issuer, + BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@pytest.fixture(scope="module") +def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, server_certs: str) -> MongoDBMulti: + # we have created certs for all 5 members, but want to start at only 3. + mongodb_multi_unmarshalled["spec"]["clusterSpecList"][0]["members"] = 1 + mongodb_multi_unmarshalled["spec"]["clusterSpecList"][1]["members"] = 1 + mongodb_multi_unmarshalled["spec"]["clusterSpecList"][2]["members"] = 1 + return mongodb_multi_unmarshalled.create() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up +def test_statefulsets_have_been_created_correctly( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulsets_have_been_created_correctly(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up +def test_ops_manager_has_been_updated_correctly_before_scaling(): + testhelper.test_ops_manager_has_been_updated_correctly_before_scaling() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up +def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_scale_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up +def test_statefulsets_have_been_scaled_up_correctly( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulsets_have_been_scaled_up_correctly(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up +def test_ops_manager_has_been_updated_correctly_after_scaling(): + testhelper.test_ops_manager_has_been_updated_correctly_after_scaling() + + +@skip_if_local +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_replica_set_scale_up +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str): + testhelper.test_replica_set_is_reachable(mongodb_multi, ca_path) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_test_mtls.py 
b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_test_mtls.py new file mode 100644 index 000000000..2f593dc62 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_replica_set_test_mtls.py @@ -0,0 +1,86 @@ +from typing import List + +import kubernetes +import pytest +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_replica_set_test_mtls as testhelper + +MDB_RESOURCE = "multi-replica-set" + + +@pytest.fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) + resource.set_version(custom_mdb_version) + + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + # TODO: incorporate this into the base class. + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + resource.update() + return resource + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_mtls_test +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_mtls_test +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_mtls_test +def test_create_mongo_pod_in_separate_namespace( + member_cluster_clients: List[MultiClusterClient], + evergreen_task_id: str, + namespace: str, +): + testhelper.test_create_mongo_pod_in_separate_namespace(member_cluster_clients, evergreen_task_id, namespace) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_mtls_test +def test_connectivity_fails_from_second_namespace( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + namespace: str, +): + testhelper.test_connectivity_fails_from_second_namespace(mongodb_multi, member_cluster_clients, namespace) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_mtls_test +def test_enable_istio_injection( + member_cluster_clients: List[MultiClusterClient], + namespace: str, +): + testhelper.test_enable_istio_injection(member_cluster_clients, namespace) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_mtls_test +def test_delete_existing_mongo_pod(member_cluster_clients: List[MultiClusterClient], namespace: str): + testhelper.test_delete_existing_mongo_pod(member_cluster_clients, namespace) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_mtls_test +def test_create_pod_with_istio_sidecar(member_cluster_clients: List[MultiClusterClient], namespace: str): + testhelper.test_create_pod_with_istio_sidecar(member_cluster_clients, namespace) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_mtls_test +def test_connectivity_succeeds_from_second_namespace( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + namespace: str, +): + testhelper.test_connectivity_succeeds_from_second_namespace(mongodb_multi, member_cluster_clients, namespace) diff --git 
a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_down_cluster.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_down_cluster.py new file mode 100644 index 000000000..f7f3e9620 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_down_cluster.py @@ -0,0 +1,106 @@ +from typing import List + +import kubernetes +import pytest +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_scale_down_cluster as testhelper + +RESOURCE_NAME = "multi-replica-set" +BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" + + +@pytest.fixture(scope="module") +def mongodb_multi_unmarshalled( + namespace: str, + multi_cluster_issuer_ca_configmap: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), RESOURCE_NAME, namespace) + resource.set_version(custom_mdb_version) + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + resource["spec"]["security"] = { + "certsSecretPrefix": "prefix", + "tls": { + "ca": multi_cluster_issuer_ca_configmap, + }, + } + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource + + +@pytest.fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + + return create_multi_cluster_mongodb_tls_certs( + multi_cluster_issuer, + BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@pytest.fixture(scope="module") +def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, server_certs: str) -> MongoDBMulti: + return mongodb_multi_unmarshalled.create() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_down_cluster +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_down_cluster +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_down_cluster +def test_statefulsets_have_been_created_correctly( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulsets_have_been_created_correctly(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_down_cluster +def test_ops_manager_has_been_updated_correctly_before_scaling(): + testhelper.test_ops_manager_has_been_updated_correctly_before_scaling() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_down_cluster +def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_scale_mongodb_multi(mongodb_multi) + + 
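Note: the fixtures in these files all build spec.clusterSpecList through cluster_spec_list, imported from tests.multicluster.conftest. That helper is not part of this diff, so the following is only a sketch of the shape these tests appear to assume it returns; the implementation details are an assumption.

from typing import List


def cluster_spec_list(member_cluster_names: List[str], members: List[int]) -> List[dict]:
    # Pair each member cluster name with its desired member count, producing the
    # entries a MongoDBMulti resource expects under spec.clusterSpecList.
    return [
        {"clusterName": name, "members": count}
        for name, count in zip(member_cluster_names, members)
    ]


# Example: cluster_spec_list(cluster_names, [2, 1, 2]) describes a five-member replica
# set spread 2/1/2 across three clusters, matching the dicts appended manually in the
# scale-up test ({"members": 2, "clusterName": ...}).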
+@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_down_cluster +def test_statefulsets_have_been_scaled_down_correctly( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulsets_have_been_scaled_down_correctly(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_down_cluster +def test_ops_manager_has_been_updated_correctly_after_scaling(): + testhelper.test_ops_manager_has_been_updated_correctly_after_scaling() + + +@skip_if_local +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_down_cluster +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str): + testhelper.test_replica_set_is_reachable(mongodb_multi, ca_path) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scale_up_cluster.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_up_cluster.py similarity index 60% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scale_up_cluster.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_up_cluster.py index 3acc73dff..b5146c652 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scale_up_cluster.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_up_cluster.py @@ -8,20 +8,18 @@ random_k8s_name, read_configmap, try_load, - wait_until, ) -from kubetester.automation_config_tester import AutomationConfigTester from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs from kubetester.kubetester import KubernetesTester from kubetester.kubetester import fixture as yaml_fixture from kubetester.kubetester import skip_if_local from kubetester.mongodb_multi import MongoDBMulti -from kubetester.mongotester import with_tls from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase from tests.multicluster.conftest import cluster_spec_list +from ..shared import multi_cluster_scale_up_cluster as testhelper + RESOURCE_NAME = "multi-replica-set" BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" @@ -54,7 +52,7 @@ def mongodb_multi_unmarshalled( member_cluster_names: list[str], custom_mdb_version: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), RESOURCE_NAME, namespace) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), RESOURCE_NAME, namespace) resource.set_version(custom_mdb_version) # ensure certs are created for the members during scale up resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [3, 1, 2]) @@ -97,97 +95,68 @@ def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, server_certs: str) - return mongodb_multi_unmarshalled -@pytest.mark.e2e_multi_cluster_scale_up_cluster +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) -@pytest.mark.e2e_multi_cluster_scale_up_cluster +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): - mongodb_multi.update() - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) + 
testhelper.test_create_mongodb_multi(mongodb_multi) -@pytest.mark.e2e_multi_cluster_scale_up_cluster +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster def test_statefulsets_have_been_created_correctly( mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient], ): - # read all statefulsets except the last one - mongodb_multi.assert_statefulsets_are_ready(member_cluster_clients[:-1]) + testhelper.test_statefulsets_have_been_created_correctly(mongodb_multi, member_cluster_clients) -@pytest.mark.e2e_multi_cluster_scale_up_cluster +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster def test_ops_manager_has_been_updated_correctly_before_scaling(): - ac = AutomationConfigTester() - ac.assert_processes_size(3) + testhelper.test_ops_manager_has_been_updated_correctly_before_scaling() -@pytest.mark.e2e_multi_cluster_scale_up_cluster +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient]): - mongodb_multi["spec"]["clusterSpecList"].append( - {"members": 2, "clusterName": member_cluster_clients[2].cluster_name} - ) - mongodb_multi.update() - mongodb_multi.assert_abandons_phase(Phase.Running, timeout=120) - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1800) + testhelper.test_scale_mongodb_multi(mongodb_multi, member_cluster_clients) -@pytest.mark.e2e_multi_cluster_scale_up_cluster +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster def test_statefulsets_have_been_scaled_up_correctly( mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient], ): - mongodb_multi.assert_statefulsets_are_ready(member_cluster_clients, timeout=60) + testhelper.test_statefulsets_have_been_scaled_up_correctly(mongodb_multi, member_cluster_clients) -@pytest.mark.e2e_multi_cluster_scale_up_cluster +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster def test_ops_manager_has_been_updated_correctly_after_scaling(): - ac = AutomationConfigTester() - ac.assert_processes_size(5) + testhelper.test_ops_manager_has_been_updated_correctly_after_scaling() @skip_if_local -@pytest.mark.e2e_multi_cluster_scale_up_cluster +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str): - tester = mongodb_multi.tester() - tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) + testhelper.test_replica_set_is_reachable(mongodb_multi, ca_path) # From here on, the tests are for verifying that we can change the project of the MongoDBMulti resource even with # non-sequential member ids in the replicaset. -@pytest.mark.e2e_multi_cluster_scale_up_cluster +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster class TestNonSequentialMemberIdsInReplicaSet(KubernetesTester): def test_scale_up_first_cluster( self, mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient] ): - # Scale up the first cluster to 3 members. This will lead to non-sequential member ids in the replicaset. 
- # multi-replica-set-0-0 : 0 - # multi-replica-set-0-1 : 1 - # multi-replica-set-0-2 : 5 - # multi-replica-set-1-0 : 2 - # multi-replica-set-2-0 : 3 - # multi-replica-set-2-1 : 4 - - mongodb_multi["spec"]["clusterSpecList"][0]["members"] = 3 - mongodb_multi.update() - - mongodb_multi.assert_statefulsets_are_ready(member_cluster_clients) - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) + testhelper.TestNonSequentialMemberIdsInReplicaSet.test_scale_up_first_cluster( + self, mongodb_multi, member_cluster_clients + ) def test_change_project(self, mongodb_multi: MongoDBMulti, new_project_configmap: str): - oldRsMembers = mongodb_multi.get_automation_config_tester().get_replica_set_members(mongodb_multi.name) - - mongodb_multi["spec"]["opsManager"]["configMapRef"]["name"] = new_project_configmap - mongodb_multi.update() - - mongodb_multi.assert_abandons_phase(phase=Phase.Running, timeout=300) - mongodb_multi.assert_reaches_phase(phase=Phase.Running, timeout=600) - - newRsMembers = mongodb_multi.get_automation_config_tester().get_replica_set_members(mongodb_multi.name) - - # Assert that the replica set member ids have not changed after changing the project. - assert oldRsMembers == newRsMembers + testhelper.TestNonSequentialMemberIdsInReplicaSet.test_change_project( + self, mongodb_multi, new_project_configmap + ) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster.py new file mode 100644 index 000000000..1fd805bbf --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster.py @@ -0,0 +1,130 @@ +from typing import Callable, List + +import kubernetes +import pytest +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_scale_up_cluster_new_cluster as testhelper + +RESOURCE_NAME = "multi-replica-set" +BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" + + +@pytest.fixture(scope="module") +def mongodb_multi_unmarshalled( + namespace: str, + multi_cluster_issuer_ca_configmap: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), RESOURCE_NAME, namespace) + resource.set_version(custom_mdb_version) + # ensure certs are created for the members during scale up + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + resource["spec"]["security"] = { + "certsSecretPrefix": "prefix", + "tls": { + "ca": multi_cluster_issuer_ca_configmap, + }, + } + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource + + +@pytest.fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + 
return create_multi_cluster_mongodb_tls_certs( + multi_cluster_issuer, + BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@pytest.fixture(scope="module") +def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, server_certs: str) -> MongoDBMulti: + mongodb_multi_unmarshalled["spec"]["clusterSpecList"].pop() + return mongodb_multi_unmarshalled.create() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster +def test_deploy_operator( + install_multi_cluster_operator_set_members_fn: Callable[[List[str]], Operator], + member_cluster_names: List[str], + namespace: str, +): + testhelper.test_deploy_operator(install_multi_cluster_operator_set_members_fn, member_cluster_names, namespace) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster +def test_statefulsets_have_been_created_correctly( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulsets_have_been_created_correctly(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster +def test_ops_manager_has_been_updated_correctly_before_scaling(): + testhelper.test_ops_manager_has_been_updated_correctly_before_scaling() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster +def test_delete_deployment(namespace: str, central_cluster_client: kubernetes.client.ApiClient): + testhelper.test_delete_deployment(namespace, central_cluster_client) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster +def test_re_deploy_operator( + install_multi_cluster_operator_set_members_fn: Callable[[List[str]], Operator], + member_cluster_names: List[str], + namespace: str, +): + testhelper.test_re_deploy_operator(install_multi_cluster_operator_set_members_fn, member_cluster_names, namespace) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster +def test_add_new_cluster_to_mongodb_multi_resource( + mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient] +): + testhelper.test_add_new_cluster_to_mongodb_multi_resource(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster +def test_statefulsets_have_been_created_correctly_after_cluster_addition( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_statefulsets_have_been_created_correctly_after_cluster_addition( + mongodb_multi, member_cluster_clients + ) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster +def test_ops_manager_has_been_updated_correctly_after_scaling(): + testhelper.test_ops_manager_has_been_updated_correctly_after_scaling() + + +@skip_if_local +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scale_up_cluster_new_cluster +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str): + testhelper.test_replica_set_is_reachable(mongodb_multi, ca_path) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scram.py 
b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scram.py new file mode 100644 index 000000000..d9941c298 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_scram.py @@ -0,0 +1,144 @@ +from typing import List + +import kubernetes +import pytest +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongodb_user import MongoDBUser +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_scram as testhelper + +MDB_RESOURCE = "multi-replica-set-scram" +USER_NAME = "my-user-1" +USER_RESOURCE = "multi-replica-set-scram-user" +PASSWORD_SECRET_NAME = "mms-user-1-password" + + +@pytest.fixture(scope="function") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names, + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) + resource.set_version(custom_mdb_version) + + resource["spec"]["security"] = { + "authentication": { + "agents": {"mode": "MONGODB-CR"}, + "enabled": True, + "modes": ["SCRAM-SHA-1", "SCRAM-SHA-256", "MONGODB-CR"], + } + } + + resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + return resource + + +@pytest.fixture(scope="function") +def mongodb_user(central_cluster_client: kubernetes.client.ApiClient, namespace: str) -> MongoDBUser: + resource = MongoDBUser.from_yaml(yaml_fixture("scram-sha-user.yaml"), USER_RESOURCE, namespace) + + resource["spec"]["username"] = USER_NAME + resource["spec"]["passwordSecretKeyRef"] = { + "name": PASSWORD_SECRET_NAME, + "key": "password", + } + resource["spec"]["mongodbResourceRef"]["name"] = MDB_RESOURCE + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + return resource + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_create_mongodb_user( + central_cluster_client: kubernetes.client.ApiClient, + mongodb_user: MongoDBUser, + namespace: str, +): + testhelper.test_create_mongodb_user(central_cluster_client, mongodb_user, namespace) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_create_mongodb_multi_with_scram(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi_with_scram(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_user_reaches_updated( + central_cluster_client: kubernetes.client.ApiClient, + mongodb_user: MongoDBUser, +): + testhelper.test_user_reaches_updated(central_cluster_client, mongodb_user) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_replica_set_connectivity_using_user_password(mongodb_multi: MongoDBMulti): + testhelper.test_replica_set_connectivity_using_user_password(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_change_password_and_check_connectivity( + namespace: str, + mongodb_multi: MongoDBMulti, + central_cluster_client: 
kubernetes.client.ApiClient, +): + testhelper.test_change_password_and_check_connectivity(namespace, mongodb_multi, central_cluster_client) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_user_cannot_authenticate_with_old_password(mongodb_multi: MongoDBMulti): + testhelper.test_user_cannot_authenticate_with_old_password(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_connection_string_secret_was_created( + namespace: str, + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_connection_string_secret_was_created(namespace, mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_om_configured_correctly(): + testhelper.test_om_configured_correctly() + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_replica_set_connectivity(mongodb_multi: MongoDBMulti): + testhelper.test_replica_set_connectivity(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_replica_set_connectivity_from_connection_string_standard( + namespace: str, + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_replica_set_connectivity_from_connection_string_standard( + namespace, mongodb_multi, member_cluster_clients + ) + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_scram +def test_replica_set_connectivity_from_connection_string_standard_srv( + namespace: str, + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_replica_set_connectivity_from_connection_string_standard_srv( + namespace, mongodb_multi, member_cluster_clients + ) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_split_horizon.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_split_horizon.py similarity index 71% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_split_horizon.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_split_horizon.py index c32b8c38a..7d8be4df1 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_split_horizon.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_split_horizon.py @@ -1,30 +1,25 @@ from typing import List import kubernetes -import yaml from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs -from kubetester.kubetester import KubernetesTester from kubetester.kubetester import fixture as yaml_fixture from kubetester.kubetester import skip_if_local from kubetester.mongodb_multi import MongoDBMulti -from kubetester.mongotester import with_tls from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase from pytest import fixture, mark +from ..shared import multi_cluster_split_horizon as testhelper + CERT_SECRET_PREFIX = "clustercert" MDB_RESOURCE = "multi-cluster-replica-set" BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" -USER_NAME = "my-user-1" -PASSWORD_SECRET_NAME = "mms-user-1-password" -USER_PASSWORD = "my-password" # This test will set up an environment which will configure a resource with split horizon enabled. # Steps to run this test. # 1. Change the nodenames under "additional_domains" -# 2. 
Run this test with: `make e2e test=e2e_multi_cluster_split_horizon light=true local=true`. +# 2. Run this test with: `make e2e test=e2e_mongodbmulticluster_multi_cluster_split_horizon light=true local=true`. # 3. Wait for the test to pass (this means the environment is set up.) # 4. Exec into any database pod and note the contents of the files referenced by the fields # * net.tls.certificateKeyFile @@ -55,7 +50,9 @@ @fixture(scope="module") def mongodb_multi_unmarshalled(namespace: str) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi-split-horizon.yaml"), MDB_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml( + yaml_fixture("mongodbmulticluster-multi-split-horizon.yaml"), MDB_RESOURCE, namespace + ) return resource @@ -102,48 +99,25 @@ def mongodb_multi( return resource.create() -@mark.e2e_multi_cluster_split_horizon +@mark.e2e_mongodbmulticluster_multi_cluster_split_horizon def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) -@mark.e2e_multi_cluster_split_horizon +@mark.e2e_mongodbmulticluster_multi_cluster_split_horizon def test_deploy_mongodb_multi_with_tls( mongodb_multi: MongoDBMulti, namespace: str, ): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + testhelper.test_deploy_mongodb_multi_with_tls(mongodb_multi, namespace) -@mark.e2e_multi_cluster_split_horizon +@mark.e2e_mongodbmulticluster_multi_cluster_split_horizon def test_create_node_ports(mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient]): - for mcc in member_cluster_clients: - with open( - yaml_fixture(f"split-horizon-node-ports/split-horizon-node-port.yaml"), - "r", - ) as f: - service_body = yaml.safe_load(f.read()) - - # configure labels and selectors - service_body["metadata"]["labels"][ - "mongodbmulticluster" - ] = f"{mongodb_multi.namespace}-{mongodb_multi.name}" - service_body["metadata"]["labels"][ - "statefulset.kubernetes.io/pod-name" - ] = f"{mongodb_multi.name}-{mcc.cluster_index}-0" - service_body["spec"]["selector"][ - "statefulset.kubernetes.io/pod-name" - ] = f"{mongodb_multi.name}-{mcc.cluster_index}-0" - - KubernetesTester.create_service( - mongodb_multi.namespace, - body=service_body, - api_client=mcc.api_client, - ) + testhelper.test_create_node_ports(mongodb_multi, member_cluster_clients) @skip_if_local -@mark.e2e_multi_cluster_split_horizon +@mark.e2e_mongodbmulticluster_multi_cluster_split_horizon def test_tls_connectivity(mongodb_multi: MongoDBMulti, ca_path: str): - tester = mongodb_multi.tester() - tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) + testhelper.test_tls_connectivity(mongodb_multi, ca_path) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_sts_override.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_sts_override.py new file mode 100644 index 000000000..1d21aff50 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_sts_override.py @@ -0,0 +1,59 @@ +from typing import List + +import kubernetes +import pytest +from kubernetes import client +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator + +from ..shared 
import multi_cluster_sts_override as testhelper + +MDB_RESOURCE = "multi-replica-set-sts-override" + + +@pytest.fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + namespace: str, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml( + yaml_fixture("mongodbmulticluster-multi-sts-override.yaml"), + MDB_RESOURCE, + namespace, + ) + resource.set_version(custom_mdb_version) + + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource.update() + + +@pytest.mark.e2e_mongodbmulticluster_multi_sts_override +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@pytest.mark.e2e_mongodbmulticluster_multi_sts_override +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): + testhelper.test_create_mongodb_multi(mongodb_multi) + + +@pytest.mark.e2e_mongodbmulticluster_multi_sts_override +def test_statefulset_overrides(mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient]): + testhelper.test_statefulset_overrides(mongodb_multi, member_cluster_clients) + + +@pytest.mark.e2e_mongodbmulticluster_multi_sts_override +def test_access_modes_pvc( + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + namespace: str, +): + testhelper.test_access_modes_pvc(mongodb_multi, member_cluster_clients, namespace) + + +def assert_container_in_sts(container_name: str, sts: client.V1StatefulSet): + testhelper.assert_container_in_sts(container_name, sts) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_no_mesh.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_no_mesh.py new file mode 100644 index 000000000..0747f020d --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_no_mesh.py @@ -0,0 +1,198 @@ +from typing import List + +import kubernetes +from kubernetes import client +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from pytest import fixture, mark +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_tls_no_mesh as testhelper + +CERT_SECRET_PREFIX = "clustercert" +MDB_RESOURCE = "multi-cluster-replica-set" +BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" + + +@fixture(scope="module") +def mongodb_multi_unmarshalled( + namespace: str, member_cluster_names: List[str], custom_mdb_version: str +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) + resource.set_version(custom_mdb_version) + resource["spec"]["persistent"] = False + # These domains map 1:1 to the CoreDNS file. Please be mindful when updating them. 
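+ # Note on the block below: each member cluster is assigned its own externalDomain + # (kind-e2e-cluster-N.interconnected) and a LoadBalancer service publishing the "mongodb" (27017), + # "backup" (27018) and per-cluster "testingN" (27019) ports, so the replica set members can be + # reached directly, without relying on a service mesh.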
+ resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 2, 2]) + + resource["spec"]["externalAccess"] = {} + resource["spec"]["clusterSpecList"][0]["externalAccess"] = { + "externalDomain": "kind-e2e-cluster-1.interconnected", + "externalService": { + "spec": { + "type": "LoadBalancer", + "publishNotReadyAddresses": False, + "ports": [ + { + "name": "mongodb", + "port": 27017, + }, + { + "name": "backup", + "port": 27018, + }, + { + "name": "testing0", + "port": 27019, + }, + ], + } + }, + } + resource["spec"]["clusterSpecList"][1]["externalAccess"] = { + "externalDomain": "kind-e2e-cluster-2.interconnected", + "externalService": { + "spec": { + "type": "LoadBalancer", + "publishNotReadyAddresses": False, + "ports": [ + { + "name": "mongodb", + "port": 27017, + }, + { + "name": "backup", + "port": 27018, + }, + { + "name": "testing1", + "port": 27019, + }, + ], + } + }, + } + resource["spec"]["clusterSpecList"][2]["externalAccess"] = { + "externalDomain": "kind-e2e-cluster-3.interconnected", + "externalService": { + "spec": { + "type": "LoadBalancer", + "publishNotReadyAddresses": False, + "ports": [ + { + "name": "mongodb", + "port": 27017, + }, + { + "name": "backup", + "port": 27018, + }, + { + "name": "testing2", + "port": 27019, + }, + ], + } + }, + } + + return resource + + +@fixture(scope="module") +def disable_istio( + multi_cluster_operator: Operator, + namespace: str, + member_cluster_clients: List[MultiClusterClient], +): + for mcc in member_cluster_clients: + api = client.CoreV1Api(api_client=mcc.api_client) + labels = {"istio-injection": "disabled"} + ns = api.read_namespace(name=namespace) + ns.metadata.labels.update(labels) + api.replace_namespace(name=namespace, body=ns) + return None + + +@fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + disable_istio, + namespace: str, + mongodb_multi_unmarshalled: MongoDBMulti, + multi_cluster_issuer_ca_configmap: str, +) -> MongoDBMulti: + mongodb_multi_unmarshalled["spec"]["security"] = { + "certsSecretPrefix": CERT_SECRET_PREFIX, + "tls": { + "ca": multi_cluster_issuer_ca_configmap, + }, + } + mongodb_multi_unmarshalled.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + + return mongodb_multi_unmarshalled.update() + + +@fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + return create_multi_cluster_mongodb_tls_certs( + multi_cluster_issuer, + BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_tls_no_mesh +def test_update_coredns(cluster_clients: dict[str, kubernetes.client.ApiClient]): + testhelper.test_update_coredns(cluster_clients) + + +@mark.e2e_mongodbmulticluster_multi_cluster_tls_no_mesh +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@mark.e2e_mongodbmulticluster_multi_cluster_tls_no_mesh +def test_create_mongodb_multi( + mongodb_multi: MongoDBMulti, + namespace: str, + server_certs: str, + multi_cluster_issuer_ca_configmap: str, + member_cluster_clients: List[MultiClusterClient], + member_cluster_names: List[str], +): + testhelper.test_create_mongodb_multi( + mongodb_multi, + namespace, + server_certs, + multi_cluster_issuer_ca_configmap, + member_cluster_clients, + 
member_cluster_names, + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_tls_no_mesh +def test_service_overrides( + namespace: str, + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_service_overrides(namespace, mongodb_multi, member_cluster_clients) + + +@mark.e2e_mongodbmulticluster_multi_cluster_tls_no_mesh +def test_placeholders_in_external_services( + namespace: str, + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], +): + testhelper.test_placeholders_in_external_services(namespace, mongodb_multi, member_cluster_clients) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_with_scram.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_with_scram.py new file mode 100644 index 000000000..43ac7a25c --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_with_scram.py @@ -0,0 +1,175 @@ +from typing import List + +import kubernetes +from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs +from kubetester.kubetester import ensure_ent_version +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.kubetester import skip_if_local +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongodb_user import MongoDBUser +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from pytest import fixture, mark +from tests.multicluster.conftest import cluster_spec_list + +from ..shared import multi_cluster_tls_with_scram as testhelper + +CERT_SECRET_PREFIX = "clustercert" +MDB_RESOURCE = "multi-cluster-replica-set" +BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" +USER_NAME = "my-user-1" +USER_RESOURCE = "multi-replica-set-scram-user" +PASSWORD_SECRET_NAME = "mms-user-1-password" + + +@fixture(scope="module") +def mongodb_multi_unmarshalled( + namespace: str, + member_cluster_names: list[str], + custom_mdb_version: str, +) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) + resource.set_version(ensure_ent_version(custom_mdb_version)) + resource["spec"]["clusterSpecList"] = cluster_spec_list( + member_cluster_names=member_cluster_names, members=[2, 1, 2] + ) + + return resource + + +@fixture(scope="module") +def server_certs( + multi_cluster_issuer: str, + mongodb_multi_unmarshalled: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + central_cluster_client: kubernetes.client.ApiClient, +): + + return create_multi_cluster_mongodb_tls_certs( + multi_cluster_issuer, + BUNDLE_SECRET_NAME, + member_cluster_clients, + central_cluster_client, + mongodb_multi_unmarshalled, + ) + + +@fixture(scope="module") +def mongodb_multi( + central_cluster_client: kubernetes.client.ApiClient, + server_certs: str, + mongodb_multi_unmarshalled: MongoDBMulti, + multi_cluster_issuer_ca_configmap: str, +) -> MongoDBMulti: + + resource = mongodb_multi_unmarshalled + resource["spec"]["security"] = { + "certsSecretPrefix": CERT_SECRET_PREFIX, + "tls": { + "ca": multi_cluster_issuer_ca_configmap, + }, + } + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource.create() + + +@fixture(scope="module") +def mongodb_user(central_cluster_client: kubernetes.client.ApiClient, namespace: str) -> 
MongoDBUser: + resource = MongoDBUser.from_yaml(yaml_fixture("mongodb-user.yaml"), USER_RESOURCE, namespace) + + resource["spec"]["username"] = USER_NAME + resource["spec"]["passwordSecretKeyRef"] = { + "name": PASSWORD_SECRET_NAME, + "key": "password", + } + resource["spec"]["mongodbResourceRef"]["name"] = MDB_RESOURCE + resource["spec"]["mongodbResourceRef"]["namespace"] = namespace + resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) + return resource.create() + + +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram +def test_deploy_operator(multi_cluster_operator: Operator): + testhelper.test_deploy_operator(multi_cluster_operator) + + +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram +def test_deploy_mongodb_multi_with_tls( + mongodb_multi: MongoDBMulti, + namespace: str, + member_cluster_clients: List[MultiClusterClient], +): + + testhelper.test_deploy_mongodb_multi_with_tls(mongodb_multi, namespace, member_cluster_clients) + + +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram +def test_update_mongodb_multi_tls_with_scram( + mongodb_multi: MongoDBMulti, + namespace: str, +): + + testhelper.test_update_mongodb_multi_tls_with_scram(mongodb_multi, namespace) + + +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram +def test_create_mongodb_user( + central_cluster_client: kubernetes.client.ApiClient, + mongodb_user: MongoDBUser, + namespace: str, +): + testhelper.test_create_mongodb_user(central_cluster_client, mongodb_user, namespace) + + +@skip_if_local +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram +def test_tls_connectivity(mongodb_multi: MongoDBMulti, ca_path: str): + testhelper.test_tls_connectivity(mongodb_multi, ca_path) + + +@skip_if_local +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram +def test_replica_set_connectivity_with_scram_and_tls(mongodb_multi: MongoDBMulti, ca_path: str): + testhelper.test_replica_set_connectivity_with_scram_and_tls(mongodb_multi, ca_path) + + +@skip_if_local +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram +def test_replica_set_connectivity_from_connection_string_standard( + namespace: str, + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + ca_path: str, +): + testhelper.test_replica_set_connectivity_from_connection_string_standard( + namespace, mongodb_multi, member_cluster_clients, ca_path + ) + + +@skip_if_local +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram +def test_replica_set_connectivity_from_connection_string_standard_srv( + namespace: str, + mongodb_multi: MongoDBMulti, + member_cluster_clients: List[MultiClusterClient], + ca_path: str, +): + testhelper.test_replica_set_connectivity_from_connection_string_standard_srv( + namespace, mongodb_multi, member_cluster_clients, ca_path + ) + + +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram +def test_mongodb_multi_tls_enable_x509( + mongodb_multi: MongoDBMulti, + namespace: str, +): + testhelper.test_mongodb_multi_tls_enable_x509(mongodb_multi, namespace) + + +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_scram +def test_mongodb_multi_tls_automation_config_was_updated( + mongodb_multi: MongoDBMulti, + namespace: str, +): + testhelper.test_mongodb_multi_tls_automation_config_was_updated(mongodb_multi, namespace) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_tls_with_x509.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_with_x509.py similarity index 
71% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_tls_with_x509.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_with_x509.py index c0c421b3f..57d3a4866 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_tls_with_x509.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_tls_with_x509.py @@ -1,24 +1,22 @@ -import tempfile from typing import List import kubernetes -from kubetester.automation_config_tester import AutomationConfigTester -from kubetester.certs import Certificate, create_multi_cluster_x509_user_cert from kubetester.certs_mongodb_multi import ( create_multi_cluster_mongodb_x509_tls_certs, create_multi_cluster_x509_agent_certs, ) -from kubetester.kubetester import KubernetesTester, ensure_ent_version +from kubetester.kubetester import ensure_ent_version from kubetester.kubetester import fixture as yaml_fixture from kubetester.kubetester import skip_if_local from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongodb_user import MongoDBUser from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator -from kubetester.phase import Phase from pytest import fixture, mark from tests.multicluster.conftest import cluster_spec_list +from ..shared import multi_cluster_tls_with_x509 as testhelper + # TODO This test needs to re-introduce certificate rotation and enabling authentication step by step # See https://jira.mongodb.org/browse/CLOUDP-311366 @@ -31,7 +29,7 @@ @fixture(scope="module") def mongodb_multi_unmarshalled(namespace: str, member_cluster_names, custom_mdb_version: str) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) resource.set_version(ensure_ent_version(custom_mdb_version)) resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) @@ -131,35 +129,32 @@ def mongodb_x509_user(central_cluster_client: kubernetes.client.ApiClient, names return resource -@mark.e2e_multi_cluster_tls_with_x509 +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_x509 def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) -@mark.e2e_multi_cluster_tls_with_x509 +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_x509 def test_deploy_mongodb_multi_with_tls_and_authentication(mongodb_multi: MongoDBMulti, namespace: str): - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + testhelper.test_deploy_mongodb_multi_with_tls_and_authentication(mongodb_multi, namespace) -@mark.e2e_multi_cluster_tls_with_x509 +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_x509 def test_ops_manager_state_was_updated_correctly(mongodb_multi: MongoDBMulti): - ac_tester = AutomationConfigTester(KubernetesTester.get_automation_config()) - ac_tester.assert_authentication_enabled(expected_num_deployment_auth_mechanisms=2) - ac_tester.assert_authentication_mechanism_enabled("MONGODB-X509") - ac_tester.assert_internal_cluster_authentication_enabled() + testhelper.test_ops_manager_state_was_updated_correctly(mongodb_multi) -@mark.e2e_multi_cluster_tls_with_x509 +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_x509 def test_create_mongodb_x509_user( 
central_cluster_client: kubernetes.client.ApiClient, mongodb_x509_user: MongoDBUser, namespace: str, ): - mongodb_x509_user.assert_reaches_phase(Phase.Updated, timeout=100) + testhelper.test_create_mongodb_x509_user(central_cluster_client, mongodb_x509_user, namespace) @skip_if_local -@mark.e2e_multi_cluster_tls_with_x509 +@mark.e2e_mongodbmulticluster_multi_cluster_tls_with_x509 def test_x509_user_connectivity( mongodb_multi: MongoDBMulti, central_cluster_client: kubernetes.client.ApiClient, @@ -167,23 +162,12 @@ def test_x509_user_connectivity( namespace: str, ca_path: str, ): - with tempfile.NamedTemporaryFile(delete=False, mode="w") as cert_file: - create_multi_cluster_x509_user_cert( - multi_cluster_issuer, namespace, central_cluster_client, path=cert_file.name - ) - tester = mongodb_multi.tester() - tester.assert_x509_authentication(cert_file_name=cert_file.name, tlsCAFile=ca_path) + testhelper.test_x509_user_connectivity( + mongodb_multi, central_cluster_client, multi_cluster_issuer, namespace, ca_path + ) # TODO Replace and use this method to check that certificate rotation after enabling TLS and authentication mechanisms # keeps the resources reachable and in Running state. def assert_certificate_rotation(central_cluster_client, mongodb_multi, namespace, certificate_name): - cert = Certificate(name=certificate_name, namespace=namespace) - cert.api = kubernetes.client.CustomObjectsApi(api_client=central_cluster_client) - cert.load() - cert["spec"]["dnsNames"].append("foo") # Append DNS to cert to rotate the certificate - cert.update() - # FIXME the assertions below need to be replaced with a robust check that the agents are ready - # and the TLS certificates are rotated. - mongodb_multi.assert_abandons_phase(Phase.Running, timeout=100) - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1500) + testhelper.assert_certificate_rotation(central_cluster_client, mongodb_multi, namespace, certificate_name) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_upgrade_downgrade.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_upgrade_downgrade.py similarity index 52% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_upgrade_downgrade.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_upgrade_downgrade.py index 4aba05a9c..947d72785 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_upgrade_downgrade.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_upgrade_downgrade.py @@ -2,14 +2,15 @@ import pymongo import pytest from kubetester import try_load -from kubetester.kubetester import ensure_ent_version, fcv_from_version +from kubetester.kubetester import ensure_ent_version from kubetester.kubetester import fixture as yaml_fixture from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongotester import MongoDBBackgroundTester from kubetester.operator import Operator -from kubetester.phase import Phase from tests.multicluster.conftest import cluster_spec_list +from ..shared import multi_cluster_upgrade_downgrade as testhelper + MDBM_RESOURCE = "multi-replica-set-upgrade" @@ -21,7 +22,7 @@ def mongodb_multi( custom_mdb_prev_version: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDBM_RESOURCE, namespace) + resource = 
MongoDBMulti.from_yaml(yaml_fixture("mongodbmulticluster-multi.yaml"), MDBM_RESOURCE, namespace) resource.set_version(ensure_ent_version(custom_mdb_prev_version)) resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) @@ -42,60 +43,43 @@ def mdb_health_checker(mongodb_multi: MongoDBMulti) -> MongoDBBackgroundTester: ) -@pytest.mark.e2e_multi_cluster_upgrade_downgrade +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade def test_deploy_operator(multi_cluster_operator: Operator): - multi_cluster_operator.assert_is_running() + testhelper.test_deploy_operator(multi_cluster_operator) -@pytest.mark.e2e_multi_cluster_upgrade_downgrade +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade def test_create_mongodb_multi_running(mongodb_multi: MongoDBMulti, custom_mdb_prev_version: str): - mongodb_multi.update() - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) - mongodb_multi.tester().assert_version(ensure_ent_version(custom_mdb_prev_version)) + testhelper.test_create_mongodb_multi_running(mongodb_multi, custom_mdb_prev_version) -@pytest.mark.e2e_multi_cluster_upgrade_downgrade +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade def test_start_background_checker(mdb_health_checker: MongoDBBackgroundTester): - mdb_health_checker.start() + testhelper.test_start_background_checker(mdb_health_checker) -@pytest.mark.e2e_multi_cluster_upgrade_downgrade +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade def test_mongodb_multi_upgrade(mongodb_multi: MongoDBMulti, custom_mdb_prev_version: str, custom_mdb_version: str): - mongodb_multi.load() - mongodb_multi["spec"]["version"] = ensure_ent_version(custom_mdb_version) - mongodb_multi["spec"]["featureCompatibilityVersion"] = fcv_from_version(custom_mdb_prev_version) - mongodb_multi.update() - - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) + testhelper.test_mongodb_multi_upgrade(mongodb_multi, custom_mdb_prev_version, custom_mdb_version) - mongodb_multi.tester().assert_version(ensure_ent_version(custom_mdb_version)) - -@pytest.mark.e2e_multi_cluster_upgrade_downgrade +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade def test_upgraded_replica_set_is_reachable(mongodb_multi: MongoDBMulti): - tester = mongodb_multi.tester() - tester.assert_connectivity() + testhelper.test_upgraded_replica_set_is_reachable(mongodb_multi) -@pytest.mark.e2e_multi_cluster_upgrade_downgrade +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade def test_mongodb_multi_downgrade(mongodb_multi: MongoDBMulti, custom_mdb_prev_version: str): - mongodb_multi.load() - mongodb_multi["spec"]["version"] = ensure_ent_version(custom_mdb_prev_version) - mongodb_multi["spec"]["featureCompatibilityVersion"] = fcv_from_version(custom_mdb_prev_version) - mongodb_multi.update() - - mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) - mongodb_multi.tester().assert_version(ensure_ent_version(custom_mdb_prev_version)) + testhelper.test_mongodb_multi_downgrade(mongodb_multi, custom_mdb_prev_version) -@pytest.mark.e2e_multi_cluster_upgrade_downgrade +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade def test_downgraded_replica_set_is_reachable(mongodb_multi: MongoDBMulti): - tester = mongodb_multi.tester() - tester.assert_connectivity() + testhelper.test_downgraded_replica_set_is_reachable(mongodb_multi) 
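For reference, the inline bodies removed above are what the shared helper presumably preserves. The pattern worth noting is that featureCompatibilityVersion stays pinned to the previous MongoDB version while the binaries are upgraded, which is what keeps the later downgrade step possible. A condensed sketch of the upgrade step under that assumption:

from kubetester.kubetester import ensure_ent_version, fcv_from_version
from kubetester.mongodb_multi import MongoDBMulti
from kubetester.phase import Phase


def test_mongodb_multi_upgrade(mongodb_multi: MongoDBMulti, custom_mdb_prev_version: str, custom_mdb_version: str):
    mongodb_multi.load()
    mongodb_multi["spec"]["version"] = ensure_ent_version(custom_mdb_version)
    # FCV remains at the previous version so the downgrade later in the test can succeed.
    mongodb_multi["spec"]["featureCompatibilityVersion"] = fcv_from_version(custom_mdb_prev_version)
    mongodb_multi.update()
    mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700)
    mongodb_multi.tester().assert_version(ensure_ent_version(custom_mdb_version))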
-@pytest.mark.e2e_multi_cluster_upgrade_downgrade +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_upgrade_downgrade def test_mdb_healthy_throughout_change_version( mdb_health_checker: MongoDBBackgroundTester, ): - mdb_health_checker.assert_healthiness() + testhelper.test_mdb_healthy_throughout_change_version(mdb_health_checker) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_validation.py b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_validation.py new file mode 100644 index 000000000..bba02ec46 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/mongodbmulticluster/mongodbmulticluster_multi_cluster_validation.py @@ -0,0 +1,22 @@ +import kubernetes +import pytest +from kubetester.kubetester import KubernetesTester +from kubetester.operator import Operator +from tests.multicluster.shared import multi_cluster_validation as testhelper + +MDBM_RESOURCE = "mongodbmulticluster-multi-cluster.yaml" + + +@pytest.mark.e2e_mongodbmulticluster_multi_cluster_validation +class TestWebhookValidation(KubernetesTester): + def test_deploy_operator(self, multi_cluster_operator: Operator): + testhelper.TestWebhookValidation.test_deploy_operator(self, multi_cluster_operator) + + def test_unique_cluster_names(self, central_cluster_client: kubernetes.client.ApiClient): + testhelper.TestWebhookValidation.test_unique_cluster_names(self, central_cluster_client, MDBM_RESOURCE) + + def test_only_one_schema(self, central_cluster_client: kubernetes.client.ApiClient): + testhelper.TestWebhookValidation.test_only_one_schema(self, central_cluster_client, MDBM_RESOURCE) + + def test_non_empty_clusterspec_list(self, central_cluster_client: kubernetes.client.ApiClient): + testhelper.TestWebhookValidation.test_non_empty_clusterspec_list(self, central_cluster_client, MDBM_RESOURCE) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_reconcile_races.py b/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_reconcile_races.py index 10d0064f6..5e4dc5e6e 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_reconcile_races.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_reconcile_races.py @@ -73,7 +73,7 @@ def get_replica_set(ops_manager, namespace: str, idx: int) -> MongoDB: def get_mdbmc(ops_manager, namespace: str, idx: int) -> MongoDBMulti: name = f"mdb-{idx}-mc" resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi-cluster.yaml"), + yaml_fixture("mongodbmulticluster-multi-cluster.yaml"), namespace=namespace, name=name, ).configure(ops_manager, name, api_client=get_central_cluster_client()) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/__init__.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py new file mode 100644 index 000000000..3679de73d --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/manual_multi_cluster_tls_no_mesh_2_clusters_eks_gke.py @@ -0,0 +1,22 @@ +from typing import List + +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import 
MultiClusterClient +from kubetester.operator import Operator +from kubetester.phase import Phase + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +def test_create_mongodb_multi( + mongodb_multi: MongoDBMulti | MongoDB, + namespace: str, + server_certs: str, + multi_cluster_issuer_ca_configmap: str, + member_cluster_clients: List[MultiClusterClient], + member_cluster_names: List[str], +): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=2400) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_2_cluster_clusterwide_replicaset.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_2_cluster_clusterwide_replicaset.py new file mode 100644 index 000000000..a786f9951 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_2_cluster_clusterwide_replicaset.py @@ -0,0 +1,121 @@ +from typing import Dict, List + +import kubernetes +from kubetester import ( + create_or_update_configmap, + create_or_update_secret, + read_configmap, + read_secret, +) +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from kubetester.phase import Phase +from tests.multicluster import prepare_multi_cluster_namespaces +from tests.multicluster.conftest import create_namespace + + +def test_create_kube_config_file(cluster_clients: Dict, member_cluster_names: List[str]): + clients = cluster_clients + + assert len(clients) == 2 + assert member_cluster_names[0] in clients + assert member_cluster_names[1] in clients + + +def test_create_namespaces( + namespace: str, + mdba_ns: str, + mdbb_ns: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_clients: List[MultiClusterClient], + evergreen_task_id: str, + multi_cluster_operator_installation_config: Dict[str, str], +): + image_pull_secret_name = multi_cluster_operator_installation_config["registry.imagePullSecrets"] + image_pull_secret_data = read_secret(namespace, image_pull_secret_name, api_client=central_cluster_client) + + create_namespace( + central_cluster_client, + member_cluster_clients, + evergreen_task_id, + mdba_ns, + image_pull_secret_name, + image_pull_secret_data, + ) + + create_namespace( + central_cluster_client, + member_cluster_clients, + evergreen_task_id, + mdbb_ns, + image_pull_secret_name, + image_pull_secret_data, + ) + + +def test_deploy_operator(multi_cluster_operator_clustermode: Operator): + multi_cluster_operator_clustermode.assert_is_running() + + +def test_prepare_namespace( + multi_cluster_operator_installation_config: Dict[str, str], + member_cluster_clients: List[MultiClusterClient], + central_cluster_name: str, + mdba_ns: str, + mdbb_ns: str, +): + prepare_multi_cluster_namespaces( + mdba_ns, + multi_cluster_operator_installation_config, + member_cluster_clients, + central_cluster_name, + skip_central_cluster=False, + ) + + prepare_multi_cluster_namespaces( + mdbb_ns, + multi_cluster_operator_installation_config, + member_cluster_clients, + central_cluster_name, + skip_central_cluster=False, + ) + + +def test_copy_configmap_and_secret_across_ns( + namespace: str, + central_cluster_client: kubernetes.client.ApiClient, + multi_cluster_operator_installation_config: Dict[str, str], + mdba_ns: str, + mdbb_ns: str, +): + data = read_configmap(namespace, "my-project", api_client=central_cluster_client) + data["projectName"] = mdba_ns + 
create_or_update_configmap(mdba_ns, "my-project", data, api_client=central_cluster_client) + + data["projectName"] = mdbb_ns + create_or_update_configmap(mdbb_ns, "my-project", data, api_client=central_cluster_client) + + data = read_secret(namespace, "my-credentials", api_client=central_cluster_client) + create_or_update_secret(mdba_ns, "my-credentials", data, api_client=central_cluster_client) + create_or_update_secret(mdbb_ns, "my-credentials", data, api_client=central_cluster_client) + + +def test_create_mongodb_multi_nsa(mongodb_multi_a: MongoDBMulti | MongoDB): + mongodb_multi_a.assert_reaches_phase(Phase.Running, timeout=800) + + +def test_enable_mongodb_multi_nsa_auth(mongodb_multi_a: MongoDBMulti | MongoDB): + mongodb_multi_a.reload() + mongodb_multi_a["spec"]["authentication"] = { + "agents": {"mode": "SCRAM"}, + "enabled": True, + "modes": ["SCRAM"], + } + + mongodb_multi_a.update() + + +def test_create_mongodb_multi_nsb(mongodb_multi_b: MongoDBMulti | MongoDB): + mongodb_multi_b.assert_reaches_phase(Phase.Running, timeout=800) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_2_cluster_replicaset.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_2_cluster_replicaset.py new file mode 100644 index 000000000..402c69947 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_2_cluster_replicaset.py @@ -0,0 +1,43 @@ +from typing import Dict, List + +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongotester import with_tls +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from kubetester.phase import Phase + + +def test_create_kube_config_file(cluster_clients: Dict, member_cluster_names: List[str]): + clients = cluster_clients + + assert len(clients) == 2 + assert member_cluster_names[0] in clients + assert member_cluster_names[1] in clients + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + + +def test_statefulset_is_created_across_multiple_clusters( + mongodb_multi: MongoDBMulti | MongoDB, + member_cluster_clients: List[MultiClusterClient], +): + statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients) + cluster_one_client = member_cluster_clients[0] + cluster_one_sts = statefulsets[cluster_one_client.cluster_name] + assert cluster_one_sts.status.ready_replicas == 2 + + cluster_two_client = member_cluster_clients[1] + cluster_two_sts = statefulsets[cluster_two_client.cluster_name] + assert cluster_two_sts.status.ready_replicas == 1 + + +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB, ca_path: str): + tester = mongodb_multi.tester() + tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_agent_flags.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_agent_flags.py similarity index 71% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_agent_flags.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_agent_flags.py index 1a552f39c..5b887aa53 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_agent_flags.py +++ 
b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_agent_flags.py @@ -1,43 +1,19 @@ from typing import List -import kubernetes from kubetester import client from kubetester.kubetester import KubernetesTester -from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from pytest import fixture, mark from tests.common.placeholders import placeholders -from tests.multicluster.conftest import cluster_spec_list -@fixture(scope="module") -def mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - namespace: str, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi-cluster.yaml"), "multi-replica-set", namespace) - resource.set_version(custom_mdb_version) - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - # override agent startup flags - resource["spec"]["agent"] = {"startupOptions": {"logFile": "/var/log/mongodb-mms-automation/customLogFile"}} - resource["spec"]["agent"]["logLevel"] = "DEBUG" - - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - return resource.update() - - -@mark.e2e_multi_cluster_agent_flags -def test_create_mongodb_multi(multi_cluster_operator: Operator, mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(multi_cluster_operator: Operator, mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) -@mark.e2e_multi_cluster_agent_flags def test_multi_replicaset_has_agent_flags( namespace: str, member_cluster_clients: List[MultiClusterClient], @@ -58,10 +34,9 @@ def test_multi_replicaset_has_agent_flags( assert result != "0" -@mark.e2e_multi_cluster_agent_flags def test_placeholders_in_external_services( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): for cluster_spec_item in mongodb_multi["spec"]["clusterSpecList"]: diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_automated_disaster_recovery.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_automated_disaster_recovery.py similarity index 66% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_automated_disaster_recovery.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_automated_disaster_recovery.py index 57928b907..c7511d639 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_automated_disaster_recovery.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_automated_disaster_recovery.py @@ -5,39 +5,20 @@ from kubernetes import client from kubetester import delete_statefulset, statefulset_is_deleted from kubetester.automation_config_tester import AutomationConfigTester -from kubetester.kubetester import KubernetesTester -from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import run_periodically +from kubetester.kubetester import KubernetesTester, run_periodically +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from 
kubetester.phase import Phase -from pytest import fixture, mark from tests.conftest import get_member_cluster_api_client - -from .conftest import cluster_spec_list, create_service_entries_objects +from tests.multicluster.conftest import ( + create_service_entries_objects, +) FAILED_MEMBER_CLUSTER_NAME = "kind-e2e-cluster-3" -@fixture(scope="module") -def mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - namespace: str, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", namespace) - resource.set_version(custom_mdb_version) - resource["spec"]["persistent"] = False - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - - return resource - - -@mark.e2e_multi_cluster_disaster_recovery def test_label_namespace(namespace: str, central_cluster_client: kubernetes.client.ApiClient): api = client.CoreV1Api(api_client=central_cluster_client) @@ -48,26 +29,20 @@ def test_label_namespace(namespace: str, central_cluster_client: kubernetes.clie api.replace_namespace(name=namespace, body=ns) -@mark.e2e_multi_cluster_disaster_recovery def test_create_service_entry(service_entries: List[CustomObject]): for service_entry in service_entries: service_entry.update() -@mark.e2e_multi_cluster_disaster_recovery -@mark.e2e_multi_cluster_multi_disaster_recovery def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@mark.e2e_multi_cluster_disaster_recovery -@mark.e2e_multi_cluster_multi_disaster_recovery -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.update() mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) -@mark.e2e_multi_cluster_disaster_recovery def test_update_service_entry_block_failed_cluster_traffic( namespace: str, central_cluster_client: kubernetes.client.ApiClient, @@ -82,16 +57,16 @@ def test_update_service_entry_block_failed_cluster_traffic( service_entry.update() -@mark.e2e_multi_cluster_disaster_recovery def test_mongodb_multi_leaves_running_state( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, ): mongodb_multi.load() mongodb_multi.assert_abandons_phase(Phase.Running, timeout=300) -@mark.e2e_multi_cluster_disaster_recovery -def test_delete_database_statefulset_in_failed_cluster(mongodb_multi: MongoDBMulti, member_cluster_names: list[str]): +def test_delete_database_statefulset_in_failed_cluster( + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_names: list[str] +): failed_cluster_idx = member_cluster_names.index(FAILED_MEMBER_CLUSTER_NAME) sts_name = f"{mongodb_multi.name}-{failed_cluster_idx}" try: @@ -115,22 +90,17 @@ def test_delete_database_statefulset_in_failed_cluster(mongodb_multi: MongoDBMul ) -@mark.e2e_multi_cluster_disaster_recovery -@mark.e2e_multi_cluster_multi_disaster_recovery -def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti): +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB): tester = mongodb_multi.tester() tester.assert_connectivity() -@mark.e2e_multi_cluster_disaster_recovery -def test_replica_reaches_running(mongodb_multi: MongoDBMulti): +def test_replica_reaches_running(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) 
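A minimal sketch of the delegation pattern this refactor relies on (illustrative only; the per-CRD wrapper modules for this file are outside this hunk): the shared modules above lose their pytest marks and module-level fixtures, and thin wrapper modules re-attach the marks and delegate, exactly as the new mongodbmulticluster_multi_cluster_validation.py wrapper does. A hypothetical wrapper for the shared disaster-recovery tests could look like:

from pytest import mark

# hypothetical wrapper module; fixture definitions live in the wrapper's conftest
from tests.multicluster.shared import multi_cluster_automated_disaster_recovery as testhelper


# the Evergreen task name doubles as the pytest mark
@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery
def test_deploy_operator(multi_cluster_operator):
    testhelper.test_deploy_operator(multi_cluster_operator)


@mark.e2e_mongodbmulticluster_multi_cluster_disaster_recovery
def test_create_mongodb_multi(mongodb_multi):
    testhelper.test_create_mongodb_multi(mongodb_multi)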
-@mark.e2e_multi_cluster_disaster_recovery -@mark.e2e_multi_cluster_multi_disaster_recovery -def test_number_numbers_in_ac(mongodb_multi: MongoDBMulti): +def test_number_numbers_in_ac(mongodb_multi: MongoDBMulti | MongoDB): tester = AutomationConfigTester(KubernetesTester.get_automation_config()) desiredmembers = 0 for c in mongodb_multi["spec"]["clusterSpecList"]: @@ -140,9 +110,8 @@ def test_number_numbers_in_ac(mongodb_multi: MongoDBMulti): assert len(processes) == desiredmembers -@mark.e2e_multi_cluster_disaster_recovery def test_sts_count_in_member_cluster( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_names: list[str], member_cluster_clients: List[MultiClusterClient], ): diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore.py new file mode 100644 index 000000000..d118e2b66 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore.py @@ -0,0 +1,219 @@ +import datetime +import time +from typing import List + +import kubernetes +import kubernetes.client +from kubernetes import client +from kubetester import ( + create_or_update_configmap, + get_default_storage_class, + read_service, +) +from kubetester.kubetester import KubernetesTester +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongodb_user import MongoDBUser +from kubetester.multicluster_client import MultiClusterClient +from kubetester.omtester import OMTester +from kubetester.operator import Operator +from kubetester.opsmanager import MongoDBOpsManager +from kubetester.phase import Phase +from tests.conftest import ( + assert_data_got_restored, + update_coredns_hosts, +) + +TEST_DATA = {"_id": "unique_id", "name": "John", "address": "Highway 37", "age": 30} + +MONGODB_PORT = 30000 + +HEAD_PATH = "/head/" +OPLOG_RS_NAME = "my-mongodb-oplog" +BLOCKSTORE_RS_NAME = "my-mongodb-blockstore" +USER_PASSWORD = "/qwerty@!#:" + + +def create_project_config_map(om: MongoDBOpsManager, mdb_name, project_name, client, custom_ca): + name = f"{mdb_name}-config" + data = { + "baseUrl": om.om_status().get_url(), + "projectName": project_name, + "sslMMSCAConfigMap": custom_ca, + "orgId": "", + } + + create_or_update_configmap(om.namespace, name, data, client) + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +class TestOpsManagerCreation: + """ + name: Ops Manager successful creation with backup and oplog stores enabled + description: | + Creates an Ops Manager instance with backup enabled. 
The OM is expected to get to 'Pending' state + eventually as it will wait for oplog db to be created + """ + + def test_create_om( + self, + ops_manager: MongoDBOpsManager, + ): + ops_manager["spec"]["backup"]["headDB"]["storageClass"] = get_default_storage_class() + ops_manager["spec"]["backup"]["members"] = 1 + + ops_manager.update() + + ops_manager.backup_status().assert_reaches_phase( + Phase.Pending, + msg_regexp="The MongoDB object .+ doesn't exist", + timeout=1800, + ) + + def test_daemon_statefulset( + self, + ops_manager: MongoDBOpsManager, + ): + def stateful_set_becomes_ready(): + stateful_set = ops_manager.read_backup_statefulset() + return stateful_set.status.ready_replicas == 1 and stateful_set.status.current_replicas == 1 + + KubernetesTester.wait_until(stateful_set_becomes_ready, timeout=300) + + stateful_set = ops_manager.read_backup_statefulset() + # pod template has volume mount request + assert (HEAD_PATH, "head") in ( + (mount.mount_path, mount.name) for mount in stateful_set.spec.template.spec.containers[0].volume_mounts + ) + + def test_backup_daemon_services_created( + self, + namespace, + central_cluster_client: kubernetes.client.ApiClient, + ): + """Backup creates two additional services for queryable backup""" + services = client.CoreV1Api(api_client=central_cluster_client).list_namespaced_service(namespace).items + + backup_services = [s for s in services if s.metadata.name.startswith("om-backup")] + + assert len(backup_services) >= 3 + + +class TestBackupDatabasesAdded: + """name: Creates mongodb resources for oplog and blockstore and waits until OM resource gets to + running state""" + + def test_backup_mdbs_created( + self, + oplog_replica_set: MongoDB, + blockstore_replica_set: MongoDB, + ): + """Creates mongodb databases all at once""" + oplog_replica_set.assert_reaches_phase(Phase.Running) + blockstore_replica_set.assert_reaches_phase(Phase.Running) + + def test_oplog_user_created(self, oplog_user: MongoDBUser): + oplog_user.assert_reaches_phase(Phase.Updated) + + def test_om_failed_oplog_no_user_ref(self, ops_manager: MongoDBOpsManager): + """Waits until Backup is in failed state as blockstore doesn't have reference to the user""" + ops_manager.backup_status().assert_reaches_phase( + Phase.Failed, + msg_regexp=".*is configured to use SCRAM-SHA authentication mode, the user " + "must be specified using 'mongodbUserRef'", + ) + + def test_fix_om(self, ops_manager: MongoDBOpsManager, oplog_user: MongoDBUser): + ops_manager.load() + ops_manager["spec"]["backup"]["opLogStores"][0]["mongodbUserRef"] = {"name": oplog_user.name} + ops_manager.update() + + ops_manager.backup_status().assert_reaches_phase( + Phase.Running, + timeout=200, + ignore_errors=True, + ) + + assert ops_manager.backup_status().get_message() is None + + +class TestBackupForMongodb: + def test_setup_om_connection( + self, + ops_manager: MongoDBOpsManager, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_clients: List[MultiClusterClient], + ): + """ + The base_url makes OM accessible from member clusters via a special interconnected dns address. + """ + ops_manager.load() + external_svc_name = ops_manager.external_svc_name() + svc = read_service(ops_manager.namespace, external_svc_name, api_client=central_cluster_client) + # we have no hostName, but the ip is resolvable. + ip = svc.status.load_balancer.ingress[0].ip + + interconnected_field = f"om-backup.{ops_manager.namespace}.interconnected" + + # let's make sure that every client can connect to OM. 
+ for c in member_cluster_clients: + update_coredns_hosts( + host_mappings=[(ip, interconnected_field)], + api_client=c.api_client, + cluster_name=c.cluster_name, + ) + + # let's make sure that the operator can connect to OM via that given address. + update_coredns_hosts( + host_mappings=[(ip, interconnected_field)], + api_client=central_cluster_client, + cluster_name="central-cluster", + ) + + new_address = f"https://{interconnected_field}:8443" + # updating the central url app setting to point at the external address, + # this allows agents in other clusters to communicate correctly with this OM instance. + ops_manager["spec"]["configuration"]["mms.centralUrl"] = new_address + ops_manager.update() + + def test_mongodb_multi_one_running_state(self, mongodb_multi_one: MongoDBMulti | MongoDB): + # we might fail connection in the beginning since we set a custom dns in coredns + mongodb_multi_one.assert_reaches_phase(Phase.Running, ignore_errors=True, timeout=1200) + + def test_add_test_data(self, mongodb_multi_one_collection): + mongodb_multi_one_collection.insert_one(TEST_DATA) + + def test_mdb_backed_up(self, project_one: OMTester): + project_one.wait_until_backup_snapshots_are_ready(expected_count=1) + + def test_change_mdb_data(self, mongodb_multi_one_collection): + now_millis = time_to_millis(datetime.datetime.now()) + print("\nCurrent time (millis): {}".format(now_millis)) + time.sleep(30) + mongodb_multi_one_collection.insert_one({"foo": "bar"}) + + def test_pit_restore(self, project_one: OMTester): + now_millis = time_to_millis(datetime.datetime.now()) + print("\nCurrent time (millis): {}".format(now_millis)) + + backup_completion_time = project_one.get_latest_backup_completion_time() + print("\nbackup_completion_time: {}".format(backup_completion_time)) + + pit_millis = backup_completion_time + 1500 + + print(f"Restoring back to: {pit_millis}") + + project_one.create_restore_job_pit(pit_millis) + + def test_data_got_restored(self, mongodb_multi_one_collection, mdb_client): + assert_data_got_restored(TEST_DATA, mongodb_multi_one_collection, timeout=1200) + + +def time_to_millis(date_time) -> int: + """https://stackoverflow.com/a/11111177/614239""" + epoch = datetime.datetime.utcfromtimestamp(0) + pit_millis = (date_time - epoch).total_seconds() * 1000 + return pit_millis diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore_no_mesh.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore_no_mesh.py new file mode 100644 index 000000000..68bbaa38d --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_backup_restore_no_mesh.py @@ -0,0 +1,260 @@ +# This test sets up Ops Manager in a multicluster "no-mesh" environment. +# It tests the backup functionality with a multi-cluster replica-set when the replica-set is deployed outside of a service-mesh context. 
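For orientation before the no-mesh variant: without a service mesh, each replica-set member is reached through an external address, and every member cluster's CoreDNS is patched so those names resolve across clusters. A rough sketch of the host mappings involved (the IPs and hostnames below are invented for illustration; the real values come from the replica_set_external_hosts fixture):

# hypothetical (ip, hostname) pairs of the kind passed to update_coredns_hosts();
# the actual list is supplied by the replica_set_external_hosts fixture
replica_set_external_hosts = [
    ("172.18.255.211", "multi-cluster-rs-0-0.mongodb.interconnected"),
    ("172.18.255.212", "multi-cluster-rs-1-0.mongodb.interconnected"),
]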
+ +import datetime +import time +from typing import List, Optional, Tuple + +import kubernetes +import kubernetes.client +from kubernetes import client +from kubetester import ( + create_or_update_configmap, + get_default_storage_class, + read_service, +) +from kubetester.kubetester import KubernetesTester +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongodb_user import MongoDBUser +from kubetester.multicluster_client import MultiClusterClient +from kubetester.omtester import OMTester +from kubetester.operator import Operator +from kubetester.opsmanager import MongoDBOpsManager +from kubetester.phase import Phase +from tests.conftest import assert_data_got_restored, update_coredns_hosts + +TEST_DATA = {"_id": "unique_id", "name": "John", "address": "Highway 37", "age": 30} + +HEAD_PATH = "/head/" + + +def create_project_config_map(om: MongoDBOpsManager, mdb_name, project_name, client, custom_ca): + name = f"{mdb_name}-config" + data = { + "baseUrl": om.om_status().get_url(), + "projectName": project_name, + "sslMMSCAConfigMap": custom_ca, + "orgId": "", + } + + create_or_update_configmap(om.namespace, name, data, client) + + +def new_om_data_store( + mdb: MongoDB, + id: str, + assignment_enabled: bool = True, + user_name: Optional[str] = None, + password: Optional[str] = None, +) -> dict: + return { + "id": id, + "uri": mdb.mongo_uri(user_name=user_name, password=password), + "ssl": mdb.is_tls_enabled(), + "assignmentEnabled": assignment_enabled, + } + + +def test_update_coredns( + replica_set_external_hosts: List[Tuple[str, str]], + cluster_clients: dict[str, kubernetes.client.ApiClient], +): + """ + This test updates the coredns config in the member clusters to allow connecting to the other replica set members + through an external address. + """ + for cluster_name, cluster_api in cluster_clients.items(): + update_coredns_hosts(replica_set_external_hosts, cluster_name, api_client=cluster_api) + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +class TestOpsManagerCreation: + """ + name: Ops Manager successful creation with backup and oplog stores enabled + description: | + Creates an Ops Manager instance with backup enabled. 
The OM is expected to get to 'Pending' state + eventually as it will wait for oplog db to be created + """ + + def test_create_om( + self, + ops_manager: MongoDBOpsManager, + ): + ops_manager["spec"]["backup"]["headDB"]["storageClass"] = get_default_storage_class() + ops_manager["spec"]["backup"]["members"] = 1 + + ops_manager.update() + + ops_manager.backup_status().assert_reaches_phase( + Phase.Pending, + msg_regexp="The MongoDB object .+ doesn't exist", + timeout=1800, + ) + + def test_daemon_statefulset( + self, + ops_manager: MongoDBOpsManager, + ): + def stateful_set_becomes_ready(): + stateful_set = ops_manager.read_backup_statefulset() + return stateful_set.status.ready_replicas == 1 and stateful_set.status.current_replicas == 1 + + KubernetesTester.wait_until(stateful_set_becomes_ready, timeout=300) + + stateful_set = ops_manager.read_backup_statefulset() + # pod template has volume mount request + assert (HEAD_PATH, "head") in ( + (mount.mount_path, mount.name) for mount in stateful_set.spec.template.spec.containers[0].volume_mounts + ) + + def test_backup_daemon_services_created( + self, + namespace, + central_cluster_client: kubernetes.client.ApiClient, + ): + """Backup creates two additional services for queryable backup""" + services = client.CoreV1Api(api_client=central_cluster_client).list_namespaced_service(namespace).items + + backup_services = [s for s in services if s.metadata.name.startswith("om-backup")] + + assert len(backup_services) >= 3 + + +class TestBackupDatabasesAdded: + """name: Creates mongodb resources for oplog and blockstore and waits until OM resource gets to + running state""" + + def test_backup_mdbs_created( + self, + oplog_replica_set: MongoDB, + blockstore_replica_set: MongoDB, + ): + """Creates mongodb databases all at once""" + oplog_replica_set.assert_reaches_phase(Phase.Running) + blockstore_replica_set.assert_reaches_phase(Phase.Running) + + def test_oplog_user_created(self, oplog_user: MongoDBUser): + oplog_user.assert_reaches_phase(Phase.Updated) + + def test_om_failed_oplog_no_user_ref(self, ops_manager: MongoDBOpsManager): + """Waits until Backup is in failed state as blockstore doesn't have reference to the user""" + ops_manager.backup_status().assert_reaches_phase( + Phase.Failed, + msg_regexp=".*is configured to use SCRAM-SHA authentication mode, the user " + "must be specified using 'mongodbUserRef'", + ) + + def test_fix_om(self, ops_manager: MongoDBOpsManager, oplog_user: MongoDBUser): + ops_manager.load() + ops_manager["spec"]["backup"]["opLogStores"][0]["mongodbUserRef"] = {"name": oplog_user.name} + ops_manager.update() + + ops_manager.backup_status().assert_reaches_phase( + Phase.Running, + timeout=200, + ignore_errors=True, + ) + + assert ops_manager.backup_status().get_message() is None + + +class TestBackupForMongodb: + + def test_setup_om_connection( + self, + replica_set_external_hosts: List[Tuple[str, str]], + ops_manager: MongoDBOpsManager, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_clients: List[MultiClusterClient], + ): + """ + test_setup_om_connection makes OM accessible from member clusters via a special interconnected dns address. + """ + ops_manager.load() + external_svc_name = ops_manager.external_svc_name() + svc = read_service(ops_manager.namespace, external_svc_name, api_client=central_cluster_client) + # we have no hostName, but the ip is resolvable. 
+ ip = svc.status.load_balancer.ingress[0].ip + + interconnected_field = f"om-backup.{ops_manager.namespace}.interconnected" + + # let's make sure that every client can connect to OM. + hosts = replica_set_external_hosts[:] + hosts.append((ip, interconnected_field)) + + for c in member_cluster_clients: + update_coredns_hosts( + host_mappings=hosts, + api_client=c.api_client, + cluster_name=c.cluster_name, + ) + + # let's make sure that the operator can connect to OM via that given address. + update_coredns_hosts( + host_mappings=[(ip, interconnected_field)], + api_client=central_cluster_client, + cluster_name="central-cluster", + ) + + new_address = f"https://{interconnected_field}:8443" + # updating the central url app setting to point at the external address, + # this allows agents in other clusters to communicate correctly with this OM instance. + ops_manager["spec"]["configuration"]["mms.centralUrl"] = new_address + ops_manager.update() + + def test_mongodb_multi_one_running_state(self, mongodb_multi_one: MongoDBMulti | MongoDB): + # we might fail connection in the beginning since we set a custom dns in coredns + mongodb_multi_one.assert_reaches_phase(Phase.Running, ignore_errors=True, timeout=1500) + + def test_add_test_data(self, mongodb_multi_one_collection): + max_attempts = 100 + while max_attempts > 0: + try: + mongodb_multi_one_collection.insert_one(TEST_DATA) + return + except Exception as e: + print(e) + max_attempts -= 1 + time.sleep(6) + + raise AssertionError("could not insert test data after 100 attempts") + + def test_mdb_backed_up(self, project_one: OMTester): + project_one.wait_until_backup_snapshots_are_ready(expected_count=1) + + def test_change_mdb_data(self, mongodb_multi_one_collection): + now_millis = time_to_millis(datetime.datetime.now()) + print("\nCurrent time (millis): {}".format(now_millis)) + time.sleep(30) + mongodb_multi_one_collection.insert_one({"foo": "bar"}) + + def test_pit_restore(self, project_one: OMTester): + now_millis = time_to_millis(datetime.datetime.now()) + print("\nCurrent time (millis): {}".format(now_millis)) + + pit_datetime = datetime.datetime.now() - datetime.timedelta(seconds=15) + pit_millis = time_to_millis(pit_datetime) + print("Restoring back to the moment 15 seconds ago (millis): {}".format(pit_millis)) + + project_one.create_restore_job_pit(pit_millis) + + def test_mdb_ready(self, mongodb_multi_one: MongoDBMulti | MongoDB): + # Note that we are not waiting for the restore jobs to finish, as PIT restore jobs get FINISHED status + # right away. + # But the agent might still do work on the cluster, so we need to wait for that to happen. 
+ mongodb_multi_one.assert_reaches_phase(Phase.Pending) + mongodb_multi_one.assert_reaches_phase(Phase.Running) + + def test_data_got_restored(self, mongodb_multi_one_collection): + assert_data_got_restored(TEST_DATA, mongodb_multi_one_collection, timeout=900) + + +def time_to_millis(date_time) -> int: + """https://stackoverflow.com/a/11111177/614239""" + epoch = datetime.datetime.utcfromtimestamp(0) + pit_millis = (date_time - epoch).total_seconds() * 1000 + return pit_millis diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_cli_recover.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_cli_recover.py new file mode 100644 index 000000000..5116d323d --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_cli_recover.py @@ -0,0 +1,81 @@ +from typing import Callable, List + +import kubernetes +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.operator import Operator +from kubetester.phase import Phase +from tests.conftest import ( + run_kube_config_creation_tool, + run_multi_cluster_recovery_tool, +) +from tests.constants import MULTI_CLUSTER_OPERATOR_NAME + + +def test_deploy_operator( + install_multi_cluster_operator_set_members_fn: Callable[[List[str]], Operator], + member_cluster_names: List[str], + namespace: str, +): + run_kube_config_creation_tool(member_cluster_names[:-1], namespace, namespace, member_cluster_names) + # deploy the operator without the final cluster + operator = install_multi_cluster_operator_set_members_fn(member_cluster_names[:-1]) + operator.assert_is_running() + + +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) + + +def test_recover_operator_add_cluster( + member_cluster_names: List[str], + namespace: str, + central_cluster_client: kubernetes.client.ApiClient, +): + return_code = run_multi_cluster_recovery_tool(member_cluster_names, namespace, namespace) + assert return_code == 0 + operator = Operator( + name=MULTI_CLUSTER_OPERATOR_NAME, + namespace=namespace, + api_client=central_cluster_client, + ) + operator._wait_for_operator_ready() + operator.assert_is_running() + + +def test_mongodb_multi_recovers_adding_cluster(mongodb_multi: MongoDBMulti | MongoDB, member_cluster_names: List[str]): + mongodb_multi.load() + + mongodb_multi["spec"]["clusterSpecList"].append({"clusterName": member_cluster_names[-1], "members": 2}) + mongodb_multi.update() + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) + + +def test_recover_operator_remove_cluster( + member_cluster_names: List[str], + namespace: str, + central_cluster_client: kubernetes.client.ApiClient, +): + return_code = run_multi_cluster_recovery_tool(member_cluster_names[1:], namespace, namespace) + assert return_code == 0 + operator = Operator( + name=MULTI_CLUSTER_OPERATOR_NAME, + namespace=namespace, + api_client=central_cluster_client, + ) + operator._wait_for_operator_ready() + operator.assert_is_running() + + +def test_mongodb_multi_recovers_removing_cluster( + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_names: List[str] +): + mongodb_multi.load() + + last_transition_time = mongodb_multi.get_status_last_transition_time() + + mongodb_multi["spec"]["clusterSpecList"].pop(0) + mongodb_multi.update() + mongodb_multi.assert_state_transition_happens(last_transition_time) + + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1500) diff --git 
a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_clusterwide.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_clusterwide.py new file mode 100644 index 000000000..3ab1c88b1 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_clusterwide.py @@ -0,0 +1,123 @@ +import time +from typing import Dict, List + +import kubernetes +from kubernetes import client +from kubetester import create_or_update_configmap, create_or_update_secret, read_secret +from kubetester.kubetester import KubernetesTester +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from kubetester.phase import Phase +from tests.multicluster import prepare_multi_cluster_namespaces +from tests.multicluster.conftest import create_namespace + + +def test_create_namespaces( + namespace: str, + mdba_ns: str, + mdbb_ns: str, + unmanaged_mdb_ns: str, + central_cluster_client: kubernetes.client.ApiClient, + member_cluster_clients: List[MultiClusterClient], + evergreen_task_id: str, + multi_cluster_operator_installation_config: Dict[str, str], +): + image_pull_secret_name = multi_cluster_operator_installation_config["registry.imagePullSecrets"] + image_pull_secret_data = read_secret(namespace, image_pull_secret_name, api_client=central_cluster_client) + + create_namespace( + central_cluster_client, + member_cluster_clients, + evergreen_task_id, + mdba_ns, + image_pull_secret_name, + image_pull_secret_data, + ) + + create_namespace( + central_cluster_client, + member_cluster_clients, + evergreen_task_id, + mdbb_ns, + image_pull_secret_name, + image_pull_secret_data, + ) + + create_namespace( + central_cluster_client, + member_cluster_clients, + evergreen_task_id, + unmanaged_mdb_ns, + image_pull_secret_name, + image_pull_secret_data, + ) + + +def test_prepare_namespace( + multi_cluster_operator_installation_config: Dict[str, str], + member_cluster_clients: List[MultiClusterClient], + central_cluster_name: str, + mdba_ns: str, + mdbb_ns: str, +): + prepare_multi_cluster_namespaces( + mdba_ns, + multi_cluster_operator_installation_config, + member_cluster_clients, + central_cluster_name, + ) + + prepare_multi_cluster_namespaces( + mdbb_ns, + multi_cluster_operator_installation_config, + member_cluster_clients, + central_cluster_name, + ) + + +def test_deploy_operator(multi_cluster_operator_clustermode: Operator): + multi_cluster_operator_clustermode.assert_is_running() + + +def test_deploy_installed_operator(install_operator: Operator): + install_operator.assert_is_running() + + +def test_copy_configmap_and_secret_across_ns( + namespace: str, + central_cluster_client: client.ApiClient, + multi_cluster_operator_installation_config: Dict[str, str], + mdba_ns: str, + mdbb_ns: str, +): + data = KubernetesTester.read_configmap(namespace, "my-project", api_client=central_cluster_client) + data["projectName"] = mdba_ns + create_or_update_configmap(mdba_ns, "my-project", data, api_client=central_cluster_client) + + data["projectName"] = mdbb_ns + create_or_update_configmap(mdbb_ns, "my-project", data, api_client=central_cluster_client) + + data = read_secret(namespace, "my-credentials", api_client=central_cluster_client) + create_or_update_secret(mdba_ns, "my-credentials", data, api_client=central_cluster_client) + create_or_update_secret(mdbb_ns, "my-credentials", data, api_client=central_cluster_client) + + +def test_create_mongodb_multi_nsa(mongodb_multi_a: 
MongoDBMulti | MongoDB): + mongodb_multi_a.assert_reaches_phase(Phase.Running, timeout=800) + + +def test_create_mongodb_multi_nsb(mongodb_multi_b: MongoDBMulti | MongoDB): + mongodb_multi_b.assert_reaches_phase(Phase.Running, timeout=800) + + +def test_create_mongodb_multi_unmanaged(unmanaged_mongodb_multi: MongoDBMulti | MongoDB): + """ + For an unmanaged resource, the status should not be updated! + """ + for i in range(10): + time.sleep(5) + + unmanaged_mongodb_multi.reload() + assert "status" not in unmanaged_mongodb_multi diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_dr_connect.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_dr_connect.py similarity index 55% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_dr_connect.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_dr_connect.py index c2cc0d988..bb7d7d467 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_dr_connect.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_dr_connect.py @@ -2,9 +2,7 @@ import time from typing import Dict -import kubernetes -import pytest -from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.operator import Operator from kubetester.phase import Phase @@ -14,52 +12,28 @@ CLUSTER_TO_DELETE = "member-3a" -# this test is intended to run locally, using telepresence. Make sure to configure the cluster_context to api-server mapping -# in the "cluster_host_mapping" fixture before running it. It is intented to be run locally with the command: make e2e-telepresence test=e2e_multi_cluster_dr local=true -@pytest.fixture(scope="module") -def mongodb_multi(central_cluster_client: kubernetes.client.ApiClient, namespace: str) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi-dr.yaml"), "multi-replica-set", namespace) - - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - # return resource.load() - return resource.create() - - -@pytest.fixture(scope="module") -def mongodb_multi_collection(mongodb_multi: MongoDBMulti): - collection = mongodb_multi.tester().client["testdb"] - return collection["testcollection"] - - -@pytest.mark.e2e_multi_cluster_dr def test_create_kube_config_file(cluster_clients: Dict): clients = cluster_clients assert len(clients) == 4 -@pytest.mark.e2e_multi_cluster_dr def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_dr -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) -@pytest.mark.e2e_multi_cluster_dr -def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti): +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB): tester = mongodb_multi.tester() tester.assert_connectivity() -@pytest.mark.e2e_multi_cluster_dr -@pytest.mark.flaky(reruns=100, reruns_delay=6) def test_add_test_data(mongodb_multi_collection): mongodb_multi_collection.insert_one(TEST_DATA) -@pytest.mark.e2e_multi_cluster_dr def test_delete_member_3_cluster(): # delete 3rd cluster with gcloud command # gcloud container clusters delete member-3a --zone us-west1-a @@ -79,13 +53,11 @@ def test_delete_member_3_cluster(): ) 
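The DR tests above keep writing while a member cluster disappears, so inserts must be retried during replica-set re-election. A small helper sketch capturing that retry pattern (illustrative only; not an existing kubetester API), which also fails loudly once the attempts are exhausted:

import time


def insert_with_retry(collection, doc, attempts: int = 100, delay_seconds: int = 6):
    # keep retrying while the replica set recovers from the lost cluster
    last_error = None
    for _ in range(attempts):
        try:
            collection.insert_one(doc)
            return
        except Exception as e:  # pymongo raises while a new primary is elected
            last_error = e
            time.sleep(delay_seconds)
    # surface the last failure instead of passing silently
    raise last_error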
-@pytest.mark.e2e_multi_cluster_dr -def test_replica_set_is_reachable_after_deletetion(mongodb_multi: MongoDBMulti): +def test_replica_set_is_reachable_after_deletion(mongodb_multi: MongoDBMulti | MongoDB): tester = mongodb_multi.tester() tester.assert_connectivity() -@pytest.mark.e2e_multi_cluster_dr def test_add_test_data_after_deletion(mongodb_multi_collection, capsys): max_attempts = 100 while max_attempts > 0: diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_enable_tls.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_enable_tls.py new file mode 100644 index 000000000..3f83581e8 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_enable_tls.py @@ -0,0 +1,46 @@ +from typing import List + +from kubetester import read_secret +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from kubetester.phase import Phase + +CERT_SECRET_PREFIX = "clustercert" +MDB_RESOURCE = "multi-cluster-replica-set" +BUNDLE_PEM_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert-pem" + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB, namespace: str): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + + +def test_enabled_tls_mongodb_multi( + mongodb_multi: MongoDBMulti | MongoDB, + namespace: str, + server_certs: str, + multi_cluster_issuer_ca_configmap: str, + member_cluster_clients: List[MultiClusterClient], +): + mongodb_multi.load() + mongodb_multi["spec"]["security"] = { + "certsSecretPrefix": CERT_SECRET_PREFIX, + "tls": { + "ca": multi_cluster_issuer_ca_configmap, + }, + } + mongodb_multi.update() + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1300) + + # assert the presence of the generated pem certificates in each member cluster + for client in member_cluster_clients: + read_secret( + namespace=namespace, + name=BUNDLE_PEM_SECRET_NAME, + api_client=client.api_client, + ) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_ldap.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_ldap.py new file mode 100644 index 000000000..edae362ed --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_ldap.py @@ -0,0 +1,163 @@ +from kubetester import wait_until +from kubetester.automation_config_tester import AutomationConfigTester +from kubetester.kubetester import KubernetesTester +from kubetester.ldap import LDAP_AUTHENTICATION_MECHANISM +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongodb_user import MongoDBUser +from kubetester.operator import Operator +from kubetester.phase import Phase +from tests.multicluster.conftest import cluster_spec_list + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +def test_mongodb_multi_pending(mongodb_multi: MongoDBMulti | MongoDB): + """ + This function tests CLOUDP-229222. The resource needs to enter the "Pending" state and without the automatic + recovery, it would stay like this forever (since we wouldn't push the new AC with a fix). 
+ """ + mongodb_multi.assert_reaches_phase(Phase.Pending, timeout=100) + + +def test_turn_tls_on_CLOUDP_229222(mongodb_multi: MongoDBMulti | MongoDB): + """ + This function tests CLOUDP-229222. The user attempts to fix the AutomationConfig. + Before updating the AutomationConfig, we need to ensure the operator pushed the wrong one to Ops Manager. + """ + + def wait_for_ac_exists() -> bool: + ac = mongodb_multi.get_automation_config_tester().automation_config + try: + _ = ac["ldap"]["transportSecurity"] + _ = ac["version"] + return True + except KeyError: + return False + + wait_until(wait_for_ac_exists, timeout=200) + current_version = mongodb_multi.get_automation_config_tester().automation_config["version"] + + def wait_for_ac_pushed() -> bool: + ac = mongodb_multi.get_automation_config_tester().automation_config + try: + transport_security = ac["ldap"]["transportSecurity"] + new_version = ac["version"] + if transport_security != "none": + return False + if new_version <= current_version: + return False + return True + except KeyError: + return False + + wait_until(wait_for_ac_pushed, timeout=500) + + resource = mongodb_multi.load() + + resource["spec"]["security"]["authentication"]["ldap"]["transportSecurity"] = "tls" + resource.update() + + +def test_multi_replicaset_CLOUDP_229222(mongodb_multi: MongoDBMulti | MongoDB): + """ + This function tests CLOUDP-229222. The recovery mechanism kicks in and pushes Automation Config. The ReplicaSet + goes into running state. + """ + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1900) + + +def test_restore_mongodb_multi_ldap_configuration(mongodb_multi: MongoDBMulti | MongoDB): + """ + This function restores the initial desired security configuration to carry on with the next tests normally. + """ + resource = mongodb_multi.load() + + resource["spec"]["security"]["authentication"]["modes"] = ["LDAP"] + resource["spec"]["security"]["authentication"]["ldap"]["transportSecurity"] = "tls" + resource["spec"]["security"]["authentication"]["agents"]["mode"] = "LDAP" + + resource.update() + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + + +def test_create_ldap_user(mongodb_multi: MongoDBMulti | MongoDB, user_ldap: MongoDBUser): + user_ldap.assert_reaches_phase(Phase.Updated) + ac = AutomationConfigTester(KubernetesTester.get_automation_config()) + ac.assert_authentication_mechanism_enabled(LDAP_AUTHENTICATION_MECHANISM, active_auth_mechanism=True) + ac.assert_expected_users(1) + + +def test_ldap_user_created_and_can_authenticate( + mongodb_multi: MongoDBMulti | MongoDB, user_ldap: MongoDBUser, ca_path: str +): + tester = mongodb_multi.tester() + tester.assert_ldap_authentication( + username=user_ldap["spec"]["username"], + password=user_ldap.password, + tls_ca_file=ca_path, + attempts=10, + ) + + +def test_ops_manager_state_correctly_updated(mongodb_multi: MongoDBMulti | MongoDB, user_ldap: MongoDBUser): + expected_roles = { + ("admin", "clusterAdmin"), + ("admin", "readWriteAnyDatabase"), + ("admin", "dbAdminAnyDatabase"), + } + ac = AutomationConfigTester(KubernetesTester.get_automation_config()) + ac.assert_expected_users(1) + ac.assert_has_user(user_ldap["spec"]["username"]) + ac.assert_user_has_roles(user_ldap["spec"]["username"], expected_roles) + ac.assert_authentication_mechanism_enabled("PLAIN", active_auth_mechanism=True) + ac.assert_authentication_enabled(expected_num_deployment_auth_mechanisms=1) + + assert "userCacheInvalidationInterval" in ac.automation_config["ldap"] + assert "timeoutMS" in 
ac.automation_config["ldap"] + assert ac.automation_config["ldap"]["userCacheInvalidationInterval"] == 60 + assert ac.automation_config["ldap"]["timeoutMS"] == 12345 + + +def test_deployment_is_reachable_with_ldap_agent(mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.tester() + tester.assert_deployment_reachable() + + +def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB, member_cluster_names): + mongodb_multi.reload() + mongodb_multi["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) + mongodb_multi.update() + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + + +def test_new_ldap_user_can_authenticate_after_scaling( + mongodb_multi: MongoDBMulti | MongoDB, user_ldap: MongoDBUser, ca_path: str +): + tester = mongodb_multi.tester() + tester.assert_ldap_authentication( + username=user_ldap["spec"]["username"], + password=user_ldap.password, + tls_ca_file=ca_path, + attempts=10, + ) + + +def test_disable_agent_auth(mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.reload() + mongodb_multi["spec"]["security"]["authentication"]["enabled"] = False + mongodb_multi["spec"]["security"]["authentication"]["agents"]["enabled"] = False + mongodb_multi.update() + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + + +def test_mongodb_multi_connectivity_with_no_auth(mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.tester() + tester.assert_connectivity() + + +def test_deployment_is_reachable_with_no_auth(mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.tester() + tester.assert_deployment_reachable() diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_ldap_custom_roles.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_ldap_custom_roles.py new file mode 100644 index 000000000..db3b9582c --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_ldap_custom_roles.py @@ -0,0 +1,80 @@ +from kubetester.automation_config_tester import AutomationConfigTester +from kubetester.kubetester import KubernetesTester +from kubetester.ldap import LDAP_AUTHENTICATION_MECHANISM +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongodb_user import MongoDBUser +from kubetester.operator import Operator +from kubetester.phase import Phase + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +def test_create_mongodb_multi_with_ldap(mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + + +def test_create_ldap_user(mongodb_multi: MongoDBMulti | MongoDB, user_ldap: MongoDBUser): + user_ldap.assert_reaches_phase(Phase.Updated) + ac = AutomationConfigTester(KubernetesTester.get_automation_config()) + ac.assert_authentication_mechanism_enabled(LDAP_AUTHENTICATION_MECHANISM, active_auth_mechanism=False) + ac.assert_expected_users(1) + + +def test_ldap_user_can_write_to_database(mongodb_multi: MongoDBMulti | MongoDB, user_ldap: MongoDBUser, ca_path: str): + tester = mongodb_multi.tester() + tester.assert_ldap_authentication( + username=user_ldap["spec"]["username"], + password=user_ldap.password, + tls_ca_file=ca_path, + db="foo", + collection="foo", + attempts=10, + ) + + +def test_ldap_user_can_write_to_other_collection( + mongodb_multi: MongoDBMulti | MongoDB, user_ldap: MongoDBUser, ca_path: str +): + tester = mongodb_multi.tester() + 
tester.assert_ldap_authentication( + username=user_ldap["spec"]["username"], + password=user_ldap.password, + tls_ca_file=ca_path, + db="foo", + collection="foo2", + attempts=10, + ) + + +def test_ldap_user_can_write_to_other_database( + mongodb_multi: MongoDBMulti | MongoDB, user_ldap: MongoDBUser, ca_path: str +): + tester = mongodb_multi.tester() + tester.assert_ldap_authentication( + username=user_ldap["spec"]["username"], + password=user_ldap.password, + tls_ca_file=ca_path, + db="foo2", + collection="foo", + attempts=10, + ) + + +def test_automation_config_has_roles(mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.get_automation_config_tester() + role = { + "role": "cn=users,ou=groups,dc=example,dc=org", + "db": "admin", + "privileges": [ + {"actions": ["insert"], "resource": {"collection": "foo", "db": "foo"}}, + { + "actions": ["insert", "find"], + "resource": {"collection": "", "db": "admin"}, + }, + ], + "authenticationRestrictions": [], + } + tester.assert_expected_role(role_index=0, expected_value=role) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_oidc_m2m_group.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_oidc_m2m_group.py new file mode 100644 index 000000000..28620bb2c --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_oidc_m2m_group.py @@ -0,0 +1,23 @@ +from kubetester.kubetester import KubernetesTester +from kubetester.mongodb import MongoDB, Phase +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.operator import Operator + + +class TestOIDCMultiCluster(KubernetesTester): + def test_deploy_operator(self, multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + def test_create_oidc_replica_set(self, mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + + def test_assert_connectivity(self, mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.tester() + tester.assert_oidc_authentication() + + def test_ops_manager_state_updated_correctly(self, mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.get_automation_config_tester() + tester.assert_authentication_mechanism_enabled("MONGODB-OIDC", active_auth_mechanism=False) + tester.assert_authentication_enabled(2) + tester.assert_expected_users(0) + tester.assert_authoritative_set(True) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_oidc_m2m_user.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_oidc_m2m_user.py new file mode 100644 index 000000000..ee01979e0 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_oidc_m2m_user.py @@ -0,0 +1,27 @@ +from kubetester.kubetester import KubernetesTester +from kubetester.mongodb import MongoDB, Phase +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongodb_user import MongoDBUser +from kubetester.operator import Operator + + +class TestOIDCMultiCluster(KubernetesTester): + def test_deploy_operator(self, multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + def test_create_oidc_replica_set(self, mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + + def test_create_user(self, oidc_user: MongoDBUser): + oidc_user.assert_reaches_phase(Phase.Updated, timeout=800) + + def test_assert_connectivity(self, mongodb_multi: MongoDBMulti | MongoDB): + 
tester = mongodb_multi.tester() + tester.assert_oidc_authentication() + + def test_ops_manager_state_updated_correctly(self, mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.get_automation_config_tester() + tester.assert_authentication_mechanism_enabled("MONGODB-OIDC", active_auth_mechanism=False) + tester.assert_authentication_enabled(2) + tester.assert_expected_users(1) + tester.assert_authoritative_set(True) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_pvc_resize.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_pvc_resize.py similarity index 57% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_pvc_resize.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_pvc_resize.py index adc9cc668..2fdcb2b54 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_pvc_resize.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_pvc_resize.py @@ -1,50 +1,27 @@ from typing import List -import kubernetes -import pytest from kubernetes import client -from kubetester import get_statefulset, try_load -from kubetester.kubetester import fixture as yaml_fixture -from kubetester.mongodb_multi import MongoDBMulti +from kubetester import get_statefulset +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from tests.multicluster.conftest import cluster_spec_list RESOURCE_NAME = "multi-replica-set-pvc-resize" RESIZED_STORAGE_SIZE = "2Gi" -@pytest.fixture(scope="module") -def mongodb_multi( - namespace: str, - multi_cluster_issuer_ca_configmap: str, - central_cluster_client: kubernetes.client.ApiClient, - member_cluster_names: list[str], -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi-pvc-resize.yaml"), RESOURCE_NAME, namespace) - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - - try_load(resource) - - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - return resource - - -@pytest.mark.e2e_multi_cluster_pvc_resize def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_pvc_resize -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.update() mongodb_multi.assert_reaches_phase(Phase.Running, timeout=2000) -@pytest.mark.e2e_multi_cluster_pvc_resize -def test_mongodb_multi_resize_pvc_state_changes(mongodb_multi: MongoDBMulti): +def test_mongodb_multi_resize_pvc_state_changes(mongodb_multi: MongoDBMulti | MongoDB): # Update the resource mongodb_multi.load() mongodb_multi["spec"]["statefulSet"]["spec"]["volumeClaimTemplates"][0]["spec"]["resources"]["requests"][ @@ -55,9 +32,8 @@ def test_mongodb_multi_resize_pvc_state_changes(mongodb_multi: MongoDBMulti): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) -@pytest.mark.e2e_multi_cluster_pvc_resize def test_mongodb_multi_resize_finished( - mongodb_multi: MongoDBMulti, namespace: str, member_cluster_clients: List[MultiClusterClient] + mongodb_multi: MongoDBMulti | MongoDB, namespace: str, member_cluster_clients: List[MultiClusterClient] ): statefulsets = [] for i, c in enumerate(member_cluster_clients): diff --git 
a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_recover_clusterwide.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_recover_clusterwide.py similarity index 66% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_recover_clusterwide.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_recover_clusterwide.py index 31d4fb5a3..87bc17ae6 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_recover_clusterwide.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_recover_clusterwide.py @@ -1,4 +1,3 @@ -import os from typing import Dict, List import kubernetes @@ -13,112 +12,26 @@ read_secret, statefulset_is_deleted, ) -from kubetester.kubetester import KubernetesTester -from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import run_periodically +from kubetester.kubetester import KubernetesTester, run_periodically +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from pytest import fixture, mark from tests.conftest import ( - _install_multi_cluster_operator, - run_kube_config_creation_tool, run_multi_cluster_recovery_tool, ) +from tests.constants import MULTI_CLUSTER_OPERATOR_NAME +from tests.multicluster import prepare_multi_cluster_namespaces +from tests.multicluster.conftest import ( + create_service_entries_objects, +) -from ..constants import MULTI_CLUSTER_OPERATOR_NAME, OPERATOR_NAME -from . import prepare_multi_cluster_namespaces -from .conftest import cluster_spec_list, create_service_entries_objects from .multi_cluster_clusterwide import create_namespace FAILED_MEMBER_CLUSTER_NAME = "kind-e2e-cluster-3" -@fixture(scope="module") -def mdba_ns(namespace: str): - return "{}-mdb-ns-a".format(namespace) - - -@fixture(scope="module") -def mdbb_ns(namespace: str): - return "{}-mdb-ns-b".format(namespace) - - -@fixture(scope="module") -def mongodb_multi_a( - central_cluster_client: kubernetes.client.ApiClient, - mdba_ns: str, - member_cluster_names: List[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", mdba_ns) - resource.set_version(custom_mdb_version) - - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - resource.update() - return resource - - -@fixture(scope="module") -def mongodb_multi_b( - central_cluster_client: kubernetes.client.ApiClient, - mdbb_ns: str, - member_cluster_names: List[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", mdbb_ns) - resource.set_version(custom_mdb_version) - - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - resource.update() - return resource - - -@fixture(scope="module") -def install_operator( - namespace: str, - central_cluster_name: str, - multi_cluster_operator_installation_config: Dict[str, str], - central_cluster_client: client.ApiClient, - member_cluster_clients: List[MultiClusterClient], - member_cluster_names: List[str], - mdba_ns: str, - mdbb_ns: str, -) -> 
Operator: - os.environ["HELM_KUBECONTEXT"] = central_cluster_name - member_cluster_namespaces = mdba_ns + "," + mdbb_ns - run_kube_config_creation_tool( - member_cluster_names, - namespace, - namespace, - member_cluster_names, - True, - service_account_name=MULTI_CLUSTER_OPERATOR_NAME, - operator_name=OPERATOR_NAME, - ) - - return _install_multi_cluster_operator( - namespace, - multi_cluster_operator_installation_config, - central_cluster_client, - member_cluster_clients, - { - "operator.deployment_name": MULTI_CLUSTER_OPERATOR_NAME, - "operator.name": MULTI_CLUSTER_OPERATOR_NAME, - "operator.createOperatorServiceAccount": "false", - "operator.watchNamespace": member_cluster_namespaces, - "multiCluster.performFailOver": "false", - }, - central_cluster_name, - operator_name=MULTI_CLUSTER_OPERATOR_NAME, - ) - - -@mark.e2e_multi_cluster_recover_clusterwide def test_label_operator_namespace(namespace: str, central_cluster_client: kubernetes.client.ApiClient): api = client.CoreV1Api(api_client=central_cluster_client) @@ -129,7 +42,6 @@ def test_label_operator_namespace(namespace: str, central_cluster_client: kubern api.replace_namespace(name=namespace, body=ns) -@mark.e2e_multi_cluster_recover_clusterwide def test_create_namespaces( namespace: str, mdba_ns: str, @@ -161,13 +73,11 @@ def test_create_namespaces( ) -@mark.e2e_multi_cluster_recover_clusterwide def test_create_service_entry(service_entries: List[CustomObject]): for service_entry in service_entries: service_entry.update() -@mark.e2e_multi_cluster_recover_clusterwide def test_delete_cluster_role_and_binding( central_cluster_client: kubernetes.client.ApiClient, member_cluster_clients: List[MultiClusterClient], @@ -188,12 +98,10 @@ def test_delete_cluster_role_and_binding( delete_cluster_role_binding(name, client.api_client) -@mark.e2e_multi_cluster_recover_clusterwide def test_deploy_operator(install_operator: Operator): install_operator.assert_is_running() -@mark.e2e_multi_cluster_recover_clusterwide def test_prepare_namespace( multi_cluster_operator_installation_config: Dict[str, str], member_cluster_clients: List[MultiClusterClient], @@ -216,7 +124,6 @@ def test_prepare_namespace( ) -@mark.e2e_multi_cluster_recover_clusterwide def test_copy_configmap_and_secret_across_ns( namespace: str, central_cluster_client: client.ApiClient, @@ -235,13 +142,11 @@ def test_copy_configmap_and_secret_across_ns( create_or_update_secret(mdbb_ns, "my-credentials", data, api_client=central_cluster_client) -@mark.e2e_multi_cluster_recover_clusterwide -def test_create_mongodb_multi_nsa_nsb(mongodb_multi_a: MongoDBMulti, mongodb_multi_b: MongoDBMulti): +def test_create_mongodb_multi_nsa_nsb(mongodb_multi_a: MongoDBMulti | MongoDB, mongodb_multi_b: MongoDBMulti | MongoDB): mongodb_multi_a.assert_reaches_phase(Phase.Running, timeout=1500) mongodb_multi_b.assert_reaches_phase(Phase.Running, timeout=1500) -@mark.e2e_multi_cluster_recover_clusterwide def test_update_service_entry_block_failed_cluster_traffic( namespace: str, central_cluster_client: kubernetes.client.ApiClient, @@ -258,10 +163,9 @@ def test_update_service_entry_block_failed_cluster_traffic( service_entry.update() -@mark.e2e_multi_cluster_recover_clusterwide def test_delete_database_statefulsets_in_failed_cluster( - mongodb_multi_a: MongoDBMulti, - mongodb_multi_b: MongoDBMulti, + mongodb_multi_a: MongoDBMulti | MongoDB, + mongodb_multi_b: MongoDBMulti | MongoDB, mdba_ns: str, mdbb_ns: str, member_cluster_names: list[str], @@ -307,19 +211,16 @@ def 
test_delete_database_statefulsets_in_failed_cluster( ) -@mark.e2e_multi_cluster_recover_clusterwide -def test_mongodb_multi_nsa_enters_failed_stated(mongodb_multi_a: MongoDBMulti): +def test_mongodb_multi_nsa_enters_failed_stated(mongodb_multi_a: MongoDBMulti | MongoDB): mongodb_multi_a.load() mongodb_multi_a.assert_reaches_phase(Phase.Failed, timeout=100) -@mark.e2e_multi_cluster_recover_clusterwide -def test_mongodb_multi_nsb_enters_failed_stated(mongodb_multi_b: MongoDBMulti): +def test_mongodb_multi_nsb_enters_failed_stated(mongodb_multi_b: MongoDBMulti | MongoDB): mongodb_multi_b.load() mongodb_multi_b.assert_reaches_phase(Phase.Failed, timeout=100) -@mark.e2e_multi_cluster_recover_clusterwide def test_recover_operator_remove_cluster( member_cluster_names: List[str], namespace: str, @@ -338,8 +239,7 @@ def test_recover_operator_remove_cluster( operator.assert_is_running() -@mark.e2e_multi_cluster_recover_clusterwide -def test_mongodb_multi_nsa_recovers_removing_cluster(mongodb_multi_a: MongoDBMulti): +def test_mongodb_multi_nsa_recovers_removing_cluster(mongodb_multi_a: MongoDBMulti | MongoDB): mongodb_multi_a.load() mongodb_multi_a["metadata"]["annotations"]["failedClusters"] = None @@ -349,8 +249,7 @@ def test_mongodb_multi_nsa_recovers_removing_cluster(mongodb_multi_a: MongoDBMul mongodb_multi_a.assert_reaches_phase(Phase.Running, timeout=1500) -@mark.e2e_multi_cluster_recover_clusterwide -def test_mongodb_multi_nsb_recovers_removing_cluster(mongodb_multi_b: MongoDBMulti): +def test_mongodb_multi_nsb_recovers_removing_cluster(mongodb_multi_b: MongoDBMulti | MongoDB): mongodb_multi_b.load() mongodb_multi_b["metadata"]["annotations"]["failedClusters"] = None diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_recover_network_partition.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_recover_network_partition.py similarity index 71% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_recover_network_partition.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_recover_network_partition.py index 28b910efb..8e25bf490 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_recover_network_partition.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_recover_network_partition.py @@ -4,41 +4,23 @@ from kubeobject import CustomObject from kubernetes import client from kubetester import delete_statefulset, statefulset_is_deleted -from kubetester.kubetester import fixture as yaml_fixture from kubetester.kubetester import run_periodically +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.operator import Operator from kubetester.phase import Phase -from pytest import fixture, mark from tests.conftest import ( get_member_cluster_api_client, run_multi_cluster_recovery_tool, ) - -from ..constants import MULTI_CLUSTER_OPERATOR_NAME -from .conftest import cluster_spec_list, create_service_entries_objects +from tests.constants import MULTI_CLUSTER_OPERATOR_NAME +from tests.multicluster.conftest import ( + create_service_entries_objects, +) FAILED_MEMBER_CLUSTER_NAME = "kind-e2e-cluster-3" -RESOURCE_NAME = "multi-replica-set" -@fixture(scope="module") -def mongodb_multi( - central_cluster_client: client.ApiClient, - namespace: str, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), 
RESOURCE_NAME, namespace) - resource.set_version(custom_mdb_version) - resource["spec"]["persistent"] = False - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - resource.api = client.CustomObjectsApi(central_cluster_client) - - return resource - - -@mark.e2e_multi_cluster_recover_network_partition def test_label_namespace(namespace: str, central_cluster_client: client.ApiClient): api = client.CoreV1Api(api_client=central_cluster_client) @@ -50,24 +32,20 @@ def test_label_namespace(namespace: str, central_cluster_client: client.ApiClien api.replace_namespace(name=namespace, body=ns) -@mark.e2e_multi_cluster_recover_network_partition def test_create_service_entry(service_entries: List[CustomObject]): for service_entry in service_entries: service_entry.update() -@mark.e2e_multi_cluster_recover_network_partition def test_deploy_operator(multi_cluster_operator_manual_remediation: Operator): multi_cluster_operator_manual_remediation.assert_is_running() -@mark.e2e_multi_cluster_recover_network_partition -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.update() mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) -@mark.e2e_multi_cluster_recover_network_partition def test_update_service_entry_block_failed_cluster_traffic( namespace: str, central_cluster_client: kubernetes.client.ApiClient, @@ -86,9 +64,8 @@ def test_update_service_entry_block_failed_cluster_traffic( service_entry.update() -@mark.e2e_multi_cluster_recover_network_partition def test_delete_database_statefulset_in_failed_cluster( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_names: list[str], ): failed_cluster_idx = member_cluster_names.index(FAILED_MEMBER_CLUSTER_NAME) @@ -114,9 +91,8 @@ def test_delete_database_statefulset_in_failed_cluster( ) -@mark.e2e_multi_cluster_recover_network_partition def test_mongodb_multi_enters_failed_state( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, namespace: str, central_cluster_client: client.ApiClient, ): @@ -124,7 +100,6 @@ def test_mongodb_multi_enters_failed_state( mongodb_multi.assert_reaches_phase(Phase.Failed, timeout=100) -@mark.e2e_multi_cluster_recover_network_partition def test_recover_operator_remove_cluster( member_cluster_names: List[str], namespace: str, @@ -141,8 +116,9 @@ def test_recover_operator_remove_cluster( operator.assert_is_running() -@mark.e2e_multi_cluster_recover_network_partition -def test_mongodb_multi_recovers_removing_cluster(mongodb_multi: MongoDBMulti, member_cluster_names: List[str]): +def test_mongodb_multi_recovers_removing_cluster( + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_names: List[str] +): mongodb_multi.load() last_transition_time = mongodb_multi.get_status_last_transition_time() diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set.py similarity index 73% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set.py index 599015d46..175eaf857 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set.py @@ -6,57 +6,18 @@ from kubernetes.client.rest import 
ApiException from kubetester import delete_statefulset, get_statefulset, wait_until from kubetester.kubetester import KubernetesTester -from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import skip_if_local +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase from tests.conftest import ( assert_log_rotation_process, - member_cluster_clients, - setup_log_rotate_for_agents, ) -from tests.multicluster.conftest import cluster_spec_list MONGODB_PORT = 30000 -@pytest.fixture(scope="module") -def mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - namespace: str, - member_cluster_names, - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi-central-sts-override.yaml"), - "multi-replica-set", - namespace, - ) - resource.set_version(custom_mdb_version) - resource["spec"]["persistent"] = False - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - additional_mongod_config = { - "systemLog": {"logAppend": True, "verbosity": 4}, - "operationProfiling": {"mode": "slowOp"}, - "net": {"port": MONGODB_PORT}, - } - - resource["spec"]["additionalMongodConfig"] = additional_mongod_config - setup_log_rotate_for_agents(resource) - - # TODO: incorporate this into the base class. - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - - resource.set_architecture_annotation() - - resource.update() - return resource - - -@pytest.mark.e2e_multi_cluster_replica_set def test_create_kube_config_file(cluster_clients: Dict, central_cluster_name: str, member_cluster_names: str): clients = cluster_clients @@ -66,19 +27,16 @@ def test_create_kube_config_file(cluster_clients: Dict, central_cluster_name: st assert central_cluster_name in clients -@pytest.mark.e2e_multi_cluster_replica_set def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_replica_set -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=2000) -@pytest.mark.e2e_multi_cluster_replica_set def test_statefulset_is_created_across_multiple_clusters( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): def statefulsets_are_ready(): @@ -105,9 +63,8 @@ def statefulsets_are_ready(): wait_until(statefulsets_are_ready, timeout=600) -@pytest.mark.e2e_multi_cluster_replica_set def test_pvc_not_created( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], namespace: str, ): @@ -118,15 +75,12 @@ def test_pvc_not_created( assert e.value.reason == "Not Found" -@skip_if_local -@pytest.mark.e2e_multi_cluster_replica_set -def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti): +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB): tester = mongodb_multi.tester(port=MONGODB_PORT) tester.assert_connectivity() -@pytest.mark.e2e_multi_cluster_replica_set -def test_statefulset_overrides(mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient]): +def test_statefulset_overrides(mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: 
List[MultiClusterClient]): statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients) # assert sts.podspec override in cluster1 cluster_one_client = member_cluster_clients[0] @@ -134,9 +88,8 @@ def test_statefulset_overrides(mongodb_multi: MongoDBMulti, member_cluster_clien assert_container_in_sts("sidecar1", cluster_one_sts) -@pytest.mark.e2e_multi_cluster_replica_set def test_headless_service_creation( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, namespace: str, member_cluster_clients: List[MultiClusterClient], ): @@ -157,8 +110,7 @@ def test_headless_service_creation( assert len(ep_two.subsets[0].addresses) == mongodb_multi.get_item_spec(cluster_two_client.cluster_name)["members"] -@pytest.mark.e2e_multi_cluster_replica_set -def test_mongodb_options(mongodb_multi: MongoDBMulti): +def test_mongodb_options(mongodb_multi: MongoDBMulti | MongoDB): automation_config_tester = mongodb_multi.get_automation_config_tester() for process in automation_config_tester.get_replica_set_processes(mongodb_multi.name): assert process["args2_6"]["systemLog"]["verbosity"] == 4 @@ -168,8 +120,9 @@ def test_mongodb_options(mongodb_multi: MongoDBMulti): assert_log_rotation_process(process) -@pytest.mark.e2e_multi_cluster_replica_set -def test_update_additional_options(mongodb_multi: MongoDBMulti, central_cluster_client: kubernetes.client.ApiClient): +def test_update_additional_options( + mongodb_multi: MongoDBMulti | MongoDB, central_cluster_client: kubernetes.client.ApiClient +): mongodb_multi["spec"]["additionalMongodConfig"]["systemLog"]["verbosity"] = 2 mongodb_multi["spec"]["additionalMongodConfig"]["net"]["maxIncomingConnections"] = 100 # update uses json merge+patch which means that deleting keys is done by setting them to None @@ -180,8 +133,7 @@ def test_update_additional_options(mongodb_multi: MongoDBMulti, central_cluster_ mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) -@pytest.mark.e2e_multi_cluster_replica_set -def test_mongodb_options_were_updated(mongodb_multi: MongoDBMulti): +def test_mongodb_options_were_updated(mongodb_multi: MongoDBMulti | MongoDB): automation_config_tester = mongodb_multi.get_automation_config_tester() for process in automation_config_tester.get_replica_set_processes(mongodb_multi.name): assert process["args2_6"]["systemLog"]["verbosity"] == 2 @@ -192,10 +144,9 @@ def test_mongodb_options_were_updated(mongodb_multi: MongoDBMulti): assert "mode" not in process["args2_6"]["operationProfiling"] -@pytest.mark.e2e_multi_cluster_replica_set def test_delete_member_cluster_sts( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): sts_name = "{}-0".format(mongodb_multi.name) @@ -223,8 +174,9 @@ def check_if_sts_was_recreated() -> bool: mongodb_multi.assert_reaches_phase(Phase.Running, timeout=400) -@pytest.mark.e2e_multi_cluster_replica_set -def test_cleanup_on_mdbm_delete(mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient]): +def test_cleanup_on_mdbm_delete( + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient] +): statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients) cluster_one_client = member_cluster_clients[0] cluster_one_sts = statefulsets[cluster_one_client.cluster_name] diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_deletion.py 
b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_deletion.py similarity index 69% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_deletion.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_deletion.py index fd387cdda..55b769a83 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_deletion.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_deletion.py @@ -1,57 +1,34 @@ from typing import List import kubernetes -import pytest -from kubetester import try_load, wait_until +from kubetester import wait_until from kubetester.automation_config_tester import AutomationConfigTester from kubetester.kubetester import KubernetesTester -from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase from tests import test_logger -from tests.multicluster.conftest import cluster_spec_list logger = test_logger.get_test_logger(__name__) -@pytest.fixture(scope="module") -def mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - namespace: str, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", namespace) - - if try_load(resource): - return resource - - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - return resource.update() - - -@pytest.mark.e2e_multi_cluster_replica_set_deletion def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_replica_set_deletion -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) -@pytest.mark.e2e_multi_cluster_replica_set_deletion -def test_automation_config_has_been_updated(mongodb_multi: MongoDBMulti): +def test_automation_config_has_been_updated(mongodb_multi: MongoDBMulti | MongoDB): tester = AutomationConfigTester(KubernetesTester.get_automation_config()) processes = tester.get_replica_set_processes(mongodb_multi.name) assert len(processes) == 5 -@pytest.mark.e2e_multi_cluster_replica_set_deletion -def test_delete_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_delete_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.delete() def wait_for_deleted() -> bool: @@ -68,7 +45,6 @@ def wait_for_deleted() -> bool: wait_until(wait_for_deleted, timeout=60) -@pytest.mark.e2e_multi_cluster_replica_set_deletion def test_deployment_has_been_removed_from_automation_config(): def wait_until_automation_config_is_clean() -> bool: tester = AutomationConfigTester(KubernetesTester.get_automation_config()) @@ -82,9 +58,8 @@ def wait_until_automation_config_is_clean() -> bool: wait_until(wait_until_automation_config_is_clean, timeout=60) -@pytest.mark.e2e_multi_cluster_replica_set_deletion def test_kubernetes_resources_have_been_cleaned_up( - mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient] + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient] ): def wait_until_secrets_are_removed() -> bool: 
try: diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_ignore_unknown_users.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_ignore_unknown_users.py new file mode 100644 index 000000000..a9e3c17ff --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_ignore_unknown_users.py @@ -0,0 +1,27 @@ +from kubetester.automation_config_tester import AutomationConfigTester +from kubetester.kubetester import KubernetesTester +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.operator import Operator +from kubetester.phase import Phase + + +def test_replica_set(multi_cluster_operator: Operator, mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + + +def test_authoritative_set_false(mongodb_multi: MongoDBMulti | MongoDB): + tester = AutomationConfigTester(KubernetesTester.get_automation_config()) + tester.assert_authoritative_set(False) + + +def test_set_ignore_unknown_users_false(mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.load() + mongodb_multi["spec"]["security"]["authentication"]["ignoreUnknownUsers"] = False + mongodb_multi.update() + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) + + +def test_authoritative_set_true(mongodb_multi: MongoDBMulti | MongoDB): + tester = AutomationConfigTester(KubernetesTester.get_automation_config()) + tester.assert_authoritative_set(True) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_member_options.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_member_options.py similarity index 63% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_member_options.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_member_options.py index 7990943e8..5ca347cbb 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_member_options.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_member_options.py @@ -1,83 +1,11 @@ from typing import Dict -import kubernetes -import pytest -from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.operator import Operator from kubetester.phase import Phase -from tests.multicluster.conftest import cluster_spec_list - - -@pytest.fixture(scope="module") -def mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - namespace: str, - member_cluster_names, - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi.yaml"), - "multi-replica-set", - namespace, - ) - resource.set_version(custom_mdb_version) - member_options = [ - [ - { - "votes": 1, - "priority": "0.3", - "tags": { - "cluster": "cluster-1", - "region": "weur", - }, - }, - { - "votes": 1, - "priority": "0.7", - "tags": { - "cluster": "cluster-1", - "region": "eeur", - }, - }, - ], - [ - { - "votes": 1, - "priority": "0.2", - "tags": { - "cluster": "cluster-2", - "region": "apac", - }, - }, - ], - [ - { - "votes": 1, - "priority": "1.3", - "tags": { - "cluster": "cluster-3", - "region": "nwus", - }, - }, - { - "votes": 1, - "priority": "2.7", - "tags": { - "cluster": "cluster-3", - "region": "seus", - }, - }, - ], 
- ] - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2], member_options) - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - - resource.update() - return resource - - -@pytest.mark.e2e_multi_cluster_replica_set_member_options + + def test_create_kube_config_file(cluster_clients: Dict, central_cluster_name: str, member_cluster_names: str): clients = cluster_clients @@ -87,18 +15,15 @@ def test_create_kube_config_file(cluster_clients: Dict, central_cluster_name: st assert central_cluster_name in clients -@pytest.mark.e2e_multi_cluster_replica_set_member_options def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_replica_set_member_options -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) -@pytest.mark.e2e_multi_cluster_replica_set_member_options -def test_mongodb_multi_member_options_ac(mongodb_multi: MongoDBMulti): +def test_mongodb_multi_member_options_ac(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() config = mongodb_multi.get_automation_config_tester().automation_config rs = config["replicaSets"] @@ -129,8 +54,7 @@ def test_mongodb_multi_member_options_ac(mongodb_multi: MongoDBMulti): assert member5["tags"] == {"cluster": "cluster-3", "region": "seus"} -@pytest.mark.e2e_multi_cluster_replica_set_member_options -def test_mongodb_multi_update_member_options(mongodb_multi: MongoDBMulti): +def test_mongodb_multi_update_member_options(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() mongodb_multi["spec"]["clusterSpecList"][0]["memberConfig"][0] = { @@ -158,8 +82,7 @@ def test_mongodb_multi_update_member_options(mongodb_multi: MongoDBMulti): } -@pytest.mark.e2e_multi_cluster_replica_set_member_options -def test_mongodb_multi_set_member_votes_to_0(mongodb_multi: MongoDBMulti): +def test_mongodb_multi_set_member_votes_to_0(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() mongodb_multi["spec"]["clusterSpecList"][1]["memberConfig"][0]["votes"] = 0 @@ -175,8 +98,7 @@ def test_mongodb_multi_set_member_votes_to_0(mongodb_multi: MongoDBMulti): assert updated_member["priority"] == 0.0 -@pytest.mark.e2e_multi_cluster_replica_set_member_options -def test_mongodb_multi_set_invalid_votes_and_priority(mongodb_multi: MongoDBMulti): +def test_mongodb_multi_set_invalid_votes_and_priority(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() mongodb_multi["spec"]["clusterSpecList"][1]["memberConfig"][0]["votes"] = 0 @@ -188,8 +110,7 @@ def test_mongodb_multi_set_invalid_votes_and_priority(mongodb_multi: MongoDBMult ) -@pytest.mark.e2e_multi_cluster_replica_set_member_options -def test_mongodb_multi_set_recover_valid_member_options(mongodb_multi: MongoDBMulti): +def test_mongodb_multi_set_recover_valid_member_options(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() # A member with priority 0.0 could still be a voting member. It cannot become primary and cannot trigger elections. 
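# [Hedged editor's sketch, not part of this test file: the invalid/recover pair of
# tests around here exercises the replica set rule that a non-voting member
# (votes == 0) must also have priority 0, while a priority-0 member may still vote.
# member_config_is_valid is a hypothetical helper illustrating that rule.]
def member_config_is_valid(member_config: dict) -> bool:
    votes = int(member_config.get("votes", 1))
    priority = float(member_config.get("priority", "1.0"))
    # a member that cannot vote must not carry election priority
    return votes > 0 or priority == 0.0

assert member_config_is_valid({"votes": 1, "priority": "0.0"})  # voting, priority-0: allowed
assert not member_config_is_valid({"votes": 0, "priority": "1.3"})  # non-voting with priority: rejected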
# https://www.mongodb.com/docs/v5.0/core/replica-set-priority-0-member/#priority-0-replica-set-members @@ -199,8 +120,7 @@ def test_mongodb_multi_set_recover_valid_member_options(mongodb_multi: MongoDBMu mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) -@pytest.mark.e2e_multi_cluster_replica_set_member_options -def test_mongodb_multi_set_only_one_vote_per_member(mongodb_multi: MongoDBMulti): +def test_mongodb_multi_set_only_one_vote_per_member(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() mongodb_multi["spec"]["clusterSpecList"][2]["memberConfig"][1]["votes"] = 3 diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_migration.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_migration.py new file mode 100644 index 000000000..50d4c8efb --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_migration.py @@ -0,0 +1,52 @@ +from typing import List + +from kubetester.kubetester import ( + assert_statefulset_architecture, + get_default_architecture, +) +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongotester import MongoDBBackgroundTester +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from kubetester.phase import Phase + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +def test_create_mongodb_multi_running(mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.update() + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) + + +def test_start_background_checker(mdb_health_checker: MongoDBBackgroundTester): + mdb_health_checker.start() + + +def test_migrate_architecture(mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient]): + """ + If the E2E is running with default architecture as non-static, + then the test will migrate to static and vice versa. 
+ """ + original_default_architecture = get_default_architecture() + target_architecture = "non-static" if original_default_architecture == "static" else "static" + + mongodb_multi.trigger_architecture_migration() + + mongodb_multi.load() + assert mongodb_multi["metadata"]["annotations"]["mongodb.com/v1.architecture"] == target_architecture + + mongodb_multi.assert_abandons_phase(Phase.Running, timeout=1800) + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1800) + + statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients) + for statefulset in statefulsets.values(): + assert_statefulset_architecture(statefulset, target_architecture) + + +def test_mdb_healthy_throughout_change_version( + mdb_health_checker: MongoDBBackgroundTester, +): + mdb_health_checker.assert_healthiness() diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_scale_down.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_scale_down.py similarity index 50% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_scale_down.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_scale_down.py index 7ba868397..2c6500e67 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_scale_down.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_scale_down.py @@ -1,84 +1,24 @@ from typing import List -import kubernetes -import pytest -from kubetester import try_load from kubetester.automation_config_tester import AutomationConfigTester -from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs -from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import skip_if_local +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongotester import with_tls from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from tests.multicluster.conftest import cluster_spec_list - -RESOURCE_NAME = "multi-replica-set" -BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" - - -@pytest.fixture(scope="module") -def mongodb_multi_unmarshalled( - namespace: str, - multi_cluster_issuer_ca_configmap: str, - central_cluster_client: kubernetes.client.ApiClient, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), RESOURCE_NAME, namespace) - resource.set_version(custom_mdb_version) - # start at one member in each cluster - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - resource["spec"]["security"] = { - "certsSecretPrefix": "prefix", - "tls": { - "ca": multi_cluster_issuer_ca_configmap, - }, - } - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - return resource - - -@pytest.fixture(scope="module") -def server_certs( - multi_cluster_issuer: str, - mongodb_multi_unmarshalled: MongoDBMulti, - member_cluster_clients: List[MultiClusterClient], - central_cluster_client: kubernetes.client.ApiClient, -): - - return create_multi_cluster_mongodb_tls_certs( - multi_cluster_issuer, - BUNDLE_SECRET_NAME, - member_cluster_clients, - central_cluster_client, - mongodb_multi_unmarshalled, - ) - - -@pytest.fixture(scope="module") -def mongodb_multi(mongodb_multi_unmarshalled: 
MongoDBMulti, server_certs: str) -> MongoDBMulti: - if try_load(mongodb_multi_unmarshalled): - return mongodb_multi_unmarshalled - - return mongodb_multi_unmarshalled.update() -@pytest.mark.e2e_multi_cluster_replica_set_scale_down def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_replica_set_scale_down -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) -@pytest.mark.e2e_multi_cluster_replica_set_scale_down def test_statefulsets_have_been_created_correctly( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients) @@ -95,14 +35,12 @@ def test_statefulsets_have_been_created_correctly( assert cluster_three_sts.status.ready_replicas == 2 -@pytest.mark.e2e_multi_cluster_replica_set_scale_down def test_ops_manager_has_been_updated_correctly_before_scaling(): ac = AutomationConfigTester() ac.assert_processes_size(5) -@pytest.mark.e2e_multi_cluster_replica_set_scale_down -def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() mongodb_multi["spec"]["clusterSpecList"][0]["members"] = 1 # Testing scaling down to zero is required to test fix for https://jira.mongodb.org/browse/CLOUDP-324655 @@ -113,9 +51,8 @@ def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1800) -@pytest.mark.e2e_multi_cluster_replica_set_scale_down def test_statefulsets_have_been_scaled_down_correctly( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients) @@ -132,14 +69,11 @@ def test_statefulsets_have_been_scaled_down_correctly( assert cluster_three_sts.status.ready_replicas == 2 -@pytest.mark.e2e_multi_cluster_replica_set_scale_down def test_ops_manager_has_been_updated_correctly_after_scaling(): ac = AutomationConfigTester() ac.assert_processes_size(3) -@skip_if_local -@pytest.mark.e2e_multi_cluster_replica_set_scale_down -def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str): +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB, ca_path: str): tester = mongodb_multi.tester() tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_scale_up.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_scale_up.py similarity index 63% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_scale_up.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_scale_up.py index 7640c2c4a..4b18b23b0 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_scale_up.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_scale_up.py @@ -1,84 +1,25 @@ from typing import List -import kubernetes import kubetester -import pytest from kubetester.automation_config_tester import AutomationConfigTester -from kubetester.certs_mongodb_multi import 
create_multi_cluster_mongodb_tls_certs -from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import skip_if_local +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongotester import with_tls from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from tests.multicluster.conftest import cluster_spec_list - -RESOURCE_NAME = "multi-replica-set" -BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" - - -@pytest.fixture(scope="module") -def mongodb_multi_unmarshalled( - namespace: str, - multi_cluster_issuer_ca_configmap: str, - central_cluster_client: kubernetes.client.ApiClient, - member_cluster_names: List[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), RESOURCE_NAME, namespace) - resource.set_version(custom_mdb_version) - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - resource["spec"]["security"] = { - "certsSecretPrefix": "prefix", - "tls": { - "ca": multi_cluster_issuer_ca_configmap, - }, - } - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - return resource - - -@pytest.fixture(scope="module") -def server_certs( - multi_cluster_issuer: str, - mongodb_multi_unmarshalled: MongoDBMulti, - member_cluster_clients: List[MultiClusterClient], - central_cluster_client: kubernetes.client.ApiClient, -): - - return create_multi_cluster_mongodb_tls_certs( - multi_cluster_issuer, - BUNDLE_SECRET_NAME, - member_cluster_clients, - central_cluster_client, - mongodb_multi_unmarshalled, - ) - - -@pytest.fixture(scope="module") -def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, server_certs: str) -> MongoDBMulti: - # we have created certs for all 5 members, but want to start at only 3. 
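# [Hedged editor's sketch: cluster_spec_list, whose imports this refactor removes in
# favour of shared fixtures, is assumed to zip member cluster names with per-cluster
# member counts (plus optional per-member configs) into clusterSpecList entries,
# e.g. [2, 1, 2] -> a 5-member replica set spread across three clusters.]
from typing import Optional


def cluster_spec_list(
    cluster_names: list[str],
    members: list[int],
    member_configs: Optional[list[list[dict]]] = None,
) -> list[dict]:
    specs = []
    for i, (name, count) in enumerate(zip(cluster_names, members)):
        spec = {"clusterName": name, "members": count}
        if member_configs is not None:
            # per-member options such as votes/priority/tags, as in the member-options tests
            spec["memberConfig"] = member_configs[i]
        specs.append(spec)
    return specs


assert cluster_spec_list(["kind-e2e-cluster-1", "kind-e2e-cluster-2"], [2, 1]) == [
    {"clusterName": "kind-e2e-cluster-1", "members": 2},
    {"clusterName": "kind-e2e-cluster-2", "members": 1},
]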
- mongodb_multi_unmarshalled["spec"]["clusterSpecList"][0]["members"] = 1 - mongodb_multi_unmarshalled["spec"]["clusterSpecList"][1]["members"] = 1 - mongodb_multi_unmarshalled["spec"]["clusterSpecList"][2]["members"] = 1 - return mongodb_multi_unmarshalled.create() -@pytest.mark.e2e_multi_cluster_replica_set_scale_up def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_replica_set_scale_up -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) -@pytest.mark.e2e_multi_cluster_replica_set_scale_up def test_statefulsets_have_been_created_correctly( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): # Even though we already verified, in previous test, that the MongoDBMultiCluster resource's phase is running (that would mean all STSs are ready); @@ -107,14 +48,12 @@ def fn(): kubetester.wait_until(fn, timeout=60, message="Verifying sts has correct number of replicas in cluster three") -@pytest.mark.e2e_multi_cluster_replica_set_scale_up def test_ops_manager_has_been_updated_correctly_before_scaling(): ac = AutomationConfigTester() ac.assert_processes_size(3) -@pytest.mark.e2e_multi_cluster_replica_set_scale_up -def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() mongodb_multi["spec"]["clusterSpecList"][0]["members"] = 2 mongodb_multi["spec"]["clusterSpecList"][1]["members"] = 1 @@ -124,9 +63,8 @@ def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1800) -@pytest.mark.e2e_multi_cluster_replica_set_scale_up def test_statefulsets_have_been_scaled_up_correctly( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): # Even though we already verified, in previous test, that the MongoDBMultiCluster resource's phase is running (that would mean all STSs are ready); @@ -161,14 +99,11 @@ def fn(): ) -@pytest.mark.e2e_multi_cluster_replica_set_scale_up def test_ops_manager_has_been_updated_correctly_after_scaling(): ac = AutomationConfigTester() ac.assert_processes_size(5) -@skip_if_local -@pytest.mark.e2e_multi_cluster_replica_set_scale_up -def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str): +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB, ca_path: str): tester = mongodb_multi.tester() tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_test_mtls.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_test_mtls.py similarity index 84% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_test_mtls.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_test_mtls.py index 5c720264c..03a7fbbf5 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_test_mtls.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_replica_set_test_mtls.py @@ -1,46 +1,23 @@ from typing import List import kubernetes -import pytest from kubetester import wait_until from 
kubetester.kubetester import KubernetesTester, create_testing_namespace -from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from tests.multicluster.conftest import cluster_spec_list -@pytest.fixture(scope="module") -def mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - namespace: str, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), "multi-replica-set", namespace) - resource.set_version(custom_mdb_version) - - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - # TODO: incorporate this into the base class. - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - resource.update() - return resource - - -@pytest.mark.e2e_multi_cluster_mtls_test def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_mtls_test -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) -@pytest.mark.e2e_multi_cluster_mtls_test def test_create_mongo_pod_in_separate_namespace( member_cluster_clients: List[MultiClusterClient], evergreen_task_id: str, @@ -96,9 +73,8 @@ def pod_is_ready() -> bool: wait_until(pod_is_ready, timeout=60) -@pytest.mark.e2e_multi_cluster_mtls_test def test_connectivity_fails_from_second_namespace( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], namespace: str, ): @@ -126,7 +102,6 @@ def test_connectivity_fails_from_second_namespace( ], f"no expected failure messages found in result: {result}" -@pytest.mark.e2e_multi_cluster_mtls_test def test_enable_istio_injection( member_cluster_clients: List[MultiClusterClient], namespace: str, @@ -138,7 +113,6 @@ def test_enable_istio_injection( corev1.patch_namespace(f"{namespace}-mongo", ns) -@pytest.mark.e2e_multi_cluster_mtls_test def test_delete_existing_mongo_pod(member_cluster_clients: List[MultiClusterClient], namespace: str): cluster_1_client = member_cluster_clients[0] corev1 = kubernetes.client.CoreV1Api(api_client=cluster_1_client.api_client) @@ -154,7 +128,6 @@ def pod_is_deleted() -> bool: wait_until(pod_is_deleted, timeout=120) -@pytest.mark.e2e_multi_cluster_mtls_test def test_create_pod_with_istio_sidecar(member_cluster_clients: List[MultiClusterClient], namespace: str): cluster_1_client = member_cluster_clients[0] corev1 = kubernetes.client.CoreV1Api(api_client=cluster_1_client.api_client) @@ -191,9 +164,8 @@ def two_containers_are_present() -> bool: wait_until(two_containers_are_present, timeout=60) -@pytest.mark.e2e_multi_cluster_mtls_test def test_connectivity_succeeds_from_second_namespace( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], namespace: str, ): diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scale_down_cluster.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_down_cluster.py similarity index 54% rename from 
docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scale_down_cluster.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_down_cluster.py index 38350027f..fe0b26b95 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scale_down_cluster.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_down_cluster.py @@ -3,76 +3,24 @@ import kubernetes import pytest from kubetester.automation_config_tester import AutomationConfigTester -from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs -from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import skip_if_local +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongotester import with_tls from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from tests.multicluster.conftest import cluster_spec_list - -RESOURCE_NAME = "multi-replica-set" -BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" - - -@pytest.fixture(scope="module") -def mongodb_multi_unmarshalled( - namespace: str, - multi_cluster_issuer_ca_configmap: str, - central_cluster_client: kubernetes.client.ApiClient, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), RESOURCE_NAME, namespace) - resource.set_version(custom_mdb_version) - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - resource["spec"]["security"] = { - "certsSecretPrefix": "prefix", - "tls": { - "ca": multi_cluster_issuer_ca_configmap, - }, - } - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - return resource - - -@pytest.fixture(scope="module") -def server_certs( - multi_cluster_issuer: str, - mongodb_multi_unmarshalled: MongoDBMulti, - member_cluster_clients: List[MultiClusterClient], - central_cluster_client: kubernetes.client.ApiClient, -): - - return create_multi_cluster_mongodb_tls_certs( - multi_cluster_issuer, - BUNDLE_SECRET_NAME, - member_cluster_clients, - central_cluster_client, - mongodb_multi_unmarshalled, - ) - - -@pytest.fixture(scope="module") -def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, server_certs: str) -> MongoDBMulti: - return mongodb_multi_unmarshalled.create() -@pytest.mark.e2e_multi_cluster_scale_down_cluster def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_scale_down_cluster -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) -@pytest.mark.e2e_multi_cluster_scale_down_cluster def test_statefulsets_have_been_created_correctly( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients) @@ -92,14 +40,12 @@ def test_statefulsets_have_been_created_correctly( assert cluster_three_sts.status.ready_replicas == 2 -@pytest.mark.e2e_multi_cluster_scale_down_cluster def test_ops_manager_has_been_updated_correctly_before_scaling(): ac = AutomationConfigTester() ac.assert_processes_size(5) -@pytest.mark.e2e_multi_cluster_scale_down_cluster -def 
test_scale_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.load() # remove first and last cluster mongodb_multi["spec"]["clusterSpecList"] = [mongodb_multi["spec"]["clusterSpecList"][1]] @@ -108,9 +54,8 @@ def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1800, ignore_errors=True) -@pytest.mark.e2e_multi_cluster_scale_down_cluster def test_statefulsets_have_been_scaled_down_correctly( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): statefulsets = mongodb_multi.read_statefulsets([member_cluster_clients[1]]) @@ -130,15 +75,12 @@ def test_statefulsets_have_been_scaled_down_correctly( assert e.value.reason == "Not Found" -@pytest.mark.e2e_multi_cluster_scale_down_cluster def test_ops_manager_has_been_updated_correctly_after_scaling(): ac = AutomationConfigTester() ac.assert_processes_size(1) -@skip_if_local -@pytest.mark.e2e_multi_cluster_scale_down_cluster -def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str): +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB, ca_path: str): # there should only be one member in cluster 2 so there is just a single service. tester = mongodb_multi.tester(service_names=[f"{mongodb_multi.name}-1-0-svc"]) tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_up_cluster.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_up_cluster.py new file mode 100644 index 000000000..c2a8da08b --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_up_cluster.py @@ -0,0 +1,96 @@ +from typing import List + +from kubetester.automation_config_tester import AutomationConfigTester +from kubetester.kubetester import KubernetesTester +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongotester import with_tls +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from kubetester.phase import Phase + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): + mongodb_multi.update() + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) + + +def test_statefulsets_have_been_created_correctly( + mongodb_multi: MongoDBMulti | MongoDB, + member_cluster_clients: List[MultiClusterClient], +): + # read all statefulsets except the last one + mongodb_multi.assert_statefulsets_are_ready(member_cluster_clients[:-1]) + + +def test_ops_manager_has_been_updated_correctly_before_scaling(): + ac = AutomationConfigTester() + ac.assert_processes_size(3) + + +def test_scale_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient]): + mongodb_multi["spec"]["clusterSpecList"].append( + {"members": 2, "clusterName": member_cluster_clients[2].cluster_name} + ) + mongodb_multi.update() + mongodb_multi.assert_abandons_phase(Phase.Running, timeout=120) + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1800) + + +def test_statefulsets_have_been_scaled_up_correctly( + mongodb_multi: MongoDBMulti | MongoDB, + member_cluster_clients: List[MultiClusterClient], +): + 
mongodb_multi.assert_statefulsets_are_ready(member_cluster_clients, timeout=60) + + +def test_ops_manager_has_been_updated_correctly_after_scaling(): + ac = AutomationConfigTester() + ac.assert_processes_size(5) + + +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB, ca_path: str): + tester = mongodb_multi.tester() + tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) + + +# From here on, the tests verify that we can change the project of the MongoDBMulti (or MongoDB) resource even with +# non-sequential member ids in the replica set. + + +class TestNonSequentialMemberIdsInReplicaSet(KubernetesTester): + + def test_scale_up_first_cluster( + self, mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient] + ): + # Scale up the first cluster to 3 members. This leads to non-sequential member ids in the replica set: + # multi-replica-set-0-0 : 0 + # multi-replica-set-0-1 : 1 + # multi-replica-set-0-2 : 5 + # multi-replica-set-1-0 : 2 + # multi-replica-set-2-0 : 3 + # multi-replica-set-2-1 : 4 + + mongodb_multi["spec"]["clusterSpecList"][0]["members"] = 3 + mongodb_multi.update() + + mongodb_multi.assert_statefulsets_are_ready(member_cluster_clients) + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=600) + + def test_change_project(self, mongodb_multi: MongoDBMulti | MongoDB, new_project_configmap: str): + oldRsMembers = mongodb_multi.get_automation_config_tester().get_replica_set_members(mongodb_multi.name) + + mongodb_multi["spec"]["opsManager"]["configMapRef"]["name"] = new_project_configmap + mongodb_multi.update() + + mongodb_multi.assert_abandons_phase(phase=Phase.Running, timeout=300) + mongodb_multi.assert_reaches_phase(phase=Phase.Running, timeout=600) + + newRsMembers = mongodb_multi.get_automation_config_tester().get_replica_set_members(mongodb_multi.name) + + # Assert that the replica set member ids have not changed after changing the project (the assert itself follows the sketch below). 
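# [Hedged editor's aside; test_change_project's own assert continues right below.
# The mapping in test_scale_up_first_cluster's comment arises because the automation
# config is assumed to keep existing member ids stable and to give each newly added
# member the next free id; assign_member_ids is a hypothetical illustration.]
def assign_member_ids(existing: dict[str, int], new_members: list[str]) -> dict[str, int]:
    ids = dict(existing)
    next_id = max(ids.values(), default=-1) + 1
    for name in new_members:
        ids[name] = next_id  # appended members take the next free id, not a positional one
        next_id += 1
    return ids


before_scale_up = {
    "multi-replica-set-0-0": 0,
    "multi-replica-set-0-1": 1,
    "multi-replica-set-1-0": 2,
    "multi-replica-set-2-0": 3,
    "multi-replica-set-2-1": 4,
}
# scaling cluster 0 from 2 to 3 members reproduces the non-sequential mapping above
assert assign_member_ids(before_scale_up, ["multi-replica-set-0-2"])["multi-replica-set-0-2"] == 5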
+ assert oldRsMembers == newRsMembers diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scale_up_cluster_new_cluster.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_up_cluster_new_cluster.py similarity index 58% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scale_up_cluster_new_cluster.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_up_cluster_new_cluster.py index f25821c85..575ded0d0 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scale_up_cluster_new_cluster.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scale_up_cluster_new_cluster.py @@ -1,12 +1,9 @@ from typing import Callable, List import kubernetes -import pytest from kubernetes import client from kubetester.automation_config_tester import AutomationConfigTester -from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs -from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import skip_if_local +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongotester import with_tls from kubetester.multicluster_client import MultiClusterClient @@ -14,58 +11,8 @@ from kubetester.phase import Phase from tests.conftest import run_kube_config_creation_tool from tests.constants import MULTI_CLUSTER_OPERATOR_NAME -from tests.multicluster.conftest import cluster_spec_list -RESOURCE_NAME = "multi-replica-set" -BUNDLE_SECRET_NAME = f"prefix-{RESOURCE_NAME}-cert" - -@pytest.fixture(scope="module") -def mongodb_multi_unmarshalled( - namespace: str, - multi_cluster_issuer_ca_configmap: str, - central_cluster_client: kubernetes.client.ApiClient, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), RESOURCE_NAME, namespace) - resource.set_version(custom_mdb_version) - # ensure certs are created for the members during scale up - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - resource["spec"]["security"] = { - "certsSecretPrefix": "prefix", - "tls": { - "ca": multi_cluster_issuer_ca_configmap, - }, - } - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - return resource - - -@pytest.fixture(scope="module") -def server_certs( - multi_cluster_issuer: str, - mongodb_multi_unmarshalled: MongoDBMulti, - member_cluster_clients: List[MultiClusterClient], - central_cluster_client: kubernetes.client.ApiClient, -): - return create_multi_cluster_mongodb_tls_certs( - multi_cluster_issuer, - BUNDLE_SECRET_NAME, - member_cluster_clients, - central_cluster_client, - mongodb_multi_unmarshalled, - ) - - -@pytest.fixture(scope="module") -def mongodb_multi(mongodb_multi_unmarshalled: MongoDBMulti, server_certs: str) -> MongoDBMulti: - mongodb_multi_unmarshalled["spec"]["clusterSpecList"].pop() - return mongodb_multi_unmarshalled.create() - - -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster def test_deploy_operator( install_multi_cluster_operator_set_members_fn: Callable[[List[str]], Operator], member_cluster_names: List[str], @@ -77,14 +24,12 @@ def test_deploy_operator( operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): 
mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster def test_statefulsets_have_been_created_correctly( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): clients = {c.cluster_name: c for c in member_cluster_clients} @@ -100,20 +45,17 @@ def test_statefulsets_have_been_created_correctly( assert cluster_two_sts.status.ready_replicas == 1 -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster def test_ops_manager_has_been_updated_correctly_before_scaling(): ac = AutomationConfigTester() ac.assert_processes_size(3) -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster def test_delete_deployment(namespace: str, central_cluster_client: kubernetes.client.ApiClient): client.AppsV1Api(api_client=central_cluster_client).delete_namespaced_deployment( MULTI_CLUSTER_OPERATOR_NAME, namespace ) -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster def test_re_deploy_operator( install_multi_cluster_operator_set_members_fn: Callable[[List[str]], Operator], member_cluster_names: List[str], @@ -126,9 +68,8 @@ def test_re_deploy_operator( operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster def test_add_new_cluster_to_mongodb_multi_resource( - mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient] + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient] ): mongodb_multi.load() mongodb_multi["spec"]["clusterSpecList"].append( @@ -138,9 +79,8 @@ def test_add_new_cluster_to_mongodb_multi_resource( mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster def test_statefulsets_have_been_created_correctly_after_cluster_addition( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): clients = {c.cluster_name: c for c in member_cluster_clients} @@ -159,14 +99,11 @@ def test_statefulsets_have_been_created_correctly_after_cluster_addition( assert cluster_three_sts.status.ready_replicas == 2 -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster def test_ops_manager_has_been_updated_correctly_after_scaling(): ac = AutomationConfigTester() ac.assert_processes_size(5) -@skip_if_local -@pytest.mark.e2e_multi_cluster_scale_up_cluster_new_cluster -def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti, ca_path: str): +def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB, ca_path: str): tester = mongodb_multi.tester() tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scram.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scram.py similarity index 69% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scram.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scram.py index 402355fa1..b71812c41 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_scram.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_scram.py @@ -1,20 +1,17 @@ from typing import List import kubernetes -import pytest from kubetester import create_or_update_secret, read_secret from kubetester.automation_config_tester import AutomationConfigTester from kubetester.kubetester import KubernetesTester 
-from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongodb_user import MongoDBUser from kubetester.mongotester import with_scram from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from tests.multicluster.conftest import cluster_spec_list -MDB_RESOURCE = "multi-replica-set-scram" USER_NAME = "my-user-1" USER_RESOURCE = "multi-replica-set-scram-user" USER_DATABASE = "admin" @@ -23,52 +20,10 @@ NEW_USER_PASSWORD = "my-new-password7" -@pytest.fixture(scope="function") -def mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - namespace: str, - member_cluster_names, - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) - resource.set_version(custom_mdb_version) - - resource["spec"]["security"] = { - "authentication": { - "agents": {"mode": "MONGODB-CR"}, - "enabled": True, - "modes": ["SCRAM-SHA-1", "SCRAM-SHA-256", "MONGODB-CR"], - } - } - - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 1, 2]) - - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - - return resource - - -@pytest.fixture(scope="function") -def mongodb_user(central_cluster_client: kubernetes.client.ApiClient, namespace: str) -> MongoDBUser: - resource = MongoDBUser.from_yaml(yaml_fixture("scram-sha-user.yaml"), USER_RESOURCE, namespace) - - resource["spec"]["username"] = USER_NAME - resource["spec"]["passwordSecretKeyRef"] = { - "name": PASSWORD_SECRET_NAME, - "key": "password", - } - resource["spec"]["mongodbResourceRef"]["name"] = MDB_RESOURCE - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - - return resource - - -@pytest.mark.e2e_multi_cluster_scram def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_cluster_scram def test_create_mongodb_user( central_cluster_client: kubernetes.client.ApiClient, mongodb_user: MongoDBUser, @@ -85,13 +40,11 @@ def test_create_mongodb_user( mongodb_user.assert_reaches_phase(Phase.Pending, timeout=100) -@pytest.mark.e2e_multi_cluster_scram -def test_create_mongodb_multi_with_scram(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi_with_scram(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.update() mongodb_multi.assert_reaches_phase(Phase.Running, timeout=800) -@pytest.mark.e2e_multi_cluster_scram def test_user_reaches_updated( central_cluster_client: kubernetes.client.ApiClient, mongodb_user: MongoDBUser, @@ -99,16 +52,14 @@ def test_user_reaches_updated( mongodb_user.assert_reaches_phase(Phase.Updated, timeout=100) -@pytest.mark.e2e_multi_cluster_scram -def test_replica_set_connectivity_using_user_password(mongodb_multi: MongoDBMulti): +def test_replica_set_connectivity_using_user_password(mongodb_multi: MongoDBMulti | MongoDB): tester = mongodb_multi.tester() tester.assert_connectivity(db="admin", opts=[with_scram(USER_NAME, USER_PASSWORD)]) -@pytest.mark.e2e_multi_cluster_scram def test_change_password_and_check_connectivity( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, central_cluster_client: kubernetes.client.ApiClient, ): create_or_update_secret( @@ -125,8 +76,7 @@ def test_change_password_and_check_connectivity( ) -@pytest.mark.e2e_multi_cluster_scram -def 
test_user_cannot_authenticate_with_old_password(mongodb_multi: MongoDBMulti): +def test_user_cannot_authenticate_with_old_password(mongodb_multi: MongoDBMulti | MongoDB): tester = mongodb_multi.tester() tester.assert_scram_sha_authentication_fails( password=USER_PASSWORD, @@ -135,10 +85,9 @@ def test_user_cannot_authenticate_with_old_password(mongodb_multi: MongoDBMulti) ) -@pytest.mark.e2e_multi_cluster_scram def test_connection_string_secret_was_created( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): for client in member_cluster_clients: @@ -153,7 +102,6 @@ def test_connection_string_secret_was_created( assert "connectionString.standardSrv" in secret_data -@pytest.mark.e2e_multi_cluster_scram def test_om_configured_correctly(): expected_roles = { ("admin", "clusterAdmin"), @@ -170,16 +118,14 @@ def test_om_configured_correctly(): tester.assert_authentication_mechanism_enabled("MONGODB-CR", active_auth_mechanism=False) -@pytest.mark.e2e_multi_cluster_scram -def test_replica_set_connectivity(mongodb_multi: MongoDBMulti): +def test_replica_set_connectivity(mongodb_multi: MongoDBMulti | MongoDB): tester = mongodb_multi.tester() tester.assert_connectivity(db="admin", opts=[with_scram(USER_NAME, NEW_USER_PASSWORD)]) -@pytest.mark.e2e_multi_cluster_scram def test_replica_set_connectivity_from_connection_string_standard( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): secret_data = read_secret( @@ -195,10 +141,9 @@ def test_replica_set_connectivity_from_connection_string_standard( ) -@pytest.mark.e2e_multi_cluster_scram def test_replica_set_connectivity_from_connection_string_standard_srv( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): secret_data = read_secret( diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_split_horizon.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_split_horizon.py new file mode 100644 index 000000000..7a0a70e2e --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_split_horizon.py @@ -0,0 +1,53 @@ +from typing import List + +import yaml +from kubetester.kubetester import KubernetesTester +from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongotester import with_tls +from kubetester.multicluster_client import MultiClusterClient +from kubetester.operator import Operator +from kubetester.phase import Phase + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +def test_deploy_mongodb_multi_with_tls( + mongodb_multi: MongoDBMulti | MongoDB, + namespace: str, +): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + + +def test_create_node_ports(mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient]): + for mcc in member_cluster_clients: + with open( + yaml_fixture(f"split-horizon-node-ports/mongodbmulticluster-split-horizon-node-port.yaml"), + "r", + ) as f: + service_body = yaml.safe_load(f.read()) + + # configure labels and selectors + service_body["metadata"]["labels"][ + "mongodbmulticluster" + ] = f"{mongodb_multi.namespace}-{mongodb_multi.name}" + 
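# Each split-horizon Service is pinned to a single pod: the pod-name label and selector below target pod 0 of this member cluster's StatefulSet. +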
service_body["metadata"]["labels"][ + "statefulset.kubernetes.io/pod-name" + ] = f"{mongodb_multi.name}-{mcc.cluster_index}-0" + service_body["spec"]["selector"][ + "statefulset.kubernetes.io/pod-name" + ] = f"{mongodb_multi.name}-{mcc.cluster_index}-0" + + KubernetesTester.create_service( + mongodb_multi.namespace, + body=service_body, + api_client=mcc.api_client, + ) + + +def test_tls_connectivity(mongodb_multi: MongoDBMulti | MongoDB, ca_path: str): + tester = mongodb_multi.tester() + tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_sts_override.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_sts_override.py similarity index 61% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_sts_override.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_sts_override.py index 3a762c580..e05fb74f1 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_sts_override.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_sts_override.py @@ -1,45 +1,22 @@ from typing import List -import kubernetes -import pytest from kubernetes import client -from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -@pytest.fixture(scope="module") -def mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - namespace: str, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi-sts-override.yaml"), - "multi-replica-set-sts-override", - namespace, - ) - resource.set_version(custom_mdb_version) - - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - return resource.update() - - -@pytest.mark.e2e_multi_sts_override def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@pytest.mark.e2e_multi_sts_override -def test_create_mongodb_multi(mongodb_multi: MongoDBMulti): +def test_create_mongodb_multi(mongodb_multi: MongoDBMulti | MongoDB): mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) -@pytest.mark.e2e_multi_sts_override -def test_statefulset_overrides(mongodb_multi: MongoDBMulti, member_cluster_clients: List[MultiClusterClient]): +def test_statefulset_overrides(mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient]): statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients) # assert sts.podspec override in cluster1 @@ -54,9 +31,8 @@ def test_statefulset_overrides(mongodb_multi: MongoDBMulti, member_cluster_clien assert_container_in_sts("sidecar2", cluster_two_sts) -@pytest.mark.e2e_multi_sts_override def test_access_modes_pvc( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], namespace: str, ): diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_tls_no_mesh.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_tls_no_mesh.py similarity index 52% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_tls_no_mesh.py rename to 
docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_tls_no_mesh.py index 9932aac29..5b79a3734 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_tls_no_mesh.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_tls_no_mesh.py @@ -3,158 +3,15 @@ import kubernetes from kubernetes import client from kubetester import get_service -from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs -from kubetester.kubetester import fixture as yaml_fixture +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from pytest import fixture, mark from tests.common.placeholders import placeholders from tests.conftest import update_coredns_hosts -from tests.multicluster.conftest import cluster_spec_list -CERT_SECRET_PREFIX = "clustercert" -MDB_RESOURCE = "multi-cluster-replica-set" -BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" -BUNDLE_PEM_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert-pem" - -@fixture(scope="module") -def mongodb_multi_unmarshalled( - namespace: str, member_cluster_names: List[str], custom_mdb_version: str -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) - resource.set_version(custom_mdb_version) - resource["spec"]["persistent"] = False - # These domains map 1:1 to the CoreDNS file. Please be mindful when updating them. - resource["spec"]["clusterSpecList"] = cluster_spec_list(member_cluster_names, [2, 2, 2]) - - resource["spec"]["externalAccess"] = {} - resource["spec"]["clusterSpecList"][0]["externalAccess"] = { - "externalDomain": "kind-e2e-cluster-1.interconnected", - "externalService": { - "spec": { - "type": "LoadBalancer", - "publishNotReadyAddresses": False, - "ports": [ - { - "name": "mongodb", - "port": 27017, - }, - { - "name": "backup", - "port": 27018, - }, - { - "name": "testing0", - "port": 27019, - }, - ], - } - }, - } - resource["spec"]["clusterSpecList"][1]["externalAccess"] = { - "externalDomain": "kind-e2e-cluster-2.interconnected", - "externalService": { - "spec": { - "type": "LoadBalancer", - "publishNotReadyAddresses": False, - "ports": [ - { - "name": "mongodb", - "port": 27017, - }, - { - "name": "backup", - "port": 27018, - }, - { - "name": "testing1", - "port": 27019, - }, - ], - } - }, - } - resource["spec"]["clusterSpecList"][2]["externalAccess"] = { - "externalDomain": "kind-e2e-cluster-3.interconnected", - "externalService": { - "spec": { - "type": "LoadBalancer", - "publishNotReadyAddresses": False, - "ports": [ - { - "name": "mongodb", - "port": 27017, - }, - { - "name": "backup", - "port": 27018, - }, - { - "name": "testing2", - "port": 27019, - }, - ], - } - }, - } - - return resource - - -@fixture(scope="module") -def disable_istio( - multi_cluster_operator: Operator, - namespace: str, - member_cluster_clients: List[MultiClusterClient], -): - for mcc in member_cluster_clients: - api = client.CoreV1Api(api_client=mcc.api_client) - labels = {"istio-injection": "disabled"} - ns = api.read_namespace(name=namespace) - ns.metadata.labels.update(labels) - api.replace_namespace(name=namespace, body=ns) - return None - - -@fixture(scope="module") -def mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - disable_istio, - namespace: str, - mongodb_multi_unmarshalled: MongoDBMulti, - 
multi_cluster_issuer_ca_configmap: str, -) -> MongoDBMulti: - mongodb_multi_unmarshalled["spec"]["security"] = { - "certsSecretPrefix": CERT_SECRET_PREFIX, - "tls": { - "ca": multi_cluster_issuer_ca_configmap, - }, - } - mongodb_multi_unmarshalled.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - - return mongodb_multi_unmarshalled.update() - - -@fixture(scope="module") -def server_certs( - multi_cluster_issuer: str, - mongodb_multi_unmarshalled: MongoDBMulti, - member_cluster_clients: List[MultiClusterClient], - central_cluster_client: kubernetes.client.ApiClient, -): - return create_multi_cluster_mongodb_tls_certs( - multi_cluster_issuer, - BUNDLE_SECRET_NAME, - member_cluster_clients, - central_cluster_client, - mongodb_multi_unmarshalled, - ) - - -@mark.e2e_multi_cluster_tls_no_mesh def test_update_coredns(cluster_clients: dict[str, kubernetes.client.ApiClient]): hosts = [ ("172.18.255.211", "test.kind-e2e-cluster-1.interconnected"), @@ -202,14 +59,12 @@ def test_update_coredns(cluster_clients: dict[str, kubernetes.client.ApiClient]) update_coredns_hosts(hosts, cluster_name, api_client=cluster_api) -@mark.e2e_multi_cluster_tls_no_mesh def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@mark.e2e_multi_cluster_tls_no_mesh def test_create_mongodb_multi( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, namespace: str, server_certs: str, multi_cluster_issuer_ca_configmap: str, @@ -219,10 +74,9 @@ def test_create_mongodb_multi( mongodb_multi.assert_reaches_phase(Phase.Running, timeout=2400, ignore_errors=True) -@mark.e2e_multi_cluster_tls_no_mesh def test_service_overrides( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): for cluster_idx, member_cluster_client in enumerate(member_cluster_clients): @@ -250,10 +104,9 @@ def test_service_overrides( assert ports[2].port == 27019 -@mark.e2e_multi_cluster_tls_no_mesh def test_placeholders_in_external_services( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ): for cluster_spec_item in mongodb_multi["spec"]["clusterSpecList"]: diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_tls_with_scram.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_tls_with_scram.py similarity index 55% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_tls_with_scram.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_tls_with_scram.py index 978ae0832..39f1ebcc5 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_tls_with_scram.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_tls_with_scram.py @@ -3,22 +3,15 @@ import kubernetes from kubetester import create_secret, read_secret from kubetester.automation_config_tester import AutomationConfigTester -from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs -from kubetester.kubetester import KubernetesTester, ensure_ent_version -from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import skip_if_local +from kubetester.kubetester import KubernetesTester +from kubetester.mongodb import MongoDB from kubetester.mongodb_multi import MongoDBMulti from kubetester.mongodb_user import MongoDBUser from kubetester.mongotester import 
with_scram, with_tls from kubetester.multicluster_client import MultiClusterClient from kubetester.operator import Operator from kubetester.phase import Phase -from pytest import fixture, mark -from tests.multicluster.conftest import cluster_spec_list -CERT_SECRET_PREFIX = "clustercert" -MDB_RESOURCE = "multi-cluster-replica-set" -BUNDLE_SECRET_NAME = f"{CERT_SECRET_PREFIX}-{MDB_RESOURCE}-cert" USER_NAME = "my-user-1" USER_RESOURCE = "multi-replica-set-scram-user" USER_DATABASE = "admin" @@ -26,80 +19,12 @@ USER_PASSWORD = "my-password" -@fixture(scope="module") -def mongodb_multi_unmarshalled( - namespace: str, - member_cluster_names: list[str], - custom_mdb_version: str, -) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(yaml_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) - resource.set_version(ensure_ent_version(custom_mdb_version)) - resource["spec"]["clusterSpecList"] = cluster_spec_list( - member_cluster_names=member_cluster_names, members=[2, 1, 2] - ) - - return resource - - -@fixture(scope="module") -def server_certs( - multi_cluster_issuer: str, - mongodb_multi_unmarshalled: MongoDBMulti, - member_cluster_clients: List[MultiClusterClient], - central_cluster_client: kubernetes.client.ApiClient, -): - - return create_multi_cluster_mongodb_tls_certs( - multi_cluster_issuer, - BUNDLE_SECRET_NAME, - member_cluster_clients, - central_cluster_client, - mongodb_multi_unmarshalled, - ) - - -@fixture(scope="module") -def mongodb_multi( - central_cluster_client: kubernetes.client.ApiClient, - server_certs: str, - mongodb_multi_unmarshalled: MongoDBMulti, - multi_cluster_issuer_ca_configmap: str, -) -> MongoDBMulti: - - resource = mongodb_multi_unmarshalled - resource["spec"]["security"] = { - "certsSecretPrefix": CERT_SECRET_PREFIX, - "tls": { - "ca": multi_cluster_issuer_ca_configmap, - }, - } - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - return resource.create() - - -@fixture(scope="module") -def mongodb_user(central_cluster_client: kubernetes.client.ApiClient, namespace: str) -> MongoDBUser: - resource = MongoDBUser.from_yaml(yaml_fixture("mongodb-user.yaml"), USER_RESOURCE, namespace) - - resource["spec"]["username"] = USER_NAME - resource["spec"]["passwordSecretKeyRef"] = { - "name": PASSWORD_SECRET_NAME, - "key": "password", - } - resource["spec"]["mongodbResourceRef"]["name"] = MDB_RESOURCE - resource["spec"]["mongodbResourceRef"]["namespace"] = namespace - resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) - return resource.create() - - -@mark.e2e_multi_cluster_tls_with_scram def test_deploy_operator(multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() -@mark.e2e_multi_cluster_tls_with_scram def test_deploy_mongodb_multi_with_tls( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, namespace: str, member_cluster_clients: List[MultiClusterClient], ): @@ -107,9 +32,8 @@ def test_deploy_mongodb_multi_with_tls( mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) -@mark.e2e_multi_cluster_tls_with_scram def test_update_mongodb_multi_tls_with_scram( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, namespace: str, ): mongodb_multi.load() @@ -118,7 +42,6 @@ def test_update_mongodb_multi_tls_with_scram( mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) -@mark.e2e_multi_cluster_tls_with_scram def test_create_mongodb_user( central_cluster_client: kubernetes.client.ApiClient, mongodb_user: MongoDBUser, @@ -134,16 +57,12 @@ def 
test_create_mongodb_user( mongodb_user.assert_reaches_phase(Phase.Updated, timeout=100) -@skip_if_local -@mark.e2e_multi_cluster_tls_with_scram -def test_tls_connectivity(mongodb_multi: MongoDBMulti, ca_path: str): +def test_tls_connectivity(mongodb_multi: MongoDBMulti | MongoDB, ca_path: str): tester = mongodb_multi.tester() tester.assert_connectivity(opts=[with_tls(use_tls=True, ca_path=ca_path)]) -@skip_if_local -@mark.e2e_multi_cluster_tls_with_scram -def test_replica_set_connectivity_with_scram_and_tls(mongodb_multi: MongoDBMulti, ca_path: str): +def test_replica_set_connectivity_with_scram_and_tls(mongodb_multi: MongoDBMulti | MongoDB, ca_path: str): tester = mongodb_multi.tester() tester.assert_connectivity( db="admin", @@ -154,11 +73,9 @@ def test_replica_set_connectivity_with_scram_and_tls(mongodb_multi: MongoDBMulti ) -@skip_if_local -@mark.e2e_multi_cluster_tls_with_scram def test_replica_set_connectivity_from_connection_string_standard( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ca_path: str, ): @@ -178,11 +95,9 @@ def test_replica_set_connectivity_from_connection_string_standard( ) -@skip_if_local -@mark.e2e_multi_cluster_tls_with_scram def test_replica_set_connectivity_from_connection_string_standard_srv( namespace: str, - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, member_cluster_clients: List[MultiClusterClient], ca_path: str, ): @@ -202,9 +117,8 @@ def test_replica_set_connectivity_from_connection_string_standard_srv( ) -@mark.e2e_multi_cluster_tls_with_scram def test_mongodb_multi_tls_enable_x509( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, namespace: str, ): mongodb_multi.load() @@ -219,9 +133,8 @@ def test_mongodb_multi_tls_enable_x509( mongodb_multi.assert_reaches_phase(Phase.Running, ignore_errors=True, timeout=1000) -@mark.e2e_multi_cluster_tls_with_scram def test_mongodb_multi_tls_automation_config_was_updated( - mongodb_multi: MongoDBMulti, + mongodb_multi: MongoDBMulti | MongoDB, namespace: str, ): tester = AutomationConfigTester(KubernetesTester.get_automation_config()) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_tls_with_x509.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_tls_with_x509.py new file mode 100644 index 000000000..2c3e9dc31 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_tls_with_x509.py @@ -0,0 +1,63 @@ +import tempfile + +import kubernetes +from kubetester.automation_config_tester import AutomationConfigTester +from kubetester.certs import Certificate, create_multi_cluster_x509_user_cert +from kubetester.kubetester import KubernetesTester +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongodb_user import MongoDBUser +from kubetester.operator import Operator +from kubetester.phase import Phase + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +def test_deploy_mongodb_multi_with_tls_and_authentication(mongodb_multi: MongoDBMulti | MongoDB, namespace: str): + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1200) + + +def test_ops_manager_state_was_updated_correctly(mongodb_multi: MongoDBMulti | MongoDB): + ac_tester = AutomationConfigTester(KubernetesTester.get_automation_config()) + 
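# Two deployment auth mechanisms are expected in the automation config, MONGODB-X509 among them, with internal cluster authentication enabled, as the assertions below verify. +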
ac_tester.assert_authentication_enabled(expected_num_deployment_auth_mechanisms=2) + ac_tester.assert_authentication_mechanism_enabled("MONGODB-X509") + ac_tester.assert_internal_cluster_authentication_enabled() + + +def test_create_mongodb_x509_user( + central_cluster_client: kubernetes.client.ApiClient, + mongodb_x509_user: MongoDBUser, + namespace: str, +): + mongodb_x509_user.assert_reaches_phase(Phase.Updated, timeout=100) + + +def test_x509_user_connectivity( + mongodb_multi: MongoDBMulti | MongoDB, + central_cluster_client: kubernetes.client.ApiClient, + multi_cluster_issuer: str, + namespace: str, + ca_path: str, +): + with tempfile.NamedTemporaryFile(delete=False, mode="w") as cert_file: + create_multi_cluster_x509_user_cert( + multi_cluster_issuer, namespace, central_cluster_client, path=cert_file.name + ) + tester = mongodb_multi.tester() + tester.assert_x509_authentication(cert_file_name=cert_file.name, tlsCAFile=ca_path) + + +# TODO: use this method to check that certificate rotation, after TLS and authentication mechanisms are enabled, +# keeps the resources reachable and in the Running state. +def assert_certificate_rotation(central_cluster_client, mongodb_multi, namespace, certificate_name): + cert = Certificate(name=certificate_name, namespace=namespace) + cert.api = kubernetes.client.CustomObjectsApi(api_client=central_cluster_client) + cert.load() + cert["spec"]["dnsNames"].append("foo")  # Append a DNS name to the cert to force a certificate rotation + cert.update() + # FIXME the assertions below need to be replaced with a robust check that the agents are ready + # and the TLS certificates are rotated. + mongodb_multi.assert_abandons_phase(Phase.Running, timeout=100) + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=1500) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_upgrade_downgrade.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_upgrade_downgrade.py new file mode 100644 index 000000000..b85c08789 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_upgrade_downgrade.py @@ -0,0 +1,59 @@ +from kubetester.kubetester import ensure_ent_version, fcv_from_version +from kubetester.mongodb import MongoDB +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongotester import MongoDBBackgroundTester +from kubetester.operator import Operator +from kubetester.phase import Phase + + +def test_deploy_operator(multi_cluster_operator: Operator): + multi_cluster_operator.assert_is_running() + + +def test_create_mongodb_multi_running(mongodb_multi: MongoDBMulti | MongoDB, custom_mdb_prev_version: str): + mongodb_multi.update() + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) + mongodb_multi.tester().assert_version(ensure_ent_version(custom_mdb_prev_version)) + + +def test_start_background_checker(mdb_health_checker: MongoDBBackgroundTester): + mdb_health_checker.start() + + +def test_mongodb_multi_upgrade( + mongodb_multi: MongoDBMulti | MongoDB, custom_mdb_prev_version: str, custom_mdb_version: str +): + mongodb_multi.load() + mongodb_multi["spec"]["version"] = ensure_ent_version(custom_mdb_version) + mongodb_multi["spec"]["featureCompatibilityVersion"] = fcv_from_version(custom_mdb_prev_version) + mongodb_multi.update() + + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) + + mongodb_multi.tester().assert_version(ensure_ent_version(custom_mdb_version)) + + +def test_upgraded_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB):
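+    # A plain reachability check after the upgrade; the MongoDBBackgroundTester started earlier keeps probing health throughout the version change.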
+ tester = mongodb_multi.tester() + tester.assert_connectivity() + + +def test_mongodb_multi_downgrade(mongodb_multi: MongoDBMulti | MongoDB, custom_mdb_prev_version: str): + mongodb_multi.load() + mongodb_multi["spec"]["version"] = ensure_ent_version(custom_mdb_prev_version) + mongodb_multi["spec"]["featureCompatibilityVersion"] = fcv_from_version(custom_mdb_prev_version) + mongodb_multi.update() + + mongodb_multi.assert_reaches_phase(Phase.Running, timeout=700) + mongodb_multi.tester().assert_version(ensure_ent_version(custom_mdb_prev_version)) + + +def test_downgraded_replica_set_is_reachable(mongodb_multi: MongoDBMulti | MongoDB): + tester = mongodb_multi.tester() + tester.assert_connectivity() + + +def test_mdb_healthy_throughout_change_version( + mdb_health_checker: MongoDBBackgroundTester, +): + mdb_health_checker.assert_healthiness() diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_validation.py b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_validation.py similarity index 79% rename from docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_validation.py rename to docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_validation.py index d24fecf3c..1d4e34986 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_validation.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster/shared/multi_cluster_validation.py @@ -1,18 +1,16 @@ import kubernetes -import pytest import yaml from kubetester.kubetester import KubernetesTester from kubetester.kubetester import fixture as yaml_fixture from kubetester.operator import Operator -@pytest.mark.e2e_multi_cluster_validation class TestWebhookValidation(KubernetesTester): def test_deploy_operator(self, multi_cluster_operator: Operator): multi_cluster_operator.assert_is_running() - def test_unique_cluster_names(self, central_cluster_client: kubernetes.client.ApiClient): - resource = yaml.safe_load(open(yaml_fixture("mongodb-multi-cluster.yaml"))) + def test_unique_cluster_names(self, central_cluster_client: kubernetes.client.ApiClient, fixture: str): + resource = yaml.safe_load(open(yaml_fixture(fixture))) resource["spec"]["clusterSpecList"].append({"clusterName": "kind-e2e-cluster-1", "members": 1}) self.create_custom_resource_from_object( @@ -22,8 +20,8 @@ def test_unique_cluster_names(self, central_cluster_client: kubernetes.client.Ap api_client=central_cluster_client, ) - def test_only_one_schema(self, central_cluster_client: kubernetes.client.ApiClient): - resource = yaml.safe_load(open(yaml_fixture("mongodb-multi-cluster.yaml"))) + def test_only_one_schema(self, central_cluster_client: kubernetes.client.ApiClient, fixture: str): + resource = yaml.safe_load(open(yaml_fixture(fixture))) resource["spec"]["cloudManager"] = {"configMapRef": {"name": " my-project"}} self.create_custom_resource_from_object( @@ -33,8 +31,8 @@ def test_only_one_schema(self, central_cluster_client: kubernetes.client.ApiClie api_client=central_cluster_client, ) - def test_non_empty_clusterspec_list(self, central_cluster_client: kubernetes.client.ApiClient): - resource = yaml.safe_load(open(yaml_fixture("mongodb-multi-cluster.yaml"))) + def test_non_empty_clusterspec_list(self, central_cluster_client: kubernetes.client.ApiClient, fixture: str): + resource = yaml.safe_load(open(yaml_fixture(fixture))) resource["spec"]["clusterSpecList"] = [] self.create_custom_resource_from_object( diff --git 
a/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_appdb_s3_based_backup_restore.py b/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_mongodbmulticluster_appdb_s3_based_backup_restore.py similarity index 67% rename from docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_appdb_s3_based_backup_restore.py rename to docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_mongodbmulticluster_appdb_s3_based_backup_restore.py index 0d573066c..dddb83999 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_appdb_s3_based_backup_restore.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_mongodbmulticluster_appdb_s3_based_backup_restore.py @@ -25,6 +25,9 @@ from tests.conftest import assert_data_got_restored from tests.constants import AWS_REGION from tests.multicluster.conftest import cluster_spec_list +from tests.multicluster_appdb.shared import ( + multicluster_appdb_s3_based_backup_restore as testhelper, +) @fixture(scope="module") @@ -53,7 +56,7 @@ def multi_cluster_s3_replica_set( custom_mdb_version: str, ) -> MongoDBMulti: resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi-cluster.yaml"), "multi-replica-set", namespace + yaml_fixture("mongodbmulticluster-multi-cluster.yaml"), "multi-replica-set", namespace ).configure(ops_manager, "s3metadata", api_client=central_cluster_client) resource["spec"]["clusterSpecList"] = cluster_spec_list(appdb_member_cluster_names, [1, 2]) @@ -94,7 +97,7 @@ def ops_manager( @mark.usefixtures("multi_cluster_operator") -@mark.e2e_multi_cluster_appdb_s3_based_backup_restore +@mark.e2e_mongodbmulticluster_multi_cluster_appdb_s3_based_backup_restore class TestOpsManagerCreation: """ name: Ops Manager successful creation with backup and oplog stores enabled @@ -106,51 +109,31 @@ def test_create_om( self, ops_manager: MongoDBOpsManager, ): - ops_manager["spec"]["backup"]["members"] = 1 - ops_manager.update() - - ops_manager.appdb_status().assert_reaches_phase(Phase.Running) - ops_manager.om_status().assert_reaches_phase(Phase.Running) + testhelper.TestOpsManagerCreation.test_create_om(self, ops_manager) def test_om_is_running( self, ops_manager: MongoDBOpsManager, central_cluster_client: kubernetes.client.ApiClient, ): - # at this point AppDB is used as the "metadatastore" - ops_manager.backup_status().assert_reaches_phase(Phase.Running, ignore_errors=True) - om_tester = ops_manager.get_om_tester(api_client=central_cluster_client) - om_tester.assert_healthiness() + testhelper.TestOpsManagerCreation.test_om_is_running(self, ops_manager, central_cluster_client) def test_add_metadatastore( self, multi_cluster_s3_replica_set: MongoDBMulti, ops_manager: MongoDBOpsManager, ): - multi_cluster_s3_replica_set.assert_reaches_phase(Phase.Running, timeout=1000) - - # configure metadatastore in om, use dedicate MDB instead of AppDB - ops_manager.load() - ops_manager["spec"]["backup"]["s3Stores"][0]["mongodbResourceRef"] = {"name": multi_cluster_s3_replica_set.name} - ops_manager["spec"]["backup"]["s3OpLogStores"][0]["mongodbResourceRef"] = { - "name": multi_cluster_s3_replica_set.name - } - ops_manager.update() - - ops_manager.om_status().assert_reaches_phase(Phase.Running) - ops_manager.backup_status().assert_reaches_phase(Phase.Running, ignore_errors=True) + testhelper.TestOpsManagerCreation.test_add_metadatastore(self, multi_cluster_s3_replica_set, ops_manager) def test_om_s3_stores( self, ops_manager: MongoDBOpsManager, 
central_cluster_client: kubernetes.client.ApiClient, ): - om_tester = ops_manager.get_om_tester(api_client=central_cluster_client) - om_tester.assert_s3_stores([{"id": S3_BLOCKSTORE_NAME, "s3RegionOverride": AWS_REGION}]) - om_tester.assert_oplog_s3_stores([{"id": S3_OPLOG_NAME, "s3RegionOverride": AWS_REGION}]) + testhelper.TestOpsManagerCreation.test_om_s3_stores(self, ops_manager, central_cluster_client) -@mark.e2e_multi_cluster_appdb_s3_based_backup_restore +@mark.e2e_mongodbmulticluster_multi_cluster_appdb_s3_based_backup_restore class TestBackupForMongodb: @fixture(scope="module") def project_one( @@ -184,7 +167,7 @@ def mongodb_multi_one( custom_mdb_version: str, ) -> MongoDBMulti: resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi.yaml"), + yaml_fixture("mongodbmulticluster-multi.yaml"), "multi-replica-set-one", namespace, # the project configmap should be created in the central cluster. @@ -202,38 +185,20 @@ def mongodb_multi_one( return resource.update() def test_mongodb_multi_one_running_state(self, mongodb_multi_one: MongoDBMulti): - # we might fail connection in the beginning since we set a custom dns in coredns - mongodb_multi_one.assert_reaches_phase(Phase.Running, ignore_errors=True, timeout=600) + testhelper.TestBackupForMongodb.test_mongodb_multi_one_running_state(self, mongodb_multi_one) @pytest.mark.flaky(reruns=100, reruns_delay=6) def test_add_test_data(self, mongodb_multi_one_collection): - mongodb_multi_one_collection.insert_one(TEST_DATA) + testhelper.TestBackupForMongodb.test_add_test_data(self, mongodb_multi_one_collection) def test_mdb_backed_up(self, project_one: OMTester): - project_one.wait_until_backup_snapshots_are_ready(expected_count=1) + testhelper.TestBackupForMongodb.test_mdb_backed_up(self, project_one) def test_change_mdb_data(self, mongodb_multi_one_collection): - now_millis = time_to_millis(datetime.datetime.now()) - print("\nCurrent time (millis): {}".format(now_millis)) - time.sleep(30) - mongodb_multi_one_collection.insert_one({"foo": "bar"}) + testhelper.TestBackupForMongodb.test_change_mdb_data(self, mongodb_multi_one_collection) def test_pit_restore(self, project_one: OMTester): - now_millis = time_to_millis(datetime.datetime.now()) - print("\nCurrent time (millis): {}".format(now_millis)) - - pit_datetme = datetime.datetime.now() - datetime.timedelta(seconds=15) - pit_millis = time_to_millis(pit_datetme) - print("Restoring back to the moment 15 seconds ago (millis): {}".format(pit_millis)) - - project_one.create_restore_job_pit(pit_millis) + testhelper.TestBackupForMongodb.test_pit_restore(self, project_one) def test_data_got_restored(self, mongodb_multi_one_collection): - assert_data_got_restored(TEST_DATA, mongodb_multi_one_collection, timeout=1200) - - -def time_to_millis(date_time) -> int: - """https://stackoverflow.com/a/11111177/614239""" - epoch = datetime.datetime.utcfromtimestamp(0) - pit_millis = (date_time - epoch).total_seconds() * 1000 - return pit_millis + testhelper.TestBackupForMongodb.test_data_got_restored(self, mongodb_multi_one_collection) diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/shared/__init__.py b/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/shared/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/shared/multicluster_appdb_s3_based_backup_restore.py b/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/shared/multicluster_appdb_s3_based_backup_restore.py new file mode 100644 index 
000000000..1190abdf0 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/shared/multicluster_appdb_s3_based_backup_restore.py @@ -0,0 +1,125 @@ +import datetime +import time + +import kubernetes.client +import pymongo +import pytest +from kubetester import create_or_update_configmap +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.omtester import OMTester +from kubetester.opsmanager import MongoDBOpsManager +from kubetester.phase import Phase +from tests.common.constants import ( + S3_BLOCKSTORE_NAME, + S3_OPLOG_NAME, + TEST_DATA, +) +from tests.conftest import assert_data_got_restored +from tests.constants import AWS_REGION + + +def create_project_config_map(om: MongoDBOpsManager, mdb_name, project_name, client, custom_ca): + name = f"{mdb_name}-config" + data = { + "baseUrl": om.om_status().get_url(), + "projectName": project_name, + "sslMMSCAConfigMap": custom_ca, + "orgId": "", + } + + create_or_update_configmap(om.namespace, name, data, client) + + +class TestOpsManagerCreation: + """ + name: Ops Manager successful creation with backup and oplog stores enabled + description: | + Creates an Ops Manager instance with backup enabled. + """ + + def test_create_om( + self, + ops_manager: MongoDBOpsManager, + ): + ops_manager["spec"]["backup"]["members"] = 1 + ops_manager.update() + + ops_manager.appdb_status().assert_reaches_phase(Phase.Running) + ops_manager.om_status().assert_reaches_phase(Phase.Running) + + def test_om_is_running( + self, + ops_manager: MongoDBOpsManager, + central_cluster_client: kubernetes.client.ApiClient, + ): + # at this point AppDB is used as the "metadatastore" + ops_manager.backup_status().assert_reaches_phase(Phase.Running, ignore_errors=True) + om_tester = ops_manager.get_om_tester(api_client=central_cluster_client) + om_tester.assert_healthiness() + + def test_add_metadatastore( + self, + multi_cluster_s3_replica_set: MongoDBMulti, + ops_manager: MongoDBOpsManager, + ): + multi_cluster_s3_replica_set.assert_reaches_phase(Phase.Running, timeout=1000) + + # configure metadatastore in om, use a dedicated MDB instead of AppDB + ops_manager.load() + ops_manager["spec"]["backup"]["s3Stores"][0]["mongodbResourceRef"] = {"name": multi_cluster_s3_replica_set.name} + ops_manager["spec"]["backup"]["s3OpLogStores"][0]["mongodbResourceRef"] = { + "name": multi_cluster_s3_replica_set.name + } + ops_manager.update() + + ops_manager.om_status().assert_reaches_phase(Phase.Running) + ops_manager.backup_status().assert_reaches_phase(Phase.Running, ignore_errors=True) + + def test_om_s3_stores( + self, + ops_manager: MongoDBOpsManager, + central_cluster_client: kubernetes.client.ApiClient, + ): + om_tester = ops_manager.get_om_tester(api_client=central_cluster_client) + om_tester.assert_s3_stores([{"id": S3_BLOCKSTORE_NAME, "s3RegionOverride": AWS_REGION}]) + om_tester.assert_oplog_s3_stores([{"id": S3_OPLOG_NAME, "s3RegionOverride": AWS_REGION}]) + + +class TestBackupForMongodb: + + def test_mongodb_multi_one_running_state(self, mongodb_multi_one: MongoDBMulti): + # the connection might fail in the beginning since we set a custom dns in coredns + mongodb_multi_one.assert_reaches_phase(Phase.Running, ignore_errors=True, timeout=600) + + @pytest.mark.flaky(reruns=100, reruns_delay=6) + def test_add_test_data(self, mongodb_multi_one_collection): + mongodb_multi_one_collection.insert_one(TEST_DATA) + + def test_mdb_backed_up(self, project_one: OMTester): + project_one.wait_until_backup_snapshots_are_ready(expected_count=1) + + def 
test_change_mdb_data(self, mongodb_multi_one_collection): + now_millis = time_to_millis(datetime.datetime.now()) + print("\nCurrent time (millis): {}".format(now_millis)) + time.sleep(30) + mongodb_multi_one_collection.insert_one({"foo": "bar"}) + + def test_pit_restore(self, project_one: OMTester): + now_millis = time_to_millis(datetime.datetime.now()) + print("\nCurrent time (millis): {}".format(now_millis)) + + pit_datetime = datetime.datetime.now() - datetime.timedelta(seconds=15) + pit_millis = time_to_millis(pit_datetime) + print("Restoring back to the moment 15 seconds ago (millis): {}".format(pit_millis)) + + project_one.create_restore_job_pit(pit_millis) + + def test_data_got_restored(self, mongodb_multi_one_collection): + assert_data_got_restored(TEST_DATA, mongodb_multi_one_collection, timeout=1200) + + +def time_to_millis(date_time) -> int: + """https://stackoverflow.com/a/11111177/614239""" + epoch = datetime.datetime.utcfromtimestamp(0) + pit_millis = (date_time - epoch).total_seconds() * 1000 + return pit_millis diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster_om/multicluster_om_appdb_no_mesh.py b/docker/mongodb-kubernetes-tests/tests/multicluster_om/multicluster_mongodbmulticluster_om_appdb_no_mesh.py similarity index 95% rename from docker/mongodb-kubernetes-tests/tests/multicluster_om/multicluster_om_appdb_no_mesh.py rename to docker/mongodb-kubernetes-tests/tests/multicluster_om/multicluster_mongodbmulticluster_om_appdb_no_mesh.py index 34d05184f..46f233601 100644 --- a/docker/mongodb-kubernetes-tests/tests/multicluster_om/multicluster_om_appdb_no_mesh.py +++ b/docker/mongodb-kubernetes-tests/tests/multicluster_om/multicluster_mongodbmulticluster_om_appdb_no_mesh.py @@ -127,7 +127,7 @@ def s3_bucket_oplog(namespace: str, aws_s3_client: AwsS3Client) -> str: return next(create_s3_bucket_oplog(namespace, aws_s3_client, api_client=get_central_cluster_client())) -@mark.e2e_multi_cluster_om_appdb_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh def test_configure_dns(disable_istio): host_mappings = [ ( @@ -198,12 +198,12 @@ ) -@mark.e2e_multi_cluster_om_appdb_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh def test_disable_istio(disable_istio): logger.info("Istio disabled") -@mark.e2e_multi_cluster_om_appdb_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh def test_configure_nginx(namespace: str): cluster_client = get_central_cluster_client() @@ -424,7 +424,7 @@ def mongodb_multi( ops_manager: MongoDBOpsManager, multi_cluster_issuer_ca_configmap: str, ) -> MongoDBMulti: - resource = MongoDBMulti.from_yaml(_fixture("mongodb-multi.yaml"), MDB_RESOURCE, namespace) + resource = MongoDBMulti.from_yaml(_fixture("mongodbmulticluster-multi.yaml"), MDB_RESOURCE, namespace) resource.set_version(ensure_ent_version(custom_mdb_version)) resource.set_architecture_annotation() resource.api = kubernetes.client.CustomObjectsApi(central_cluster_client) @@ -535,12 +535,12 @@ def create_project_config_map(om: MongoDBOpsManager, mdb_name, project_name, cli create_or_update_configmap(om.namespace, name, data, client) -@mark.e2e_multi_cluster_om_appdb_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh def test_deploy_operator(multi_cluster_operator_with_monitored_appdb: Operator): multi_cluster_operator_with_monitored_appdb.assert_is_running() -@mark.e2e_multi_cluster_om_appdb_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh def 
test_deploy_ops_manager(ops_manager: MongoDBOpsManager): ops_manager.update() ops_manager.om_status().assert_reaches_phase(Phase.Running) @@ -549,26 +549,26 @@ def test_deploy_ops_manager(ops_manager: MongoDBOpsManager): ops_manager.assert_appdb_monitoring_group_was_created() -@mark.e2e_multi_cluster_om_appdb_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh def test_create_mongodb_multi(server_certs: str, mongodb_multi: MongoDBMulti): mongodb_multi.update() mongodb_multi.assert_reaches_phase(Phase.Running, timeout=2400, ignore_errors=True) @skip_if_local -@mark.e2e_multi_cluster_om_appdb_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh @pytest.mark.flaky(reruns=100, reruns_delay=6) def test_add_test_data(mongodb_multi_collection): mongodb_multi_collection.insert_one(TEST_DATA) -@mark.e2e_multi_cluster_om_appdb_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh def test_mdb_backed_up(ops_manager: MongoDBOpsManager): ops_manager.get_om_tester(project_name="mongodb").wait_until_backup_snapshots_are_ready(expected_count=1) @skip_if_local -@mark.e2e_multi_cluster_om_appdb_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh def test_change_mdb_data(mongodb_multi_collection): now_millis = time_to_millis(datetime.datetime.now(tz=datetime.UTC)) print("\nCurrent time (millis): {}".format(now_millis)) @@ -576,7 +576,7 @@ def test_change_mdb_data(mongodb_multi_collection): mongodb_multi_collection.insert_one({"foo": "bar"}) -@mark.e2e_multi_cluster_om_appdb_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh def test_pit_restore(ops_manager: MongoDBOpsManager): now_millis = time_to_millis(datetime.datetime.now(tz=datetime.UTC)) print("\nCurrent time (millis): {}".format(now_millis)) @@ -588,7 +588,7 @@ def test_pit_restore(ops_manager: MongoDBOpsManager): ops_manager.get_om_tester(project_name="mongodb").create_restore_job_pit(pit_millis) -@mark.e2e_multi_cluster_om_appdb_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh def test_mdb_ready(mongodb_multi: MongoDBMulti): # Note: that we are not waiting for the restore jobs to get finished as PIT restore jobs get FINISHED status # right away. 
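For reference, the point-in-time target passed to create_restore_job_pit above is plain epoch milliseconds. A minimal standalone sketch of the same arithmetic (the helper name pit_millis_15s_ago is illustrative), assuming timezone-aware datetimes as in the tz=datetime.UTC calls in this file:

import datetime

def pit_millis_15s_ago() -> int:
    # Milliseconds since the Unix epoch, 15 seconds before now (UTC),
    # mirroring the computation in test_pit_restore.
    epoch = datetime.datetime.fromtimestamp(0, tz=datetime.timezone.utc)
    pit = datetime.datetime.now(tz=datetime.timezone.utc) - datetime.timedelta(seconds=15)
    return int((pit - epoch).total_seconds() * 1000)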
@@ -598,7 +598,7 @@ def test_mdb_ready(mongodb_multi: MongoDBMulti): @skip_if_local -@mark.e2e_multi_cluster_om_appdb_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh def test_data_got_restored(mongodb_multi_collection): assert_data_got_restored(TEST_DATA, mongodb_multi_collection, timeout=600) @@ -609,7 +609,7 @@ def time_to_millis(date_time) -> int: return pit_millis -@mark.e2e_multi_cluster_om_appdb_no_mesh +@mark.e2e_mongodbmulticluster_multi_cluster_om_appdb_no_mesh def test_telemetry_configmap(namespace: str): config = KubernetesTester.read_configmap(namespace, TELEMETRY_CONFIGMAP_NAME) diff --git a/docker/mongodb-kubernetes-tests/tests/upgrades/meko_mck_upgrade.py b/docker/mongodb-kubernetes-tests/tests/upgrades/meko_mongodbmulticluster_mck_upgrade.py similarity index 93% rename from docker/mongodb-kubernetes-tests/tests/upgrades/meko_mck_upgrade.py rename to docker/mongodb-kubernetes-tests/tests/upgrades/meko_mongodbmulticluster_mck_upgrade.py index 3520b90cc..41b4c0dfb 100644 --- a/docker/mongodb-kubernetes-tests/tests/upgrades/meko_mck_upgrade.py +++ b/docker/mongodb-kubernetes-tests/tests/upgrades/meko_mongodbmulticluster_mck_upgrade.py @@ -54,7 +54,7 @@ def replica_set( ) -> MongoDB: if is_multi_cluster(): resource = MongoDBMulti.from_yaml( - yaml_fixture("mongodb-multi-cluster.yaml"), + yaml_fixture("mongodbmulticluster-multi-cluster.yaml"), "multi-replica-set", namespace, ) @@ -100,26 +100,26 @@ def replica_set( # Installs the latest officially released version of MEKO, from Quay -@mark.e2e_meko_mck_upgrade +@mark.e2e_mongodbmulticluster_meko_mck_upgrade def test_install_latest_official_operator(official_meko_operator: Operator, namespace: str): official_meko_operator.assert_is_running() # Dumping deployments in logs ensures we are using the correct operator version log_deployments_info(namespace) -@mark.e2e_meko_mck_upgrade +@mark.e2e_mongodbmulticluster_meko_mck_upgrade def test_install_replicaset(replica_set: MongoDB): replica_set.assert_reaches_phase(phase=Phase.Running, timeout=1000 if is_multi_cluster() else 600) -@mark.e2e_meko_mck_upgrade +@mark.e2e_mongodbmulticluster_meko_mck_upgrade def test_downscale_latest_official_operator(namespace: str): deployment_name = LEGACY_MULTI_CLUSTER_OPERATOR_NAME if is_multi_cluster() else LEGACY_OPERATOR_NAME downscale_operator_deployment(deployment_name, namespace) # Upgrade to MCK -@mark.e2e_meko_mck_upgrade +@mark.e2e_mongodbmulticluster_meko_mck_upgrade def test_upgrade_operator( namespace: str, operator_installation_config, @@ -151,19 +151,19 @@ def test_upgrade_operator( log_deployments_info(namespace) -@mark.e2e_meko_mck_upgrade +@mark.e2e_mongodbmulticluster_meko_mck_upgrade def test_replicaset_reconciled(replica_set: MongoDB): replica_set.assert_abandons_phase(phase=Phase.Running, timeout=300) replica_set.assert_reaches_phase(phase=Phase.Running, timeout=800) -@mark.e2e_meko_mck_upgrade +@mark.e2e_mongodbmulticluster_meko_mck_upgrade def test_uninstall_latest_official_operator(namespace: str): helm_uninstall(LEGACY_MULTI_CLUSTER_OPERATOR_NAME if is_multi_cluster() else LEGACY_OPERATOR_NAME) log_deployments_info(namespace) -@mark.e2e_meko_mck_upgrade +@mark.e2e_mongodbmulticluster_meko_mck_upgrade def test_operator_still_running(namespace: str, central_cluster_client: client.ApiClient, member_cluster_names): operator_name = MULTI_CLUSTER_OPERATOR_NAME if is_multi_cluster() else OPERATOR_NAME operator_instance = Operator(